1 diff -urNp linux-2.6.34.1/Documentation/dontdiff linux-2.6.34.1/Documentation/dontdiff
2 --- linux-2.6.34.1/Documentation/dontdiff 2010-07-05 14:24:10.000000000 -0400
3 +++ linux-2.6.34.1/Documentation/dontdiff 2010-07-07 09:04:49.000000000 -0400
4 @@ -3,6 +3,7 @@
5 *.bin
6 *.cpio
7 *.csp
8 +*.dbg
9 *.dsp
10 *.dvi
11 *.elf
12 @@ -40,6 +41,7 @@
13 *.ver
14 *.xml
15 *_MODULES
16 +*_reg_safe.h
17 *_vga16.c
18 *~
19 *.9
20 @@ -49,11 +51,16 @@
21 53c700_d.h
22 CVS
23 ChangeSet
24 +GPATH
25 +GRTAGS
26 +GSYMS
27 +GTAGS
28 Image
29 Kerntypes
30 Module.markers
31 Module.symvers
32 PENDING
33 +PERF*
34 SCCS
35 System.map*
36 TAGS
37 @@ -76,7 +83,10 @@ btfixupprep
38 build
39 bvmlinux
40 bzImage*
41 +capflags.c
42 classlist.h*
43 +clut_vga16.c
44 +common-cmds.h
45 comp*.log
46 compile.h*
47 conf
48 @@ -106,13 +116,15 @@ generated
49 genheaders
50 genksyms
51 *_gray256.c
52 +hash
53 ihex2fw
54 ikconfig.h*
55 +inat-tables.c
56 initramfs_data.cpio
57 +initramfs_data.cpio.bz2
58 initramfs_data.cpio.gz
59 initramfs_list
60 kallsyms
61 -kconfig
62 keywords.c
63 ksym.c*
64 ksym.h*
65 @@ -136,10 +148,13 @@ mkboot
66 mkbugboot
67 mkcpustr
68 mkdep
69 +mkpiggy
70 mkprep
71 +mkregtable
72 mktables
73 mktree
74 modpost
75 +modules.builtin
76 modules.order
77 modversions.h*
78 ncscope.*
79 @@ -152,6 +167,7 @@ patches*
80 pca200e.bin
81 pca200e_ecd.bin2
82 piggy.gz
83 +piggy.S
84 piggyback
85 pnmtologo
86 ppc_defs.h*
87 @@ -166,6 +182,7 @@ setup
88 setup.bin
89 setup.elf
90 sImage
91 +slabinfo
92 sm_tbl*
93 split-include
94 syscalltab.h
95 @@ -189,14 +206,20 @@ version.h*
96 vmlinux
97 vmlinux-*
98 vmlinux.aout
99 +vmlinux.bin.all
100 +vmlinux.bin.bz2
101 vmlinux.lds
102 +vmlinux.relocs
103 +voffset.h
104 vsyscall.lds
105 vsyscall_32.lds
106 wanxlfw.inc
107 uImage
108 unifdef
109 +utsrelease.h
110 wakeup.bin
111 wakeup.elf
112 wakeup.lds
113 zImage*
114 zconf.hash.c
115 +zoffset.h
116 diff -urNp linux-2.6.34.1/Documentation/filesystems/sysfs.txt linux-2.6.34.1/Documentation/filesystems/sysfs.txt
117 --- linux-2.6.34.1/Documentation/filesystems/sysfs.txt 2010-07-05 14:24:10.000000000 -0400
118 +++ linux-2.6.34.1/Documentation/filesystems/sysfs.txt 2010-07-07 09:04:50.000000000 -0400
119 @@ -123,8 +123,8 @@ set of sysfs operations for forwarding r
120 show and store methods of the attribute owners.
121
122 struct sysfs_ops {
123 - ssize_t (*show)(struct kobject *, struct attribute *, char *);
124 - ssize_t (*store)(struct kobject *, struct attribute *, const char *);
125 + ssize_t (* const show)(struct kobject *, struct attribute *, char *);
126 + ssize_t (* const store)(struct kobject *, struct attribute *, const char *);
127 };
128
129 [ Subsystems should have already defined a struct kobj_type as a
130 diff -urNp linux-2.6.34.1/Documentation/kernel-parameters.txt linux-2.6.34.1/Documentation/kernel-parameters.txt
131 --- linux-2.6.34.1/Documentation/kernel-parameters.txt 2010-07-05 14:24:10.000000000 -0400
132 +++ linux-2.6.34.1/Documentation/kernel-parameters.txt 2010-07-07 09:04:50.000000000 -0400
133 @@ -1875,6 +1875,12 @@ and is between 256 and 4096 characters.
134 the specified number of seconds. This is to be used if
135 your oopses keep scrolling off the screen.
136
137 + pax_nouderef [X86-32] disables UDEREF. Most likely needed under certain
138 + virtualization environments that don't cope well with the
139 + expand down segment used by UDEREF on X86-32.
140 +
141 + pax_softmode= [X86-32] 0/1 to disable/enable PaX softmode on boot already.
142 +
143 pcbit= [HW,ISDN]
144
145 pcd. [PARIDE]
146 diff -urNp linux-2.6.34.1/Makefile linux-2.6.34.1/Makefile
147 --- linux-2.6.34.1/Makefile 2010-07-05 14:24:10.000000000 -0400
148 +++ linux-2.6.34.1/Makefile 2010-07-07 09:04:57.000000000 -0400
149 @@ -227,8 +227,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
150
151 HOSTCC = gcc
152 HOSTCXX = g++
153 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
154 -HOSTCXXFLAGS = -O2
155 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
156 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
157
158 # Decide whether to build built-in, modular, or both.
159 # Normally, just do built-in.
160 @@ -650,7 +650,7 @@ export mod_strip_cmd
161
162
163 ifeq ($(KBUILD_EXTMOD),)
164 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
165 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
166
167 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
168 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
169 diff -urNp linux-2.6.34.1/arch/alpha/include/asm/dma-mapping.h linux-2.6.34.1/arch/alpha/include/asm/dma-mapping.h
170 --- linux-2.6.34.1/arch/alpha/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
171 +++ linux-2.6.34.1/arch/alpha/include/asm/dma-mapping.h 2010-07-07 09:04:42.000000000 -0400
172 @@ -3,9 +3,9 @@
173
174 #include <linux/dma-attrs.h>
175
176 -extern struct dma_map_ops *dma_ops;
177 +extern const struct dma_map_ops *dma_ops;
178
179 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
180 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
181 {
182 return dma_ops;
183 }
184 diff -urNp linux-2.6.34.1/arch/alpha/include/asm/elf.h linux-2.6.34.1/arch/alpha/include/asm/elf.h
185 --- linux-2.6.34.1/arch/alpha/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
186 +++ linux-2.6.34.1/arch/alpha/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
187 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
188
189 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
190
191 +#ifdef CONFIG_PAX_ASLR
192 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
193 +
194 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
195 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
196 +#endif
197 +
198 /* $0 is set by ld.so to a pointer to a function which might be
199 registered using atexit. This provides a mean for the dynamic
200 linker to call DT_FINI functions for shared libraries that have
201 diff -urNp linux-2.6.34.1/arch/alpha/include/asm/pgtable.h linux-2.6.34.1/arch/alpha/include/asm/pgtable.h
202 --- linux-2.6.34.1/arch/alpha/include/asm/pgtable.h 2010-07-05 14:24:10.000000000 -0400
203 +++ linux-2.6.34.1/arch/alpha/include/asm/pgtable.h 2010-07-07 09:04:42.000000000 -0400
204 @@ -101,6 +101,17 @@ struct vm_area_struct;
205 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
206 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
207 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
208 +
209 +#ifdef CONFIG_PAX_PAGEEXEC
210 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
211 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
212 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
213 +#else
214 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
215 +# define PAGE_COPY_NOEXEC PAGE_COPY
216 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
217 +#endif
218 +
219 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
220
221 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
222 diff -urNp linux-2.6.34.1/arch/alpha/kernel/module.c linux-2.6.34.1/arch/alpha/kernel/module.c
223 --- linux-2.6.34.1/arch/alpha/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
224 +++ linux-2.6.34.1/arch/alpha/kernel/module.c 2010-07-07 09:04:42.000000000 -0400
225 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
226
227 /* The small sections were sorted to the end of the segment.
228 The following should definitely cover them. */
229 - gp = (u64)me->module_core + me->core_size - 0x8000;
230 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
231 got = sechdrs[me->arch.gotsecindex].sh_addr;
232
233 for (i = 0; i < n; i++) {
234 diff -urNp linux-2.6.34.1/arch/alpha/kernel/osf_sys.c linux-2.6.34.1/arch/alpha/kernel/osf_sys.c
235 --- linux-2.6.34.1/arch/alpha/kernel/osf_sys.c 2010-07-05 14:24:10.000000000 -0400
236 +++ linux-2.6.34.1/arch/alpha/kernel/osf_sys.c 2010-07-07 09:04:42.000000000 -0400
237 @@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp
238 merely specific addresses, but regions of memory -- perhaps
239 this feature should be incorporated into all ports? */
240
241 +#ifdef CONFIG_PAX_RANDMMAP
242 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
243 +#endif
244 +
245 if (addr) {
246 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
247 if (addr != (unsigned long) -ENOMEM)
248 @@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp
249 }
250
251 /* Next, try allocating at TASK_UNMAPPED_BASE. */
252 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
253 - len, limit);
254 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
255 +
256 if (addr != (unsigned long) -ENOMEM)
257 return addr;
258
259 diff -urNp linux-2.6.34.1/arch/alpha/kernel/pci-noop.c linux-2.6.34.1/arch/alpha/kernel/pci-noop.c
260 --- linux-2.6.34.1/arch/alpha/kernel/pci-noop.c 2010-07-05 14:24:10.000000000 -0400
261 +++ linux-2.6.34.1/arch/alpha/kernel/pci-noop.c 2010-07-07 09:04:42.000000000 -0400
262 @@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
263 return 0;
264 }
265
266 -struct dma_map_ops alpha_noop_ops = {
267 +const struct dma_map_ops alpha_noop_ops = {
268 .alloc_coherent = alpha_noop_alloc_coherent,
269 .free_coherent = alpha_noop_free_coherent,
270 .map_page = alpha_noop_map_page,
271 @@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = {
272 .set_dma_mask = alpha_noop_set_mask,
273 };
274
275 -struct dma_map_ops *dma_ops = &alpha_noop_ops;
276 +const struct dma_map_ops *dma_ops = &alpha_noop_ops;
277 EXPORT_SYMBOL(dma_ops);
278
279 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
280 diff -urNp linux-2.6.34.1/arch/alpha/kernel/pci_iommu.c linux-2.6.34.1/arch/alpha/kernel/pci_iommu.c
281 --- linux-2.6.34.1/arch/alpha/kernel/pci_iommu.c 2010-07-05 14:24:10.000000000 -0400
282 +++ linux-2.6.34.1/arch/alpha/kernel/pci_iommu.c 2010-07-07 09:04:42.000000000 -0400
283 @@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
284 return 0;
285 }
286
287 -struct dma_map_ops alpha_pci_ops = {
288 +const struct dma_map_ops alpha_pci_ops = {
289 .alloc_coherent = alpha_pci_alloc_coherent,
290 .free_coherent = alpha_pci_free_coherent,
291 .map_page = alpha_pci_map_page,
292 @@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
293 .set_dma_mask = alpha_pci_set_mask,
294 };
295
296 -struct dma_map_ops *dma_ops = &alpha_pci_ops;
297 +const struct dma_map_ops *dma_ops = &alpha_pci_ops;
298 EXPORT_SYMBOL(dma_ops);
299 diff -urNp linux-2.6.34.1/arch/alpha/mm/fault.c linux-2.6.34.1/arch/alpha/mm/fault.c
300 --- linux-2.6.34.1/arch/alpha/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
301 +++ linux-2.6.34.1/arch/alpha/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
302 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
303 __reload_thread(pcb);
304 }
305
306 +#ifdef CONFIG_PAX_PAGEEXEC
307 +/*
308 + * PaX: decide what to do with offenders (regs->pc = fault address)
309 + *
310 + * returns 1 when task should be killed
311 + * 2 when patched PLT trampoline was detected
312 + * 3 when unpatched PLT trampoline was detected
313 + */
314 +static int pax_handle_fetch_fault(struct pt_regs *regs)
315 +{
316 +
317 +#ifdef CONFIG_PAX_EMUPLT
318 + int err;
319 +
320 + do { /* PaX: patched PLT emulation #1 */
321 + unsigned int ldah, ldq, jmp;
322 +
323 + err = get_user(ldah, (unsigned int *)regs->pc);
324 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
325 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
326 +
327 + if (err)
328 + break;
329 +
330 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
331 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
332 + jmp == 0x6BFB0000U)
333 + {
334 + unsigned long r27, addr;
335 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
336 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
337 +
338 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
339 + err = get_user(r27, (unsigned long *)addr);
340 + if (err)
341 + break;
342 +
343 + regs->r27 = r27;
344 + regs->pc = r27;
345 + return 2;
346 + }
347 + } while (0);
348 +
349 + do { /* PaX: patched PLT emulation #2 */
350 + unsigned int ldah, lda, br;
351 +
352 + err = get_user(ldah, (unsigned int *)regs->pc);
353 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
354 + err |= get_user(br, (unsigned int *)(regs->pc+8));
355 +
356 + if (err)
357 + break;
358 +
359 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
360 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
361 + (br & 0xFFE00000U) == 0xC3E00000U)
362 + {
363 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
364 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
365 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
366 +
367 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
368 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
369 + return 2;
370 + }
371 + } while (0);
372 +
373 + do { /* PaX: unpatched PLT emulation */
374 + unsigned int br;
375 +
376 + err = get_user(br, (unsigned int *)regs->pc);
377 +
378 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
379 + unsigned int br2, ldq, nop, jmp;
380 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
381 +
382 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
383 + err = get_user(br2, (unsigned int *)addr);
384 + err |= get_user(ldq, (unsigned int *)(addr+4));
385 + err |= get_user(nop, (unsigned int *)(addr+8));
386 + err |= get_user(jmp, (unsigned int *)(addr+12));
387 + err |= get_user(resolver, (unsigned long *)(addr+16));
388 +
389 + if (err)
390 + break;
391 +
392 + if (br2 == 0xC3600000U &&
393 + ldq == 0xA77B000CU &&
394 + nop == 0x47FF041FU &&
395 + jmp == 0x6B7B0000U)
396 + {
397 + regs->r28 = regs->pc+4;
398 + regs->r27 = addr+16;
399 + regs->pc = resolver;
400 + return 3;
401 + }
402 + }
403 + } while (0);
404 +#endif
405 +
406 + return 1;
407 +}
408 +
409 +void pax_report_insns(void *pc, void *sp)
410 +{
411 + unsigned long i;
412 +
413 + printk(KERN_ERR "PAX: bytes at PC: ");
414 + for (i = 0; i < 5; i++) {
415 + unsigned int c;
416 + if (get_user(c, (unsigned int *)pc+i))
417 + printk(KERN_CONT "???????? ");
418 + else
419 + printk(KERN_CONT "%08x ", c);
420 + }
421 + printk("\n");
422 +}
423 +#endif
424
425 /*
426 * This routine handles page faults. It determines the address,
427 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
428 good_area:
429 si_code = SEGV_ACCERR;
430 if (cause < 0) {
431 - if (!(vma->vm_flags & VM_EXEC))
432 + if (!(vma->vm_flags & VM_EXEC)) {
433 +
434 +#ifdef CONFIG_PAX_PAGEEXEC
435 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
436 + goto bad_area;
437 +
438 + up_read(&mm->mmap_sem);
439 + switch (pax_handle_fetch_fault(regs)) {
440 +
441 +#ifdef CONFIG_PAX_EMUPLT
442 + case 2:
443 + case 3:
444 + return;
445 +#endif
446 +
447 + }
448 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
449 + do_group_exit(SIGKILL);
450 +#else
451 goto bad_area;
452 +#endif
453 +
454 + }
455 } else if (!cause) {
456 /* Allow reads even for write-only mappings */
457 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
458 diff -urNp linux-2.6.34.1/arch/arm/include/asm/elf.h linux-2.6.34.1/arch/arm/include/asm/elf.h
459 --- linux-2.6.34.1/arch/arm/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
460 +++ linux-2.6.34.1/arch/arm/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
461 @@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t
462 the loader. We need to make sure that it is out of the way of the program
463 that it will "exec", and that there is sufficient room for the brk. */
464
465 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
466 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
467 +
468 +#ifdef CONFIG_PAX_ASLR
469 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
470 +
471 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
472 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
473 +#endif
474
475 /* When the program starts, a1 contains a pointer to a function to be
476 registered with atexit, as per the SVR4 ABI. A value of 0 means we
477 diff -urNp linux-2.6.34.1/arch/arm/include/asm/kmap_types.h linux-2.6.34.1/arch/arm/include/asm/kmap_types.h
478 --- linux-2.6.34.1/arch/arm/include/asm/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
479 +++ linux-2.6.34.1/arch/arm/include/asm/kmap_types.h 2010-07-07 09:04:42.000000000 -0400
480 @@ -20,6 +20,7 @@ enum km_type {
481 KM_SOFTIRQ1,
482 KM_L1_CACHE,
483 KM_L2_CACHE,
484 + KM_CLEARPAGE,
485 KM_TYPE_NR
486 };
487
488 diff -urNp linux-2.6.34.1/arch/arm/include/asm/uaccess.h linux-2.6.34.1/arch/arm/include/asm/uaccess.h
489 --- linux-2.6.34.1/arch/arm/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
490 +++ linux-2.6.34.1/arch/arm/include/asm/uaccess.h 2010-07-07 09:04:42.000000000 -0400
491 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
492
493 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
494 {
495 + if ((long)n < 0)
496 + return n;
497 +
498 if (access_ok(VERIFY_READ, from, n))
499 n = __copy_from_user(to, from, n);
500 else /* security hole - plug it */
501 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
502
503 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
504 {
505 + if ((long)n < 0)
506 + return n;
507 +
508 if (access_ok(VERIFY_WRITE, to, n))
509 n = __copy_to_user(to, from, n);
510 return n;
511 diff -urNp linux-2.6.34.1/arch/arm/kernel/kgdb.c linux-2.6.34.1/arch/arm/kernel/kgdb.c
512 --- linux-2.6.34.1/arch/arm/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
513 +++ linux-2.6.34.1/arch/arm/kernel/kgdb.c 2010-07-07 09:04:42.000000000 -0400
514 @@ -203,7 +203,7 @@ void kgdb_arch_exit(void)
515 * and we handle the normal undef case within the do_undefinstr
516 * handler.
517 */
518 -struct kgdb_arch arch_kgdb_ops = {
519 +const struct kgdb_arch arch_kgdb_ops = {
520 #ifndef __ARMEB__
521 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
522 #else /* ! __ARMEB__ */
523 diff -urNp linux-2.6.34.1/arch/arm/mach-at91/pm.c linux-2.6.34.1/arch/arm/mach-at91/pm.c
524 --- linux-2.6.34.1/arch/arm/mach-at91/pm.c 2010-07-05 14:24:10.000000000 -0400
525 +++ linux-2.6.34.1/arch/arm/mach-at91/pm.c 2010-07-07 09:04:42.000000000 -0400
526 @@ -294,7 +294,7 @@ static void at91_pm_end(void)
527 }
528
529
530 -static struct platform_suspend_ops at91_pm_ops ={
531 +static const struct platform_suspend_ops at91_pm_ops ={
532 .valid = at91_pm_valid_state,
533 .begin = at91_pm_begin,
534 .enter = at91_pm_enter,
535 diff -urNp linux-2.6.34.1/arch/arm/mach-davinci/pm.c linux-2.6.34.1/arch/arm/mach-davinci/pm.c
536 --- linux-2.6.34.1/arch/arm/mach-davinci/pm.c 2010-07-05 14:24:10.000000000 -0400
537 +++ linux-2.6.34.1/arch/arm/mach-davinci/pm.c 2010-07-07 09:04:42.000000000 -0400
538 @@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_stat
539 return ret;
540 }
541
542 -static struct platform_suspend_ops davinci_pm_ops = {
543 +static const struct platform_suspend_ops davinci_pm_ops = {
544 .enter = davinci_pm_enter,
545 .valid = suspend_valid_only_mem,
546 };
547 diff -urNp linux-2.6.34.1/arch/arm/mach-omap1/pm.c linux-2.6.34.1/arch/arm/mach-omap1/pm.c
548 --- linux-2.6.34.1/arch/arm/mach-omap1/pm.c 2010-07-05 14:24:10.000000000 -0400
549 +++ linux-2.6.34.1/arch/arm/mach-omap1/pm.c 2010-07-07 09:04:42.000000000 -0400
550 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
551
552
553
554 -static struct platform_suspend_ops omap_pm_ops ={
555 +static const struct platform_suspend_ops omap_pm_ops ={
556 .prepare = omap_pm_prepare,
557 .enter = omap_pm_enter,
558 .finish = omap_pm_finish,
559 diff -urNp linux-2.6.34.1/arch/arm/mach-omap2/pm24xx.c linux-2.6.34.1/arch/arm/mach-omap2/pm24xx.c
560 --- linux-2.6.34.1/arch/arm/mach-omap2/pm24xx.c 2010-07-05 14:24:10.000000000 -0400
561 +++ linux-2.6.34.1/arch/arm/mach-omap2/pm24xx.c 2010-07-07 09:04:42.000000000 -0400
562 @@ -324,7 +324,7 @@ static void omap2_pm_finish(void)
563 enable_hlt();
564 }
565
566 -static struct platform_suspend_ops omap_pm_ops = {
567 +static const struct platform_suspend_ops omap_pm_ops = {
568 .prepare = omap2_pm_prepare,
569 .enter = omap2_pm_enter,
570 .finish = omap2_pm_finish,
571 diff -urNp linux-2.6.34.1/arch/arm/mach-omap2/pm34xx.c linux-2.6.34.1/arch/arm/mach-omap2/pm34xx.c
572 --- linux-2.6.34.1/arch/arm/mach-omap2/pm34xx.c 2010-07-05 14:24:10.000000000 -0400
573 +++ linux-2.6.34.1/arch/arm/mach-omap2/pm34xx.c 2010-07-07 09:04:42.000000000 -0400
574 @@ -651,7 +651,7 @@ static void omap3_pm_end(void)
575 return;
576 }
577
578 -static struct platform_suspend_ops omap_pm_ops = {
579 +static const struct platform_suspend_ops omap_pm_ops = {
580 .begin = omap3_pm_begin,
581 .end = omap3_pm_end,
582 .prepare = omap3_pm_prepare,
583 diff -urNp linux-2.6.34.1/arch/arm/mach-pnx4008/pm.c linux-2.6.34.1/arch/arm/mach-pnx4008/pm.c
584 --- linux-2.6.34.1/arch/arm/mach-pnx4008/pm.c 2010-07-05 14:24:10.000000000 -0400
585 +++ linux-2.6.34.1/arch/arm/mach-pnx4008/pm.c 2010-07-07 09:04:42.000000000 -0400
586 @@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_stat
587 (state == PM_SUSPEND_MEM);
588 }
589
590 -static struct platform_suspend_ops pnx4008_pm_ops = {
591 +static const struct platform_suspend_ops pnx4008_pm_ops = {
592 .enter = pnx4008_pm_enter,
593 .valid = pnx4008_pm_valid,
594 };
595 diff -urNp linux-2.6.34.1/arch/arm/mach-pxa/pm.c linux-2.6.34.1/arch/arm/mach-pxa/pm.c
596 --- linux-2.6.34.1/arch/arm/mach-pxa/pm.c 2010-07-05 14:24:10.000000000 -0400
597 +++ linux-2.6.34.1/arch/arm/mach-pxa/pm.c 2010-07-07 09:04:42.000000000 -0400
598 @@ -96,7 +96,7 @@ void pxa_pm_finish(void)
599 pxa_cpu_pm_fns->finish();
600 }
601
602 -static struct platform_suspend_ops pxa_pm_ops = {
603 +static const struct platform_suspend_ops pxa_pm_ops = {
604 .valid = pxa_pm_valid,
605 .enter = pxa_pm_enter,
606 .prepare = pxa_pm_prepare,
607 diff -urNp linux-2.6.34.1/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.34.1/arch/arm/mach-pxa/sharpsl_pm.c
608 --- linux-2.6.34.1/arch/arm/mach-pxa/sharpsl_pm.c 2010-07-05 14:24:10.000000000 -0400
609 +++ linux-2.6.34.1/arch/arm/mach-pxa/sharpsl_pm.c 2010-07-07 09:04:42.000000000 -0400
610 @@ -892,7 +892,7 @@ static void sharpsl_apm_get_power_status
611 }
612
613 #ifdef CONFIG_PM
614 -static struct platform_suspend_ops sharpsl_pm_ops = {
615 +static const struct platform_suspend_ops sharpsl_pm_ops = {
616 .prepare = pxa_pm_prepare,
617 .finish = pxa_pm_finish,
618 .enter = corgi_pxa_pm_enter,
619 diff -urNp linux-2.6.34.1/arch/arm/mach-sa1100/pm.c linux-2.6.34.1/arch/arm/mach-sa1100/pm.c
620 --- linux-2.6.34.1/arch/arm/mach-sa1100/pm.c 2010-07-05 14:24:10.000000000 -0400
621 +++ linux-2.6.34.1/arch/arm/mach-sa1100/pm.c 2010-07-07 09:04:42.000000000 -0400
622 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
623 return virt_to_phys(sp);
624 }
625
626 -static struct platform_suspend_ops sa11x0_pm_ops = {
627 +static const struct platform_suspend_ops sa11x0_pm_ops = {
628 .enter = sa11x0_pm_enter,
629 .valid = suspend_valid_only_mem,
630 };
631 diff -urNp linux-2.6.34.1/arch/arm/mm/fault.c linux-2.6.34.1/arch/arm/mm/fault.c
632 --- linux-2.6.34.1/arch/arm/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
633 +++ linux-2.6.34.1/arch/arm/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
634 @@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk,
635 }
636 #endif
637
638 +#ifdef CONFIG_PAX_PAGEEXEC
639 + if (fsr & FSR_LNX_PF) {
640 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
641 + do_group_exit(SIGKILL);
642 + }
643 +#endif
644 +
645 tsk->thread.address = addr;
646 tsk->thread.error_code = fsr;
647 tsk->thread.trap_no = 14;
648 @@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsign
649 }
650 #endif /* CONFIG_MMU */
651
652 +#ifdef CONFIG_PAX_PAGEEXEC
653 +void pax_report_insns(void *pc, void *sp)
654 +{
655 + long i;
656 +
657 + printk(KERN_ERR "PAX: bytes at PC: ");
658 + for (i = 0; i < 20; i++) {
659 + unsigned char c;
660 + if (get_user(c, (__force unsigned char __user *)pc+i))
661 + printk(KERN_CONT "?? ");
662 + else
663 + printk(KERN_CONT "%02x ", c);
664 + }
665 + printk("\n");
666 +
667 + printk(KERN_ERR "PAX: bytes at SP-4: ");
668 + for (i = -1; i < 20; i++) {
669 + unsigned long c;
670 + if (get_user(c, (__force unsigned long __user *)sp+i))
671 + printk(KERN_CONT "???????? ");
672 + else
673 + printk(KERN_CONT "%08lx ", c);
674 + }
675 + printk("\n");
676 +}
677 +#endif
678 +
679 /*
680 * First Level Translation Fault Handler
681 *
682 diff -urNp linux-2.6.34.1/arch/arm/mm/mmap.c linux-2.6.34.1/arch/arm/mm/mmap.c
683 --- linux-2.6.34.1/arch/arm/mm/mmap.c 2010-07-05 14:24:10.000000000 -0400
684 +++ linux-2.6.34.1/arch/arm/mm/mmap.c 2010-07-07 09:04:42.000000000 -0400
685 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
686 if (len > TASK_SIZE)
687 return -ENOMEM;
688
689 +#ifdef CONFIG_PAX_RANDMMAP
690 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
691 +#endif
692 +
693 if (addr) {
694 if (do_align)
695 addr = COLOUR_ALIGN(addr, pgoff);
696 @@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp
697 return addr;
698 }
699 if (len > mm->cached_hole_size) {
700 - start_addr = addr = mm->free_area_cache;
701 + start_addr = addr = mm->free_area_cache;
702 } else {
703 - start_addr = addr = TASK_UNMAPPED_BASE;
704 - mm->cached_hole_size = 0;
705 + start_addr = addr = mm->mmap_base;
706 + mm->cached_hole_size = 0;
707 }
708
709 full_search:
710 @@ -94,8 +98,8 @@ full_search:
711 * Start a new search - just in case we missed
712 * some holes.
713 */
714 - if (start_addr != TASK_UNMAPPED_BASE) {
715 - start_addr = addr = TASK_UNMAPPED_BASE;
716 + if (start_addr != mm->mmap_base) {
717 + start_addr = addr = mm->mmap_base;
718 mm->cached_hole_size = 0;
719 goto full_search;
720 }
721 diff -urNp linux-2.6.34.1/arch/arm/plat-samsung/pm.c linux-2.6.34.1/arch/arm/plat-samsung/pm.c
722 --- linux-2.6.34.1/arch/arm/plat-samsung/pm.c 2010-07-05 14:24:10.000000000 -0400
723 +++ linux-2.6.34.1/arch/arm/plat-samsung/pm.c 2010-07-07 09:04:42.000000000 -0400
724 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
725 s3c_pm_check_cleanup();
726 }
727
728 -static struct platform_suspend_ops s3c_pm_ops = {
729 +static const struct platform_suspend_ops s3c_pm_ops = {
730 .enter = s3c_pm_enter,
731 .prepare = s3c_pm_prepare,
732 .finish = s3c_pm_finish,
733 diff -urNp linux-2.6.34.1/arch/avr32/include/asm/elf.h linux-2.6.34.1/arch/avr32/include/asm/elf.h
734 --- linux-2.6.34.1/arch/avr32/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
735 +++ linux-2.6.34.1/arch/avr32/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
736 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
737 the loader. We need to make sure that it is out of the way of the program
738 that it will "exec", and that there is sufficient room for the brk. */
739
740 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
741 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
742
743 +#ifdef CONFIG_PAX_ASLR
744 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
745 +
746 +#define PAX_DELTA_MMAP_LEN 15
747 +#define PAX_DELTA_STACK_LEN 15
748 +#endif
749
750 /* This yields a mask that user programs can use to figure out what
751 instruction set this CPU supports. This could be done in user space,
752 diff -urNp linux-2.6.34.1/arch/avr32/include/asm/kmap_types.h linux-2.6.34.1/arch/avr32/include/asm/kmap_types.h
753 --- linux-2.6.34.1/arch/avr32/include/asm/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
754 +++ linux-2.6.34.1/arch/avr32/include/asm/kmap_types.h 2010-07-07 09:04:42.000000000 -0400
755 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
756 D(11) KM_IRQ1,
757 D(12) KM_SOFTIRQ0,
758 D(13) KM_SOFTIRQ1,
759 -D(14) KM_TYPE_NR
760 +D(14) KM_CLEARPAGE,
761 +D(15) KM_TYPE_NR
762 };
763
764 #undef D
765 diff -urNp linux-2.6.34.1/arch/avr32/mach-at32ap/pm.c linux-2.6.34.1/arch/avr32/mach-at32ap/pm.c
766 --- linux-2.6.34.1/arch/avr32/mach-at32ap/pm.c 2010-07-05 14:24:10.000000000 -0400
767 +++ linux-2.6.34.1/arch/avr32/mach-at32ap/pm.c 2010-07-07 09:04:42.000000000 -0400
768 @@ -176,7 +176,7 @@ out:
769 return 0;
770 }
771
772 -static struct platform_suspend_ops avr32_pm_ops = {
773 +static const struct platform_suspend_ops avr32_pm_ops = {
774 .valid = avr32_pm_valid_state,
775 .enter = avr32_pm_enter,
776 };
777 diff -urNp linux-2.6.34.1/arch/avr32/mm/fault.c linux-2.6.34.1/arch/avr32/mm/fault.c
778 --- linux-2.6.34.1/arch/avr32/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
779 +++ linux-2.6.34.1/arch/avr32/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
780 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
781
782 int exception_trace = 1;
783
784 +#ifdef CONFIG_PAX_PAGEEXEC
785 +void pax_report_insns(void *pc, void *sp)
786 +{
787 + unsigned long i;
788 +
789 + printk(KERN_ERR "PAX: bytes at PC: ");
790 + for (i = 0; i < 20; i++) {
791 + unsigned char c;
792 + if (get_user(c, (unsigned char *)pc+i))
793 + printk(KERN_CONT "???????? ");
794 + else
795 + printk(KERN_CONT "%02x ", c);
796 + }
797 + printk("\n");
798 +}
799 +#endif
800 +
801 /*
802 * This routine handles page faults. It determines the address and the
803 * problem, and then passes it off to one of the appropriate routines.
804 @@ -157,6 +174,16 @@ bad_area:
805 up_read(&mm->mmap_sem);
806
807 if (user_mode(regs)) {
808 +
809 +#ifdef CONFIG_PAX_PAGEEXEC
810 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
811 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
812 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
813 + do_group_exit(SIGKILL);
814 + }
815 + }
816 +#endif
817 +
818 if (exception_trace && printk_ratelimit())
819 printk("%s%s[%d]: segfault at %08lx pc %08lx "
820 "sp %08lx ecr %lu\n",
821 diff -urNp linux-2.6.34.1/arch/blackfin/kernel/kgdb.c linux-2.6.34.1/arch/blackfin/kernel/kgdb.c
822 --- linux-2.6.34.1/arch/blackfin/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
823 +++ linux-2.6.34.1/arch/blackfin/kernel/kgdb.c 2010-07-07 09:04:42.000000000 -0400
824 @@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vecto
825 return -1; /* this means that we do not want to exit from the handler */
826 }
827
828 -struct kgdb_arch arch_kgdb_ops = {
829 +const struct kgdb_arch arch_kgdb_ops = {
830 .gdb_bpt_instr = {0xa1},
831 #ifdef CONFIG_SMP
832 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
833 diff -urNp linux-2.6.34.1/arch/blackfin/mach-common/pm.c linux-2.6.34.1/arch/blackfin/mach-common/pm.c
834 --- linux-2.6.34.1/arch/blackfin/mach-common/pm.c 2010-07-05 14:24:10.000000000 -0400
835 +++ linux-2.6.34.1/arch/blackfin/mach-common/pm.c 2010-07-07 09:04:42.000000000 -0400
836 @@ -256,7 +256,7 @@ static int bfin_pm_enter(suspend_state_t
837 return 0;
838 }
839
840 -struct platform_suspend_ops bfin_pm_ops = {
841 +const struct platform_suspend_ops bfin_pm_ops = {
842 .enter = bfin_pm_enter,
843 .valid = bfin_pm_valid,
844 };
845 diff -urNp linux-2.6.34.1/arch/blackfin/mm/maccess.c linux-2.6.34.1/arch/blackfin/mm/maccess.c
846 --- linux-2.6.34.1/arch/blackfin/mm/maccess.c 2010-07-05 14:24:10.000000000 -0400
847 +++ linux-2.6.34.1/arch/blackfin/mm/maccess.c 2010-07-07 09:04:42.000000000 -0400
848 @@ -16,7 +16,7 @@ static int validate_memory_access_addres
849 return bfin_mem_access_type(addr, size);
850 }
851
852 -long probe_kernel_read(void *dst, void *src, size_t size)
853 +long probe_kernel_read(void *dst, const void *src, size_t size)
854 {
855 unsigned long lsrc = (unsigned long)src;
856 int mem_type;
857 @@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *
858 return -EFAULT;
859 }
860
861 -long probe_kernel_write(void *dst, void *src, size_t size)
862 +long probe_kernel_write(void *dst, const void *src, size_t size)
863 {
864 unsigned long ldst = (unsigned long)dst;
865 int mem_type;
866 diff -urNp linux-2.6.34.1/arch/frv/include/asm/kmap_types.h linux-2.6.34.1/arch/frv/include/asm/kmap_types.h
867 --- linux-2.6.34.1/arch/frv/include/asm/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
868 +++ linux-2.6.34.1/arch/frv/include/asm/kmap_types.h 2010-07-07 09:04:42.000000000 -0400
869 @@ -23,6 +23,7 @@ enum km_type {
870 KM_IRQ1,
871 KM_SOFTIRQ0,
872 KM_SOFTIRQ1,
873 + KM_CLEARPAGE,
874 KM_TYPE_NR
875 };
876
877 diff -urNp linux-2.6.34.1/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.34.1/arch/ia64/hp/common/hwsw_iommu.c
878 --- linux-2.6.34.1/arch/ia64/hp/common/hwsw_iommu.c 2010-07-05 14:24:10.000000000 -0400
879 +++ linux-2.6.34.1/arch/ia64/hp/common/hwsw_iommu.c 2010-07-07 09:04:42.000000000 -0400
880 @@ -17,7 +17,7 @@
881 #include <linux/swiotlb.h>
882 #include <asm/machvec.h>
883
884 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
885 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
886
887 /* swiotlb declarations & definitions: */
888 extern int swiotlb_late_init_with_default_size (size_t size);
889 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
890 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
891 }
892
893 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
894 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
895 {
896 if (use_swiotlb(dev))
897 return &swiotlb_dma_ops;
898 diff -urNp linux-2.6.34.1/arch/ia64/hp/common/sba_iommu.c linux-2.6.34.1/arch/ia64/hp/common/sba_iommu.c
899 --- linux-2.6.34.1/arch/ia64/hp/common/sba_iommu.c 2010-07-05 14:24:10.000000000 -0400
900 +++ linux-2.6.34.1/arch/ia64/hp/common/sba_iommu.c 2010-07-07 09:04:42.000000000 -0400
901 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
902 },
903 };
904
905 -extern struct dma_map_ops swiotlb_dma_ops;
906 +extern const struct dma_map_ops swiotlb_dma_ops;
907
908 static int __init
909 sba_init(void)
910 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
911
912 __setup("sbapagesize=",sba_page_override);
913
914 -struct dma_map_ops sba_dma_ops = {
915 +const struct dma_map_ops sba_dma_ops = {
916 .alloc_coherent = sba_alloc_coherent,
917 .free_coherent = sba_free_coherent,
918 .map_page = sba_map_page,
919 diff -urNp linux-2.6.34.1/arch/ia64/include/asm/dma-mapping.h linux-2.6.34.1/arch/ia64/include/asm/dma-mapping.h
920 --- linux-2.6.34.1/arch/ia64/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
921 +++ linux-2.6.34.1/arch/ia64/include/asm/dma-mapping.h 2010-07-07 09:04:42.000000000 -0400
922 @@ -12,7 +12,7 @@
923
924 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
925
926 -extern struct dma_map_ops *dma_ops;
927 +extern const struct dma_map_ops *dma_ops;
928 extern struct ia64_machine_vector ia64_mv;
929 extern void set_iommu_machvec(void);
930
931 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
932 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
933 dma_addr_t *daddr, gfp_t gfp)
934 {
935 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
936 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
937 void *caddr;
938
939 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
940 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
941 static inline void dma_free_coherent(struct device *dev, size_t size,
942 void *caddr, dma_addr_t daddr)
943 {
944 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
945 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
946 debug_dma_free_coherent(dev, size, caddr, daddr);
947 ops->free_coherent(dev, size, caddr, daddr);
948 }
949 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
950
951 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
952 {
953 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
954 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
955 return ops->mapping_error(dev, daddr);
956 }
957
958 static inline int dma_supported(struct device *dev, u64 mask)
959 {
960 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
961 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
962 return ops->dma_supported(dev, mask);
963 }
964
965 diff -urNp linux-2.6.34.1/arch/ia64/include/asm/elf.h linux-2.6.34.1/arch/ia64/include/asm/elf.h
966 --- linux-2.6.34.1/arch/ia64/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
967 +++ linux-2.6.34.1/arch/ia64/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
968 @@ -42,6 +42,13 @@
969 */
970 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
971
972 +#ifdef CONFIG_PAX_ASLR
973 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
974 +
975 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
976 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
977 +#endif
978 +
979 #define PT_IA_64_UNWIND 0x70000001
980
981 /* IA-64 relocations: */
982 diff -urNp linux-2.6.34.1/arch/ia64/include/asm/machvec.h linux-2.6.34.1/arch/ia64/include/asm/machvec.h
983 --- linux-2.6.34.1/arch/ia64/include/asm/machvec.h 2010-07-05 14:24:10.000000000 -0400
984 +++ linux-2.6.34.1/arch/ia64/include/asm/machvec.h 2010-07-07 09:04:42.000000000 -0400
985 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
986 /* DMA-mapping interface: */
987 typedef void ia64_mv_dma_init (void);
988 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
989 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
990 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
991
992 /*
993 * WARNING: The legacy I/O space is _architected_. Platforms are
994 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
995 # endif /* CONFIG_IA64_GENERIC */
996
997 extern void swiotlb_dma_init(void);
998 -extern struct dma_map_ops *dma_get_ops(struct device *);
999 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1000
1001 /*
1002 * Define default versions so we can extend machvec for new platforms without having
1003 diff -urNp linux-2.6.34.1/arch/ia64/include/asm/pgtable.h linux-2.6.34.1/arch/ia64/include/asm/pgtable.h
1004 --- linux-2.6.34.1/arch/ia64/include/asm/pgtable.h 2010-07-05 14:24:10.000000000 -0400
1005 +++ linux-2.6.34.1/arch/ia64/include/asm/pgtable.h 2010-07-07 09:04:42.000000000 -0400
1006 @@ -143,6 +143,17 @@
1007 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1008 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1009 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1010 +
1011 +#ifdef CONFIG_PAX_PAGEEXEC
1012 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1013 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1014 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1015 +#else
1016 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1017 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1018 +# define PAGE_COPY_NOEXEC PAGE_COPY
1019 +#endif
1020 +
1021 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1022 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1023 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1024 diff -urNp linux-2.6.34.1/arch/ia64/include/asm/uaccess.h linux-2.6.34.1/arch/ia64/include/asm/uaccess.h
1025 --- linux-2.6.34.1/arch/ia64/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
1026 +++ linux-2.6.34.1/arch/ia64/include/asm/uaccess.h 2010-07-07 09:04:42.000000000 -0400
1027 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1028 const void *__cu_from = (from); \
1029 long __cu_len = (n); \
1030 \
1031 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1032 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1033 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1034 __cu_len; \
1035 })
1036 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1037 long __cu_len = (n); \
1038 \
1039 __chk_user_ptr(__cu_from); \
1040 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1041 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1042 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1043 __cu_len; \
1044 })
1045 diff -urNp linux-2.6.34.1/arch/ia64/kernel/dma-mapping.c linux-2.6.34.1/arch/ia64/kernel/dma-mapping.c
1046 --- linux-2.6.34.1/arch/ia64/kernel/dma-mapping.c 2010-07-05 14:24:10.000000000 -0400
1047 +++ linux-2.6.34.1/arch/ia64/kernel/dma-mapping.c 2010-07-07 09:04:42.000000000 -0400
1048 @@ -3,7 +3,7 @@
1049 /* Set this to 1 if there is a HW IOMMU in the system */
1050 int iommu_detected __read_mostly;
1051
1052 -struct dma_map_ops *dma_ops;
1053 +const struct dma_map_ops *dma_ops;
1054 EXPORT_SYMBOL(dma_ops);
1055
1056 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1057 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1058 }
1059 fs_initcall(dma_init);
1060
1061 -struct dma_map_ops *dma_get_ops(struct device *dev)
1062 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1063 {
1064 return dma_ops;
1065 }
1066 diff -urNp linux-2.6.34.1/arch/ia64/kernel/module.c linux-2.6.34.1/arch/ia64/kernel/module.c
1067 --- linux-2.6.34.1/arch/ia64/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
1068 +++ linux-2.6.34.1/arch/ia64/kernel/module.c 2010-07-07 09:04:42.000000000 -0400
1069 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1070 void
1071 module_free (struct module *mod, void *module_region)
1072 {
1073 - if (mod && mod->arch.init_unw_table &&
1074 - module_region == mod->module_init) {
1075 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1076 unw_remove_unwind_table(mod->arch.init_unw_table);
1077 mod->arch.init_unw_table = NULL;
1078 }
1079 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1080 }
1081
1082 static inline int
1083 +in_init_rx (const struct module *mod, uint64_t addr)
1084 +{
1085 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1086 +}
1087 +
1088 +static inline int
1089 +in_init_rw (const struct module *mod, uint64_t addr)
1090 +{
1091 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1092 +}
1093 +
1094 +static inline int
1095 in_init (const struct module *mod, uint64_t addr)
1096 {
1097 - return addr - (uint64_t) mod->module_init < mod->init_size;
1098 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1099 +}
1100 +
1101 +static inline int
1102 +in_core_rx (const struct module *mod, uint64_t addr)
1103 +{
1104 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1105 +}
1106 +
1107 +static inline int
1108 +in_core_rw (const struct module *mod, uint64_t addr)
1109 +{
1110 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1111 }
1112
1113 static inline int
1114 in_core (const struct module *mod, uint64_t addr)
1115 {
1116 - return addr - (uint64_t) mod->module_core < mod->core_size;
1117 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1118 }
1119
1120 static inline int
1121 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1122 break;
1123
1124 case RV_BDREL:
1125 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1126 + if (in_init_rx(mod, val))
1127 + val -= (uint64_t) mod->module_init_rx;
1128 + else if (in_init_rw(mod, val))
1129 + val -= (uint64_t) mod->module_init_rw;
1130 + else if (in_core_rx(mod, val))
1131 + val -= (uint64_t) mod->module_core_rx;
1132 + else if (in_core_rw(mod, val))
1133 + val -= (uint64_t) mod->module_core_rw;
1134 break;
1135
1136 case RV_LTV:
1137 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1138 * addresses have been selected...
1139 */
1140 uint64_t gp;
1141 - if (mod->core_size > MAX_LTOFF)
1142 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1143 /*
1144 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1145 * at the end of the module.
1146 */
1147 - gp = mod->core_size - MAX_LTOFF / 2;
1148 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1149 else
1150 - gp = mod->core_size / 2;
1151 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1152 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1153 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1154 mod->arch.gp = gp;
1155 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1156 }
1157 diff -urNp linux-2.6.34.1/arch/ia64/kernel/pci-dma.c linux-2.6.34.1/arch/ia64/kernel/pci-dma.c
1158 --- linux-2.6.34.1/arch/ia64/kernel/pci-dma.c 2010-07-05 14:24:10.000000000 -0400
1159 +++ linux-2.6.34.1/arch/ia64/kernel/pci-dma.c 2010-07-07 09:04:42.000000000 -0400
1160 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1161 .dma_mask = &fallback_dev.coherent_dma_mask,
1162 };
1163
1164 -extern struct dma_map_ops intel_dma_ops;
1165 +extern const struct dma_map_ops intel_dma_ops;
1166
1167 static int __init pci_iommu_init(void)
1168 {
1169 diff -urNp linux-2.6.34.1/arch/ia64/kernel/pci-swiotlb.c linux-2.6.34.1/arch/ia64/kernel/pci-swiotlb.c
1170 --- linux-2.6.34.1/arch/ia64/kernel/pci-swiotlb.c 2010-07-05 14:24:10.000000000 -0400
1171 +++ linux-2.6.34.1/arch/ia64/kernel/pci-swiotlb.c 2010-07-07 09:04:42.000000000 -0400
1172 @@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent
1173 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1174 }
1175
1176 -struct dma_map_ops swiotlb_dma_ops = {
1177 +const struct dma_map_ops swiotlb_dma_ops = {
1178 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1179 .free_coherent = swiotlb_free_coherent,
1180 .map_page = swiotlb_map_page,
1181 diff -urNp linux-2.6.34.1/arch/ia64/kernel/sys_ia64.c linux-2.6.34.1/arch/ia64/kernel/sys_ia64.c
1182 --- linux-2.6.34.1/arch/ia64/kernel/sys_ia64.c 2010-07-05 14:24:10.000000000 -0400
1183 +++ linux-2.6.34.1/arch/ia64/kernel/sys_ia64.c 2010-07-07 09:04:42.000000000 -0400
1184 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1185 if (REGION_NUMBER(addr) == RGN_HPAGE)
1186 addr = 0;
1187 #endif
1188 +
1189 +#ifdef CONFIG_PAX_RANDMMAP
1190 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1191 + addr = mm->free_area_cache;
1192 + else
1193 +#endif
1194 +
1195 if (!addr)
1196 addr = mm->free_area_cache;
1197
1198 @@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
1199 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1200 /* At this point: (!vma || addr < vma->vm_end). */
1201 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1202 - if (start_addr != TASK_UNMAPPED_BASE) {
1203 + if (start_addr != mm->mmap_base) {
1204 /* Start a new search --- just in case we missed some holes. */
1205 - addr = TASK_UNMAPPED_BASE;
1206 + addr = mm->mmap_base;
1207 goto full_search;
1208 }
1209 return -ENOMEM;
1210 diff -urNp linux-2.6.34.1/arch/ia64/kernel/vmlinux.lds.S linux-2.6.34.1/arch/ia64/kernel/vmlinux.lds.S
1211 --- linux-2.6.34.1/arch/ia64/kernel/vmlinux.lds.S 2010-07-05 14:24:10.000000000 -0400
1212 +++ linux-2.6.34.1/arch/ia64/kernel/vmlinux.lds.S 2010-07-07 09:04:42.000000000 -0400
1213 @@ -196,7 +196,7 @@ SECTIONS
1214 /* Per-cpu data: */
1215 . = ALIGN(PERCPU_PAGE_SIZE);
1216 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1217 - __phys_per_cpu_start = __per_cpu_load;
1218 + __phys_per_cpu_start = per_cpu_load;
1219 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1220 * into percpu page size
1221 */
1222 diff -urNp linux-2.6.34.1/arch/ia64/mm/fault.c linux-2.6.34.1/arch/ia64/mm/fault.c
1223 --- linux-2.6.34.1/arch/ia64/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
1224 +++ linux-2.6.34.1/arch/ia64/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
1225 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1226 return pte_present(pte);
1227 }
1228
1229 +#ifdef CONFIG_PAX_PAGEEXEC
1230 +void pax_report_insns(void *pc, void *sp)
1231 +{
1232 + unsigned long i;
1233 +
1234 + printk(KERN_ERR "PAX: bytes at PC: ");
1235 + for (i = 0; i < 8; i++) {
1236 + unsigned int c;
1237 + if (get_user(c, (unsigned int *)pc+i))
1238 + printk(KERN_CONT "???????? ");
1239 + else
1240 + printk(KERN_CONT "%08x ", c);
1241 + }
1242 + printk("\n");
1243 +}
1244 +#endif
1245 +
1246 void __kprobes
1247 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1248 {
1249 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1250 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1251 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1252
1253 - if ((vma->vm_flags & mask) != mask)
1254 + if ((vma->vm_flags & mask) != mask) {
1255 +
1256 +#ifdef CONFIG_PAX_PAGEEXEC
1257 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1258 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1259 + goto bad_area;
1260 +
1261 + up_read(&mm->mmap_sem);
1262 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1263 + do_group_exit(SIGKILL);
1264 + }
1265 +#endif
1266 +
1267 goto bad_area;
1268
1269 + }
1270 +
1271 survive:
1272 /*
1273 * If for any reason at all we couldn't handle the fault, make
1274 diff -urNp linux-2.6.34.1/arch/ia64/mm/init.c linux-2.6.34.1/arch/ia64/mm/init.c
1275 --- linux-2.6.34.1/arch/ia64/mm/init.c 2010-07-05 14:24:10.000000000 -0400
1276 +++ linux-2.6.34.1/arch/ia64/mm/init.c 2010-07-07 09:04:42.000000000 -0400
1277 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1278 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1279 vma->vm_end = vma->vm_start + PAGE_SIZE;
1280 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1281 +
1282 +#ifdef CONFIG_PAX_PAGEEXEC
1283 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1284 + vma->vm_flags &= ~VM_EXEC;
1285 +
1286 +#ifdef CONFIG_PAX_MPROTECT
1287 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1288 + vma->vm_flags &= ~VM_MAYEXEC;
1289 +#endif
1290 +
1291 + }
1292 +#endif
1293 +
1294 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1295 down_write(&current->mm->mmap_sem);
1296 if (insert_vm_struct(current->mm, vma)) {
1297 diff -urNp linux-2.6.34.1/arch/ia64/sn/pci/pci_dma.c linux-2.6.34.1/arch/ia64/sn/pci/pci_dma.c
1298 --- linux-2.6.34.1/arch/ia64/sn/pci/pci_dma.c 2010-07-05 14:24:10.000000000 -0400
1299 +++ linux-2.6.34.1/arch/ia64/sn/pci/pci_dma.c 2010-07-07 09:04:42.000000000 -0400
1300 @@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *
1301 return ret;
1302 }
1303
1304 -static struct dma_map_ops sn_dma_ops = {
1305 +static const struct dma_map_ops sn_dma_ops = {
1306 .alloc_coherent = sn_dma_alloc_coherent,
1307 .free_coherent = sn_dma_free_coherent,
1308 .map_page = sn_dma_map_page,
1309 diff -urNp linux-2.6.34.1/arch/m32r/lib/usercopy.c linux-2.6.34.1/arch/m32r/lib/usercopy.c
1310 --- linux-2.6.34.1/arch/m32r/lib/usercopy.c 2010-07-05 14:24:10.000000000 -0400
1311 +++ linux-2.6.34.1/arch/m32r/lib/usercopy.c 2010-07-07 09:04:42.000000000 -0400
1312 @@ -14,6 +14,9 @@
1313 unsigned long
1314 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1315 {
1316 + if ((long)n < 0)
1317 + return n;
1318 +
1319 prefetch(from);
1320 if (access_ok(VERIFY_WRITE, to, n))
1321 __copy_user(to,from,n);
1322 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1323 unsigned long
1324 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1325 {
1326 + if ((long)n < 0)
1327 + return n;
1328 +
1329 prefetchw(to);
1330 if (access_ok(VERIFY_READ, from, n))
1331 __copy_user_zeroing(to,from,n);
1332 diff -urNp linux-2.6.34.1/arch/microblaze/include/asm/device.h linux-2.6.34.1/arch/microblaze/include/asm/device.h
1333 --- linux-2.6.34.1/arch/microblaze/include/asm/device.h 2010-07-05 14:24:10.000000000 -0400
1334 +++ linux-2.6.34.1/arch/microblaze/include/asm/device.h 2010-07-07 09:04:42.000000000 -0400
1335 @@ -16,7 +16,7 @@ struct dev_archdata {
1336 struct device_node *of_node;
1337
1338 /* DMA operations on that device */
1339 - struct dma_map_ops *dma_ops;
1340 + const struct dma_map_ops *dma_ops;
1341 void *dma_data;
1342 };
1343
1344 diff -urNp linux-2.6.34.1/arch/microblaze/include/asm/dma-mapping.h linux-2.6.34.1/arch/microblaze/include/asm/dma-mapping.h
1345 --- linux-2.6.34.1/arch/microblaze/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
1346 +++ linux-2.6.34.1/arch/microblaze/include/asm/dma-mapping.h 2010-07-07 09:04:42.000000000 -0400
1347 @@ -43,14 +43,14 @@ static inline unsigned long device_to_ma
1348 return 0xfffffffful;
1349 }
1350
1351 -extern struct dma_map_ops *dma_ops;
1352 +extern const struct dma_map_ops *dma_ops;
1353
1354 /*
1355 * Available generic sets of operations
1356 */
1357 -extern struct dma_map_ops dma_direct_ops;
1358 +extern const struct dma_map_ops dma_direct_ops;
1359
1360 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1361 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1362 {
1363 /* We don't handle the NULL dev case for ISA for now. We could
1364 * do it via an out of line call but it is not needed for now. The
1365 @@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm
1366 return dev->archdata.dma_ops;
1367 }
1368
1369 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1370 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1371 {
1372 dev->archdata.dma_ops = ops;
1373 }
1374
1375 static inline int dma_supported(struct device *dev, u64 mask)
1376 {
1377 - struct dma_map_ops *ops = get_dma_ops(dev);
1378 + const struct dma_map_ops *ops = get_dma_ops(dev);
1379
1380 if (unlikely(!ops))
1381 return 0;
1382 @@ -87,7 +87,7 @@ static inline int dma_supported(struct d
1383
1384 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1385 {
1386 - struct dma_map_ops *ops = get_dma_ops(dev);
1387 + const struct dma_map_ops *ops = get_dma_ops(dev);
1388
1389 if (unlikely(ops == NULL))
1390 return -EIO;
1391 @@ -103,7 +103,7 @@ static inline int dma_set_mask(struct de
1392
1393 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1394 {
1395 - struct dma_map_ops *ops = get_dma_ops(dev);
1396 + const struct dma_map_ops *ops = get_dma_ops(dev);
1397 if (ops->mapping_error)
1398 return ops->mapping_error(dev, dma_addr);
1399
1400 @@ -117,7 +117,7 @@ static inline int dma_mapping_error(stru
1401 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1402 dma_addr_t *dma_handle, gfp_t flag)
1403 {
1404 - struct dma_map_ops *ops = get_dma_ops(dev);
1405 + const struct dma_map_ops *ops = get_dma_ops(dev);
1406 void *memory;
1407
1408 BUG_ON(!ops);
1409 @@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(s
1410 static inline void dma_free_coherent(struct device *dev, size_t size,
1411 void *cpu_addr, dma_addr_t dma_handle)
1412 {
1413 - struct dma_map_ops *ops = get_dma_ops(dev);
1414 + const struct dma_map_ops *ops = get_dma_ops(dev);
1415
1416 BUG_ON(!ops);
1417 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
1418 diff -urNp linux-2.6.34.1/arch/microblaze/include/asm/pci.h linux-2.6.34.1/arch/microblaze/include/asm/pci.h
1419 --- linux-2.6.34.1/arch/microblaze/include/asm/pci.h 2010-07-05 14:24:10.000000000 -0400
1420 +++ linux-2.6.34.1/arch/microblaze/include/asm/pci.h 2010-07-07 09:04:42.000000000 -0400
1421 @@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_
1422 }
1423
1424 #ifdef CONFIG_PCI
1425 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
1426 -extern struct dma_map_ops *get_pci_dma_ops(void);
1427 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
1428 +extern const struct dma_map_ops *get_pci_dma_ops(void);
1429 #else /* CONFIG_PCI */
1430 #define set_pci_dma_ops(d)
1431 #define get_pci_dma_ops() NULL
1432 diff -urNp linux-2.6.34.1/arch/microblaze/kernel/dma.c linux-2.6.34.1/arch/microblaze/kernel/dma.c
1433 --- linux-2.6.34.1/arch/microblaze/kernel/dma.c 2010-07-05 14:24:10.000000000 -0400
1434 +++ linux-2.6.34.1/arch/microblaze/kernel/dma.c 2010-07-07 09:04:42.000000000 -0400
1435 @@ -134,7 +134,7 @@ static inline void dma_direct_unmap_page
1436 __dma_sync_page(dma_address, 0 , size, direction);
1437 }
1438
1439 -struct dma_map_ops dma_direct_ops = {
1440 +const struct dma_map_ops dma_direct_ops = {
1441 .alloc_coherent = dma_direct_alloc_coherent,
1442 .free_coherent = dma_direct_free_coherent,
1443 .map_sg = dma_direct_map_sg,
1444 diff -urNp linux-2.6.34.1/arch/microblaze/pci/pci-common.c linux-2.6.34.1/arch/microblaze/pci/pci-common.c
1445 --- linux-2.6.34.1/arch/microblaze/pci/pci-common.c 2010-07-05 14:24:10.000000000 -0400
1446 +++ linux-2.6.34.1/arch/microblaze/pci/pci-common.c 2010-07-07 09:04:42.000000000 -0400
1447 @@ -46,14 +46,14 @@ resource_size_t isa_mem_base;
1448 /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
1449 unsigned int pci_flags;
1450
1451 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
1452 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
1453
1454 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
1455 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
1456 {
1457 pci_dma_ops = dma_ops;
1458 }
1459
1460 -struct dma_map_ops *get_pci_dma_ops(void)
1461 +const struct dma_map_ops *get_pci_dma_ops(void)
1462 {
1463 return pci_dma_ops;
1464 }
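
The microblaze hunks above (and the matching powerpc ones further down) apply one recurring transformation in this patch: ops tables that consist only of function pointers, here the struct dma_map_ops instances and every pointer that refers to them, are qualified const so they end up in read-only data and cannot be retargeted at run time. A minimal standalone sketch of the pattern follows; demo_ops and its functions are hypothetical stand-ins, not code from the patch.

#include <stdio.h>

/* An ops table: nothing but function pointers, like dma_map_ops. */
struct demo_ops {
        int  (*supported)(unsigned long mask);
        void (*release)(void);
};

static int  demo_supported(unsigned long mask) { return mask >= 0xffffffffUL; }
static void demo_release(void) { puts("released"); }

/* const: the table lands in .rodata and cannot be patched at run time. */
static const struct demo_ops demo_direct_ops = {
        .supported = demo_supported,
        .release   = demo_release,
};

/* Consumers hold a pointer-to-const, mirroring get_dma_ops()/set_dma_ops(). */
static const struct demo_ops *current_ops = &demo_direct_ops;

int main(void)
{
        /* current_ops->supported = NULL;   <-- would no longer compile */
        printf("supported: %d\n", current_ops->supported(0xffffffffUL));
        current_ops->release();
        return 0;
}
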
1465 diff -urNp linux-2.6.34.1/arch/mips/alchemy/devboards/pm.c linux-2.6.34.1/arch/mips/alchemy/devboards/pm.c
1466 --- linux-2.6.34.1/arch/mips/alchemy/devboards/pm.c 2010-07-05 14:24:10.000000000 -0400
1467 +++ linux-2.6.34.1/arch/mips/alchemy/devboards/pm.c 2010-07-07 09:04:42.000000000 -0400
1468 @@ -110,7 +110,7 @@ static void db1x_pm_end(void)
1469
1470 }
1471
1472 -static struct platform_suspend_ops db1x_pm_ops = {
1473 +static const struct platform_suspend_ops db1x_pm_ops = {
1474 .valid = suspend_valid_only_mem,
1475 .begin = db1x_pm_begin,
1476 .enter = db1x_pm_enter,
1477 diff -urNp linux-2.6.34.1/arch/mips/include/asm/elf.h linux-2.6.34.1/arch/mips/include/asm/elf.h
1478 --- linux-2.6.34.1/arch/mips/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
1479 +++ linux-2.6.34.1/arch/mips/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
1480 @@ -368,6 +368,13 @@ extern const char *__elf_platform;
1481 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_ASLR
1485 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1486 +
1487 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1488 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1489 +#endif
1490 +
1491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1492 struct linux_binprm;
1493 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
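
PAX_ELF_ET_DYN_BASE above fixes the base address used for ET_DYN binaries under PaX ASLR, and PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN give the randomization width as a number of random bits above page granularity: 27-PAGE_SHIFT means the base moves inside a 2^27-byte (128 MiB) window in page-sized steps. The sketch below only illustrates that arithmetic; it assumes 4 KiB pages, reuses the 0x00400000UL base purely as an example, and uses rand() as a placeholder for the kernel's entropy source.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT      12UL                    /* assumption: 4 KiB pages */
#define DELTA_MMAP_LEN  (27UL - PAGE_SHIFT)     /* 15 random bits of page index */

static unsigned long randomize_base(unsigned long base)
{
        /* Pick a random page index below 2^DELTA_MMAP_LEN and scale to bytes. */
        unsigned long pages = (unsigned long)rand() & ((1UL << DELTA_MMAP_LEN) - 1);
        return base + (pages << PAGE_SHIFT);
}

int main(void)
{
        srand((unsigned)time(NULL));
        unsigned long base = 0x00400000UL;
        printf("window: %lu MiB, randomized base: 0x%08lx\n",
               (1UL << (DELTA_MMAP_LEN + PAGE_SHIFT)) >> 20, randomize_base(base));
        return 0;
}
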
1494 diff -urNp linux-2.6.34.1/arch/mips/include/asm/page.h linux-2.6.34.1/arch/mips/include/asm/page.h
1495 --- linux-2.6.34.1/arch/mips/include/asm/page.h 2010-07-05 14:24:10.000000000 -0400
1496 +++ linux-2.6.34.1/arch/mips/include/asm/page.h 2010-07-07 09:04:42.000000000 -0400
1497 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1498 #ifdef CONFIG_CPU_MIPS32
1499 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1500 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1501 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1502 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1503 #else
1504 typedef struct { unsigned long long pte; } pte_t;
1505 #define pte_val(x) ((x).pte)
1506 diff -urNp linux-2.6.34.1/arch/mips/include/asm/system.h linux-2.6.34.1/arch/mips/include/asm/system.h
1507 --- linux-2.6.34.1/arch/mips/include/asm/system.h 2010-07-05 14:24:10.000000000 -0400
1508 +++ linux-2.6.34.1/arch/mips/include/asm/system.h 2010-07-07 09:04:42.000000000 -0400
1509 @@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void);
1510 */
1511 #define __ARCH_WANT_UNLOCKED_CTXSW
1512
1513 -extern unsigned long arch_align_stack(unsigned long sp);
1514 +#define arch_align_stack(x) ((x) & ALMASK)
1515
1516 #endif /* _ASM_SYSTEM_H */
1517 diff -urNp linux-2.6.34.1/arch/mips/kernel/binfmt_elfn32.c linux-2.6.34.1/arch/mips/kernel/binfmt_elfn32.c
1518 --- linux-2.6.34.1/arch/mips/kernel/binfmt_elfn32.c 2010-07-05 14:24:10.000000000 -0400
1519 +++ linux-2.6.34.1/arch/mips/kernel/binfmt_elfn32.c 2010-07-07 09:04:42.000000000 -0400
1520 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1521 #undef ELF_ET_DYN_BASE
1522 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1523
1524 +#ifdef CONFIG_PAX_ASLR
1525 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1526 +
1527 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1528 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1529 +#endif
1530 +
1531 #include <asm/processor.h>
1532 #include <linux/module.h>
1533 #include <linux/elfcore.h>
1534 diff -urNp linux-2.6.34.1/arch/mips/kernel/binfmt_elfo32.c linux-2.6.34.1/arch/mips/kernel/binfmt_elfo32.c
1535 --- linux-2.6.34.1/arch/mips/kernel/binfmt_elfo32.c 2010-07-05 14:24:10.000000000 -0400
1536 +++ linux-2.6.34.1/arch/mips/kernel/binfmt_elfo32.c 2010-07-07 09:04:42.000000000 -0400
1537 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1538 #undef ELF_ET_DYN_BASE
1539 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1540
1541 +#ifdef CONFIG_PAX_ASLR
1542 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1543 +
1544 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1545 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1546 +#endif
1547 +
1548 #include <asm/processor.h>
1549
1550 /*
1551 diff -urNp linux-2.6.34.1/arch/mips/kernel/kgdb.c linux-2.6.34.1/arch/mips/kernel/kgdb.c
1552 --- linux-2.6.34.1/arch/mips/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
1553 +++ linux-2.6.34.1/arch/mips/kernel/kgdb.c 2010-07-07 09:04:42.000000000 -0400
1554 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1555 return -1;
1556 }
1557
1558 +/* cannot be const, see kgdb_arch_init */
1559 struct kgdb_arch arch_kgdb_ops;
1560
1561 /*
1562 diff -urNp linux-2.6.34.1/arch/mips/kernel/process.c linux-2.6.34.1/arch/mips/kernel/process.c
1563 --- linux-2.6.34.1/arch/mips/kernel/process.c 2010-07-05 14:24:10.000000000 -0400
1564 +++ linux-2.6.34.1/arch/mips/kernel/process.c 2010-07-07 09:04:42.000000000 -0400
1565 @@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_stru
1566 out:
1567 return pc;
1568 }
1569 -
1570 -/*
1571 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1572 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1573 - */
1574 -unsigned long arch_align_stack(unsigned long sp)
1575 -{
1576 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1577 - sp -= get_random_int() & ~PAGE_MASK;
1578 -
1579 - return sp & ALMASK;
1580 -}
1581 diff -urNp linux-2.6.34.1/arch/mips/kernel/syscall.c linux-2.6.34.1/arch/mips/kernel/syscall.c
1582 --- linux-2.6.34.1/arch/mips/kernel/syscall.c 2010-07-05 14:24:10.000000000 -0400
1583 +++ linux-2.6.34.1/arch/mips/kernel/syscall.c 2010-07-07 09:04:42.000000000 -0400
1584 @@ -106,6 +106,11 @@ unsigned long arch_get_unmapped_area(str
1585 do_color_align = 0;
1586 if (filp || (flags & MAP_SHARED))
1587 do_color_align = 1;
1588 +
1589 +#ifdef CONFIG_PAX_RANDMMAP
1590 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1591 +#endif
1592 +
1593 if (addr) {
1594 if (do_color_align)
1595 addr = COLOUR_ALIGN(addr, pgoff);
1596 @@ -116,7 +121,7 @@ unsigned long arch_get_unmapped_area(str
1597 (!vmm || addr + len <= vmm->vm_start))
1598 return addr;
1599 }
1600 - addr = TASK_UNMAPPED_BASE;
1601 + addr = current->mm->mmap_base;
1602 if (do_color_align)
1603 addr = COLOUR_ALIGN(addr, pgoff);
1604 else
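
Two related changes in the arch_get_unmapped_area() hunk above: when PAX_RANDMMAP is active the caller-supplied address hint is skipped, and the linear search starts from current->mm->mmap_base (which RANDMMAP has already shifted) rather than the fixed TASK_UNMAPPED_BASE. A compact sketch of that decision; search_start() and the randomizing flag are hypothetical simplifications.

#include <stdbool.h>
#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x20000000UL

/* Pick where the search for a free mapping should start. */
static unsigned long search_start(unsigned long hint, unsigned long mmap_base,
                                  bool randomizing)
{
        if (randomizing)
                hint = 0;               /* ignore the hint, as the patch does */
        if (hint)
                return hint;            /* honour an explicit, allowed hint   */
        return mmap_base;               /* randomized base, not the constant  */
}

int main(void)
{
        printf("0x%lx\n",
               search_start(0x30000000UL, TASK_UNMAPPED_BASE + 0x123000UL, true));
        return 0;
}
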
1605 diff -urNp linux-2.6.34.1/arch/mips/loongson/common/pm.c linux-2.6.34.1/arch/mips/loongson/common/pm.c
1606 --- linux-2.6.34.1/arch/mips/loongson/common/pm.c 2010-07-05 14:24:10.000000000 -0400
1607 +++ linux-2.6.34.1/arch/mips/loongson/common/pm.c 2010-07-07 09:04:42.000000000 -0400
1608 @@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspe
1609 }
1610 }
1611
1612 -static struct platform_suspend_ops loongson_pm_ops = {
1613 +static const struct platform_suspend_ops loongson_pm_ops = {
1614 .valid = loongson_pm_valid_state,
1615 .enter = loongson_pm_enter,
1616 };
1617 diff -urNp linux-2.6.34.1/arch/mips/mm/fault.c linux-2.6.34.1/arch/mips/mm/fault.c
1618 --- linux-2.6.34.1/arch/mips/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
1619 +++ linux-2.6.34.1/arch/mips/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
1620 @@ -26,6 +26,23 @@
1621 #include <asm/ptrace.h>
1622 #include <asm/highmem.h> /* For VMALLOC_END */
1623
1624 +#ifdef CONFIG_PAX_PAGEEXEC
1625 +void pax_report_insns(void *pc)
1626 +{
1627 + unsigned long i;
1628 +
1629 + printk(KERN_ERR "PAX: bytes at PC: ");
1630 + for (i = 0; i < 5; i++) {
1631 + unsigned int c;
1632 + if (get_user(c, (unsigned int *)pc+i))
1633 + printk(KERN_CONT "???????? ");
1634 + else
1635 + printk(KERN_CONT "%08x ", c);
1636 + }
1637 + printk("\n");
1638 +}
1639 +#endif
1640 +
1641 /*
1642 * This routine handles page faults. It determines the address,
1643 * and the problem, and then passes it off to one of the appropriate
1644 diff -urNp linux-2.6.34.1/arch/parisc/include/asm/elf.h linux-2.6.34.1/arch/parisc/include/asm/elf.h
1645 --- linux-2.6.34.1/arch/parisc/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
1646 +++ linux-2.6.34.1/arch/parisc/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
1647 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1648
1649 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1650
1651 +#ifdef CONFIG_PAX_ASLR
1652 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1653 +
1654 +#define PAX_DELTA_MMAP_LEN 16
1655 +#define PAX_DELTA_STACK_LEN 16
1656 +#endif
1657 +
1658 /* This yields a mask that user programs can use to figure out what
1659 instruction set this CPU supports. This could be done in user space,
1660 but it's not easy, and we've already done it here. */
1661 diff -urNp linux-2.6.34.1/arch/parisc/include/asm/pgtable.h linux-2.6.34.1/arch/parisc/include/asm/pgtable.h
1662 --- linux-2.6.34.1/arch/parisc/include/asm/pgtable.h 2010-07-05 14:24:10.000000000 -0400
1663 +++ linux-2.6.34.1/arch/parisc/include/asm/pgtable.h 2010-07-07 09:04:42.000000000 -0400
1664 @@ -207,6 +207,17 @@
1665 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1666 #define PAGE_COPY PAGE_EXECREAD
1667 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1668 +
1669 +#ifdef CONFIG_PAX_PAGEEXEC
1670 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1671 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1672 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1673 +#else
1674 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1675 +# define PAGE_COPY_NOEXEC PAGE_COPY
1676 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1677 +#endif
1678 +
1679 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1680 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1681 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1682 diff -urNp linux-2.6.34.1/arch/parisc/kernel/module.c linux-2.6.34.1/arch/parisc/kernel/module.c
1683 --- linux-2.6.34.1/arch/parisc/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
1684 +++ linux-2.6.34.1/arch/parisc/kernel/module.c 2010-07-07 09:04:42.000000000 -0400
1685 @@ -96,16 +96,38 @@
1686
1687 /* three functions to determine where in the module core
1688 * or init pieces the location is */
1689 +static inline int in_init_rx(struct module *me, void *loc)
1690 +{
1691 + return (loc >= me->module_init_rx &&
1692 + loc < (me->module_init_rx + me->init_size_rx));
1693 +}
1694 +
1695 +static inline int in_init_rw(struct module *me, void *loc)
1696 +{
1697 + return (loc >= me->module_init_rw &&
1698 + loc < (me->module_init_rw + me->init_size_rw));
1699 +}
1700 +
1701 static inline int in_init(struct module *me, void *loc)
1702 {
1703 - return (loc >= me->module_init &&
1704 - loc <= (me->module_init + me->init_size));
1705 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1706 +}
1707 +
1708 +static inline int in_core_rx(struct module *me, void *loc)
1709 +{
1710 + return (loc >= me->module_core_rx &&
1711 + loc < (me->module_core_rx + me->core_size_rx));
1712 +}
1713 +
1714 +static inline int in_core_rw(struct module *me, void *loc)
1715 +{
1716 + return (loc >= me->module_core_rw &&
1717 + loc < (me->module_core_rw + me->core_size_rw));
1718 }
1719
1720 static inline int in_core(struct module *me, void *loc)
1721 {
1722 - return (loc >= me->module_core &&
1723 - loc <= (me->module_core + me->core_size));
1724 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1725 }
1726
1727 static inline int in_local(struct module *me, void *loc)
1728 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1729 }
1730
1731 /* align things a bit */
1732 - me->core_size = ALIGN(me->core_size, 16);
1733 - me->arch.got_offset = me->core_size;
1734 - me->core_size += gots * sizeof(struct got_entry);
1735 -
1736 - me->core_size = ALIGN(me->core_size, 16);
1737 - me->arch.fdesc_offset = me->core_size;
1738 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1739 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1740 + me->arch.got_offset = me->core_size_rw;
1741 + me->core_size_rw += gots * sizeof(struct got_entry);
1742 +
1743 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1744 + me->arch.fdesc_offset = me->core_size_rw;
1745 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1746
1747 me->arch.got_max = gots;
1748 me->arch.fdesc_max = fdescs;
1749 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1750
1751 BUG_ON(value == 0);
1752
1753 - got = me->module_core + me->arch.got_offset;
1754 + got = me->module_core_rw + me->arch.got_offset;
1755 for (i = 0; got[i].addr; i++)
1756 if (got[i].addr == value)
1757 goto out;
1758 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1759 #ifdef CONFIG_64BIT
1760 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1761 {
1762 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1763 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1764
1765 if (!value) {
1766 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1767 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1768
1769 /* Create new one */
1770 fdesc->addr = value;
1771 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1772 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1773 return (Elf_Addr)fdesc;
1774 }
1775 #endif /* CONFIG_64BIT */
1776 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1777
1778 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1779 end = table + sechdrs[me->arch.unwind_section].sh_size;
1780 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1781 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1782
1783 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1784 me->arch.unwind_section, table, end, gp);
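
The parisc module loader hunks above split each module into separate read-execute and read-write regions (module_core_rx / module_core_rw and the init equivalents), route the GOT and function descriptors into the RW half, and in passing tighten the containment tests from <= to <, turning them into proper half-open ranges. A standalone sketch of the resulting test; struct mod_layout and in_range() are illustrative names.

#include <stdbool.h>
#include <stdio.h>

struct mod_layout {
        char *core_rx; unsigned long size_rx;   /* code: read + execute */
        char *core_rw; unsigned long size_rw;   /* data: read + write   */
};

static bool in_range(const char *p, const char *base, unsigned long size)
{
        return p >= base && p < base + size;    /* half-open: one past the end is out */
}

static bool in_core(const struct mod_layout *m, const char *p)
{
        return in_range(p, m->core_rx, m->size_rx) ||
               in_range(p, m->core_rw, m->size_rw);
}

int main(void)
{
        static char rx[64], rw[32];
        struct mod_layout m = { rx, sizeof rx, rw, sizeof rw };
        printf("%d %d\n", in_core(&m, rx + 63), in_core(&m, rx + 64));
        return 0;
}
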
1785 diff -urNp linux-2.6.34.1/arch/parisc/kernel/sys_parisc.c linux-2.6.34.1/arch/parisc/kernel/sys_parisc.c
1786 --- linux-2.6.34.1/arch/parisc/kernel/sys_parisc.c 2010-07-05 14:24:10.000000000 -0400
1787 +++ linux-2.6.34.1/arch/parisc/kernel/sys_parisc.c 2010-07-07 09:04:42.000000000 -0400
1788 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792 - addr = TASK_UNMAPPED_BASE;
1793 + addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797 diff -urNp linux-2.6.34.1/arch/parisc/kernel/traps.c linux-2.6.34.1/arch/parisc/kernel/traps.c
1798 --- linux-2.6.34.1/arch/parisc/kernel/traps.c 2010-07-05 14:24:10.000000000 -0400
1799 +++ linux-2.6.34.1/arch/parisc/kernel/traps.c 2010-07-07 09:04:42.000000000 -0400
1800 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1805 - && (vma->vm_flags & VM_EXEC)) {
1806 -
1807 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811 diff -urNp linux-2.6.34.1/arch/parisc/mm/fault.c linux-2.6.34.1/arch/parisc/mm/fault.c
1812 --- linux-2.6.34.1/arch/parisc/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
1813 +++ linux-2.6.34.1/arch/parisc/mm/fault.c 2010-07-07 09:04:42.000000000 -0400
1814 @@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818 +#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826 - if (code == 6 || code == 16)
1827 + if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835 +#ifdef CONFIG_PAX_PAGEEXEC
1836 +/*
1837 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838 + *
1839 + * returns 1 when task should be killed
1840 + * 2 when rt_sigreturn trampoline was detected
1841 + * 3 when unpatched PLT trampoline was detected
1842 + */
1843 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1844 +{
1845 +
1846 +#ifdef CONFIG_PAX_EMUPLT
1847 + int err;
1848 +
1849 + do { /* PaX: unpatched PLT emulation */
1850 + unsigned int bl, depwi;
1851 +
1852 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860 +
1861 + err = get_user(ldw, (unsigned int *)addr);
1862 + err |= get_user(bv, (unsigned int *)(addr+4));
1863 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1864 +
1865 + if (err)
1866 + break;
1867 +
1868 + if (ldw == 0x0E801096U &&
1869 + bv == 0xEAC0C000U &&
1870 + ldw2 == 0x0E881095U)
1871 + {
1872 + unsigned int resolver, map;
1873 +
1874 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876 + if (err)
1877 + break;
1878 +
1879 + regs->gr[20] = instruction_pointer(regs)+8;
1880 + regs->gr[21] = map;
1881 + regs->gr[22] = resolver;
1882 + regs->iaoq[0] = resolver | 3UL;
1883 + regs->iaoq[1] = regs->iaoq[0] + 4;
1884 + return 3;
1885 + }
1886 + }
1887 + } while (0);
1888 +#endif
1889 +
1890 +#ifdef CONFIG_PAX_EMUTRAMP
1891 +
1892 +#ifndef CONFIG_PAX_EMUSIGRT
1893 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894 + return 1;
1895 +#endif
1896 +
1897 + do { /* PaX: rt_sigreturn emulation */
1898 + unsigned int ldi1, ldi2, bel, nop;
1899 +
1900 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904 +
1905 + if (err)
1906 + break;
1907 +
1908 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909 + ldi2 == 0x3414015AU &&
1910 + bel == 0xE4008200U &&
1911 + nop == 0x08000240U)
1912 + {
1913 + regs->gr[25] = (ldi1 & 2) >> 1;
1914 + regs->gr[20] = __NR_rt_sigreturn;
1915 + regs->gr[31] = regs->iaoq[1] + 16;
1916 + regs->sr[0] = regs->iasq[1];
1917 + regs->iaoq[0] = 0x100UL;
1918 + regs->iaoq[1] = regs->iaoq[0] + 4;
1919 + regs->iasq[0] = regs->sr[2];
1920 + regs->iasq[1] = regs->sr[2];
1921 + return 2;
1922 + }
1923 + } while (0);
1924 +#endif
1925 +
1926 + return 1;
1927 +}
1928 +
1929 +void pax_report_insns(void *pc, void *sp)
1930 +{
1931 + unsigned long i;
1932 +
1933 + printk(KERN_ERR "PAX: bytes at PC: ");
1934 + for (i = 0; i < 5; i++) {
1935 + unsigned int c;
1936 + if (get_user(c, (unsigned int *)pc+i))
1937 + printk(KERN_CONT "???????? ");
1938 + else
1939 + printk(KERN_CONT "%08x ", c);
1940 + }
1941 + printk("\n");
1942 +}
1943 +#endif
1944 +
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948 @@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952 - if ((vma->vm_flags & acc_type) != acc_type)
1953 + if ((vma->vm_flags & acc_type) != acc_type) {
1954 +
1955 +#ifdef CONFIG_PAX_PAGEEXEC
1956 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957 + (address & ~3UL) == instruction_pointer(regs))
1958 + {
1959 + up_read(&mm->mmap_sem);
1960 + switch (pax_handle_fetch_fault(regs)) {
1961 +
1962 +#ifdef CONFIG_PAX_EMUPLT
1963 + case 3:
1964 + return;
1965 +#endif
1966 +
1967 +#ifdef CONFIG_PAX_EMUTRAMP
1968 + case 2:
1969 + return;
1970 +#endif
1971 +
1972 + }
1973 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974 + do_group_exit(SIGKILL);
1975 + }
1976 +#endif
1977 +
1978 goto bad_area;
1979 + }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
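
pax_handle_fetch_fault() above follows a fixed pattern: read a few instruction words at the faulting PC with get_user(), compare them against the exact encodings of a known trampoline (the unpatched PLT stub or the rt_sigreturn stub), and either emulate its effect on the register file or fall through to reporting and SIGKILL. The sketch below shows only the matching half, in userspace; the opcode values are placeholders rather than real PA-RISC encodings, and the register emulation is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder signature of a 4-word trampoline we would be willing to emulate. */
static const uint32_t trampoline_sig[4] = { 0x11111111u, 0x22222222u,
                                            0x33333333u, 0x44444444u };

/* 0 = no match, 1 = matched (caller would then emulate instead of killing). */
static int match_trampoline(const uint32_t *pc)
{
        uint32_t insn[4];
        /* In the kernel this is a series of get_user() calls that may fault. */
        memcpy(insn, pc, sizeof insn);
        return memcmp(insn, trampoline_sig, sizeof insn) == 0;
}

int main(void)
{
        uint32_t code[4] = { 0x11111111u, 0x22222222u, 0x33333333u, 0x44444444u };
        printf("matched: %d\n", match_trampoline(code));
        return 0;
}
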
1983 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/device.h linux-2.6.34.1/arch/powerpc/include/asm/device.h
1984 --- linux-2.6.34.1/arch/powerpc/include/asm/device.h 2010-07-05 14:24:10.000000000 -0400
1985 +++ linux-2.6.34.1/arch/powerpc/include/asm/device.h 2010-07-07 09:04:42.000000000 -0400
1986 @@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990 - struct dma_map_ops *dma_ops;
1991 + const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/dma-mapping.h linux-2.6.34.1/arch/powerpc/include/asm/dma-mapping.h
1996 --- linux-2.6.34.1/arch/powerpc/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
1997 +++ linux-2.6.34.1/arch/powerpc/include/asm/dma-mapping.h 2010-07-07 09:04:42.000000000 -0400
1998 @@ -67,11 +67,11 @@ static inline unsigned long device_to_ma
1999 * Available generic sets of operations
2000 */
2001 #ifdef CONFIG_PPC64
2002 -extern struct dma_map_ops dma_iommu_ops;
2003 +extern const struct dma_map_ops dma_iommu_ops;
2004 #endif
2005 -extern struct dma_map_ops dma_direct_ops;
2006 +extern const struct dma_map_ops dma_direct_ops;
2007
2008 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2009 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2010 {
2011 /* We don't handle the NULL dev case for ISA for now. We could
2012 * do it via an out of line call but it is not needed for now. The
2013 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2014 return dev->archdata.dma_ops;
2015 }
2016
2017 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2018 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2019 {
2020 dev->archdata.dma_ops = ops;
2021 }
2022 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2023
2024 static inline int dma_supported(struct device *dev, u64 mask)
2025 {
2026 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2027 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2028
2029 if (unlikely(dma_ops == NULL))
2030 return 0;
2031 @@ -129,7 +129,7 @@ static inline int dma_supported(struct d
2032
2033 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2034 {
2035 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2036 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2037
2038 if (unlikely(dma_ops == NULL))
2039 return -EIO;
2040 @@ -144,7 +144,7 @@ static inline int dma_set_mask(struct de
2041 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2042 dma_addr_t *dma_handle, gfp_t flag)
2043 {
2044 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2045 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2046 void *cpu_addr;
2047
2048 BUG_ON(!dma_ops);
2049 @@ -159,7 +159,7 @@ static inline void *dma_alloc_coherent(s
2050 static inline void dma_free_coherent(struct device *dev, size_t size,
2051 void *cpu_addr, dma_addr_t dma_handle)
2052 {
2053 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2054 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2055
2056 BUG_ON(!dma_ops);
2057
2058 @@ -170,7 +170,7 @@ static inline void dma_free_coherent(str
2059
2060 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2061 {
2062 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2063 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2064
2065 if (dma_ops->mapping_error)
2066 return dma_ops->mapping_error(dev, dma_addr);
2067 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/elf.h linux-2.6.34.1/arch/powerpc/include/asm/elf.h
2068 --- linux-2.6.34.1/arch/powerpc/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
2069 +++ linux-2.6.34.1/arch/powerpc/include/asm/elf.h 2010-07-07 09:04:42.000000000 -0400
2070 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2071 the loader. We need to make sure that it is out of the way of the program
2072 that it will "exec", and that there is sufficient room for the brk. */
2073
2074 -extern unsigned long randomize_et_dyn(unsigned long base);
2075 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2076 +#define ELF_ET_DYN_BASE (0x20000000)
2077 +
2078 +#ifdef CONFIG_PAX_ASLR
2079 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2080 +
2081 +#ifdef __powerpc64__
2082 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2083 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2084 +#else
2085 +#define PAX_DELTA_MMAP_LEN 15
2086 +#define PAX_DELTA_STACK_LEN 15
2087 +#endif
2088 +#endif
2089
2090 /*
2091 * Our registers are always unsigned longs, whether we're a 32 bit
2092 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
2093 (0x7ff >> (PAGE_SHIFT - 12)) : \
2094 (0x3ffff >> (PAGE_SHIFT - 12)))
2095
2096 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2097 -#define arch_randomize_brk arch_randomize_brk
2098 -
2099 #endif /* __KERNEL__ */
2100
2101 /*
2102 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/iommu.h linux-2.6.34.1/arch/powerpc/include/asm/iommu.h
2103 --- linux-2.6.34.1/arch/powerpc/include/asm/iommu.h 2010-07-05 14:24:10.000000000 -0400
2104 +++ linux-2.6.34.1/arch/powerpc/include/asm/iommu.h 2010-07-07 09:04:42.000000000 -0400
2105 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2106 extern void iommu_init_early_dart(void);
2107 extern void iommu_init_early_pasemi(void);
2108
2109 +/* dma-iommu.c */
2110 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2111 +
2112 #ifdef CONFIG_PCI
2113 extern void pci_iommu_init(void);
2114 extern void pci_direct_iommu_init(void);
2115 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/kmap_types.h linux-2.6.34.1/arch/powerpc/include/asm/kmap_types.h
2116 --- linux-2.6.34.1/arch/powerpc/include/asm/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
2117 +++ linux-2.6.34.1/arch/powerpc/include/asm/kmap_types.h 2010-07-07 09:04:42.000000000 -0400
2118 @@ -26,6 +26,7 @@ enum km_type {
2119 KM_SOFTIRQ1,
2120 KM_PPC_SYNC_PAGE,
2121 KM_PPC_SYNC_ICACHE,
2122 + KM_CLEARPAGE,
2123 KM_TYPE_NR
2124 };
2125
2126 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/page.h linux-2.6.34.1/arch/powerpc/include/asm/page.h
2127 --- linux-2.6.34.1/arch/powerpc/include/asm/page.h 2010-07-05 14:24:10.000000000 -0400
2128 +++ linux-2.6.34.1/arch/powerpc/include/asm/page.h 2010-07-07 09:04:42.000000000 -0400
2129 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2130 * and needs to be executable. This means the whole heap ends
2131 * up being executable.
2132 */
2133 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2134 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135 +#define VM_DATA_DEFAULT_FLAGS32 \
2136 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2137 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2140 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2141 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2142 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2143 #endif
2144
2145 +#define ktla_ktva(addr) (addr)
2146 +#define ktva_ktla(addr) (addr)
2147 +
2148 #ifndef __ASSEMBLY__
2149
2150 #undef STRICT_MM_TYPECHECKS
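
The page.h hunk above stops handing out VM_EXEC on every 32-bit data mapping; executable data now has to be requested through the READ_IMPLIES_EXEC personality bit. A tiny sketch of the same conditional flag construction; the flag values follow the usual kernel definitions but are defined locally only to keep the example self-contained.

#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define READ_IMPLIES_EXEC 0x0400000u   /* personality bit */

static unsigned int data_default_flags(unsigned int personality)
{
        return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
               VM_READ | VM_WRITE;
}

int main(void)
{
        printf("plain: %#x  read-implies-exec: %#x\n",
               data_default_flags(0), data_default_flags(READ_IMPLIES_EXEC));
        return 0;
}
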
2151 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/page_64.h linux-2.6.34.1/arch/powerpc/include/asm/page_64.h
2152 --- linux-2.6.34.1/arch/powerpc/include/asm/page_64.h 2010-07-05 14:24:10.000000000 -0400
2153 +++ linux-2.6.34.1/arch/powerpc/include/asm/page_64.h 2010-07-07 09:04:42.000000000 -0400
2154 @@ -180,15 +180,18 @@ do { \
2155 * stack by default, so in the absense of a PT_GNU_STACK program header
2156 * we turn execute permission off.
2157 */
2158 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2159 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2160 +#define VM_STACK_DEFAULT_FLAGS32 \
2161 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2162 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2163
2164 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2165 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2166
2167 +#ifndef CONFIG_PAX_PAGEEXEC
2168 #define VM_STACK_DEFAULT_FLAGS \
2169 (test_thread_flag(TIF_32BIT) ? \
2170 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2171 +#endif
2172
2173 #include <asm-generic/getorder.h>
2174
2175 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/pci.h linux-2.6.34.1/arch/powerpc/include/asm/pci.h
2176 --- linux-2.6.34.1/arch/powerpc/include/asm/pci.h 2010-07-05 14:24:10.000000000 -0400
2177 +++ linux-2.6.34.1/arch/powerpc/include/asm/pci.h 2010-07-07 09:04:42.000000000 -0400
2178 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2179 }
2180
2181 #ifdef CONFIG_PCI
2182 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2183 -extern struct dma_map_ops *get_pci_dma_ops(void);
2184 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2185 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2186 #else /* CONFIG_PCI */
2187 #define set_pci_dma_ops(d)
2188 #define get_pci_dma_ops() NULL
2189 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/pte-hash32.h linux-2.6.34.1/arch/powerpc/include/asm/pte-hash32.h
2190 --- linux-2.6.34.1/arch/powerpc/include/asm/pte-hash32.h 2010-07-05 14:24:10.000000000 -0400
2191 +++ linux-2.6.34.1/arch/powerpc/include/asm/pte-hash32.h 2010-07-07 09:04:42.000000000 -0400
2192 @@ -21,6 +21,7 @@
2193 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2194 #define _PAGE_USER 0x004 /* usermode access allowed */
2195 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2196 +#define _PAGE_EXEC _PAGE_GUARDED
2197 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2198 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2199 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2200 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/reg.h linux-2.6.34.1/arch/powerpc/include/asm/reg.h
2201 --- linux-2.6.34.1/arch/powerpc/include/asm/reg.h 2010-07-05 14:24:10.000000000 -0400
2202 +++ linux-2.6.34.1/arch/powerpc/include/asm/reg.h 2010-07-07 09:04:42.000000000 -0400
2203 @@ -191,6 +191,7 @@
2204 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2205 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2206 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2207 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2208 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2209 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2210 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2211 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/swiotlb.h linux-2.6.34.1/arch/powerpc/include/asm/swiotlb.h
2212 --- linux-2.6.34.1/arch/powerpc/include/asm/swiotlb.h 2010-07-05 14:24:10.000000000 -0400
2213 +++ linux-2.6.34.1/arch/powerpc/include/asm/swiotlb.h 2010-07-07 09:04:42.000000000 -0400
2214 @@ -13,7 +13,7 @@
2215
2216 #include <linux/swiotlb.h>
2217
2218 -extern struct dma_map_ops swiotlb_dma_ops;
2219 +extern const struct dma_map_ops swiotlb_dma_ops;
2220
2221 static inline void dma_mark_clean(void *addr, size_t size) {}
2222
2223 diff -urNp linux-2.6.34.1/arch/powerpc/include/asm/uaccess.h linux-2.6.34.1/arch/powerpc/include/asm/uaccess.h
2224 --- linux-2.6.34.1/arch/powerpc/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
2225 +++ linux-2.6.34.1/arch/powerpc/include/asm/uaccess.h 2010-07-07 09:04:42.000000000 -0400
2226 @@ -13,6 +13,8 @@
2227 #define VERIFY_READ 0
2228 #define VERIFY_WRITE 1
2229
2230 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2231 +
2232 /*
2233 * The fs value determines whether argument validity checking should be
2234 * performed or not. If get_fs() == USER_DS, checking is performed, with
2235 @@ -327,52 +329,6 @@ do { \
2236 extern unsigned long __copy_tofrom_user(void __user *to,
2237 const void __user *from, unsigned long size);
2238
2239 -#ifndef __powerpc64__
2240 -
2241 -static inline unsigned long copy_from_user(void *to,
2242 - const void __user *from, unsigned long n)
2243 -{
2244 - unsigned long over;
2245 -
2246 - if (access_ok(VERIFY_READ, from, n))
2247 - return __copy_tofrom_user((__force void __user *)to, from, n);
2248 - if ((unsigned long)from < TASK_SIZE) {
2249 - over = (unsigned long)from + n - TASK_SIZE;
2250 - return __copy_tofrom_user((__force void __user *)to, from,
2251 - n - over) + over;
2252 - }
2253 - return n;
2254 -}
2255 -
2256 -static inline unsigned long copy_to_user(void __user *to,
2257 - const void *from, unsigned long n)
2258 -{
2259 - unsigned long over;
2260 -
2261 - if (access_ok(VERIFY_WRITE, to, n))
2262 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2263 - if ((unsigned long)to < TASK_SIZE) {
2264 - over = (unsigned long)to + n - TASK_SIZE;
2265 - return __copy_tofrom_user(to, (__force void __user *)from,
2266 - n - over) + over;
2267 - }
2268 - return n;
2269 -}
2270 -
2271 -#else /* __powerpc64__ */
2272 -
2273 -#define __copy_in_user(to, from, size) \
2274 - __copy_tofrom_user((to), (from), (size))
2275 -
2276 -extern unsigned long copy_from_user(void *to, const void __user *from,
2277 - unsigned long n);
2278 -extern unsigned long copy_to_user(void __user *to, const void *from,
2279 - unsigned long n);
2280 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2281 - unsigned long n);
2282 -
2283 -#endif /* __powerpc64__ */
2284 -
2285 static inline unsigned long __copy_from_user_inatomic(void *to,
2286 const void __user *from, unsigned long n)
2287 {
2288 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2289 if (ret == 0)
2290 return 0;
2291 }
2292 +
2293 + if (!__builtin_constant_p(n))
2294 + check_object_size(to, n, false);
2295 +
2296 return __copy_tofrom_user((__force void __user *)to, from, n);
2297 }
2298
2299 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2300 if (ret == 0)
2301 return 0;
2302 }
2303 +
2304 + if (!__builtin_constant_p(n))
2305 + check_object_size(from, n, true);
2306 +
2307 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2308 }
2309
2310 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2311 return __copy_to_user_inatomic(to, from, size);
2312 }
2313
2314 +#ifndef __powerpc64__
2315 +
2316 +static inline unsigned long __must_check copy_from_user(void *to,
2317 + const void __user *from, unsigned long n)
2318 +{
2319 + unsigned long over;
2320 +
2321 + if ((long)n < 0)
2322 + return n;
2323 +
2324 + if (access_ok(VERIFY_READ, from, n)) {
2325 + if (!__builtin_constant_p(n))
2326 + check_object_size(to, n, false);
2327 + return __copy_tofrom_user((__force void __user *)to, from, n);
2328 + }
2329 + if ((unsigned long)from < TASK_SIZE) {
2330 + over = (unsigned long)from + n - TASK_SIZE;
2331 + if (!__builtin_constant_p(n - over))
2332 + check_object_size(to, n - over, false);
2333 + return __copy_tofrom_user((__force void __user *)to, from,
2334 + n - over) + over;
2335 + }
2336 + return n;
2337 +}
2338 +
2339 +static inline unsigned long __must_check copy_to_user(void __user *to,
2340 + const void *from, unsigned long n)
2341 +{
2342 + unsigned long over;
2343 +
2344 + if ((long)n < 0)
2345 + return n;
2346 +
2347 + if (access_ok(VERIFY_WRITE, to, n)) {
2348 + if (!__builtin_constant_p(n))
2349 + check_object_size(from, n, true);
2350 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2351 + }
2352 + if ((unsigned long)to < TASK_SIZE) {
2353 + over = (unsigned long)to + n - TASK_SIZE;
2354 + if (!__builtin_constant_p(n))
2355 + check_object_size(from, n - over, true);
2356 + return __copy_tofrom_user(to, (__force void __user *)from,
2357 + n - over) + over;
2358 + }
2359 + return n;
2360 +}
2361 +
2362 +#else /* __powerpc64__ */
2363 +
2364 +#define __copy_in_user(to, from, size) \
2365 + __copy_tofrom_user((to), (from), (size))
2366 +
2367 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2368 +{
2369 + if ((long)n < 0 || n > INT_MAX)
2370 + return n;
2371 +
2372 + if (!__builtin_constant_p(n))
2373 + check_object_size(to, n, false);
2374 +
2375 + if (likely(access_ok(VERIFY_READ, from, n)))
2376 + n = __copy_from_user(to, from, n);
2377 + else
2378 + memset(to, 0, n);
2379 + return n;
2380 +}
2381 +
2382 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2383 +{
2384 + if ((long)n < 0 || n > INT_MAX)
2385 + return n;
2386 +
2387 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2388 + if (!__builtin_constant_p(n))
2389 + check_object_size(from, n, true);
2390 + n = __copy_to_user(to, from, n);
2391 + }
2392 + return n;
2393 +}
2394 +
2395 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2396 + unsigned long n);
2397 +
2398 +#endif /* __powerpc64__ */
2399 +
2400 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2401
2402 static inline unsigned long clear_user(void __user *addr, unsigned long size)
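
The uaccess.h rework above pulls powerpc's copy_from_user()/copy_to_user() into the header and hardens them: a size that has gone negative after arithmetic ((long)n < 0, and on 64-bit anything above INT_MAX) is rejected outright, and for non-constant sizes check_object_size() is called so a copy that would overrun a kernel object is caught before any bytes move. A userspace sketch of the same guard ordering; checked_copy() and object_fits() are hypothetical stand-ins, since the real check_object_size() inspects kernel object metadata with no userspace equivalent.

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for check_object_size(): verify the copy stays inside dst. */
static int object_fits(size_t dst_size, unsigned long n)
{
        return n <= dst_size;
}

/* Returns the number of bytes NOT copied, like copy_from_user(). */
static unsigned long checked_copy(void *dst, size_t dst_size,
                                  const void *src, unsigned long n)
{
        if ((long)n < 0 || n > INT_MAX)   /* underflowed or absurd length */
                return n;
        if (!object_fits(dst_size, n))    /* would overflow the destination */
                return n;
        memcpy(dst, src, n);
        return 0;
}

int main(void)
{
        char dst[8];
        const char src[16] = "0123456789abcdef";
        printf("ok: %lu  too big: %lu  underflow: %lu\n",
               checked_copy(dst, sizeof dst, src, 8),
               checked_copy(dst, sizeof dst, src, 16),
               checked_copy(dst, sizeof dst, src, (unsigned long)-4));
        return 0;
}
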
2403 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/dma-iommu.c linux-2.6.34.1/arch/powerpc/kernel/dma-iommu.c
2404 --- linux-2.6.34.1/arch/powerpc/kernel/dma-iommu.c 2010-07-05 14:24:10.000000000 -0400
2405 +++ linux-2.6.34.1/arch/powerpc/kernel/dma-iommu.c 2010-07-07 09:04:42.000000000 -0400
2406 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2407 }
2408
2409 /* We support DMA to/from any memory page via the iommu */
2410 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2411 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2412 {
2413 struct iommu_table *tbl = get_iommu_table_base(dev);
2414
2415 @@ -89,7 +89,7 @@ static int dma_iommu_dma_supported(struc
2416 return 1;
2417 }
2418
2419 -struct dma_map_ops dma_iommu_ops = {
2420 +const struct dma_map_ops dma_iommu_ops = {
2421 .alloc_coherent = dma_iommu_alloc_coherent,
2422 .free_coherent = dma_iommu_free_coherent,
2423 .map_sg = dma_iommu_map_sg,
2424 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.34.1/arch/powerpc/kernel/dma-swiotlb.c
2425 --- linux-2.6.34.1/arch/powerpc/kernel/dma-swiotlb.c 2010-07-05 14:24:10.000000000 -0400
2426 +++ linux-2.6.34.1/arch/powerpc/kernel/dma-swiotlb.c 2010-07-07 09:04:42.000000000 -0400
2427 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2428 * map_page, and unmap_page on highmem, use normal dma_ops
2429 * for everything else.
2430 */
2431 -struct dma_map_ops swiotlb_dma_ops = {
2432 +const struct dma_map_ops swiotlb_dma_ops = {
2433 .alloc_coherent = dma_direct_alloc_coherent,
2434 .free_coherent = dma_direct_free_coherent,
2435 .map_sg = swiotlb_map_sg_attrs,
2436 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/dma.c linux-2.6.34.1/arch/powerpc/kernel/dma.c
2437 --- linux-2.6.34.1/arch/powerpc/kernel/dma.c 2010-07-05 14:24:10.000000000 -0400
2438 +++ linux-2.6.34.1/arch/powerpc/kernel/dma.c 2010-07-07 09:04:42.000000000 -0400
2439 @@ -135,7 +135,7 @@ static inline void dma_direct_sync_singl
2440 }
2441 #endif
2442
2443 -struct dma_map_ops dma_direct_ops = {
2444 +const struct dma_map_ops dma_direct_ops = {
2445 .alloc_coherent = dma_direct_alloc_coherent,
2446 .free_coherent = dma_direct_free_coherent,
2447 .map_sg = dma_direct_map_sg,
2448 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/exceptions-64e.S linux-2.6.34.1/arch/powerpc/kernel/exceptions-64e.S
2449 --- linux-2.6.34.1/arch/powerpc/kernel/exceptions-64e.S 2010-07-05 14:24:10.000000000 -0400
2450 +++ linux-2.6.34.1/arch/powerpc/kernel/exceptions-64e.S 2010-07-07 09:04:42.000000000 -0400
2451 @@ -455,6 +455,7 @@ storage_fault_common:
2452 std r14,_DAR(r1)
2453 std r15,_DSISR(r1)
2454 addi r3,r1,STACK_FRAME_OVERHEAD
2455 + bl .save_nvgprs
2456 mr r4,r14
2457 mr r5,r15
2458 ld r14,PACA_EXGEN+EX_R14(r13)
2459 @@ -464,8 +465,7 @@ storage_fault_common:
2460 cmpdi r3,0
2461 bne- 1f
2462 b .ret_from_except_lite
2463 -1: bl .save_nvgprs
2464 - mr r5,r3
2465 +1: mr r5,r3
2466 addi r3,r1,STACK_FRAME_OVERHEAD
2467 ld r4,_DAR(r1)
2468 bl .bad_page_fault
2469 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/exceptions-64s.S linux-2.6.34.1/arch/powerpc/kernel/exceptions-64s.S
2470 --- linux-2.6.34.1/arch/powerpc/kernel/exceptions-64s.S 2010-07-05 14:24:10.000000000 -0400
2471 +++ linux-2.6.34.1/arch/powerpc/kernel/exceptions-64s.S 2010-07-07 09:04:42.000000000 -0400
2472 @@ -829,10 +829,10 @@ handle_page_fault:
2473 11: ld r4,_DAR(r1)
2474 ld r5,_DSISR(r1)
2475 addi r3,r1,STACK_FRAME_OVERHEAD
2476 + bl .save_nvgprs
2477 bl .do_page_fault
2478 cmpdi r3,0
2479 beq+ 13f
2480 - bl .save_nvgprs
2481 mr r5,r3
2482 addi r3,r1,STACK_FRAME_OVERHEAD
2483 lwz r4,_DAR(r1)
2484 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/ibmebus.c linux-2.6.34.1/arch/powerpc/kernel/ibmebus.c
2485 --- linux-2.6.34.1/arch/powerpc/kernel/ibmebus.c 2010-07-05 14:24:10.000000000 -0400
2486 +++ linux-2.6.34.1/arch/powerpc/kernel/ibmebus.c 2010-07-07 09:04:42.000000000 -0400
2487 @@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct
2488 return 1;
2489 }
2490
2491 -static struct dma_map_ops ibmebus_dma_ops = {
2492 +static const struct dma_map_ops ibmebus_dma_ops = {
2493 .alloc_coherent = ibmebus_alloc_coherent,
2494 .free_coherent = ibmebus_free_coherent,
2495 .map_sg = ibmebus_map_sg,
2496 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/kgdb.c linux-2.6.34.1/arch/powerpc/kernel/kgdb.c
2497 --- linux-2.6.34.1/arch/powerpc/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
2498 +++ linux-2.6.34.1/arch/powerpc/kernel/kgdb.c 2010-07-07 09:04:42.000000000 -0400
2499 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2500 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2501 return 0;
2502
2503 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2504 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2505 regs->nip += 4;
2506
2507 return 1;
2508 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2509 /*
2510 * Global data
2511 */
2512 -struct kgdb_arch arch_kgdb_ops = {
2513 +const struct kgdb_arch arch_kgdb_ops = {
2514 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2515 };
2516
2517 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/module.c linux-2.6.34.1/arch/powerpc/kernel/module.c
2518 --- linux-2.6.34.1/arch/powerpc/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
2519 +++ linux-2.6.34.1/arch/powerpc/kernel/module.c 2010-07-07 09:04:42.000000000 -0400
2520 @@ -31,11 +31,24 @@
2521
2522 LIST_HEAD(module_bug_list);
2523
2524 +#ifdef CONFIG_PAX_KERNEXEC
2525 void *module_alloc(unsigned long size)
2526 {
2527 if (size == 0)
2528 return NULL;
2529
2530 + return vmalloc(size);
2531 +}
2532 +
2533 +void *module_alloc_exec(unsigned long size)
2534 +#else
2535 +void *module_alloc(unsigned long size)
2536 +#endif
2537 +
2538 +{
2539 + if (size == 0)
2540 + return NULL;
2541 +
2542 return vmalloc_exec(size);
2543 }
2544
2545 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2546 vfree(module_region);
2547 }
2548
2549 +#ifdef CONFIG_PAX_KERNEXEC
2550 +void module_free_exec(struct module *mod, void *module_region)
2551 +{
2552 + module_free(mod, module_region);
2553 +}
2554 +#endif
2555 +
2556 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2557 const Elf_Shdr *sechdrs,
2558 const char *name)
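
Under CONFIG_PAX_KERNEXEC the module.c hunk above gives the powerpc module loader two allocators: module_alloc() now returns plain writable memory (vmalloc) for module data, while module_alloc_exec() returns executable memory (vmalloc_exec) for code, so no single allocation is writable and executable at the same time. A minimal userspace sketch of the same W^X split using mmap(); actually loading code into the RX region would need a separate write-then-mprotect step that is not shown here.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

static void *alloc_rw(size_t size)     /* data: writable, never executable */
{
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *alloc_rx(size_t size)     /* code: executable, never writable */
{
        return mmap(NULL, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
        void *data = alloc_rw(4096), *text = alloc_rx(4096);
        printf("rw=%p rx=%p\n", data, text);
        return 0;
}
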
2559 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/module_32.c linux-2.6.34.1/arch/powerpc/kernel/module_32.c
2560 --- linux-2.6.34.1/arch/powerpc/kernel/module_32.c 2010-07-05 14:24:10.000000000 -0400
2561 +++ linux-2.6.34.1/arch/powerpc/kernel/module_32.c 2010-07-07 09:04:42.000000000 -0400
2562 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2563 me->arch.core_plt_section = i;
2564 }
2565 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2566 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2567 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2568 return -ENOEXEC;
2569 }
2570
2571 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2572
2573 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2574 /* Init, or core PLT? */
2575 - if (location >= mod->module_core
2576 - && location < mod->module_core + mod->core_size)
2577 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2578 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2579 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2580 - else
2581 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2582 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2583 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2584 + else {
2585 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2586 + return ~0UL;
2587 + }
2588
2589 /* Find this entry, or if that fails, the next avail. entry */
2590 while (entry->jump[0]) {
2591 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/pci-common.c linux-2.6.34.1/arch/powerpc/kernel/pci-common.c
2592 --- linux-2.6.34.1/arch/powerpc/kernel/pci-common.c 2010-07-05 14:24:10.000000000 -0400
2593 +++ linux-2.6.34.1/arch/powerpc/kernel/pci-common.c 2010-07-07 09:04:43.000000000 -0400
2594 @@ -51,14 +51,14 @@ resource_size_t isa_mem_base;
2595 unsigned int ppc_pci_flags = 0;
2596
2597
2598 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2599 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2600
2601 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2602 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2603 {
2604 pci_dma_ops = dma_ops;
2605 }
2606
2607 -struct dma_map_ops *get_pci_dma_ops(void)
2608 +const struct dma_map_ops *get_pci_dma_ops(void)
2609 {
2610 return pci_dma_ops;
2611 }
2612 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/process.c linux-2.6.34.1/arch/powerpc/kernel/process.c
2613 --- linux-2.6.34.1/arch/powerpc/kernel/process.c 2010-07-05 14:24:10.000000000 -0400
2614 +++ linux-2.6.34.1/arch/powerpc/kernel/process.c 2010-07-07 09:04:43.000000000 -0400
2615 @@ -1217,51 +1217,3 @@ unsigned long arch_align_stack(unsigned
2616 sp -= get_random_int() & ~PAGE_MASK;
2617 return sp & ~0xf;
2618 }
2619 -
2620 -static inline unsigned long brk_rnd(void)
2621 -{
2622 - unsigned long rnd = 0;
2623 -
2624 - /* 8MB for 32bit, 1GB for 64bit */
2625 - if (is_32bit_task())
2626 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2627 - else
2628 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2629 -
2630 - return rnd << PAGE_SHIFT;
2631 -}
2632 -
2633 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2634 -{
2635 - unsigned long base = mm->brk;
2636 - unsigned long ret;
2637 -
2638 -#ifdef CONFIG_PPC_STD_MMU_64
2639 - /*
2640 - * If we are using 1TB segments and we are allowed to randomise
2641 - * the heap, we can put it above 1TB so it is backed by a 1TB
2642 - * segment. Otherwise the heap will be in the bottom 1TB
2643 - * which always uses 256MB segments and this may result in a
2644 - * performance penalty.
2645 - */
2646 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2647 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2648 -#endif
2649 -
2650 - ret = PAGE_ALIGN(base + brk_rnd());
2651 -
2652 - if (ret < mm->brk)
2653 - return mm->brk;
2654 -
2655 - return ret;
2656 -}
2657 -
2658 -unsigned long randomize_et_dyn(unsigned long base)
2659 -{
2660 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2661 -
2662 - if (ret < base)
2663 - return base;
2664 -
2665 - return ret;
2666 -}
2667 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/signal_32.c linux-2.6.34.1/arch/powerpc/kernel/signal_32.c
2668 --- linux-2.6.34.1/arch/powerpc/kernel/signal_32.c 2010-07-05 14:24:10.000000000 -0400
2669 +++ linux-2.6.34.1/arch/powerpc/kernel/signal_32.c 2010-07-07 09:04:43.000000000 -0400
2670 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2671 /* Save user registers on the stack */
2672 frame = &rt_sf->uc.uc_mcontext;
2673 addr = frame;
2674 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2675 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2676 if (save_user_regs(regs, frame, 0, 1))
2677 goto badframe;
2678 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2679 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/signal_64.c linux-2.6.34.1/arch/powerpc/kernel/signal_64.c
2680 --- linux-2.6.34.1/arch/powerpc/kernel/signal_64.c 2010-07-05 14:24:10.000000000 -0400
2681 +++ linux-2.6.34.1/arch/powerpc/kernel/signal_64.c 2010-07-07 09:04:43.000000000 -0400
2682 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2683 current->thread.fpscr.val = 0;
2684
2685 /* Set up to return from userspace. */
2686 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2687 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2688 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2689 } else {
2690 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2691 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/vdso.c linux-2.6.34.1/arch/powerpc/kernel/vdso.c
2692 --- linux-2.6.34.1/arch/powerpc/kernel/vdso.c 2010-07-05 14:24:10.000000000 -0400
2693 +++ linux-2.6.34.1/arch/powerpc/kernel/vdso.c 2010-07-07 09:04:43.000000000 -0400
2694 @@ -36,6 +36,7 @@
2695 #include <asm/firmware.h>
2696 #include <asm/vdso.h>
2697 #include <asm/vdso_datapage.h>
2698 +#include <asm/mman.h>
2699
2700 #include "setup.h"
2701
2702 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2703 vdso_base = VDSO32_MBASE;
2704 #endif
2705
2706 - current->mm->context.vdso_base = 0;
2707 + current->mm->context.vdso_base = ~0UL;
2708
2709 /* vDSO has a problem and was disabled, just don't "enable" it for the
2710 * process
2711 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2712 vdso_base = get_unmapped_area(NULL, vdso_base,
2713 (vdso_pages << PAGE_SHIFT) +
2714 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2715 - 0, 0);
2716 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2717 if (IS_ERR_VALUE(vdso_base)) {
2718 rc = vdso_base;
2719 goto fail_mmapsem;
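
The vdso.c and signal_32/64.c hunks change the "no vDSO" marker from 0 to ~0UL and compare against that instead, presumably because once the base comes out of get_unmapped_area() (now requested MAP_PRIVATE | MAP_EXECUTABLE) a zero value is no longer unambiguous, whereas ~0UL can never be a page-aligned mapping base. A tiny sketch of the sentinel idiom; VDSO_UNMAPPED and vdso_available() are illustrative names.

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)   /* never a valid page-aligned mapping base */

static unsigned long vdso_base = VDSO_UNMAPPED;

static int vdso_available(void)
{
        return vdso_base != VDSO_UNMAPPED;   /* works even if a real base were 0 */
}

int main(void)
{
        printf("%d ", vdso_available());
        vdso_base = 0x7fff0000UL;            /* pretend a mapping was set up */
        printf("%d\n", vdso_available());
        return 0;
}
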
2720 diff -urNp linux-2.6.34.1/arch/powerpc/kernel/vio.c linux-2.6.34.1/arch/powerpc/kernel/vio.c
2721 --- linux-2.6.34.1/arch/powerpc/kernel/vio.c 2010-07-05 14:24:10.000000000 -0400
2722 +++ linux-2.6.34.1/arch/powerpc/kernel/vio.c 2010-07-07 09:04:43.000000000 -0400
2723 @@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struc
2724 vio_cmo_dealloc(viodev, alloc_size);
2725 }
2726
2727 -struct dma_map_ops vio_dma_mapping_ops = {
2728 +static const struct dma_map_ops vio_dma_mapping_ops = {
2729 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2730 .free_coherent = vio_dma_iommu_free_coherent,
2731 .map_sg = vio_dma_iommu_map_sg,
2732 .unmap_sg = vio_dma_iommu_unmap_sg,
2733 + .dma_supported = dma_iommu_dma_supported,
2734 .map_page = vio_dma_iommu_map_page,
2735 .unmap_page = vio_dma_iommu_unmap_page,
2736
2737 @@ -858,7 +859,6 @@ static void vio_cmo_bus_remove(struct vi
2738
2739 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2740 {
2741 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2742 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2743 }
2744
2745 diff -urNp linux-2.6.34.1/arch/powerpc/lib/usercopy_64.c linux-2.6.34.1/arch/powerpc/lib/usercopy_64.c
2746 --- linux-2.6.34.1/arch/powerpc/lib/usercopy_64.c 2010-07-05 14:24:10.000000000 -0400
2747 +++ linux-2.6.34.1/arch/powerpc/lib/usercopy_64.c 2010-07-07 09:04:43.000000000 -0400
2748 @@ -9,22 +9,6 @@
2749 #include <linux/module.h>
2750 #include <asm/uaccess.h>
2751
2752 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2753 -{
2754 - if (likely(access_ok(VERIFY_READ, from, n)))
2755 - n = __copy_from_user(to, from, n);
2756 - else
2757 - memset(to, 0, n);
2758 - return n;
2759 -}
2760 -
2761 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2762 -{
2763 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2764 - n = __copy_to_user(to, from, n);
2765 - return n;
2766 -}
2767 -
2768 unsigned long copy_in_user(void __user *to, const void __user *from,
2769 unsigned long n)
2770 {
2771 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2772 return n;
2773 }
2774
2775 -EXPORT_SYMBOL(copy_from_user);
2776 -EXPORT_SYMBOL(copy_to_user);
2777 EXPORT_SYMBOL(copy_in_user);
2778
2779 diff -urNp linux-2.6.34.1/arch/powerpc/mm/fault.c linux-2.6.34.1/arch/powerpc/mm/fault.c
2780 --- linux-2.6.34.1/arch/powerpc/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
2781 +++ linux-2.6.34.1/arch/powerpc/mm/fault.c 2010-07-07 09:04:43.000000000 -0400
2782 @@ -30,6 +30,10 @@
2783 #include <linux/kprobes.h>
2784 #include <linux/kdebug.h>
2785 #include <linux/perf_event.h>
2786 +#include <linux/slab.h>
2787 +#include <linux/pagemap.h>
2788 +#include <linux/compiler.h>
2789 +#include <linux/unistd.h>
2790
2791 #include <asm/firmware.h>
2792 #include <asm/page.h>
2793 @@ -41,6 +45,7 @@
2794 #include <asm/tlbflush.h>
2795 #include <asm/siginfo.h>
2796 #include <mm/mmu_decl.h>
2797 +#include <asm/ptrace.h>
2798
2799 #ifdef CONFIG_KPROBES
2800 static inline int notify_page_fault(struct pt_regs *regs)
2801 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2802 }
2803 #endif
2804
2805 +#ifdef CONFIG_PAX_PAGEEXEC
2806 +/*
2807 + * PaX: decide what to do with offenders (regs->nip = fault address)
2808 + *
2809 + * returns 1 when task should be killed
2810 + */
2811 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2812 +{
2813 + return 1;
2814 +}
2815 +
2816 +void pax_report_insns(void *pc, void *sp)
2817 +{
2818 + unsigned long i;
2819 +
2820 + printk(KERN_ERR "PAX: bytes at PC: ");
2821 + for (i = 0; i < 5; i++) {
2822 + unsigned int c;
2823 + if (get_user(c, (unsigned int __user *)pc+i))
2824 + printk(KERN_CONT "???????? ");
2825 + else
2826 + printk(KERN_CONT "%08x ", c);
2827 + }
2828 + printk("\n");
2829 +}
2830 +#endif
2831 +
2832 /*
2833 * Check whether the instruction at regs->nip is a store using
2834 * an update addressing form which will update r1.
2835 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2836 * indicate errors in DSISR but can validly be set in SRR1.
2837 */
2838 if (trap == 0x400)
2839 - error_code &= 0x48200000;
2840 + error_code &= 0x58200000;
2841 else
2842 is_write = error_code & DSISR_ISSTORE;
2843 #else
2844 @@ -256,7 +288,7 @@ good_area:
2845 * "undefined". Of those that can be set, this is the only
2846 * one which seems bad.
2847 */
2848 - if (error_code & 0x10000000)
2849 + if (error_code & DSISR_GUARDED)
2850 /* Guarded storage error. */
2851 goto bad_area;
2852 #endif /* CONFIG_8xx */
2853 @@ -271,7 +303,7 @@ good_area:
2854 * processors use the same I/D cache coherency mechanism
2855 * as embedded.
2856 */
2857 - if (error_code & DSISR_PROTFAULT)
2858 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2859 goto bad_area;
2860 #endif /* CONFIG_PPC_STD_MMU */
2861
2862 @@ -341,6 +373,23 @@ bad_area:
2863 bad_area_nosemaphore:
2864 /* User mode accesses cause a SIGSEGV */
2865 if (user_mode(regs)) {
2866 +
2867 +#ifdef CONFIG_PAX_PAGEEXEC
2868 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2869 +#ifdef CONFIG_PPC_STD_MMU
2870 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2871 +#else
2872 + if (is_exec && regs->nip == address) {
2873 +#endif
2874 + switch (pax_handle_fetch_fault(regs)) {
2875 + }
2876 +
2877 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2878 + do_group_exit(SIGKILL);
2879 + }
2880 + }
2881 +#endif
2882 +
2883 _exception(SIGSEGV, regs, code, address);
2884 return 0;
2885 }
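[editor's note, not part of the patch] When a PAGEEXEC task takes an instruction-fetch fault on a non-executable page, the hunk above reports the fault and kills the task; pax_report_insns() simply hex-dumps the five instruction words at the faulting PC into the kernel log. A trivial user-space sketch of that log format (the words below are arbitrary example values; the kernel reads the real ones with get_user()):

#include <stdio.h>

int main(void)
{
	/* arbitrary example words standing in for whatever sits at the PC */
	unsigned int insns[5] = { 0x7fe00008, 0x60000000, 0x4e800020,
				  0x60000000, 0x60000000 };

	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 5; i++)
		printf("%08x ", insns[i]);
	printf("\n");
	return 0;
}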
2886 diff -urNp linux-2.6.34.1/arch/powerpc/mm/mmap_64.c linux-2.6.34.1/arch/powerpc/mm/mmap_64.c
2887 --- linux-2.6.34.1/arch/powerpc/mm/mmap_64.c 2010-07-05 14:24:10.000000000 -0400
2888 +++ linux-2.6.34.1/arch/powerpc/mm/mmap_64.c 2010-07-07 09:04:43.000000000 -0400
2889 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2890 */
2891 if (mmap_is_legacy()) {
2892 mm->mmap_base = TASK_UNMAPPED_BASE;
2893 +
2894 +#ifdef CONFIG_PAX_RANDMMAP
2895 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2896 + mm->mmap_base += mm->delta_mmap;
2897 +#endif
2898 +
2899 mm->get_unmapped_area = arch_get_unmapped_area;
2900 mm->unmap_area = arch_unmap_area;
2901 } else {
2902 mm->mmap_base = mmap_base();
2903 +
2904 +#ifdef CONFIG_PAX_RANDMMAP
2905 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2906 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2907 +#endif
2908 +
2909 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2910 mm->unmap_area = arch_unmap_area_topdown;
2911 }
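[editor's note, not part of the patch] Note the asymmetry in the hunk above: with the legacy layout the random delta is added (the mmap area grows up from TASK_UNMAPPED_BASE), while with the top-down layout delta_mmap + delta_stack is subtracted (the area grows down from just below the randomized stack). A small arithmetic illustration with made-up addresses, not powerpc constants:

#include <stdio.h>

int main(void)
{
	/* all values are assumed illustration numbers */
	unsigned long task_unmapped_base = 0x20000000UL;  /* legacy start  */
	unsigned long top_of_range       = 0xc0000000UL;  /* topdown start */
	unsigned long delta_mmap  = 0x01230000UL;         /* random, page aligned */
	unsigned long delta_stack = 0x00040000UL;

	printf("legacy  base: %#lx\n", task_unmapped_base + delta_mmap);
	printf("topdown base: %#lx\n", top_of_range - (delta_mmap + delta_stack));
	return 0;
}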
2912 diff -urNp linux-2.6.34.1/arch/powerpc/mm/slice.c linux-2.6.34.1/arch/powerpc/mm/slice.c
2913 --- linux-2.6.34.1/arch/powerpc/mm/slice.c 2010-07-05 14:24:10.000000000 -0400
2914 +++ linux-2.6.34.1/arch/powerpc/mm/slice.c 2010-07-07 09:04:43.000000000 -0400
2915 @@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
2916 if (fixed && addr > (mm->task_size - len))
2917 return -EINVAL;
2918
2919 +#ifdef CONFIG_PAX_RANDMMAP
2920 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2921 + addr = 0;
2922 +#endif
2923 +
2924 /* If hint, make sure it matches our alignment restrictions */
2925 if (!fixed && addr) {
2926 addr = _ALIGN_UP(addr, 1ul << pshift);
2927 diff -urNp linux-2.6.34.1/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.34.1/arch/powerpc/platforms/52xx/lite5200_pm.c
2928 --- linux-2.6.34.1/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-07-05 14:24:10.000000000 -0400
2929 +++ linux-2.6.34.1/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-07-07 09:04:43.000000000 -0400
2930 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2931 lite5200_pm_target_state = PM_SUSPEND_ON;
2932 }
2933
2934 -static struct platform_suspend_ops lite5200_pm_ops = {
2935 +static const struct platform_suspend_ops lite5200_pm_ops = {
2936 .valid = lite5200_pm_valid,
2937 .begin = lite5200_pm_begin,
2938 .prepare = lite5200_pm_prepare,
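[editor's note, not part of the patch] This is the first of many hunks in this patch that mark function-pointer tables (platform_suspend_ops, dma_map_ops, kgdb_arch, ...) as const so they end up in a read-only section and cannot be overwritten at runtime. A minimal stand-alone sketch of the idea, using a made-up ops struct rather than the real kernel types:

#include <stdio.h>

/* hypothetical ops table, standing in for platform_suspend_ops and friends */
struct demo_ops {
	int (*prepare)(void);
	int (*enter)(int state);
};

static int demo_prepare(void)    { return 0; }
static int demo_enter(int state) { printf("enter %d\n", state); return 0; }

/* const => the table lands in .rodata; overwriting a pointer in it at
 * runtime (a common exploit technique) faults instead of silently working */
static const struct demo_ops demo_pm_ops = {
	.prepare = demo_prepare,
	.enter   = demo_enter,
};

int main(void)
{
	if (demo_pm_ops.prepare() == 0)
		demo_pm_ops.enter(3);
	return 0;
}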
2939 diff -urNp linux-2.6.34.1/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.34.1/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2940 --- linux-2.6.34.1/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-07-05 14:24:10.000000000 -0400
2941 +++ linux-2.6.34.1/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-07-07 09:04:43.000000000 -0400
2942 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2943 iounmap(mbar);
2944 }
2945
2946 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2947 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2948 .valid = mpc52xx_pm_valid,
2949 .prepare = mpc52xx_pm_prepare,
2950 .enter = mpc52xx_pm_enter,
2951 diff -urNp linux-2.6.34.1/arch/powerpc/platforms/83xx/suspend.c linux-2.6.34.1/arch/powerpc/platforms/83xx/suspend.c
2952 --- linux-2.6.34.1/arch/powerpc/platforms/83xx/suspend.c 2010-07-05 14:24:10.000000000 -0400
2953 +++ linux-2.6.34.1/arch/powerpc/platforms/83xx/suspend.c 2010-07-07 09:04:43.000000000 -0400
2954 @@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void)
2955 return ret;
2956 }
2957
2958 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2959 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2960 .valid = mpc83xx_suspend_valid,
2961 .begin = mpc83xx_suspend_begin,
2962 .enter = mpc83xx_suspend_enter,
2963 diff -urNp linux-2.6.34.1/arch/powerpc/platforms/cell/iommu.c linux-2.6.34.1/arch/powerpc/platforms/cell/iommu.c
2964 --- linux-2.6.34.1/arch/powerpc/platforms/cell/iommu.c 2010-07-05 14:24:10.000000000 -0400
2965 +++ linux-2.6.34.1/arch/powerpc/platforms/cell/iommu.c 2010-07-07 09:04:43.000000000 -0400
2966 @@ -643,7 +643,7 @@ static int dma_fixed_dma_supported(struc
2967
2968 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2969
2970 -struct dma_map_ops dma_iommu_fixed_ops = {
2971 +const struct dma_map_ops dma_iommu_fixed_ops = {
2972 .alloc_coherent = dma_fixed_alloc_coherent,
2973 .free_coherent = dma_fixed_free_coherent,
2974 .map_sg = dma_fixed_map_sg,
2975 diff -urNp linux-2.6.34.1/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.34.1/arch/powerpc/platforms/ps3/system-bus.c
2976 --- linux-2.6.34.1/arch/powerpc/platforms/ps3/system-bus.c 2010-07-05 14:24:10.000000000 -0400
2977 +++ linux-2.6.34.1/arch/powerpc/platforms/ps3/system-bus.c 2010-07-07 09:04:43.000000000 -0400
2978 @@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi
2979 return mask >= DMA_BIT_MASK(32);
2980 }
2981
2982 -static struct dma_map_ops ps3_sb_dma_ops = {
2983 +static const struct dma_map_ops ps3_sb_dma_ops = {
2984 .alloc_coherent = ps3_alloc_coherent,
2985 .free_coherent = ps3_free_coherent,
2986 .map_sg = ps3_sb_map_sg,
2987 @@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops
2988 .unmap_page = ps3_unmap_page,
2989 };
2990
2991 -static struct dma_map_ops ps3_ioc0_dma_ops = {
2992 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
2993 .alloc_coherent = ps3_alloc_coherent,
2994 .free_coherent = ps3_free_coherent,
2995 .map_sg = ps3_ioc0_map_sg,
2996 diff -urNp linux-2.6.34.1/arch/powerpc/sysdev/fsl_pmc.c linux-2.6.34.1/arch/powerpc/sysdev/fsl_pmc.c
2997 --- linux-2.6.34.1/arch/powerpc/sysdev/fsl_pmc.c 2010-07-05 14:24:10.000000000 -0400
2998 +++ linux-2.6.34.1/arch/powerpc/sysdev/fsl_pmc.c 2010-07-07 09:04:43.000000000 -0400
2999 @@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_sta
3000 return 1;
3001 }
3002
3003 -static struct platform_suspend_ops pmc_suspend_ops = {
3004 +static const struct platform_suspend_ops pmc_suspend_ops = {
3005 .valid = pmc_suspend_valid,
3006 .enter = pmc_suspend_enter,
3007 };
3008 diff -urNp linux-2.6.34.1/arch/s390/Kconfig linux-2.6.34.1/arch/s390/Kconfig
3009 --- linux-2.6.34.1/arch/s390/Kconfig 2010-07-05 14:24:10.000000000 -0400
3010 +++ linux-2.6.34.1/arch/s390/Kconfig 2010-07-07 09:04:43.000000000 -0400
3011 @@ -229,13 +229,12 @@ config AUDIT_ARCH
3012
3013 config S390_EXEC_PROTECT
3014 bool "Data execute protection"
3015 + default y
3016 help
3017 This option allows to enable a buffer overflow protection for user
3018 - space programs and it also selects the addressing mode option above.
3019 - The kernel parameter noexec=on will enable this feature and also
3020 - switch the addressing modes, default is disabled. Enabling this (via
3021 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3022 - will reduce system performance.
3023 + space programs.
3024 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3025 + reduce system performance.
3026
3027 comment "Code generation options"
3028
3029 diff -urNp linux-2.6.34.1/arch/s390/include/asm/elf.h linux-2.6.34.1/arch/s390/include/asm/elf.h
3030 --- linux-2.6.34.1/arch/s390/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
3031 +++ linux-2.6.34.1/arch/s390/include/asm/elf.h 2010-07-07 09:04:43.000000000 -0400
3032 @@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
3033 that it will "exec", and that there is sufficient room for the brk. */
3034 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3035
3036 +#ifdef CONFIG_PAX_ASLR
3037 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3038 +
3039 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3040 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3041 +#endif
3042 +
3043 /* This yields a mask that user programs can use to figure out what
3044 instruction set this CPU supports. */
3045
3046 diff -urNp linux-2.6.34.1/arch/s390/include/asm/uaccess.h linux-2.6.34.1/arch/s390/include/asm/uaccess.h
3047 --- linux-2.6.34.1/arch/s390/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
3048 +++ linux-2.6.34.1/arch/s390/include/asm/uaccess.h 2010-07-07 09:04:43.000000000 -0400
3049 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
3050 copy_to_user(void __user *to, const void *from, unsigned long n)
3051 {
3052 might_fault();
3053 +
3054 + if ((long)n < 0)
3055 + return n;
3056 +
3057 if (access_ok(VERIFY_WRITE, to, n))
3058 n = __copy_to_user(to, from, n);
3059 return n;
3060 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
3061 static inline unsigned long __must_check
3062 __copy_from_user(void *to, const void __user *from, unsigned long n)
3063 {
3064 + if ((long)n < 0)
3065 + return n;
3066 +
3067 if (__builtin_constant_p(n) && (n <= 256))
3068 return uaccess.copy_from_user_small(n, from, to);
3069 else
3070 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
3071 unsigned int sz = __compiletime_object_size(to);
3072
3073 might_fault();
3074 +
3075 + if ((long)n < 0)
3076 + return n;
3077 +
3078 if (unlikely(sz != -1 && sz < n)) {
3079 copy_from_user_overflow();
3080 return n;
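[editor's note, not part of the patch] The recurring (long)n < 0 guard added above rejects sizes whose sign bit is set before they ever reach access_ok() or __copy_*(): a negative signed length converted to unsigned long becomes an enormous count. A small user-space illustration of the failure mode the check targets (illustrative values only):

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	/* same shape as the added check: treat "negative" sizes as failure,
	 * returning n to signal that n bytes were left uncopied */
	if ((long)n < 0)
		return n;
	/* ... real code would call access_ok()/__copy_to_user() here ... */
	return 0;
}

int main(void)
{
	int len = -4;                     /* e.g. an unchecked length from userspace */
	unsigned long n = (unsigned long)len;

	printf("requested copy of %lu bytes\n", n);          /* huge value, not 4 */
	printf("guard left %lu bytes uncopied\n", guarded_copy(n));
	return 0;
}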
3081 diff -urNp linux-2.6.34.1/arch/s390/kernel/module.c linux-2.6.34.1/arch/s390/kernel/module.c
3082 --- linux-2.6.34.1/arch/s390/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
3083 +++ linux-2.6.34.1/arch/s390/kernel/module.c 2010-07-07 09:04:43.000000000 -0400
3084 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3085
3086 /* Increase core size by size of got & plt and set start
3087 offsets for got and plt. */
3088 - me->core_size = ALIGN(me->core_size, 4);
3089 - me->arch.got_offset = me->core_size;
3090 - me->core_size += me->arch.got_size;
3091 - me->arch.plt_offset = me->core_size;
3092 - me->core_size += me->arch.plt_size;
3093 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3094 + me->arch.got_offset = me->core_size_rw;
3095 + me->core_size_rw += me->arch.got_size;
3096 + me->arch.plt_offset = me->core_size_rx;
3097 + me->core_size_rx += me->arch.plt_size;
3098 return 0;
3099 }
3100
3101 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3102 if (info->got_initialized == 0) {
3103 Elf_Addr *gotent;
3104
3105 - gotent = me->module_core + me->arch.got_offset +
3106 + gotent = me->module_core_rw + me->arch.got_offset +
3107 info->got_offset;
3108 *gotent = val;
3109 info->got_initialized = 1;
3110 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3111 else if (r_type == R_390_GOTENT ||
3112 r_type == R_390_GOTPLTENT)
3113 *(unsigned int *) loc =
3114 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3115 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3116 else if (r_type == R_390_GOT64 ||
3117 r_type == R_390_GOTPLT64)
3118 *(unsigned long *) loc = val;
3119 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3120 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3121 if (info->plt_initialized == 0) {
3122 unsigned int *ip;
3123 - ip = me->module_core + me->arch.plt_offset +
3124 + ip = me->module_core_rx + me->arch.plt_offset +
3125 info->plt_offset;
3126 #ifndef CONFIG_64BIT
3127 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3128 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3129 val - loc + 0xffffUL < 0x1ffffeUL) ||
3130 (r_type == R_390_PLT32DBL &&
3131 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3132 - val = (Elf_Addr) me->module_core +
3133 + val = (Elf_Addr) me->module_core_rx +
3134 me->arch.plt_offset +
3135 info->plt_offset;
3136 val += rela->r_addend - loc;
3137 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3138 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3139 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3140 val = val + rela->r_addend -
3141 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3142 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3143 if (r_type == R_390_GOTOFF16)
3144 *(unsigned short *) loc = val;
3145 else if (r_type == R_390_GOTOFF32)
3146 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3147 break;
3148 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3149 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3150 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3151 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3152 rela->r_addend - loc;
3153 if (r_type == R_390_GOTPC)
3154 *(unsigned int *) loc = val;
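[editor's note, not part of the patch] The relocations above now distinguish a writable module region (module_core_rw, holding the GOT) from an executable one (module_core_rx, holding the PLT trampolines). The split fields themselves are introduced elsewhere in this patch, in the generic module loader, not in this hunk; a rough, runnable sketch of the intended layout, with struct and field names assumed from the usage above:

#include <stdio.h>

/* assumed shapes, mirroring the core_size_rw/core_size_rx usage above */
struct demo_module_layout {
	unsigned long core_size_rw;   /* data, GOT                    */
	unsigned long core_size_rx;   /* code, PLT trampolines        */
	unsigned long got_offset;     /* offset inside the RW region  */
	unsigned long plt_offset;     /* offset inside the RX region  */
};

static void place_got_and_plt(struct demo_module_layout *m,
			      unsigned long got_size, unsigned long plt_size)
{
	m->core_size_rw  = (m->core_size_rw + 3UL) & ~3UL;   /* ALIGN(, 4) */
	m->got_offset    = m->core_size_rw;
	m->core_size_rw += got_size;

	m->plt_offset    = m->core_size_rx;
	m->core_size_rx += plt_size;
}

int main(void)
{
	struct demo_module_layout m = { .core_size_rw = 0x1e2, .core_size_rx = 0x4000 };

	place_got_and_plt(&m, 0x40, 0x80);
	printf("GOT at rw+%#lx, PLT at rx+%#lx\n", m.got_offset, m.plt_offset);
	return 0;
}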
3155 diff -urNp linux-2.6.34.1/arch/s390/kernel/setup.c linux-2.6.34.1/arch/s390/kernel/setup.c
3156 --- linux-2.6.34.1/arch/s390/kernel/setup.c 2010-07-05 14:24:10.000000000 -0400
3157 +++ linux-2.6.34.1/arch/s390/kernel/setup.c 2010-07-07 09:04:43.000000000 -0400
3158 @@ -297,7 +297,7 @@ static int __init early_parse_mem(char *
3159 }
3160 early_param("mem", early_parse_mem);
3161
3162 -unsigned int user_mode = HOME_SPACE_MODE;
3163 +unsigned int user_mode = SECONDARY_SPACE_MODE;
3164 EXPORT_SYMBOL_GPL(user_mode);
3165
3166 static int set_amode_and_uaccess(unsigned long user_amode,
3167 @@ -326,17 +326,6 @@ static int set_amode_and_uaccess(unsigne
3168 }
3169 }
3170
3171 -/*
3172 - * Switch kernel/user addressing modes?
3173 - */
3174 -static int __init early_parse_switch_amode(char *p)
3175 -{
3176 - if (user_mode != SECONDARY_SPACE_MODE)
3177 - user_mode = PRIMARY_SPACE_MODE;
3178 - return 0;
3179 -}
3180 -early_param("switch_amode", early_parse_switch_amode);
3181 -
3182 static int __init early_parse_user_mode(char *p)
3183 {
3184 if (p && strcmp(p, "primary") == 0)
3185 @@ -353,20 +342,6 @@ static int __init early_parse_user_mode(
3186 }
3187 early_param("user_mode", early_parse_user_mode);
3188
3189 -#ifdef CONFIG_S390_EXEC_PROTECT
3190 -/*
3191 - * Enable execute protection?
3192 - */
3193 -static int __init early_parse_noexec(char *p)
3194 -{
3195 - if (!strncmp(p, "off", 3))
3196 - return 0;
3197 - user_mode = SECONDARY_SPACE_MODE;
3198 - return 0;
3199 -}
3200 -early_param("noexec", early_parse_noexec);
3201 -#endif /* CONFIG_S390_EXEC_PROTECT */
3202 -
3203 static void setup_addressing_mode(void)
3204 {
3205 if (user_mode == SECONDARY_SPACE_MODE) {
3206 diff -urNp linux-2.6.34.1/arch/s390/mm/maccess.c linux-2.6.34.1/arch/s390/mm/maccess.c
3207 --- linux-2.6.34.1/arch/s390/mm/maccess.c 2010-07-05 14:24:10.000000000 -0400
3208 +++ linux-2.6.34.1/arch/s390/mm/maccess.c 2010-07-07 09:04:43.000000000 -0400
3209 @@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void
3210 return rc ? rc : count;
3211 }
3212
3213 -long probe_kernel_write(void *dst, void *src, size_t size)
3214 +long probe_kernel_write(void *dst, const void *src, size_t size)
3215 {
3216 long copied = 0;
3217
3218 diff -urNp linux-2.6.34.1/arch/s390/mm/mmap.c linux-2.6.34.1/arch/s390/mm/mmap.c
3219 --- linux-2.6.34.1/arch/s390/mm/mmap.c 2010-07-05 14:24:10.000000000 -0400
3220 +++ linux-2.6.34.1/arch/s390/mm/mmap.c 2010-07-07 09:04:43.000000000 -0400
3221 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3222 */
3223 if (mmap_is_legacy()) {
3224 mm->mmap_base = TASK_UNMAPPED_BASE;
3225 +
3226 +#ifdef CONFIG_PAX_RANDMMAP
3227 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3228 + mm->mmap_base += mm->delta_mmap;
3229 +#endif
3230 +
3231 mm->get_unmapped_area = arch_get_unmapped_area;
3232 mm->unmap_area = arch_unmap_area;
3233 } else {
3234 mm->mmap_base = mmap_base();
3235 +
3236 +#ifdef CONFIG_PAX_RANDMMAP
3237 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3238 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3239 +#endif
3240 +
3241 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3242 mm->unmap_area = arch_unmap_area_topdown;
3243 }
3244 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3245 */
3246 if (mmap_is_legacy()) {
3247 mm->mmap_base = TASK_UNMAPPED_BASE;
3248 +
3249 +#ifdef CONFIG_PAX_RANDMMAP
3250 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3251 + mm->mmap_base += mm->delta_mmap;
3252 +#endif
3253 +
3254 mm->get_unmapped_area = s390_get_unmapped_area;
3255 mm->unmap_area = arch_unmap_area;
3256 } else {
3257 mm->mmap_base = mmap_base();
3258 +
3259 +#ifdef CONFIG_PAX_RANDMMAP
3260 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3261 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3262 +#endif
3263 +
3264 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3265 mm->unmap_area = arch_unmap_area_topdown;
3266 }
3267 diff -urNp linux-2.6.34.1/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.34.1/arch/sh/boards/mach-hp6xx/pm.c
3268 --- linux-2.6.34.1/arch/sh/boards/mach-hp6xx/pm.c 2010-07-05 14:24:10.000000000 -0400
3269 +++ linux-2.6.34.1/arch/sh/boards/mach-hp6xx/pm.c 2010-07-07 09:04:43.000000000 -0400
3270 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3271 return 0;
3272 }
3273
3274 -static struct platform_suspend_ops hp6x0_pm_ops = {
3275 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3276 .enter = hp6x0_pm_enter,
3277 .valid = suspend_valid_only_mem,
3278 };
3279 diff -urNp linux-2.6.34.1/arch/sh/include/asm/dma-mapping.h linux-2.6.34.1/arch/sh/include/asm/dma-mapping.h
3280 --- linux-2.6.34.1/arch/sh/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
3281 +++ linux-2.6.34.1/arch/sh/include/asm/dma-mapping.h 2010-07-07 09:04:43.000000000 -0400
3282 @@ -1,10 +1,10 @@
3283 #ifndef __ASM_SH_DMA_MAPPING_H
3284 #define __ASM_SH_DMA_MAPPING_H
3285
3286 -extern struct dma_map_ops *dma_ops;
3287 +extern const struct dma_map_ops *dma_ops;
3288 extern void no_iommu_init(void);
3289
3290 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3291 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3292 {
3293 return dma_ops;
3294 }
3295 @@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm
3296
3297 static inline int dma_supported(struct device *dev, u64 mask)
3298 {
3299 - struct dma_map_ops *ops = get_dma_ops(dev);
3300 + const struct dma_map_ops *ops = get_dma_ops(dev);
3301
3302 if (ops->dma_supported)
3303 return ops->dma_supported(dev, mask);
3304 @@ -24,7 +24,7 @@ static inline int dma_supported(struct d
3305
3306 static inline int dma_set_mask(struct device *dev, u64 mask)
3307 {
3308 - struct dma_map_ops *ops = get_dma_ops(dev);
3309 + const struct dma_map_ops *ops = get_dma_ops(dev);
3310
3311 if (!dev->dma_mask || !dma_supported(dev, mask))
3312 return -EIO;
3313 @@ -59,7 +59,7 @@ static inline int dma_get_cache_alignmen
3314
3315 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3316 {
3317 - struct dma_map_ops *ops = get_dma_ops(dev);
3318 + const struct dma_map_ops *ops = get_dma_ops(dev);
3319
3320 if (ops->mapping_error)
3321 return ops->mapping_error(dev, dma_addr);
3322 @@ -70,7 +70,7 @@ static inline int dma_mapping_error(stru
3323 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3324 dma_addr_t *dma_handle, gfp_t gfp)
3325 {
3326 - struct dma_map_ops *ops = get_dma_ops(dev);
3327 + const struct dma_map_ops *ops = get_dma_ops(dev);
3328 void *memory;
3329
3330 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
3331 @@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(s
3332 static inline void dma_free_coherent(struct device *dev, size_t size,
3333 void *vaddr, dma_addr_t dma_handle)
3334 {
3335 - struct dma_map_ops *ops = get_dma_ops(dev);
3336 + const struct dma_map_ops *ops = get_dma_ops(dev);
3337
3338 if (dma_release_from_coherent(dev, get_order(size), vaddr))
3339 return;
3340 diff -urNp linux-2.6.34.1/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.34.1/arch/sh/kernel/cpu/shmobile/pm.c
3341 --- linux-2.6.34.1/arch/sh/kernel/cpu/shmobile/pm.c 2010-07-05 14:24:10.000000000 -0400
3342 +++ linux-2.6.34.1/arch/sh/kernel/cpu/shmobile/pm.c 2010-07-07 09:04:43.000000000 -0400
3343 @@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t s
3344 return 0;
3345 }
3346
3347 -static struct platform_suspend_ops sh_pm_ops = {
3348 +static const struct platform_suspend_ops sh_pm_ops = {
3349 .enter = sh_pm_enter,
3350 .valid = suspend_valid_only_mem,
3351 };
3352 diff -urNp linux-2.6.34.1/arch/sh/kernel/dma-nommu.c linux-2.6.34.1/arch/sh/kernel/dma-nommu.c
3353 --- linux-2.6.34.1/arch/sh/kernel/dma-nommu.c 2010-07-05 14:24:10.000000000 -0400
3354 +++ linux-2.6.34.1/arch/sh/kernel/dma-nommu.c 2010-07-07 09:04:43.000000000 -0400
3355 @@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device
3356 }
3357 #endif
3358
3359 -struct dma_map_ops nommu_dma_ops = {
3360 +const struct dma_map_ops nommu_dma_ops = {
3361 .alloc_coherent = dma_generic_alloc_coherent,
3362 .free_coherent = dma_generic_free_coherent,
3363 .map_page = nommu_map_page,
3364 diff -urNp linux-2.6.34.1/arch/sh/kernel/kgdb.c linux-2.6.34.1/arch/sh/kernel/kgdb.c
3365 --- linux-2.6.34.1/arch/sh/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
3366 +++ linux-2.6.34.1/arch/sh/kernel/kgdb.c 2010-07-07 09:04:43.000000000 -0400
3367 @@ -307,7 +307,7 @@ void kgdb_arch_exit(void)
3368 unregister_die_notifier(&kgdb_notifier);
3369 }
3370
3371 -struct kgdb_arch arch_kgdb_ops = {
3372 +const struct kgdb_arch arch_kgdb_ops = {
3373 /* Breakpoint instruction: trapa #0x3c */
3374 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3375 .gdb_bpt_instr = { 0x3c, 0xc3 },
3376 diff -urNp linux-2.6.34.1/arch/sh/mm/consistent.c linux-2.6.34.1/arch/sh/mm/consistent.c
3377 --- linux-2.6.34.1/arch/sh/mm/consistent.c 2010-07-05 14:24:10.000000000 -0400
3378 +++ linux-2.6.34.1/arch/sh/mm/consistent.c 2010-07-07 09:04:43.000000000 -0400
3379 @@ -22,7 +22,7 @@
3380
3381 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
3382
3383 -struct dma_map_ops *dma_ops;
3384 +const struct dma_map_ops *dma_ops;
3385 EXPORT_SYMBOL(dma_ops);
3386
3387 static int __init dma_init(void)
3388 diff -urNp linux-2.6.34.1/arch/sparc/Makefile linux-2.6.34.1/arch/sparc/Makefile
3389 --- linux-2.6.34.1/arch/sparc/Makefile 2010-07-05 14:24:10.000000000 -0400
3390 +++ linux-2.6.34.1/arch/sparc/Makefile 2010-07-07 09:04:44.000000000 -0400
3391 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
3392 # Export what is needed by arch/sparc/boot/Makefile
3393 export VMLINUX_INIT VMLINUX_MAIN
3394 VMLINUX_INIT := $(head-y) $(init-y)
3395 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3396 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3397 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3398 VMLINUX_MAIN += $(drivers-y) $(net-y)
3399
3400 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/atomic_64.h linux-2.6.34.1/arch/sparc/include/asm/atomic_64.h
3401 --- linux-2.6.34.1/arch/sparc/include/asm/atomic_64.h 2010-07-05 14:24:10.000000000 -0400
3402 +++ linux-2.6.34.1/arch/sparc/include/asm/atomic_64.h 2010-07-07 09:04:43.000000000 -0400
3403 @@ -14,18 +14,39 @@
3404 #define ATOMIC64_INIT(i) { (i) }
3405
3406 #define atomic_read(v) ((v)->counter)
3407 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3408 +{
3409 + return v->counter;
3410 +}
3411 #define atomic64_read(v) ((v)->counter)
3412 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3413 +{
3414 + return v->counter;
3415 +}
3416
3417 #define atomic_set(v, i) (((v)->counter) = i)
3418 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3419 +{
3420 + v->counter = i;
3421 +}
3422 #define atomic64_set(v, i) (((v)->counter) = i)
3423 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3424 +{
3425 + v->counter = i;
3426 +}
3427
3428 extern void atomic_add(int, atomic_t *);
3429 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3430 extern void atomic64_add(int, atomic64_t *);
3431 +extern void atomic64_add_unchecked(int, atomic64_unchecked_t *);
3432 extern void atomic_sub(int, atomic_t *);
3433 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3434 extern void atomic64_sub(int, atomic64_t *);
3435 +extern void atomic64_sub_unchecked(int, atomic64_unchecked_t *);
3436
3437 extern int atomic_add_ret(int, atomic_t *);
3438 extern int atomic64_add_ret(int, atomic64_t *);
3439 +extern int atomic64_add_ret_unchecked(int, atomic64_unchecked_t *);
3440 extern int atomic_sub_ret(int, atomic_t *);
3441 extern int atomic64_sub_ret(int, atomic64_t *);
3442
3443 @@ -34,6 +55,7 @@ extern int atomic64_sub_ret(int, atomic6
3444
3445 #define atomic_inc_return(v) atomic_add_ret(1, v)
3446 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3447 +#define atomic64_inc_return_unchecked(v) atomic64_add_ret_unchecked(1, v)
3448
3449 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3450 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3451 @@ -59,10 +81,26 @@ extern int atomic64_sub_ret(int, atomic6
3452 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3453
3454 #define atomic_inc(v) atomic_add(1, v)
3455 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3456 +{
3457 + atomic_add_unchecked(1, v);
3458 +}
3459 #define atomic64_inc(v) atomic64_add(1, v)
3460 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3461 +{
3462 + atomic64_add_unchecked(1, v);
3463 +}
3464
3465 #define atomic_dec(v) atomic_sub(1, v)
3466 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3467 +{
3468 + atomic_sub_unchecked(1, v);
3469 +}
3470 #define atomic64_dec(v) atomic64_sub(1, v)
3471 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3472 +{
3473 + atomic64_sub_unchecked(1, v);
3474 +}
3475
3476 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3477 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3478 @@ -72,17 +110,28 @@ extern int atomic64_sub_ret(int, atomic6
3479
3480 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3481 {
3482 - int c, old;
3483 + int c, old, new;
3484 c = atomic_read(v);
3485 for (;;) {
3486 - if (unlikely(c == (u)))
3487 + if (unlikely(c == u))
3488 break;
3489 - old = atomic_cmpxchg((v), c, c + (a));
3490 +
3491 + asm volatile("addcc %2, %0, %0\n"
3492 +
3493 +#ifdef CONFIG_PAX_REFCOUNT
3494 + "tvs %%icc, 6\n"
3495 +#endif
3496 +
3497 + : "=r" (new)
3498 + : "0" (c), "ir" (a)
3499 + : "cc");
3500 +
3501 + old = atomic_cmpxchg(v, c, new);
3502 if (likely(old == c))
3503 break;
3504 c = old;
3505 }
3506 - return c != (u);
3507 + return c != u;
3508 }
3509
3510 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3511 @@ -93,17 +142,28 @@ static inline int atomic_add_unless(atom
3512
3513 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
3514 {
3515 - long c, old;
3516 + long c, old, new;
3517 c = atomic64_read(v);
3518 for (;;) {
3519 - if (unlikely(c == (u)))
3520 + if (unlikely(c == u))
3521 break;
3522 - old = atomic64_cmpxchg((v), c, c + (a));
3523 +
3524 + asm volatile("addcc %2, %0, %0\n"
3525 +
3526 +#ifdef CONFIG_PAX_REFCOUNT
3527 + "tvs %%xcc, 6\n"
3528 +#endif
3529 +
3530 + : "=r" (new)
3531 + : "0" (c), "ir" (a)
3532 + : "cc");
3533 +
3534 + old = atomic64_cmpxchg(v, c, new);
3535 if (likely(old == c))
3536 break;
3537 c = old;
3538 }
3539 - return c != (u);
3540 + return c != u;
3541 }
3542
3543 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
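[editor's note, not part of the patch] The addcc/tvs pairs above make every checked atomic operation raise SPARC software trap 6 when a signed overflow occurs (that trap level is routed to pax_report_refcount_overflow() in the traps_64.c hunk later in this patch). This is how PAX_REFCOUNT turns a reference-count overflow into a killed task instead of a later use-after-free. A portable sketch of the same semantics using a compiler builtin rather than the SPARC-specific instructions:

#include <stdio.h>
#include <stdlib.h>

/* Semantics-only illustration: the patch detects overflow for free inside
 * the atomic sequence itself via "addcc ... tvs %icc, 6". */
static int checked_add(int counter, int delta)
{
	int result;

	if (__builtin_add_overflow(counter, delta, &result)) {
		fprintf(stderr, "refcount overflow detected, aborting\n");
		abort();          /* the kernel reports and kills the task */
	}
	return result;
}

int main(void)
{
	int refcount = 0x7ffffffe;             /* close to INT_MAX */

	refcount = checked_add(refcount, 1);   /* fine */
	refcount = checked_add(refcount, 1);   /* overflows -> aborts */
	printf("never reached: %d\n", refcount);
	return 0;
}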
3544 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/dma-mapping.h linux-2.6.34.1/arch/sparc/include/asm/dma-mapping.h
3545 --- linux-2.6.34.1/arch/sparc/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
3546 +++ linux-2.6.34.1/arch/sparc/include/asm/dma-mapping.h 2010-07-07 09:04:43.000000000 -0400
3547 @@ -13,10 +13,10 @@ extern int dma_supported(struct device *
3548 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3549 #define dma_is_consistent(d, h) (1)
3550
3551 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3552 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3553 extern struct bus_type pci_bus_type;
3554
3555 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3556 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3557 {
3558 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3559 if (dev->bus == &pci_bus_type)
3560 @@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dm
3561 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3562 dma_addr_t *dma_handle, gfp_t flag)
3563 {
3564 - struct dma_map_ops *ops = get_dma_ops(dev);
3565 + const struct dma_map_ops *ops = get_dma_ops(dev);
3566 void *cpu_addr;
3567
3568 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3569 @@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(s
3570 static inline void dma_free_coherent(struct device *dev, size_t size,
3571 void *cpu_addr, dma_addr_t dma_handle)
3572 {
3573 - struct dma_map_ops *ops = get_dma_ops(dev);
3574 + const struct dma_map_ops *ops = get_dma_ops(dev);
3575
3576 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3577 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3578 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/elf_32.h linux-2.6.34.1/arch/sparc/include/asm/elf_32.h
3579 --- linux-2.6.34.1/arch/sparc/include/asm/elf_32.h 2010-07-05 14:24:10.000000000 -0400
3580 +++ linux-2.6.34.1/arch/sparc/include/asm/elf_32.h 2010-07-07 09:04:43.000000000 -0400
3581 @@ -114,6 +114,13 @@ typedef struct {
3582
3583 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3584
3585 +#ifdef CONFIG_PAX_ASLR
3586 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3587 +
3588 +#define PAX_DELTA_MMAP_LEN 16
3589 +#define PAX_DELTA_STACK_LEN 16
3590 +#endif
3591 +
3592 /* This yields a mask that user programs can use to figure out what
3593 instruction set this cpu supports. This can NOT be done in userspace
3594 on Sparc. */
3595 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/elf_64.h linux-2.6.34.1/arch/sparc/include/asm/elf_64.h
3596 --- linux-2.6.34.1/arch/sparc/include/asm/elf_64.h 2010-07-05 14:24:10.000000000 -0400
3597 +++ linux-2.6.34.1/arch/sparc/include/asm/elf_64.h 2010-07-07 09:04:43.000000000 -0400
3598 @@ -162,6 +162,12 @@ typedef struct {
3599 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3600 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3601
3602 +#ifdef CONFIG_PAX_ASLR
3603 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3604 +
3605 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3606 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3607 +#endif
3608
3609 /* This yields a mask that user programs can use to figure out what
3610 instruction set this cpu supports. */
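[editor's note, not part of the patch] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits mixed into the mmap and stack bases, so the resulting slide is that many bits worth of pages. A quick arithmetic check of the ranges implied by the 64-bit values above (the page size is an assumption here; sparc64 normally uses 8 KiB pages):

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 13;                 /* assumed 8 KiB pages        */
	unsigned int mmap_bits = 28, stack_bits = 29; /* 64-bit values from above   */

	printf("mmap base slide : %lu GiB\n", (1UL << (mmap_bits  + page_shift)) >> 30);
	printf("stack base slide: %lu GiB\n", (1UL << (stack_bits + page_shift)) >> 30);
	return 0;
}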
3611 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/pgtable_32.h linux-2.6.34.1/arch/sparc/include/asm/pgtable_32.h
3612 --- linux-2.6.34.1/arch/sparc/include/asm/pgtable_32.h 2010-07-05 14:24:10.000000000 -0400
3613 +++ linux-2.6.34.1/arch/sparc/include/asm/pgtable_32.h 2010-07-07 09:04:43.000000000 -0400
3614 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3615 BTFIXUPDEF_INT(page_none)
3616 BTFIXUPDEF_INT(page_copy)
3617 BTFIXUPDEF_INT(page_readonly)
3618 +
3619 +#ifdef CONFIG_PAX_PAGEEXEC
3620 +BTFIXUPDEF_INT(page_shared_noexec)
3621 +BTFIXUPDEF_INT(page_copy_noexec)
3622 +BTFIXUPDEF_INT(page_readonly_noexec)
3623 +#endif
3624 +
3625 BTFIXUPDEF_INT(page_kernel)
3626
3627 #define PMD_SHIFT SUN4C_PMD_SHIFT
3628 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3629 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3630 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3631
3632 +#ifdef CONFIG_PAX_PAGEEXEC
3633 +extern pgprot_t PAGE_SHARED_NOEXEC;
3634 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3635 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3636 +#else
3637 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3638 +# define PAGE_COPY_NOEXEC PAGE_COPY
3639 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3640 +#endif
3641 +
3642 extern unsigned long page_kernel;
3643
3644 #ifdef MODULE
3645 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.34.1/arch/sparc/include/asm/pgtsrmmu.h
3646 --- linux-2.6.34.1/arch/sparc/include/asm/pgtsrmmu.h 2010-07-05 14:24:10.000000000 -0400
3647 +++ linux-2.6.34.1/arch/sparc/include/asm/pgtsrmmu.h 2010-07-07 09:04:43.000000000 -0400
3648 @@ -115,6 +115,13 @@
3649 SRMMU_EXEC | SRMMU_REF)
3650 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3651 SRMMU_EXEC | SRMMU_REF)
3652 +
3653 +#ifdef CONFIG_PAX_PAGEEXEC
3654 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3655 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3656 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3657 +#endif
3658 +
3659 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3660 SRMMU_DIRTY | SRMMU_REF)
3661
3662 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/spinlock_64.h linux-2.6.34.1/arch/sparc/include/asm/spinlock_64.h
3663 --- linux-2.6.34.1/arch/sparc/include/asm/spinlock_64.h 2010-07-05 14:24:10.000000000 -0400
3664 +++ linux-2.6.34.1/arch/sparc/include/asm/spinlock_64.h 2010-07-07 09:04:43.000000000 -0400
3665 @@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_r
3666 __asm__ __volatile__ (
3667 "1: ldsw [%2], %0\n"
3668 " brlz,pn %0, 2f\n"
3669 -"4: add %0, 1, %1\n"
3670 +"4: addcc %0, 1, %1\n"
3671 +
3672 +#ifdef CONFIG_PAX_REFCOUNT
3673 +" tvs %%icc, 6\n"
3674 +#endif
3675 +
3676 " cas [%2], %0, %1\n"
3677 " cmp %0, %1\n"
3678 " bne,pn %%icc, 1b\n"
3679 @@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_r
3680 " .previous"
3681 : "=&r" (tmp1), "=&r" (tmp2)
3682 : "r" (lock)
3683 - : "memory");
3684 + : "memory", "cc");
3685 }
3686
3687 static int inline arch_read_trylock(arch_rwlock_t *lock)
3688 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3689 "1: ldsw [%2], %0\n"
3690 " brlz,a,pn %0, 2f\n"
3691 " mov 0, %0\n"
3692 -" add %0, 1, %1\n"
3693 +" addcc %0, 1, %1\n"
3694 +
3695 +#ifdef CONFIG_PAX_REFCOUNT
3696 +" tvs %%icc, 6\n"
3697 +#endif
3698 +
3699 " cas [%2], %0, %1\n"
3700 " cmp %0, %1\n"
3701 " bne,pn %%icc, 1b\n"
3702 @@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch
3703
3704 __asm__ __volatile__(
3705 "1: lduw [%2], %0\n"
3706 -" sub %0, 1, %1\n"
3707 +" subcc %0, 1, %1\n"
3708 +
3709 +#ifdef CONFIG_PAX_REFCOUNT
3710 +" tvs %%icc, 6\n"
3711 +#endif
3712 +
3713 " cas [%2], %0, %1\n"
3714 " cmp %0, %1\n"
3715 " bne,pn %%xcc, 1b\n"
3716 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/uaccess.h linux-2.6.34.1/arch/sparc/include/asm/uaccess.h
3717 --- linux-2.6.34.1/arch/sparc/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
3718 +++ linux-2.6.34.1/arch/sparc/include/asm/uaccess.h 2010-07-07 09:04:43.000000000 -0400
3719 @@ -1,5 +1,8 @@
3720 #ifndef ___ASM_SPARC_UACCESS_H
3721 #define ___ASM_SPARC_UACCESS_H
3722 +
3723 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3724 +
3725 #if defined(__sparc__) && defined(__arch64__)
3726 #include <asm/uaccess_64.h>
3727 #else
3728 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/uaccess_32.h linux-2.6.34.1/arch/sparc/include/asm/uaccess_32.h
3729 --- linux-2.6.34.1/arch/sparc/include/asm/uaccess_32.h 2010-07-05 14:24:10.000000000 -0400
3730 +++ linux-2.6.34.1/arch/sparc/include/asm/uaccess_32.h 2010-07-07 09:04:43.000000000 -0400
3731 @@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __
3732
3733 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3734 {
3735 - if (n && __access_ok((unsigned long) to, n))
3736 + if ((long)n < 0)
3737 + return n;
3738 +
3739 + if (n && __access_ok((unsigned long) to, n)) {
3740 + if (!__builtin_constant_p(n))
3741 + check_object_size(from, n, true);
3742 return __copy_user(to, (__force void __user *) from, n);
3743 - else
3744 + } else
3745 return n;
3746 }
3747
3748 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3749 {
3750 + if ((long)n < 0)
3751 + return n;
3752 +
3753 + if (!__builtin_constant_p(n))
3754 + check_object_size(from, n, true);
3755 +
3756 return __copy_user(to, (__force void __user *) from, n);
3757 }
3758
3759 @@ -272,19 +283,27 @@ static inline unsigned long copy_from_us
3760 {
3761 int sz = __compiletime_object_size(to);
3762
3763 + if ((long)n < 0)
3764 + return n;
3765 +
3766 if (unlikely(sz != -1 && sz < n)) {
3767 copy_from_user_overflow();
3768 return n;
3769 }
3770
3771 - if (n && __access_ok((unsigned long) from, n))
3772 + if (n && __access_ok((unsigned long) from, n)) {
3773 + if (!__builtin_constant_p(n))
3774 + check_object_size(to, n, false);
3775 return __copy_user((__force void __user *) to, from, n);
3776 - else
3777 + } else
3778 return n;
3779 }
3780
3781 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3782 {
3783 + if ((long)n < 0)
3784 + return n;
3785 +
3786 return __copy_user((__force void __user *) to, from, n);
3787 }
3788
3789 diff -urNp linux-2.6.34.1/arch/sparc/include/asm/uaccess_64.h linux-2.6.34.1/arch/sparc/include/asm/uaccess_64.h
3790 --- linux-2.6.34.1/arch/sparc/include/asm/uaccess_64.h 2010-07-05 14:24:10.000000000 -0400
3791 +++ linux-2.6.34.1/arch/sparc/include/asm/uaccess_64.h 2010-07-07 09:04:43.000000000 -0400
3792 @@ -10,6 +10,7 @@
3793 #include <linux/compiler.h>
3794 #include <linux/string.h>
3795 #include <linux/thread_info.h>
3796 +#include <linux/kernel.h>
3797 #include <asm/asi.h>
3798 #include <asm/system.h>
3799 #include <asm/spitfire.h>
3800 @@ -224,6 +225,12 @@ copy_from_user(void *to, const void __us
3801 int sz = __compiletime_object_size(to);
3802 unsigned long ret = size;
3803
3804 + if ((long)size < 0 || size > INT_MAX)
3805 + return size;
3806 +
3807 + if (!__builtin_constant_p(size))
3808 + check_object_size(to, size, false);
3809 +
3810 if (likely(sz == -1 || sz >= size)) {
3811 ret = ___copy_from_user(to, from, size);
3812 if (unlikely(ret))
3813 @@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup(
3814 static inline unsigned long __must_check
3815 copy_to_user(void __user *to, const void *from, unsigned long size)
3816 {
3817 - unsigned long ret = ___copy_to_user(to, from, size);
3818 + unsigned long ret;
3819 +
3820 + if ((long)size < 0 || size > INT_MAX)
3821 + return size;
3822 +
3823 + if (!__builtin_constant_p(size))
3824 + check_object_size(from, size, true);
3825
3826 + ret = ___copy_to_user(to, from, size);
3827 if (unlikely(ret))
3828 ret = copy_to_user_fixup(to, from, size);
3829 return ret;
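[editor's note, not part of the patch] check_object_size() is only called above when the copy length is not a compile-time constant; its implementation lives elsewhere in this patch. The intent is to verify that a runtime-sized copy stays within the bounds of the object it targets (slab object, stack frame, ...). A stand-alone sketch of that idea for a heap buffer, using a hypothetical helper name rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical bounds check, NOT the kernel's check_object_size() */
static int copy_fits_object(size_t obj_size, size_t offset, size_t copy_len)
{
	return offset <= obj_size && copy_len <= obj_size - offset;
}

int main(void)
{
	size_t obj_size = 64;
	char *obj = malloc(obj_size);
	if (!obj)
		return 1;
	memset(obj, 0, obj_size);

	printf("copy of 32 at offset 16: %s\n",
	       copy_fits_object(obj_size, 16, 32) ? "ok" : "rejected");
	printf("copy of 80 at offset 16: %s\n",
	       copy_fits_object(obj_size, 16, 80) ? "ok" : "rejected");

	free(obj);
	return 0;
}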
3830 diff -urNp linux-2.6.34.1/arch/sparc/kernel/Makefile linux-2.6.34.1/arch/sparc/kernel/Makefile
3831 --- linux-2.6.34.1/arch/sparc/kernel/Makefile 2010-07-05 14:24:10.000000000 -0400
3832 +++ linux-2.6.34.1/arch/sparc/kernel/Makefile 2010-07-07 09:04:43.000000000 -0400
3833 @@ -3,7 +3,7 @@
3834 #
3835
3836 asflags-y := -ansi
3837 -ccflags-y := -Werror
3838 +#ccflags-y := -Werror
3839
3840 extra-y := head_$(BITS).o
3841 extra-y += init_task.o
3842 diff -urNp linux-2.6.34.1/arch/sparc/kernel/iommu.c linux-2.6.34.1/arch/sparc/kernel/iommu.c
3843 --- linux-2.6.34.1/arch/sparc/kernel/iommu.c 2010-07-05 14:24:10.000000000 -0400
3844 +++ linux-2.6.34.1/arch/sparc/kernel/iommu.c 2010-07-07 09:04:43.000000000 -0400
3845 @@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3846 spin_unlock_irqrestore(&iommu->lock, flags);
3847 }
3848
3849 -static struct dma_map_ops sun4u_dma_ops = {
3850 +static const struct dma_map_ops sun4u_dma_ops = {
3851 .alloc_coherent = dma_4u_alloc_coherent,
3852 .free_coherent = dma_4u_free_coherent,
3853 .map_page = dma_4u_map_page,
3854 @@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops
3855 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3856 };
3857
3858 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3859 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3860 EXPORT_SYMBOL(dma_ops);
3861
3862 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3863 diff -urNp linux-2.6.34.1/arch/sparc/kernel/ioport.c linux-2.6.34.1/arch/sparc/kernel/ioport.c
3864 --- linux-2.6.34.1/arch/sparc/kernel/ioport.c 2010-07-05 14:24:10.000000000 -0400
3865 +++ linux-2.6.34.1/arch/sparc/kernel/ioport.c 2010-07-07 09:04:43.000000000 -0400
3866 @@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(stru
3867 BUG();
3868 }
3869
3870 -struct dma_map_ops sbus_dma_ops = {
3871 +const struct dma_map_ops sbus_dma_ops = {
3872 .alloc_coherent = sbus_alloc_coherent,
3873 .free_coherent = sbus_free_coherent,
3874 .map_page = sbus_map_page,
3875 @@ -408,7 +408,7 @@ struct dma_map_ops sbus_dma_ops = {
3876 .sync_sg_for_device = sbus_sync_sg_for_device,
3877 };
3878
3879 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
3880 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
3881 EXPORT_SYMBOL(dma_ops);
3882
3883 static int __init sparc_register_ioport(void)
3884 @@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(str
3885 }
3886 }
3887
3888 -struct dma_map_ops pci32_dma_ops = {
3889 +const struct dma_map_ops pci32_dma_ops = {
3890 .alloc_coherent = pci32_alloc_coherent,
3891 .free_coherent = pci32_free_coherent,
3892 .map_page = pci32_map_page,
3893 diff -urNp linux-2.6.34.1/arch/sparc/kernel/kgdb_32.c linux-2.6.34.1/arch/sparc/kernel/kgdb_32.c
3894 --- linux-2.6.34.1/arch/sparc/kernel/kgdb_32.c 2010-07-05 14:24:10.000000000 -0400
3895 +++ linux-2.6.34.1/arch/sparc/kernel/kgdb_32.c 2010-07-07 09:04:43.000000000 -0400
3896 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
3897 {
3898 }
3899
3900 -struct kgdb_arch arch_kgdb_ops = {
3901 +const struct kgdb_arch arch_kgdb_ops = {
3902 /* Breakpoint instruction: ta 0x7d */
3903 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
3904 };
3905 diff -urNp linux-2.6.34.1/arch/sparc/kernel/kgdb_64.c linux-2.6.34.1/arch/sparc/kernel/kgdb_64.c
3906 --- linux-2.6.34.1/arch/sparc/kernel/kgdb_64.c 2010-07-05 14:24:10.000000000 -0400
3907 +++ linux-2.6.34.1/arch/sparc/kernel/kgdb_64.c 2010-07-07 09:04:43.000000000 -0400
3908 @@ -181,7 +181,7 @@ void kgdb_arch_exit(void)
3909 {
3910 }
3911
3912 -struct kgdb_arch arch_kgdb_ops = {
3913 +const struct kgdb_arch arch_kgdb_ops = {
3914 /* Breakpoint instruction: ta 0x72 */
3915 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
3916 };
3917 diff -urNp linux-2.6.34.1/arch/sparc/kernel/pci_sun4v.c linux-2.6.34.1/arch/sparc/kernel/pci_sun4v.c
3918 --- linux-2.6.34.1/arch/sparc/kernel/pci_sun4v.c 2010-07-05 14:24:10.000000000 -0400
3919 +++ linux-2.6.34.1/arch/sparc/kernel/pci_sun4v.c 2010-07-07 09:04:43.000000000 -0400
3920 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
3921 spin_unlock_irqrestore(&iommu->lock, flags);
3922 }
3923
3924 -static struct dma_map_ops sun4v_dma_ops = {
3925 +static const struct dma_map_ops sun4v_dma_ops = {
3926 .alloc_coherent = dma_4v_alloc_coherent,
3927 .free_coherent = dma_4v_free_coherent,
3928 .map_page = dma_4v_map_page,
3929 diff -urNp linux-2.6.34.1/arch/sparc/kernel/sys_sparc_32.c linux-2.6.34.1/arch/sparc/kernel/sys_sparc_32.c
3930 --- linux-2.6.34.1/arch/sparc/kernel/sys_sparc_32.c 2010-07-05 14:24:10.000000000 -0400
3931 +++ linux-2.6.34.1/arch/sparc/kernel/sys_sparc_32.c 2010-07-07 09:04:43.000000000 -0400
3932 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
3933 if (ARCH_SUN4C && len > 0x20000000)
3934 return -ENOMEM;
3935 if (!addr)
3936 - addr = TASK_UNMAPPED_BASE;
3937 + addr = current->mm->mmap_base;
3938
3939 if (flags & MAP_SHARED)
3940 addr = COLOUR_ALIGN(addr);
3941 diff -urNp linux-2.6.34.1/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.1/arch/sparc/kernel/sys_sparc_64.c
3942 --- linux-2.6.34.1/arch/sparc/kernel/sys_sparc_64.c 2010-07-05 14:24:10.000000000 -0400
3943 +++ linux-2.6.34.1/arch/sparc/kernel/sys_sparc_64.c 2010-07-07 09:04:43.000000000 -0400
3944 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3945 /* We do not accept a shared mapping if it would violate
3946 * cache aliasing constraints.
3947 */
3948 - if ((flags & MAP_SHARED) &&
3949 + if ((filp || (flags & MAP_SHARED)) &&
3950 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3951 return -EINVAL;
3952 return addr;
3953 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3954 if (filp || (flags & MAP_SHARED))
3955 do_color_align = 1;
3956
3957 +#ifdef CONFIG_PAX_RANDMMAP
3958 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3959 +#endif
3960 +
3961 if (addr) {
3962 if (do_color_align)
3963 addr = COLOUR_ALIGN(addr, pgoff);
3964 @@ -152,9 +156,9 @@ unsigned long arch_get_unmapped_area(str
3965 }
3966
3967 if (len > mm->cached_hole_size) {
3968 - start_addr = addr = mm->free_area_cache;
3969 + start_addr = addr = mm->free_area_cache;
3970 } else {
3971 - start_addr = addr = TASK_UNMAPPED_BASE;
3972 + start_addr = addr = mm->mmap_base;
3973 mm->cached_hole_size = 0;
3974 }
3975
3976 @@ -174,8 +178,8 @@ full_search:
3977 vma = find_vma(mm, VA_EXCLUDE_END);
3978 }
3979 if (unlikely(task_size < addr)) {
3980 - if (start_addr != TASK_UNMAPPED_BASE) {
3981 - start_addr = addr = TASK_UNMAPPED_BASE;
3982 + if (start_addr != mm->mmap_base) {
3983 + start_addr = addr = mm->mmap_base;
3984 mm->cached_hole_size = 0;
3985 goto full_search;
3986 }
3987 @@ -215,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
3988 /* We do not accept a shared mapping if it would violate
3989 * cache aliasing constraints.
3990 */
3991 - if ((flags & MAP_SHARED) &&
3992 + if ((filp || (flags & MAP_SHARED)) &&
3993 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3994 return -EINVAL;
3995 return addr;
3996 @@ -385,6 +389,12 @@ void arch_pick_mmap_layout(struct mm_str
3997 gap == RLIM_INFINITY ||
3998 sysctl_legacy_va_layout) {
3999 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4000 +
4001 +#ifdef CONFIG_PAX_RANDMMAP
4002 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4003 + mm->mmap_base += mm->delta_mmap;
4004 +#endif
4005 +
4006 mm->get_unmapped_area = arch_get_unmapped_area;
4007 mm->unmap_area = arch_unmap_area;
4008 } else {
4009 @@ -397,6 +407,12 @@ void arch_pick_mmap_layout(struct mm_str
4010 gap = (task_size / 6 * 5);
4011
4012 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4013 +
4014 +#ifdef CONFIG_PAX_RANDMMAP
4015 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4016 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4017 +#endif
4018 +
4019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4020 mm->unmap_area = arch_unmap_area_topdown;
4021 }
4022 diff -urNp linux-2.6.34.1/arch/sparc/kernel/traps_64.c linux-2.6.34.1/arch/sparc/kernel/traps_64.c
4023 --- linux-2.6.34.1/arch/sparc/kernel/traps_64.c 2010-07-05 14:24:10.000000000 -0400
4024 +++ linux-2.6.34.1/arch/sparc/kernel/traps_64.c 2010-07-07 09:04:43.000000000 -0400
4025 @@ -94,6 +94,12 @@ void bad_trap(struct pt_regs *regs, long
4026
4027 lvl -= 0x100;
4028 if (regs->tstate & TSTATE_PRIV) {
4029 +
4030 +#ifdef CONFIG_PAX_REFCOUNT
4031 + if (lvl == 6)
4032 + pax_report_refcount_overflow(regs);
4033 +#endif
4034 +
4035 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4036 die_if_kernel(buffer, regs);
4037 }
4038 @@ -112,11 +118,16 @@ void bad_trap(struct pt_regs *regs, long
4039 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4040 {
4041 char buffer[32];
4042 -
4043 +
4044 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4045 0, lvl, SIGTRAP) == NOTIFY_STOP)
4046 return;
4047
4048 +#ifdef CONFIG_PAX_REFCOUNT
4049 + if (lvl == 6)
4050 + pax_report_refcount_overflow(regs);
4051 +#endif
4052 +
4053 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4054
4055 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4056 diff -urNp linux-2.6.34.1/arch/sparc/lib/atomic_64.S linux-2.6.34.1/arch/sparc/lib/atomic_64.S
4057 --- linux-2.6.34.1/arch/sparc/lib/atomic_64.S 2010-07-05 14:24:10.000000000 -0400
4058 +++ linux-2.6.34.1/arch/sparc/lib/atomic_64.S 2010-07-07 09:04:44.000000000 -0400
4059 @@ -18,7 +18,12 @@
4060 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4061 BACKOFF_SETUP(%o2)
4062 1: lduw [%o1], %g1
4063 - add %g1, %o0, %g7
4064 + addcc %g1, %o0, %g7
4065 +
4066 +#ifdef CONFIG_PAX_REFCOUNT
4067 + tvs %icc, 6
4068 +#endif
4069 +
4070 cas [%o1], %g1, %g7
4071 cmp %g1, %g7
4072 bne,pn %icc, 2f
4073 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4074 2: BACKOFF_SPIN(%o2, %o3, 1b)
4075 .size atomic_add, .-atomic_add
4076
4077 + .globl atomic_add_unchecked
4078 + .type atomic_add_unchecked,#function
4079 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4080 + BACKOFF_SETUP(%o2)
4081 +1: lduw [%o1], %g1
4082 + add %g1, %o0, %g7
4083 + cas [%o1], %g1, %g7
4084 + cmp %g1, %g7
4085 + bne,pn %icc, 2f
4086 + nop
4087 + retl
4088 + nop
4089 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4090 + .size atomic_add_unchecked, .-atomic_add_unchecked
4091 +
4092 .globl atomic_sub
4093 .type atomic_sub,#function
4094 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4095 BACKOFF_SETUP(%o2)
4096 1: lduw [%o1], %g1
4097 - sub %g1, %o0, %g7
4098 + subcc %g1, %o0, %g7
4099 +
4100 +#ifdef CONFIG_PAX_REFCOUNT
4101 + tvs %icc, 6
4102 +#endif
4103 +
4104 cas [%o1], %g1, %g7
4105 cmp %g1, %g7
4106 bne,pn %icc, 2f
4107 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4108 2: BACKOFF_SPIN(%o2, %o3, 1b)
4109 .size atomic_sub, .-atomic_sub
4110
4111 + .globl atomic_sub_unchecked
4112 + .type atomic_sub_unchecked,#function
4113 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4114 + BACKOFF_SETUP(%o2)
4115 +1: lduw [%o1], %g1
4116 + sub %g1, %o0, %g7
4117 + cas [%o1], %g1, %g7
4118 + cmp %g1, %g7
4119 + bne,pn %icc, 2f
4120 + nop
4121 + retl
4122 + nop
4123 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4124 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4125 +
4126 .globl atomic_add_ret
4127 .type atomic_add_ret,#function
4128 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4129 BACKOFF_SETUP(%o2)
4130 1: lduw [%o1], %g1
4131 - add %g1, %o0, %g7
4132 + addcc %g1, %o0, %g7
4133 +
4134 +#ifdef CONFIG_PAX_REFCOUNT
4135 + tvs %icc, 6
4136 +#endif
4137 +
4138 cas [%o1], %g1, %g7
4139 cmp %g1, %g7
4140 bne,pn %icc, 2f
4141 @@ -64,7 +109,12 @@ atomic_add_ret: /* %o0 = increment, %o1
4142 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4143 BACKOFF_SETUP(%o2)
4144 1: lduw [%o1], %g1
4145 - sub %g1, %o0, %g7
4146 + subcc %g1, %o0, %g7
4147 +
4148 +#ifdef CONFIG_PAX_REFCOUNT
4149 + tvs %icc, 6
4150 +#endif
4151 +
4152 cas [%o1], %g1, %g7
4153 cmp %g1, %g7
4154 bne,pn %icc, 2f
4155 @@ -80,7 +130,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4156 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4157 BACKOFF_SETUP(%o2)
4158 1: ldx [%o1], %g1
4159 - add %g1, %o0, %g7
4160 + addcc %g1, %o0, %g7
4161 +
4162 +#ifdef CONFIG_PAX_REFCOUNT
4163 + tvs %xcc, 6
4164 +#endif
4165 +
4166 casx [%o1], %g1, %g7
4167 cmp %g1, %g7
4168 bne,pn %xcc, 2f
4169 @@ -90,12 +145,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4170 2: BACKOFF_SPIN(%o2, %o3, 1b)
4171 .size atomic64_add, .-atomic64_add
4172
4173 + .globl atomic64_add_unchecked
4174 + .type atomic64_add_unchecked,#function
4175 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4176 + BACKOFF_SETUP(%o2)
4177 +1: ldx [%o1], %g1
4178 + addcc %g1, %o0, %g7
4179 + casx [%o1], %g1, %g7
4180 + cmp %g1, %g7
4181 + bne,pn %xcc, 2f
4182 + nop
4183 + retl
4184 + nop
4185 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4186 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4187 +
4188 .globl atomic64_sub
4189 .type atomic64_sub,#function
4190 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4191 BACKOFF_SETUP(%o2)
4192 1: ldx [%o1], %g1
4193 - sub %g1, %o0, %g7
4194 + subcc %g1, %o0, %g7
4195 +
4196 +#ifdef CONFIG_PAX_REFCOUNT
4197 + tvs %xcc, 6
4198 +#endif
4199 +
4200 casx [%o1], %g1, %g7
4201 cmp %g1, %g7
4202 bne,pn %xcc, 2f
4203 @@ -105,12 +180,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4204 2: BACKOFF_SPIN(%o2, %o3, 1b)
4205 .size atomic64_sub, .-atomic64_sub
4206
4207 + .globl atomic64_sub_unchecked
4208 + .type atomic64_sub_unchecked,#function
4209 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4210 + BACKOFF_SETUP(%o2)
4211 +1: ldx [%o1], %g1
4212 + subcc %g1, %o0, %g7
4213 + casx [%o1], %g1, %g7
4214 + cmp %g1, %g7
4215 + bne,pn %xcc, 2f
4216 + nop
4217 + retl
4218 + nop
4219 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4220 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4221 +
4222 .globl atomic64_add_ret
4223 .type atomic64_add_ret,#function
4224 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4225 BACKOFF_SETUP(%o2)
4226 1: ldx [%o1], %g1
4227 - add %g1, %o0, %g7
4228 + addcc %g1, %o0, %g7
4229 +
4230 +#ifdef CONFIG_PAX_REFCOUNT
4231 + tvs %xcc, 6
4232 +#endif
4233 +
4234 casx [%o1], %g1, %g7
4235 cmp %g1, %g7
4236 bne,pn %xcc, 2f
4237 @@ -121,12 +216,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4238 2: BACKOFF_SPIN(%o2, %o3, 1b)
4239 .size atomic64_add_ret, .-atomic64_add_ret
4240
4241 + .globl atomic64_add_ret_unchecked
4242 + .type atomic64_add_ret_unchecked,#function
4243 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4244 + BACKOFF_SETUP(%o2)
4245 +1: ldx [%o1], %g1
4246 + addcc %g1, %o0, %g7
4247 + casx [%o1], %g1, %g7
4248 + cmp %g1, %g7
4249 + bne,pn %xcc, 2f
4250 + add %g7, %o0, %g7
4251 + mov %g7, %o0
4252 + retl
4253 + nop
4254 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4255 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4256 +
4257 .globl atomic64_sub_ret
4258 .type atomic64_sub_ret,#function
4259 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4260 BACKOFF_SETUP(%o2)
4261 1: ldx [%o1], %g1
4262 - sub %g1, %o0, %g7
4263 + subcc %g1, %o0, %g7
4264 +
4265 +#ifdef CONFIG_PAX_REFCOUNT
4266 + tvs %xcc, 6
4267 +#endif
4268 +
4269 casx [%o1], %g1, %g7
4270 cmp %g1, %g7
4271 bne,pn %xcc, 2f
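[editor's note, not part of the patch] The *_unchecked entry points above deliberately keep the plain add/sub (no tvs), so counters that are allowed to wrap, such as statistics, never hit the overflow trap; other parts of the patch convert such users to an atomic_unchecked_t type. A user-space analogue of the two flavours (the names and the trap behaviour are stand-ins, not kernel code):

#include <stdio.h>
#include <limits.h>

/* "checked" counter refuses to wrap; "unchecked" is allowed to,
 * as the *_unchecked atomics above are */
static int inc_checked(int *v)
{
	if (*v == INT_MAX)
		return -1;        /* the kernel would trap and kill the task */
	(*v)++;
	return 0;
}

static void inc_unchecked(unsigned int *v)
{
	(*v)++;                   /* wrapping is fine, e.g. for statistics */
}

int main(void)
{
	int ref = INT_MAX;
	unsigned int stats = 0xffffffffu;

	printf("checked inc at INT_MAX: %s\n", inc_checked(&ref) ? "refused" : "ok");
	inc_unchecked(&stats);
	printf("unchecked counter wrapped to %u\n", stats);
	return 0;
}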
4272 diff -urNp linux-2.6.34.1/arch/sparc/lib/ksyms.c linux-2.6.34.1/arch/sparc/lib/ksyms.c
4273 --- linux-2.6.34.1/arch/sparc/lib/ksyms.c 2010-07-05 14:24:10.000000000 -0400
4274 +++ linux-2.6.34.1/arch/sparc/lib/ksyms.c 2010-07-07 09:04:44.000000000 -0400
4275 @@ -142,12 +142,15 @@ EXPORT_SYMBOL(__downgrade_write);
4276
4277 /* Atomic counter implementation. */
4278 EXPORT_SYMBOL(atomic_add);
4279 +EXPORT_SYMBOL(atomic_add_unchecked);
4280 EXPORT_SYMBOL(atomic_add_ret);
4281 EXPORT_SYMBOL(atomic_sub);
4282 +EXPORT_SYMBOL(atomic_sub_unchecked);
4283 EXPORT_SYMBOL(atomic_sub_ret);
4284 EXPORT_SYMBOL(atomic64_add);
4285 EXPORT_SYMBOL(atomic64_add_ret);
4286 EXPORT_SYMBOL(atomic64_sub);
4287 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4288 EXPORT_SYMBOL(atomic64_sub_ret);
4289
4290 /* Atomic bit operations. */
4291 diff -urNp linux-2.6.34.1/arch/sparc/lib/rwsem_64.S linux-2.6.34.1/arch/sparc/lib/rwsem_64.S
4292 --- linux-2.6.34.1/arch/sparc/lib/rwsem_64.S 2010-07-05 14:24:10.000000000 -0400
4293 +++ linux-2.6.34.1/arch/sparc/lib/rwsem_64.S 2010-07-07 09:04:44.000000000 -0400
4294 @@ -11,7 +11,12 @@
4295 .globl __down_read
4296 __down_read:
4297 1: lduw [%o0], %g1
4298 - add %g1, 1, %g7
4299 + addcc %g1, 1, %g7
4300 +
4301 +#ifdef CONFIG_PAX_REFCOUNT
4302 + tvs %icc, 6
4303 +#endif
4304 +
4305 cas [%o0], %g1, %g7
4306 cmp %g1, %g7
4307 bne,pn %icc, 1b
4308 @@ -33,7 +38,12 @@ __down_read:
4309 .globl __down_read_trylock
4310 __down_read_trylock:
4311 1: lduw [%o0], %g1
4312 - add %g1, 1, %g7
4313 + addcc %g1, 1, %g7
4314 +
4315 +#ifdef CONFIG_PAX_REFCOUNT
4316 + tvs %icc, 6
4317 +#endif
4318 +
4319 cmp %g7, 0
4320 bl,pn %icc, 2f
4321 mov 0, %o1
4322 @@ -51,7 +61,12 @@ __down_write:
4323 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4324 1:
4325 lduw [%o0], %g3
4326 - add %g3, %g1, %g7
4327 + addcc %g3, %g1, %g7
4328 +
4329 +#ifdef CONFIG_PAX_REFCOUNT
4330 + tvs %icc, 6
4331 +#endif
4332 +
4333 cas [%o0], %g3, %g7
4334 cmp %g3, %g7
4335 bne,pn %icc, 1b
4336 @@ -77,7 +92,12 @@ __down_write_trylock:
4337 cmp %g3, 0
4338 bne,pn %icc, 2f
4339 mov 0, %o1
4340 - add %g3, %g1, %g7
4341 + addcc %g3, %g1, %g7
4342 +
4343 +#ifdef CONFIG_PAX_REFCOUNT
4344 + tvs %icc, 6
4345 +#endif
4346 +
4347 cas [%o0], %g3, %g7
4348 cmp %g3, %g7
4349 bne,pn %icc, 1b
4350 @@ -90,7 +110,12 @@ __down_write_trylock:
4351 __up_read:
4352 1:
4353 lduw [%o0], %g1
4354 - sub %g1, 1, %g7
4355 + subcc %g1, 1, %g7
4356 +
4357 +#ifdef CONFIG_PAX_REFCOUNT
4358 + tvs %icc, 6
4359 +#endif
4360 +
4361 cas [%o0], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, 1b
4364 @@ -118,7 +143,12 @@ __up_write:
4365 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4366 1:
4367 lduw [%o0], %g3
4368 - sub %g3, %g1, %g7
4369 + subcc %g3, %g1, %g7
4370 +
4371 +#ifdef CONFIG_PAX_REFCOUNT
4372 + tvs %icc, 6
4373 +#endif
4374 +
4375 cas [%o0], %g3, %g7
4376 cmp %g3, %g7
4377 bne,pn %icc, 1b
4378 @@ -143,7 +173,12 @@ __downgrade_write:
4379 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4380 1:
4381 lduw [%o0], %g3
4382 - sub %g3, %g1, %g7
4383 + subcc %g3, %g1, %g7
4384 +
4385 +#ifdef CONFIG_PAX_REFCOUNT
4386 + tvs %icc, 6
4387 +#endif
4388 +
4389 cas [%o0], %g3, %g7
4390 cmp %g3, %g7
4391 bne,pn %icc, 1b
4392 diff -urNp linux-2.6.34.1/arch/sparc/mm/Makefile linux-2.6.34.1/arch/sparc/mm/Makefile
4393 --- linux-2.6.34.1/arch/sparc/mm/Makefile 2010-07-05 14:24:10.000000000 -0400
4394 +++ linux-2.6.34.1/arch/sparc/mm/Makefile 2010-07-07 09:04:44.000000000 -0400
4395 @@ -2,7 +2,7 @@
4396 #
4397
4398 asflags-y := -ansi
4399 -ccflags-y := -Werror
4400 +#ccflags-y := -Werror
4401
4402 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
4403 obj-y += fault_$(BITS).o
4404 diff -urNp linux-2.6.34.1/arch/sparc/mm/fault_32.c linux-2.6.34.1/arch/sparc/mm/fault_32.c
4405 --- linux-2.6.34.1/arch/sparc/mm/fault_32.c 2010-07-05 14:24:10.000000000 -0400
4406 +++ linux-2.6.34.1/arch/sparc/mm/fault_32.c 2010-07-07 09:04:44.000000000 -0400
4407 @@ -22,6 +22,9 @@
4408 #include <linux/interrupt.h>
4409 #include <linux/module.h>
4410 #include <linux/kdebug.h>
4411 +#include <linux/slab.h>
4412 +#include <linux/pagemap.h>
4413 +#include <linux/compiler.h>
4414
4415 #include <asm/system.h>
4416 #include <asm/page.h>
4417 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4418 return safe_compute_effective_address(regs, insn);
4419 }
4420
4421 +#ifdef CONFIG_PAX_PAGEEXEC
4422 +#ifdef CONFIG_PAX_DLRESOLVE
4423 +static void pax_emuplt_close(struct vm_area_struct *vma)
4424 +{
4425 + vma->vm_mm->call_dl_resolve = 0UL;
4426 +}
4427 +
4428 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4429 +{
4430 + unsigned int *kaddr;
4431 +
4432 + vmf->page = alloc_page(GFP_HIGHUSER);
4433 + if (!vmf->page)
4434 + return VM_FAULT_OOM;
4435 +
4436 + kaddr = kmap(vmf->page);
4437 + memset(kaddr, 0, PAGE_SIZE);
4438 + kaddr[0] = 0x9DE3BFA8U; /* save */
4439 + flush_dcache_page(vmf->page);
4440 + kunmap(vmf->page);
4441 + return VM_FAULT_MAJOR;
4442 +}
4443 +
4444 +static const struct vm_operations_struct pax_vm_ops = {
4445 + .close = pax_emuplt_close,
4446 + .fault = pax_emuplt_fault
4447 +};
4448 +
4449 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4450 +{
4451 + int ret;
4452 +
4453 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4454 + vma->vm_mm = current->mm;
4455 + vma->vm_start = addr;
4456 + vma->vm_end = addr + PAGE_SIZE;
4457 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4458 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4459 + vma->vm_ops = &pax_vm_ops;
4460 +
4461 + ret = insert_vm_struct(current->mm, vma);
4462 + if (ret)
4463 + return ret;
4464 +
4465 + ++current->mm->total_vm;
4466 + return 0;
4467 +}
4468 +#endif
4469 +
4470 +/*
4471 + * PaX: decide what to do with offenders (regs->pc = fault address)
4472 + *
4473 + * returns 1 when task should be killed
4474 + * 2 when patched PLT trampoline was detected
4475 + * 3 when unpatched PLT trampoline was detected
4476 + */
4477 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4478 +{
4479 +
4480 +#ifdef CONFIG_PAX_EMUPLT
4481 + int err;
4482 +
4483 + do { /* PaX: patched PLT emulation #1 */
4484 + unsigned int sethi1, sethi2, jmpl;
4485 +
4486 + err = get_user(sethi1, (unsigned int *)regs->pc);
4487 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4488 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4489 +
4490 + if (err)
4491 + break;
4492 +
4493 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4494 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4495 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4496 + {
4497 + unsigned int addr;
4498 +
4499 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4500 + addr = regs->u_regs[UREG_G1];
4501 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4502 + regs->pc = addr;
4503 + regs->npc = addr+4;
4504 + return 2;
4505 + }
4506 + } while (0);
4507 +
4508 + { /* PaX: patched PLT emulation #2 */
4509 + unsigned int ba;
4510 +
4511 + err = get_user(ba, (unsigned int *)regs->pc);
4512 +
4513 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4514 + unsigned int addr;
4515 +
4516 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4517 + regs->pc = addr;
4518 + regs->npc = addr+4;
4519 + return 2;
4520 + }
4521 + }
4522 +
4523 + do { /* PaX: patched PLT emulation #3 */
4524 + unsigned int sethi, jmpl, nop;
4525 +
4526 + err = get_user(sethi, (unsigned int *)regs->pc);
4527 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4528 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4529 +
4530 + if (err)
4531 + break;
4532 +
4533 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4534 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4535 + nop == 0x01000000U)
4536 + {
4537 + unsigned int addr;
4538 +
4539 + addr = (sethi & 0x003FFFFFU) << 10;
4540 + regs->u_regs[UREG_G1] = addr;
4541 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4542 + regs->pc = addr;
4543 + regs->npc = addr+4;
4544 + return 2;
4545 + }
4546 + } while (0);
4547 +
4548 + do { /* PaX: unpatched PLT emulation step 1 */
4549 + unsigned int sethi, ba, nop;
4550 +
4551 + err = get_user(sethi, (unsigned int *)regs->pc);
4552 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4553 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4554 +
4555 + if (err)
4556 + break;
4557 +
4558 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4559 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4560 + nop == 0x01000000U)
4561 + {
4562 + unsigned int addr, save, call;
4563 +
4564 + if ((ba & 0xFFC00000U) == 0x30800000U)
4565 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4566 + else
4567 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4568 +
4569 + err = get_user(save, (unsigned int *)addr);
4570 + err |= get_user(call, (unsigned int *)(addr+4));
4571 + err |= get_user(nop, (unsigned int *)(addr+8));
4572 + if (err)
4573 + break;
4574 +
4575 +#ifdef CONFIG_PAX_DLRESOLVE
4576 + if (save == 0x9DE3BFA8U &&
4577 + (call & 0xC0000000U) == 0x40000000U &&
4578 + nop == 0x01000000U)
4579 + {
4580 + struct vm_area_struct *vma;
4581 + unsigned long call_dl_resolve;
4582 +
4583 + down_read(&current->mm->mmap_sem);
4584 + call_dl_resolve = current->mm->call_dl_resolve;
4585 + up_read(&current->mm->mmap_sem);
4586 + if (likely(call_dl_resolve))
4587 + goto emulate;
4588 +
4589 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4590 +
4591 + down_write(&current->mm->mmap_sem);
4592 + if (current->mm->call_dl_resolve) {
4593 + call_dl_resolve = current->mm->call_dl_resolve;
4594 + up_write(&current->mm->mmap_sem);
4595 + if (vma)
4596 + kmem_cache_free(vm_area_cachep, vma);
4597 + goto emulate;
4598 + }
4599 +
4600 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4601 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4602 + up_write(&current->mm->mmap_sem);
4603 + if (vma)
4604 + kmem_cache_free(vm_area_cachep, vma);
4605 + return 1;
4606 + }
4607 +
4608 + if (pax_insert_vma(vma, call_dl_resolve)) {
4609 + up_write(&current->mm->mmap_sem);
4610 + kmem_cache_free(vm_area_cachep, vma);
4611 + return 1;
4612 + }
4613 +
4614 + current->mm->call_dl_resolve = call_dl_resolve;
4615 + up_write(&current->mm->mmap_sem);
4616 +
4617 +emulate:
4618 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4619 + regs->pc = call_dl_resolve;
4620 + regs->npc = addr+4;
4621 + return 3;
4622 + }
4623 +#endif
4624 +
4625 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4626 + if ((save & 0xFFC00000U) == 0x05000000U &&
4627 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4628 + nop == 0x01000000U)
4629 + {
4630 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4631 + regs->u_regs[UREG_G2] = addr + 4;
4632 + addr = (save & 0x003FFFFFU) << 10;
4633 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4634 + regs->pc = addr;
4635 + regs->npc = addr+4;
4636 + return 3;
4637 + }
4638 + }
4639 + } while (0);
4640 +
4641 + do { /* PaX: unpatched PLT emulation step 2 */
4642 + unsigned int save, call, nop;
4643 +
4644 + err = get_user(save, (unsigned int *)(regs->pc-4));
4645 + err |= get_user(call, (unsigned int *)regs->pc);
4646 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4647 + if (err)
4648 + break;
4649 +
4650 + if (save == 0x9DE3BFA8U &&
4651 + (call & 0xC0000000U) == 0x40000000U &&
4652 + nop == 0x01000000U)
4653 + {
4654 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4655 +
4656 + regs->u_regs[UREG_RETPC] = regs->pc;
4657 + regs->pc = dl_resolve;
4658 + regs->npc = dl_resolve+4;
4659 + return 3;
4660 + }
4661 + } while (0);
4662 +#endif
4663 +
4664 + return 1;
4665 +}
4666 +
4667 +void pax_report_insns(void *pc, void *sp)
4668 +{
4669 + unsigned long i;
4670 +
4671 + printk(KERN_ERR "PAX: bytes at PC: ");
4672 + for (i = 0; i < 8; i++) {
4673 + unsigned int c;
4674 + if (get_user(c, (unsigned int *)pc+i))
4675 + printk(KERN_CONT "???????? ");
4676 + else
4677 + printk(KERN_CONT "%08x ", c);
4678 + }
4679 + printk("\n");
4680 +}
4681 +#endif
4682 +
4683 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4684 int text_fault)
4685 {
4686 @@ -282,6 +547,24 @@ good_area:
4687 if(!(vma->vm_flags & VM_WRITE))
4688 goto bad_area;
4689 } else {
4690 +
4691 +#ifdef CONFIG_PAX_PAGEEXEC
4692 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4693 + up_read(&mm->mmap_sem);
4694 + switch (pax_handle_fetch_fault(regs)) {
4695 +
4696 +#ifdef CONFIG_PAX_EMUPLT
4697 + case 2:
4698 + case 3:
4699 + return;
4700 +#endif
4701 +
4702 + }
4703 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4704 + do_group_exit(SIGKILL);
4705 + }
4706 +#endif
4707 +
4708 /* Allow reads even for write-only mappings */
4709 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4710 goto bad_area;
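The patched-PLT checks added above recognise short, fixed instruction sequences (sethi/jmpl, sethi/sethi/jmpl, a bare ba) and recompute the branch target from their immediate fields before resuming at it. Restated as a standalone sketch with illustrative helper names (not kernel APIs), the sethi/jmpl arithmetic used in those checks is:

/* Sketch of the immediate decoding used by the emulation code above. */
#include <stdint.h>

static uint32_t sethi_imm22(uint32_t insn)
{
        /* sethi places imm22 (bits 21:0) into bits 31:10 of the register */
        return (insn & 0x003FFFFFU) << 10;
}

static int32_t jmpl_simm13(uint32_t insn)
{
        /* sign-extend the 13-bit immediate in bits 12:0 */
        return (int32_t)((insn & 0x1FFFU) ^ 0x1000U) - 0x1000;
}

static uint32_t plt_target(uint32_t sethi, uint32_t jmpl)
{
        /* "sethi %hi(x), %g1; jmpl %g1 + %lo(x), %g0" jumps to x */
        return sethi_imm22(sethi) + (uint32_t)jmpl_simm13(jmpl);
}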
4711 diff -urNp linux-2.6.34.1/arch/sparc/mm/fault_64.c linux-2.6.34.1/arch/sparc/mm/fault_64.c
4712 --- linux-2.6.34.1/arch/sparc/mm/fault_64.c 2010-07-05 14:24:10.000000000 -0400
4713 +++ linux-2.6.34.1/arch/sparc/mm/fault_64.c 2010-07-07 09:04:44.000000000 -0400
4714 @@ -21,6 +21,9 @@
4715 #include <linux/kprobes.h>
4716 #include <linux/kdebug.h>
4717 #include <linux/percpu.h>
4718 +#include <linux/slab.h>
4719 +#include <linux/pagemap.h>
4720 +#include <linux/compiler.h>
4721
4722 #include <asm/page.h>
4723 #include <asm/pgtable.h>
4724 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4725 show_regs(regs);
4726 }
4727
4728 +#ifdef CONFIG_PAX_PAGEEXEC
4729 +#ifdef CONFIG_PAX_DLRESOLVE
4730 +static void pax_emuplt_close(struct vm_area_struct *vma)
4731 +{
4732 + vma->vm_mm->call_dl_resolve = 0UL;
4733 +}
4734 +
4735 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4736 +{
4737 + unsigned int *kaddr;
4738 +
4739 + vmf->page = alloc_page(GFP_HIGHUSER);
4740 + if (!vmf->page)
4741 + return VM_FAULT_OOM;
4742 +
4743 + kaddr = kmap(vmf->page);
4744 + memset(kaddr, 0, PAGE_SIZE);
4745 + kaddr[0] = 0x9DE3BFA8U; /* save */
4746 + flush_dcache_page(vmf->page);
4747 + kunmap(vmf->page);
4748 + return VM_FAULT_MAJOR;
4749 +}
4750 +
4751 +static const struct vm_operations_struct pax_vm_ops = {
4752 + .close = pax_emuplt_close,
4753 + .fault = pax_emuplt_fault
4754 +};
4755 +
4756 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4757 +{
4758 + int ret;
4759 +
4760 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4761 + vma->vm_mm = current->mm;
4762 + vma->vm_start = addr;
4763 + vma->vm_end = addr + PAGE_SIZE;
4764 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4765 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4766 + vma->vm_ops = &pax_vm_ops;
4767 +
4768 + ret = insert_vm_struct(current->mm, vma);
4769 + if (ret)
4770 + return ret;
4771 +
4772 + ++current->mm->total_vm;
4773 + return 0;
4774 +}
4775 +#endif
4776 +
4777 +/*
4778 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4779 + *
4780 + * returns 1 when task should be killed
4781 + * 2 when patched PLT trampoline was detected
4782 + * 3 when unpatched PLT trampoline was detected
4783 + */
4784 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4785 +{
4786 +
4787 +#ifdef CONFIG_PAX_EMUPLT
4788 + int err;
4789 +
4790 + do { /* PaX: patched PLT emulation #1 */
4791 + unsigned int sethi1, sethi2, jmpl;
4792 +
4793 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4794 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4795 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4796 +
4797 + if (err)
4798 + break;
4799 +
4800 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4801 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4802 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4803 + {
4804 + unsigned long addr;
4805 +
4806 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4807 + addr = regs->u_regs[UREG_G1];
4808 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4809 +
4810 + if (test_thread_flag(TIF_32BIT))
4811 + addr &= 0xFFFFFFFFUL;
4812 +
4813 + regs->tpc = addr;
4814 + regs->tnpc = addr+4;
4815 + return 2;
4816 + }
4817 + } while (0);
4818 +
4819 + { /* PaX: patched PLT emulation #2 */
4820 + unsigned int ba;
4821 +
4822 + err = get_user(ba, (unsigned int *)regs->tpc);
4823 +
4824 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4825 + unsigned long addr;
4826 +
4827 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4828 +
4829 + if (test_thread_flag(TIF_32BIT))
4830 + addr &= 0xFFFFFFFFUL;
4831 +
4832 + regs->tpc = addr;
4833 + regs->tnpc = addr+4;
4834 + return 2;
4835 + }
4836 + }
4837 +
4838 + do { /* PaX: patched PLT emulation #3 */
4839 + unsigned int sethi, jmpl, nop;
4840 +
4841 + err = get_user(sethi, (unsigned int *)regs->tpc);
4842 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4843 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4844 +
4845 + if (err)
4846 + break;
4847 +
4848 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4849 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4850 + nop == 0x01000000U)
4851 + {
4852 + unsigned long addr;
4853 +
4854 + addr = (sethi & 0x003FFFFFU) << 10;
4855 + regs->u_regs[UREG_G1] = addr;
4856 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4857 +
4858 + if (test_thread_flag(TIF_32BIT))
4859 + addr &= 0xFFFFFFFFUL;
4860 +
4861 + regs->tpc = addr;
4862 + regs->tnpc = addr+4;
4863 + return 2;
4864 + }
4865 + } while (0);
4866 +
4867 + do { /* PaX: patched PLT emulation #4 */
4868 + unsigned int sethi, mov1, call, mov2;
4869 +
4870 + err = get_user(sethi, (unsigned int *)regs->tpc);
4871 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4872 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4873 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4874 +
4875 + if (err)
4876 + break;
4877 +
4878 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4879 + mov1 == 0x8210000FU &&
4880 + (call & 0xC0000000U) == 0x40000000U &&
4881 + mov2 == 0x9E100001U)
4882 + {
4883 + unsigned long addr;
4884 +
4885 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4886 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4887 +
4888 + if (test_thread_flag(TIF_32BIT))
4889 + addr &= 0xFFFFFFFFUL;
4890 +
4891 + regs->tpc = addr;
4892 + regs->tnpc = addr+4;
4893 + return 2;
4894 + }
4895 + } while (0);
4896 +
4897 + do { /* PaX: patched PLT emulation #5 */
4898 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4899 +
4900 + err = get_user(sethi, (unsigned int *)regs->tpc);
4901 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4902 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4903 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4904 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4905 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4906 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4907 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4908 +
4909 + if (err)
4910 + break;
4911 +
4912 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4913 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4914 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4915 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4916 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4917 + sllx == 0x83287020U &&
4918 + jmpl == 0x81C04005U &&
4919 + nop == 0x01000000U)
4920 + {
4921 + unsigned long addr;
4922 +
4923 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4924 + regs->u_regs[UREG_G1] <<= 32;
4925 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4926 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4927 + regs->tpc = addr;
4928 + regs->tnpc = addr+4;
4929 + return 2;
4930 + }
4931 + } while (0);
4932 +
4933 + do { /* PaX: patched PLT emulation #6 */
4934 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4935 +
4936 + err = get_user(sethi, (unsigned int *)regs->tpc);
4937 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4938 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4939 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4940 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4941 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4942 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4943 +
4944 + if (err)
4945 + break;
4946 +
4947 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4948 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4949 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4950 + sllx == 0x83287020U &&
4951 + (or & 0xFFFFE000U) == 0x8A116000U &&
4952 + jmpl == 0x81C04005U &&
4953 + nop == 0x01000000U)
4954 + {
4955 + unsigned long addr;
4956 +
4957 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4958 + regs->u_regs[UREG_G1] <<= 32;
4959 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4960 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4961 + regs->tpc = addr;
4962 + regs->tnpc = addr+4;
4963 + return 2;
4964 + }
4965 + } while (0);
4966 +
4967 + do { /* PaX: unpatched PLT emulation step 1 */
4968 + unsigned int sethi, ba, nop;
4969 +
4970 + err = get_user(sethi, (unsigned int *)regs->tpc);
4971 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4972 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4973 +
4974 + if (err)
4975 + break;
4976 +
4977 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4978 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4979 + nop == 0x01000000U)
4980 + {
4981 + unsigned long addr;
4982 + unsigned int save, call;
4983 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4984 +
4985 + if ((ba & 0xFFC00000U) == 0x30800000U)
4986 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987 + else
4988 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4989 +
4990 + if (test_thread_flag(TIF_32BIT))
4991 + addr &= 0xFFFFFFFFUL;
4992 +
4993 + err = get_user(save, (unsigned int *)addr);
4994 + err |= get_user(call, (unsigned int *)(addr+4));
4995 + err |= get_user(nop, (unsigned int *)(addr+8));
4996 + if (err)
4997 + break;
4998 +
4999 +#ifdef CONFIG_PAX_DLRESOLVE
5000 + if (save == 0x9DE3BFA8U &&
5001 + (call & 0xC0000000U) == 0x40000000U &&
5002 + nop == 0x01000000U)
5003 + {
5004 + struct vm_area_struct *vma;
5005 + unsigned long call_dl_resolve;
5006 +
5007 + down_read(&current->mm->mmap_sem);
5008 + call_dl_resolve = current->mm->call_dl_resolve;
5009 + up_read(&current->mm->mmap_sem);
5010 + if (likely(call_dl_resolve))
5011 + goto emulate;
5012 +
5013 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5014 +
5015 + down_write(&current->mm->mmap_sem);
5016 + if (current->mm->call_dl_resolve) {
5017 + call_dl_resolve = current->mm->call_dl_resolve;
5018 + up_write(&current->mm->mmap_sem);
5019 + if (vma)
5020 + kmem_cache_free(vm_area_cachep, vma);
5021 + goto emulate;
5022 + }
5023 +
5024 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5025 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5026 + up_write(&current->mm->mmap_sem);
5027 + if (vma)
5028 + kmem_cache_free(vm_area_cachep, vma);
5029 + return 1;
5030 + }
5031 +
5032 + if (pax_insert_vma(vma, call_dl_resolve)) {
5033 + up_write(&current->mm->mmap_sem);
5034 + kmem_cache_free(vm_area_cachep, vma);
5035 + return 1;
5036 + }
5037 +
5038 + current->mm->call_dl_resolve = call_dl_resolve;
5039 + up_write(&current->mm->mmap_sem);
5040 +
5041 +emulate:
5042 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5043 + regs->tpc = call_dl_resolve;
5044 + regs->tnpc = addr+4;
5045 + return 3;
5046 + }
5047 +#endif
5048 +
5049 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5050 + if ((save & 0xFFC00000U) == 0x05000000U &&
5051 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5052 + nop == 0x01000000U)
5053 + {
5054 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5055 + regs->u_regs[UREG_G2] = addr + 4;
5056 + addr = (save & 0x003FFFFFU) << 10;
5057 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5058 +
5059 + if (test_thread_flag(TIF_32BIT))
5060 + addr &= 0xFFFFFFFFUL;
5061 +
5062 + regs->tpc = addr;
5063 + regs->tnpc = addr+4;
5064 + return 3;
5065 + }
5066 +
5067 + /* PaX: 64-bit PLT stub */
5068 + err = get_user(sethi1, (unsigned int *)addr);
5069 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5070 + err |= get_user(or1, (unsigned int *)(addr+8));
5071 + err |= get_user(or2, (unsigned int *)(addr+12));
5072 + err |= get_user(sllx, (unsigned int *)(addr+16));
5073 + err |= get_user(add, (unsigned int *)(addr+20));
5074 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5075 + err |= get_user(nop, (unsigned int *)(addr+28));
5076 + if (err)
5077 + break;
5078 +
5079 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5080 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5081 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5082 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5083 + sllx == 0x89293020U &&
5084 + add == 0x8A010005U &&
5085 + jmpl == 0x89C14000U &&
5086 + nop == 0x01000000U)
5087 + {
5088 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5089 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5090 + regs->u_regs[UREG_G4] <<= 32;
5091 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5092 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5093 + regs->u_regs[UREG_G4] = addr + 24;
5094 + addr = regs->u_regs[UREG_G5];
5095 + regs->tpc = addr;
5096 + regs->tnpc = addr+4;
5097 + return 3;
5098 + }
5099 + }
5100 + } while (0);
5101 +
5102 +#ifdef CONFIG_PAX_DLRESOLVE
5103 + do { /* PaX: unpatched PLT emulation step 2 */
5104 + unsigned int save, call, nop;
5105 +
5106 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5107 + err |= get_user(call, (unsigned int *)regs->tpc);
5108 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5109 + if (err)
5110 + break;
5111 +
5112 + if (save == 0x9DE3BFA8U &&
5113 + (call & 0xC0000000U) == 0x40000000U &&
5114 + nop == 0x01000000U)
5115 + {
5116 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5117 +
5118 + if (test_thread_flag(TIF_32BIT))
5119 + dl_resolve &= 0xFFFFFFFFUL;
5120 +
5121 + regs->u_regs[UREG_RETPC] = regs->tpc;
5122 + regs->tpc = dl_resolve;
5123 + regs->tnpc = dl_resolve+4;
5124 + return 3;
5125 + }
5126 + } while (0);
5127 +#endif
5128 +
5129 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5130 + unsigned int sethi, ba, nop;
5131 +
5132 + err = get_user(sethi, (unsigned int *)regs->tpc);
5133 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5134 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5135 +
5136 + if (err)
5137 + break;
5138 +
5139 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5140 + (ba & 0xFFF00000U) == 0x30600000U &&
5141 + nop == 0x01000000U)
5142 + {
5143 + unsigned long addr;
5144 +
5145 + addr = (sethi & 0x003FFFFFU) << 10;
5146 + regs->u_regs[UREG_G1] = addr;
5147 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148 +
5149 + if (test_thread_flag(TIF_32BIT))
5150 + addr &= 0xFFFFFFFFUL;
5151 +
5152 + regs->tpc = addr;
5153 + regs->tnpc = addr+4;
5154 + return 2;
5155 + }
5156 + } while (0);
5157 +
5158 +#endif
5159 +
5160 + return 1;
5161 +}
5162 +
5163 +void pax_report_insns(void *pc, void *sp)
5164 +{
5165 + unsigned long i;
5166 +
5167 + printk(KERN_ERR "PAX: bytes at PC: ");
5168 + for (i = 0; i < 8; i++) {
5169 + unsigned int c;
5170 + if (get_user(c, (unsigned int *)pc+i))
5171 + printk(KERN_CONT "???????? ");
5172 + else
5173 + printk(KERN_CONT "%08x ", c);
5174 + }
5175 + printk("\n");
5176 +}
5177 +#endif
5178 +
5179 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5180 {
5181 struct mm_struct *mm = current->mm;
5182 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
5183 if (!vma)
5184 goto bad_area;
5185
5186 +#ifdef CONFIG_PAX_PAGEEXEC
5187 + /* PaX: detect ITLB misses on non-exec pages */
5188 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5189 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5190 + {
5191 + if (address != regs->tpc)
5192 + goto good_area;
5193 +
5194 + up_read(&mm->mmap_sem);
5195 + switch (pax_handle_fetch_fault(regs)) {
5196 +
5197 +#ifdef CONFIG_PAX_EMUPLT
5198 + case 2:
5199 + case 3:
5200 + return;
5201 +#endif
5202 +
5203 + }
5204 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5205 + do_group_exit(SIGKILL);
5206 + }
5207 +#endif
5208 +
5209 /* Pure DTLB misses do not tell us whether the fault causing
5210 * load/store/atomic was a write or not, it only says that there
5211 * was no match. So in such a case we (carefully) read the
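The 64-bit variant mirrors fault_32.c but operates on regs->tpc/tnpc, recognises several additional PLT layouts, and truncates every computed target for compat tasks, since a TIF_32BIT process must never be redirected above 4 GiB. In sketch form (illustrative helper, not a kernel function):

/* Illustrative only: the address fixup applied throughout the 64-bit
 * emulation paths above before tpc/tnpc are rewritten. */
static unsigned long compat_fixup(unsigned long addr, int is_compat_task)
{
        return is_compat_task ? (addr & 0xFFFFFFFFUL) : addr;
}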
5212 diff -urNp linux-2.6.34.1/arch/sparc/mm/init_32.c linux-2.6.34.1/arch/sparc/mm/init_32.c
5213 --- linux-2.6.34.1/arch/sparc/mm/init_32.c 2010-07-05 14:24:10.000000000 -0400
5214 +++ linux-2.6.34.1/arch/sparc/mm/init_32.c 2010-07-07 09:04:44.000000000 -0400
5215 @@ -318,6 +318,9 @@ extern void device_scan(void);
5216 pgprot_t PAGE_SHARED __read_mostly;
5217 EXPORT_SYMBOL(PAGE_SHARED);
5218
5219 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5220 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5221 +
5222 void __init paging_init(void)
5223 {
5224 switch(sparc_cpu_model) {
5225 @@ -346,17 +349,17 @@ void __init paging_init(void)
5226
5227 /* Initialize the protection map with non-constant, MMU dependent values. */
5228 protection_map[0] = PAGE_NONE;
5229 - protection_map[1] = PAGE_READONLY;
5230 - protection_map[2] = PAGE_COPY;
5231 - protection_map[3] = PAGE_COPY;
5232 + protection_map[1] = PAGE_READONLY_NOEXEC;
5233 + protection_map[2] = PAGE_COPY_NOEXEC;
5234 + protection_map[3] = PAGE_COPY_NOEXEC;
5235 protection_map[4] = PAGE_READONLY;
5236 protection_map[5] = PAGE_READONLY;
5237 protection_map[6] = PAGE_COPY;
5238 protection_map[7] = PAGE_COPY;
5239 protection_map[8] = PAGE_NONE;
5240 - protection_map[9] = PAGE_READONLY;
5241 - protection_map[10] = PAGE_SHARED;
5242 - protection_map[11] = PAGE_SHARED;
5243 + protection_map[9] = PAGE_READONLY_NOEXEC;
5244 + protection_map[10] = PAGE_SHARED_NOEXEC;
5245 + protection_map[11] = PAGE_SHARED_NOEXEC;
5246 protection_map[12] = PAGE_READONLY;
5247 protection_map[13] = PAGE_READONLY;
5248 protection_map[14] = PAGE_SHARED;
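protection_map[] is indexed by the low four vm_flags bits, so the entries rewritten above (1-3 and 9-11) are exactly the READ/WRITE combinations without VM_EXEC; under PAGEEXEC they now resolve to non-executable protections while the EXEC-carrying indices keep their original values. A minimal sketch of the indexing, assuming the standard vm_flags bit values:

/* Illustrative only: how a vm_flags combination selects a protection_map
 * slot (the real lookup is done by vm_get_page_prot()). */
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static unsigned long prot_index(unsigned long vm_flags)
{
        return vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
}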
5249 diff -urNp linux-2.6.34.1/arch/sparc/mm/srmmu.c linux-2.6.34.1/arch/sparc/mm/srmmu.c
5250 --- linux-2.6.34.1/arch/sparc/mm/srmmu.c 2010-07-05 14:24:10.000000000 -0400
5251 +++ linux-2.6.34.1/arch/sparc/mm/srmmu.c 2010-07-07 09:04:44.000000000 -0400
5252 @@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void)
5253 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5254 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5255 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5256 +
5257 +#ifdef CONFIG_PAX_PAGEEXEC
5258 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5259 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5260 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5261 +#endif
5262 +
5263 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5264 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5265
5266 diff -urNp linux-2.6.34.1/arch/um/include/asm/kmap_types.h linux-2.6.34.1/arch/um/include/asm/kmap_types.h
5267 --- linux-2.6.34.1/arch/um/include/asm/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
5268 +++ linux-2.6.34.1/arch/um/include/asm/kmap_types.h 2010-07-07 09:04:44.000000000 -0400
5269 @@ -23,6 +23,7 @@ enum km_type {
5270 KM_IRQ1,
5271 KM_SOFTIRQ0,
5272 KM_SOFTIRQ1,
5273 + KM_CLEARPAGE,
5274 KM_TYPE_NR
5275 };
5276
5277 diff -urNp linux-2.6.34.1/arch/um/include/asm/page.h linux-2.6.34.1/arch/um/include/asm/page.h
5278 --- linux-2.6.34.1/arch/um/include/asm/page.h 2010-07-05 14:24:10.000000000 -0400
5279 +++ linux-2.6.34.1/arch/um/include/asm/page.h 2010-07-07 09:04:44.000000000 -0400
5280 @@ -14,6 +14,9 @@
5281 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5282 #define PAGE_MASK (~(PAGE_SIZE-1))
5283
5284 +#define ktla_ktva(addr) (addr)
5285 +#define ktva_ktla(addr) (addr)
5286 +
5287 #ifndef __ASSEMBLY__
5288
5289 struct page;
5290 diff -urNp linux-2.6.34.1/arch/um/sys-i386/syscalls.c linux-2.6.34.1/arch/um/sys-i386/syscalls.c
5291 --- linux-2.6.34.1/arch/um/sys-i386/syscalls.c 2010-07-05 14:24:10.000000000 -0400
5292 +++ linux-2.6.34.1/arch/um/sys-i386/syscalls.c 2010-07-07 09:04:44.000000000 -0400
5293 @@ -11,6 +11,21 @@
5294 #include "asm/uaccess.h"
5295 #include "asm/unistd.h"
5296
5297 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5298 +{
5299 + unsigned long pax_task_size = TASK_SIZE;
5300 +
5301 +#ifdef CONFIG_PAX_SEGMEXEC
5302 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5303 + pax_task_size = SEGMEXEC_TASK_SIZE;
5304 +#endif
5305 +
5306 + if (len > pax_task_size || addr > pax_task_size - len)
5307 + return -EINVAL;
5308 +
5309 + return 0;
5310 +}
5311 +
5312 /*
5313 * The prototype on i386 is:
5314 *
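The i386_mmap_check() helper added above bounds a mapping against the (possibly SEGMEXEC-reduced) task size with a wrap-safe comparison: after rejecting len > pax_task_size, testing addr > pax_task_size - len cannot overflow, whereas a naive addr + len > pax_task_size could wrap for a huge addr. A minimal sketch of the same check:

/* Illustrative only: overflow-safe "does [addr, addr+len) fit below limit". */
#include <stdbool.h>

static bool range_fits(unsigned long addr, unsigned long len,
                       unsigned long limit)
{
        if (len > limit)
                return false;
        return addr <= limit - len;
}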
5315 diff -urNp linux-2.6.34.1/arch/x86/Kconfig linux-2.6.34.1/arch/x86/Kconfig
5316 --- linux-2.6.34.1/arch/x86/Kconfig 2010-07-05 14:24:10.000000000 -0400
5317 +++ linux-2.6.34.1/arch/x86/Kconfig 2010-07-07 09:04:46.000000000 -0400
5318 @@ -1123,7 +1123,7 @@ config PAGE_OFFSET
5319 hex
5320 default 0xB0000000 if VMSPLIT_3G_OPT
5321 default 0x80000000 if VMSPLIT_2G
5322 - default 0x78000000 if VMSPLIT_2G_OPT
5323 + default 0x70000000 if VMSPLIT_2G_OPT
5324 default 0x40000000 if VMSPLIT_1G
5325 default 0xC0000000
5326 depends on X86_32
5327 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
5328
5329 config EFI
5330 bool "EFI runtime service support"
5331 - depends on ACPI
5332 + depends on ACPI && !PAX_KERNEXEC
5333 ---help---
5334 This enables the kernel to use EFI runtime services that are
5335 available (such as the EFI variable services).
5336 @@ -1544,6 +1544,7 @@ config KEXEC_JUMP
5337 config PHYSICAL_START
5338 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
5339 default "0x1000000"
5340 + range 0x400000 0x40000000
5341 ---help---
5342 This gives the physical address where the kernel is loaded.
5343
5344 @@ -1608,6 +1609,7 @@ config PHYSICAL_ALIGN
5345 hex
5346 prompt "Alignment value to which kernel should be aligned" if X86_32
5347 default "0x1000000"
5348 + range 0x400000 0x1000000 if PAX_KERNEXEC
5349 range 0x2000 0x1000000
5350 ---help---
5351 This value puts the alignment restrictions on physical address
5352 @@ -1639,9 +1641,10 @@ config HOTPLUG_CPU
5353 Say N if you want to disable CPU hotplug.
5354
5355 config COMPAT_VDSO
5356 - def_bool y
5357 + def_bool n
5358 prompt "Compat VDSO support"
5359 depends on X86_32 || IA32_EMULATION
5360 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5361 ---help---
5362 Map the 32-bit VDSO to the predictable old-style address too.
5363
5364 diff -urNp linux-2.6.34.1/arch/x86/Kconfig.cpu linux-2.6.34.1/arch/x86/Kconfig.cpu
5365 --- linux-2.6.34.1/arch/x86/Kconfig.cpu 2010-07-05 14:24:10.000000000 -0400
5366 +++ linux-2.6.34.1/arch/x86/Kconfig.cpu 2010-07-07 09:04:46.000000000 -0400
5367 @@ -336,7 +336,7 @@ config X86_PPRO_FENCE
5368
5369 config X86_F00F_BUG
5370 def_bool y
5371 - depends on M586MMX || M586TSC || M586 || M486 || M386
5372 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5373
5374 config X86_WP_WORKS_OK
5375 def_bool y
5376 @@ -356,7 +356,7 @@ config X86_POPAD_OK
5377
5378 config X86_ALIGNMENT_16
5379 def_bool y
5380 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5381 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5382
5383 config X86_INTEL_USERCOPY
5384 def_bool y
5385 @@ -402,7 +402,7 @@ config X86_CMPXCHG64
5386 # generates cmov.
5387 config X86_CMOV
5388 def_bool y
5389 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5390 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5391
5392 config X86_MINIMUM_CPU_FAMILY
5393 int
5394 diff -urNp linux-2.6.34.1/arch/x86/Kconfig.debug linux-2.6.34.1/arch/x86/Kconfig.debug
5395 --- linux-2.6.34.1/arch/x86/Kconfig.debug 2010-07-05 14:24:10.000000000 -0400
5396 +++ linux-2.6.34.1/arch/x86/Kconfig.debug 2010-07-07 09:04:46.000000000 -0400
5397 @@ -99,7 +99,7 @@ config X86_PTDUMP
5398 config DEBUG_RODATA
5399 bool "Write protect kernel read-only data structures"
5400 default y
5401 - depends on DEBUG_KERNEL
5402 + depends on DEBUG_KERNEL && BROKEN
5403 ---help---
5404 Mark the kernel read-only data as write-protected in the pagetables,
5405 in order to catch accidental (and incorrect) writes to such const
5406 diff -urNp linux-2.6.34.1/arch/x86/Makefile linux-2.6.34.1/arch/x86/Makefile
5407 --- linux-2.6.34.1/arch/x86/Makefile 2010-07-05 14:24:10.000000000 -0400
5408 +++ linux-2.6.34.1/arch/x86/Makefile 2010-07-07 09:04:49.000000000 -0400
5409 @@ -190,3 +190,12 @@ define archhelp
5410 echo ' FDARGS="..." arguments for the booted kernel'
5411 echo ' FDINITRD=file initrd for the booted kernel'
5412 endef
5413 +
5414 +define OLD_LD
5415 +
5416 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5417 +*** Please upgrade your binutils to 2.18 or newer
5418 +endef
5419 +
5420 +archprepare:
5421 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5422 diff -urNp linux-2.6.34.1/arch/x86/boot/bitops.h linux-2.6.34.1/arch/x86/boot/bitops.h
5423 --- linux-2.6.34.1/arch/x86/boot/bitops.h 2010-07-05 14:24:10.000000000 -0400
5424 +++ linux-2.6.34.1/arch/x86/boot/bitops.h 2010-07-07 09:04:44.000000000 -0400
5425 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5426 u8 v;
5427 const u32 *p = (const u32 *)addr;
5428
5429 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5430 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5431 return v;
5432 }
5433
5434 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5435
5436 static inline void set_bit(int nr, void *addr)
5437 {
5438 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5439 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5440 }
5441
5442 #endif /* BOOT_BITOPS_H */
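The asm-to-asm-volatile conversions here and in the following boot files matter because GCC may CSE, hoist, or drop a non-volatile asm whose outputs look unused or redundant; for statements with side effects or values that can change behind the compiler's back (cpuid, rdmsr/wrmsr, segment and control-register reads), volatile forces each one to be emitted where it was written. A minimal sketch of the difference (illustrative, not boot code):

/* Without "volatile", two identical asm statements may be folded into one,
 * and one whose result is unused may be deleted; with it, every instance
 * is emitted as written. */
static inline unsigned short read_ds_example(void)
{
        unsigned short seg;

        asm volatile("movw %%ds, %0" : "=rm" (seg));
        return seg;
}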
5443 diff -urNp linux-2.6.34.1/arch/x86/boot/boot.h linux-2.6.34.1/arch/x86/boot/boot.h
5444 --- linux-2.6.34.1/arch/x86/boot/boot.h 2010-07-05 14:24:10.000000000 -0400
5445 +++ linux-2.6.34.1/arch/x86/boot/boot.h 2010-07-07 09:04:44.000000000 -0400
5446 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5447 static inline u16 ds(void)
5448 {
5449 u16 seg;
5450 - asm("movw %%ds,%0" : "=rm" (seg));
5451 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5452 return seg;
5453 }
5454
5455 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5456 static inline int memcmp(const void *s1, const void *s2, size_t len)
5457 {
5458 u8 diff;
5459 - asm("repe; cmpsb; setnz %0"
5460 + asm volatile("repe; cmpsb; setnz %0"
5461 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5462 return diff;
5463 }
5464 diff -urNp linux-2.6.34.1/arch/x86/boot/compressed/head_32.S linux-2.6.34.1/arch/x86/boot/compressed/head_32.S
5465 --- linux-2.6.34.1/arch/x86/boot/compressed/head_32.S 2010-07-05 14:24:10.000000000 -0400
5466 +++ linux-2.6.34.1/arch/x86/boot/compressed/head_32.S 2010-07-07 09:04:44.000000000 -0400
5467 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5468 notl %eax
5469 andl %eax, %ebx
5470 #else
5471 - movl $LOAD_PHYSICAL_ADDR, %ebx
5472 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5473 #endif
5474
5475 /* Target address to relocate to for decompression */
5476 @@ -149,7 +149,7 @@ relocated:
5477 * and where it was actually loaded.
5478 */
5479 movl %ebp, %ebx
5480 - subl $LOAD_PHYSICAL_ADDR, %ebx
5481 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5482 jz 2f /* Nothing to be done if loaded at compiled addr. */
5483 /*
5484 * Process relocations.
5485 @@ -157,8 +157,7 @@ relocated:
5486
5487 1: subl $4, %edi
5488 movl (%edi), %ecx
5489 - testl %ecx, %ecx
5490 - jz 2f
5491 + jecxz 2f
5492 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5493 jmp 1b
5494 2:
5495 diff -urNp linux-2.6.34.1/arch/x86/boot/compressed/head_64.S linux-2.6.34.1/arch/x86/boot/compressed/head_64.S
5496 --- linux-2.6.34.1/arch/x86/boot/compressed/head_64.S 2010-07-05 14:24:10.000000000 -0400
5497 +++ linux-2.6.34.1/arch/x86/boot/compressed/head_64.S 2010-07-07 09:04:45.000000000 -0400
5498 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5499 notl %eax
5500 andl %eax, %ebx
5501 #else
5502 - movl $LOAD_PHYSICAL_ADDR, %ebx
5503 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5504 #endif
5505
5506 /* Target address to relocate to for decompression */
5507 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5508 notq %rax
5509 andq %rax, %rbp
5510 #else
5511 - movq $LOAD_PHYSICAL_ADDR, %rbp
5512 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5513 #endif
5514
5515 /* Target address to relocate to for decompression */
5516 diff -urNp linux-2.6.34.1/arch/x86/boot/compressed/misc.c linux-2.6.34.1/arch/x86/boot/compressed/misc.c
5517 --- linux-2.6.34.1/arch/x86/boot/compressed/misc.c 2010-07-05 14:24:10.000000000 -0400
5518 +++ linux-2.6.34.1/arch/x86/boot/compressed/misc.c 2010-07-07 09:04:45.000000000 -0400
5519 @@ -285,7 +285,7 @@ static void parse_elf(void *output)
5520 case PT_LOAD:
5521 #ifdef CONFIG_RELOCATABLE
5522 dest = output;
5523 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5524 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5525 #else
5526 dest = (void *)(phdr->p_paddr);
5527 #endif
5528 @@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void *
5529 error("Destination address too large");
5530 #endif
5531 #ifndef CONFIG_RELOCATABLE
5532 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5533 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5534 error("Wrong destination address");
5535 #endif
5536
5537 diff -urNp linux-2.6.34.1/arch/x86/boot/compressed/mkpiggy.c linux-2.6.34.1/arch/x86/boot/compressed/mkpiggy.c
5538 --- linux-2.6.34.1/arch/x86/boot/compressed/mkpiggy.c 2010-07-05 14:24:10.000000000 -0400
5539 +++ linux-2.6.34.1/arch/x86/boot/compressed/mkpiggy.c 2010-07-07 09:04:45.000000000 -0400
5540 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5541
5542 offs = (olen > ilen) ? olen - ilen : 0;
5543 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5544 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5545 + offs += 64*1024; /* Add 64K bytes slack */
5546 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5547
5548 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
5549 diff -urNp linux-2.6.34.1/arch/x86/boot/compressed/relocs.c linux-2.6.34.1/arch/x86/boot/compressed/relocs.c
5550 --- linux-2.6.34.1/arch/x86/boot/compressed/relocs.c 2010-07-05 14:24:10.000000000 -0400
5551 +++ linux-2.6.34.1/arch/x86/boot/compressed/relocs.c 2010-07-07 09:04:45.000000000 -0400
5552 @@ -13,8 +13,11 @@
5553
5554 static void die(char *fmt, ...);
5555
5556 +#include "../../../../include/generated/autoconf.h"
5557 +
5558 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5559 static Elf32_Ehdr ehdr;
5560 +static Elf32_Phdr *phdr;
5561 static unsigned long reloc_count, reloc_idx;
5562 static unsigned long *relocs;
5563
5564 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5565 }
5566 }
5567
5568 +static void read_phdrs(FILE *fp)
5569 +{
5570 + unsigned int i;
5571 +
5572 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5573 + if (!phdr) {
5574 + die("Unable to allocate %d program headers\n",
5575 + ehdr.e_phnum);
5576 + }
5577 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5578 + die("Seek to %d failed: %s\n",
5579 + ehdr.e_phoff, strerror(errno));
5580 + }
5581 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5582 + die("Cannot read ELF program headers: %s\n",
5583 + strerror(errno));
5584 + }
5585 + for(i = 0; i < ehdr.e_phnum; i++) {
5586 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5587 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5588 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5589 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5590 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5591 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5592 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5593 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5594 + }
5595 +
5596 +}
5597 +
5598 static void read_shdrs(FILE *fp)
5599 {
5600 - int i;
5601 + unsigned int i;
5602 Elf32_Shdr shdr;
5603
5604 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5605 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5606
5607 static void read_strtabs(FILE *fp)
5608 {
5609 - int i;
5610 + unsigned int i;
5611 for (i = 0; i < ehdr.e_shnum; i++) {
5612 struct section *sec = &secs[i];
5613 if (sec->shdr.sh_type != SHT_STRTAB) {
5614 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5615
5616 static void read_symtabs(FILE *fp)
5617 {
5618 - int i,j;
5619 + unsigned int i,j;
5620 for (i = 0; i < ehdr.e_shnum; i++) {
5621 struct section *sec = &secs[i];
5622 if (sec->shdr.sh_type != SHT_SYMTAB) {
5623 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5624
5625 static void read_relocs(FILE *fp)
5626 {
5627 - int i,j;
5628 + unsigned int i,j;
5629 + uint32_t base;
5630 +
5631 for (i = 0; i < ehdr.e_shnum; i++) {
5632 struct section *sec = &secs[i];
5633 if (sec->shdr.sh_type != SHT_REL) {
5634 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5635 die("Cannot read symbol table: %s\n",
5636 strerror(errno));
5637 }
5638 + base = 0;
5639 + for (j = 0; j < ehdr.e_phnum; j++) {
5640 + if (phdr[j].p_type != PT_LOAD )
5641 + continue;
5642 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5643 + continue;
5644 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5645 + break;
5646 + }
5647 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5648 Elf32_Rel *rel = &sec->reltab[j];
5649 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5650 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5651 rel->r_info = elf32_to_cpu(rel->r_info);
5652 }
5653 }
5654 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5655
5656 static void print_absolute_symbols(void)
5657 {
5658 - int i;
5659 + unsigned int i;
5660 printf("Absolute symbols\n");
5661 printf(" Num: Value Size Type Bind Visibility Name\n");
5662 for (i = 0; i < ehdr.e_shnum; i++) {
5663 struct section *sec = &secs[i];
5664 char *sym_strtab;
5665 Elf32_Sym *sh_symtab;
5666 - int j;
5667 + unsigned int j;
5668
5669 if (sec->shdr.sh_type != SHT_SYMTAB) {
5670 continue;
5671 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5672
5673 static void print_absolute_relocs(void)
5674 {
5675 - int i, printed = 0;
5676 + unsigned int i, printed = 0;
5677
5678 for (i = 0; i < ehdr.e_shnum; i++) {
5679 struct section *sec = &secs[i];
5680 struct section *sec_applies, *sec_symtab;
5681 char *sym_strtab;
5682 Elf32_Sym *sh_symtab;
5683 - int j;
5684 + unsigned int j;
5685 if (sec->shdr.sh_type != SHT_REL) {
5686 continue;
5687 }
5688 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5689
5690 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5691 {
5692 - int i;
5693 + unsigned int i;
5694 /* Walk through the relocations */
5695 for (i = 0; i < ehdr.e_shnum; i++) {
5696 char *sym_strtab;
5697 Elf32_Sym *sh_symtab;
5698 struct section *sec_applies, *sec_symtab;
5699 - int j;
5700 + unsigned int j;
5701 struct section *sec = &secs[i];
5702
5703 if (sec->shdr.sh_type != SHT_REL) {
5704 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5705 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5706 continue;
5707 }
5708 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5709 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5710 + continue;
5711 +
5712 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5713 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5714 + if (!strcmp(sec_name(sym->st_shndx), ".data") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5715 + continue;
5716 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5717 + continue;
5718 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5719 + continue;
5720 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5721 + continue;
5722 +#endif
5723 +
5724 switch (r_type) {
5725 case R_386_NONE:
5726 case R_386_PC32:
5727 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5728
5729 static void emit_relocs(int as_text)
5730 {
5731 - int i;
5732 + unsigned int i;
5733 /* Count how many relocations I have and allocate space for them. */
5734 reloc_count = 0;
5735 walk_relocs(count_reloc);
5736 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5737 fname, strerror(errno));
5738 }
5739 read_ehdr(fp);
5740 + read_phdrs(fp);
5741 read_shdrs(fp);
5742 read_strtabs(fp);
5743 read_symtabs(fp);
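The relocs tool changes above read the program headers once and then bias every relocation offset by the delta between the containing PT_LOAD segment's physical and virtual addresses, rebased at CONFIG_PAGE_OFFSET, which is what the KERNEXEC boot-time relocation expects. Restated as a standalone sketch (hypothetical helper, same arithmetic):

/* Sketch of the base computation added to read_relocs(); helper name and
 * parameters are illustrative. */
#include <elf.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t reloc_base(uint32_t page_offset, const Elf32_Phdr *phdr,
                           size_t phnum, uint32_t sec_file_offset)
{
        size_t i;

        for (i = 0; i < phnum; i++) {
                if (phdr[i].p_type != PT_LOAD)
                        continue;
                if (sec_file_offset < phdr[i].p_offset ||
                    sec_file_offset >= phdr[i].p_offset + phdr[i].p_filesz)
                        continue;
                return page_offset + phdr[i].p_paddr - phdr[i].p_vaddr;
        }
        return 0;
}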
5744 diff -urNp linux-2.6.34.1/arch/x86/boot/cpucheck.c linux-2.6.34.1/arch/x86/boot/cpucheck.c
5745 --- linux-2.6.34.1/arch/x86/boot/cpucheck.c 2010-07-05 14:24:10.000000000 -0400
5746 +++ linux-2.6.34.1/arch/x86/boot/cpucheck.c 2010-07-07 09:04:45.000000000 -0400
5747 @@ -74,7 +74,7 @@ static int has_fpu(void)
5748 u16 fcw = -1, fsw = -1;
5749 u32 cr0;
5750
5751 - asm("movl %%cr0,%0" : "=r" (cr0));
5752 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5753 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5754 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5755 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5756 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5757 {
5758 u32 f0, f1;
5759
5760 - asm("pushfl ; "
5761 + asm volatile("pushfl ; "
5762 "pushfl ; "
5763 "popl %0 ; "
5764 "movl %0,%1 ; "
5765 @@ -115,7 +115,7 @@ static void get_flags(void)
5766 set_bit(X86_FEATURE_FPU, cpu.flags);
5767
5768 if (has_eflag(X86_EFLAGS_ID)) {
5769 - asm("cpuid"
5770 + asm volatile("cpuid"
5771 : "=a" (max_intel_level),
5772 "=b" (cpu_vendor[0]),
5773 "=d" (cpu_vendor[1]),
5774 @@ -124,7 +124,7 @@ static void get_flags(void)
5775
5776 if (max_intel_level >= 0x00000001 &&
5777 max_intel_level <= 0x0000ffff) {
5778 - asm("cpuid"
5779 + asm volatile("cpuid"
5780 : "=a" (tfms),
5781 "=c" (cpu.flags[4]),
5782 "=d" (cpu.flags[0])
5783 @@ -136,7 +136,7 @@ static void get_flags(void)
5784 cpu.model += ((tfms >> 16) & 0xf) << 4;
5785 }
5786
5787 - asm("cpuid"
5788 + asm volatile("cpuid"
5789 : "=a" (max_amd_level)
5790 : "a" (0x80000000)
5791 : "ebx", "ecx", "edx");
5792 @@ -144,7 +144,7 @@ static void get_flags(void)
5793 if (max_amd_level >= 0x80000001 &&
5794 max_amd_level <= 0x8000ffff) {
5795 u32 eax = 0x80000001;
5796 - asm("cpuid"
5797 + asm volatile("cpuid"
5798 : "+a" (eax),
5799 "=c" (cpu.flags[6]),
5800 "=d" (cpu.flags[1])
5801 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5802 u32 ecx = MSR_K7_HWCR;
5803 u32 eax, edx;
5804
5805 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5806 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5807 eax &= ~(1 << 15);
5808 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5809 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5810
5811 get_flags(); /* Make sure it really did something */
5812 err = check_flags();
5813 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5814 u32 ecx = MSR_VIA_FCR;
5815 u32 eax, edx;
5816
5817 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5818 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5819 eax |= (1<<1)|(1<<7);
5820 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5821 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5822
5823 set_bit(X86_FEATURE_CX8, cpu.flags);
5824 err = check_flags();
5825 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5826 u32 eax, edx;
5827 u32 level = 1;
5828
5829 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5830 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5831 - asm("cpuid"
5832 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5833 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5834 + asm volatile("cpuid"
5835 : "+a" (level), "=d" (cpu.flags[0])
5836 : : "ecx", "ebx");
5837 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5838 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5839
5840 err = check_flags();
5841 }
5842 diff -urNp linux-2.6.34.1/arch/x86/boot/header.S linux-2.6.34.1/arch/x86/boot/header.S
5843 --- linux-2.6.34.1/arch/x86/boot/header.S 2010-07-05 14:24:10.000000000 -0400
5844 +++ linux-2.6.34.1/arch/x86/boot/header.S 2010-07-07 09:04:45.000000000 -0400
5845 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5846 # single linked list of
5847 # struct setup_data
5848
5849 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5850 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5851
5852 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5853 #define VO_INIT_SIZE (VO__end - VO__text)
5854 diff -urNp linux-2.6.34.1/arch/x86/boot/memory.c linux-2.6.34.1/arch/x86/boot/memory.c
5855 --- linux-2.6.34.1/arch/x86/boot/memory.c 2010-07-05 14:24:10.000000000 -0400
5856 +++ linux-2.6.34.1/arch/x86/boot/memory.c 2010-07-07 09:04:45.000000000 -0400
5857 @@ -19,7 +19,7 @@
5858
5859 static int detect_memory_e820(void)
5860 {
5861 - int count = 0;
5862 + unsigned int count = 0;
5863 struct biosregs ireg, oreg;
5864 struct e820entry *desc = boot_params.e820_map;
5865 static struct e820entry buf; /* static so it is zeroed */
5866 diff -urNp linux-2.6.34.1/arch/x86/boot/video-vesa.c linux-2.6.34.1/arch/x86/boot/video-vesa.c
5867 --- linux-2.6.34.1/arch/x86/boot/video-vesa.c 2010-07-05 14:24:10.000000000 -0400
5868 +++ linux-2.6.34.1/arch/x86/boot/video-vesa.c 2010-07-07 09:04:45.000000000 -0400
5869 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5870
5871 boot_params.screen_info.vesapm_seg = oreg.es;
5872 boot_params.screen_info.vesapm_off = oreg.di;
5873 + boot_params.screen_info.vesapm_size = oreg.cx;
5874 }
5875
5876 /*
5877 diff -urNp linux-2.6.34.1/arch/x86/boot/video.c linux-2.6.34.1/arch/x86/boot/video.c
5878 --- linux-2.6.34.1/arch/x86/boot/video.c 2010-07-05 14:24:10.000000000 -0400
5879 +++ linux-2.6.34.1/arch/x86/boot/video.c 2010-07-07 09:04:45.000000000 -0400
5880 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5881 static unsigned int get_entry(void)
5882 {
5883 char entry_buf[4];
5884 - int i, len = 0;
5885 + unsigned int i, len = 0;
5886 int key;
5887 unsigned int v;
5888
5889 diff -urNp linux-2.6.34.1/arch/x86/ia32/ia32_signal.c linux-2.6.34.1/arch/x86/ia32/ia32_signal.c
5890 --- linux-2.6.34.1/arch/x86/ia32/ia32_signal.c 2010-07-05 14:24:10.000000000 -0400
5891 +++ linux-2.6.34.1/arch/x86/ia32/ia32_signal.c 2010-07-07 09:04:45.000000000 -0400
5892 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5893 sp -= frame_size;
5894 /* Align the stack pointer according to the i386 ABI,
5895 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5896 - sp = ((sp + 4) & -16ul) - 4;
5897 + sp = ((sp - 12) & -16ul) - 4;
5898 return (void __user *) sp;
5899 }
5900
5901 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5902 0xb8,
5903 __NR_ia32_rt_sigreturn,
5904 0x80cd,
5905 - 0,
5906 + 0
5907 };
5908
5909 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5910 diff -urNp linux-2.6.34.1/arch/x86/ia32/ia32entry.S linux-2.6.34.1/arch/x86/ia32/ia32entry.S
5911 --- linux-2.6.34.1/arch/x86/ia32/ia32entry.S 2010-07-05 14:24:10.000000000 -0400
5912 +++ linux-2.6.34.1/arch/x86/ia32/ia32entry.S 2010-07-07 09:04:45.000000000 -0400
5913 @@ -13,6 +13,7 @@
5914 #include <asm/thread_info.h>
5915 #include <asm/segment.h>
5916 #include <asm/irqflags.h>
5917 +#include <asm/pgtable.h>
5918 #include <linux/linkage.h>
5919
5920 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5921 @@ -114,6 +115,11 @@ ENTRY(ia32_sysenter_target)
5922 SWAPGS_UNSAFE_STACK
5923 movq PER_CPU_VAR(kernel_stack), %rsp
5924 addq $(KERNEL_STACK_OFFSET),%rsp
5925 +
5926 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5927 + call pax_enter_kernel_user
5928 +#endif
5929 +
5930 /*
5931 * No need to follow this irqs on/off section: the syscall
5932 * disabled irqs, here we enable it straight after entry:
5933 @@ -144,6 +150,12 @@ ENTRY(ia32_sysenter_target)
5934 SAVE_ARGS 0,0,1
5935 /* no need to do an access_ok check here because rbp has been
5936 32bit zero extended */
5937 +
5938 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5939 + mov $PAX_USER_SHADOW_BASE,%r10
5940 + add %r10,%rbp
5941 +#endif
5942 +
5943 1: movl (%rbp),%ebp
5944 .section __ex_table,"a"
5945 .quad 1b,ia32_badarg
5946 @@ -166,6 +178,11 @@ sysenter_dispatch:
5947 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5948 jnz sysexit_audit
5949 sysexit_from_sys_call:
5950 +
5951 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5952 + call pax_exit_kernel_user
5953 +#endif
5954 +
5955 andl $~TS_COMPAT,TI_status(%r10)
5956 /* clear IF, that popfq doesn't enable interrupts early */
5957 andl $~0x200,EFLAGS-R11(%rsp)
5958 @@ -284,6 +301,11 @@ ENTRY(ia32_cstar_target)
5959 movl %esp,%r8d
5960 CFI_REGISTER rsp,r8
5961 movq PER_CPU_VAR(kernel_stack),%rsp
5962 +
5963 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5964 + call pax_enter_kernel_user
5965 +#endif
5966 +
5967 /*
5968 * No need to follow this irqs on/off section: the syscall
5969 * disabled irqs and here we enable it straight after entry:
5970 @@ -305,6 +327,12 @@ ENTRY(ia32_cstar_target)
5971 /* no need to do an access_ok check here because r8 has been
5972 32bit zero extended */
5973 /* hardware stack frame is complete now */
5974 +
5975 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5976 + mov $PAX_USER_SHADOW_BASE,%r10
5977 + add %r10,%r8
5978 +#endif
5979 +
5980 1: movl (%r8),%r9d
5981 .section __ex_table,"a"
5982 .quad 1b,ia32_badarg
5983 @@ -327,6 +355,11 @@ cstar_dispatch:
5984 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5985 jnz sysretl_audit
5986 sysretl_from_sys_call:
5987 +
5988 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5989 + call pax_exit_kernel_user
5990 +#endif
5991 +
5992 andl $~TS_COMPAT,TI_status(%r10)
5993 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5994 movl RIP-ARGOFFSET(%rsp),%ecx
5995 @@ -409,6 +442,11 @@ ENTRY(ia32_syscall)
5996 CFI_REL_OFFSET rip,RIP-RIP
5997 PARAVIRT_ADJUST_EXCEPTION_FRAME
5998 SWAPGS
5999 +
6000 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6001 + call pax_enter_kernel_user
6002 +#endif
6003 +
6004 /*
6005 * No need to follow this irqs on/off section: the syscall
6006 * disabled irqs and here we enable it straight after entry:
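The UDEREF hooks added to the three 32-bit entry paths above do two things: pax_enter_kernel_user/pax_exit_kernel_user (defined elsewhere in the patch) bracket the syscall with the UDEREF address-space switching, and the explicit PAX_USER_SHADOW_BASE addition rebases the user-supplied stack pointer into the shadow alias through which the kernel is still allowed to read user memory before the sixth-argument load dereferences it. A minimal sketch of the aliasing idea (the constant is a placeholder, not the patch's value):

/* Illustrative only: user pointers are dereferenced through a shifted
 * alias, so an unadjusted user address touched from kernel mode faults. */
#define USER_SHADOW_BASE_EXAMPLE 0x0000001000000000UL   /* placeholder */

static inline const void *user_shadow_alias(unsigned long user_addr)
{
        return (const void *)(user_addr + USER_SHADOW_BASE_EXAMPLE);
}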
6007 diff -urNp linux-2.6.34.1/arch/x86/include/asm/alternative.h linux-2.6.34.1/arch/x86/include/asm/alternative.h
6008 --- linux-2.6.34.1/arch/x86/include/asm/alternative.h 2010-07-05 14:24:10.000000000 -0400
6009 +++ linux-2.6.34.1/arch/x86/include/asm/alternative.h 2010-07-07 09:04:45.000000000 -0400
6010 @@ -91,7 +91,7 @@ static inline int alternatives_text_rese
6011 " .byte 664f-663f\n" /* replacementlen */ \
6012 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6013 ".previous\n" \
6014 - ".section .altinstr_replacement, \"ax\"\n" \
6015 + ".section .altinstr_replacement, \"a\"\n" \
6016 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6017 ".previous"
6018
6019 diff -urNp linux-2.6.34.1/arch/x86/include/asm/apm.h linux-2.6.34.1/arch/x86/include/asm/apm.h
6020 --- linux-2.6.34.1/arch/x86/include/asm/apm.h 2010-07-05 14:24:10.000000000 -0400
6021 +++ linux-2.6.34.1/arch/x86/include/asm/apm.h 2010-07-07 09:04:45.000000000 -0400
6022 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6023 __asm__ __volatile__(APM_DO_ZERO_SEGS
6024 "pushl %%edi\n\t"
6025 "pushl %%ebp\n\t"
6026 - "lcall *%%cs:apm_bios_entry\n\t"
6027 + "lcall *%%ss:apm_bios_entry\n\t"
6028 "setc %%al\n\t"
6029 "popl %%ebp\n\t"
6030 "popl %%edi\n\t"
6031 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6032 __asm__ __volatile__(APM_DO_ZERO_SEGS
6033 "pushl %%edi\n\t"
6034 "pushl %%ebp\n\t"
6035 - "lcall *%%cs:apm_bios_entry\n\t"
6036 + "lcall *%%ss:apm_bios_entry\n\t"
6037 "setc %%bl\n\t"
6038 "popl %%ebp\n\t"
6039 "popl %%edi\n\t"
6040 diff -urNp linux-2.6.34.1/arch/x86/include/asm/asm.h linux-2.6.34.1/arch/x86/include/asm/asm.h
6041 --- linux-2.6.34.1/arch/x86/include/asm/asm.h 2010-07-05 14:24:10.000000000 -0400
6042 +++ linux-2.6.34.1/arch/x86/include/asm/asm.h 2010-07-07 09:04:45.000000000 -0400
6043 @@ -37,6 +37,12 @@
6044 #define _ASM_SI __ASM_REG(si)
6045 #define _ASM_DI __ASM_REG(di)
6046
6047 +#ifdef CONFIG_X86_32
6048 +#define _ASM_INTO "into"
6049 +#else
6050 +#define _ASM_INTO "int $4"
6051 +#endif
6052 +
6053 /* Exception table entry */
6054 #ifdef __ASSEMBLY__
6055 # define _ASM_EXTABLE(from,to) \
6056 diff -urNp linux-2.6.34.1/arch/x86/include/asm/atomic.h linux-2.6.34.1/arch/x86/include/asm/atomic.h
6057 --- linux-2.6.34.1/arch/x86/include/asm/atomic.h 2010-07-05 14:24:10.000000000 -0400
6058 +++ linux-2.6.34.1/arch/x86/include/asm/atomic.h 2010-07-07 09:04:45.000000000 -0400
6059 @@ -26,6 +26,17 @@ static inline int atomic_read(const atom
6060 }
6061
6062 /**
6063 + * atomic_read_unchecked - read atomic variable
6064 + * @v: pointer of type atomic_unchecked_t
6065 + *
6066 + * Atomically reads the value of @v.
6067 + */
6068 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6069 +{
6070 + return v->counter;
6071 +}
6072 +
6073 +/**
6074 * atomic_set - set atomic variable
6075 * @v: pointer of type atomic_t
6076 * @i: required value
6077 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6078 }
6079
6080 /**
6081 + * atomic_set_unchecked - set atomic variable
6082 + * @v: pointer of type atomic_unchecked_t
6083 + * @i: required value
6084 + *
6085 + * Atomically sets the value of @v to @i.
6086 + */
6087 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6088 +{
6089 + v->counter = i;
6090 +}
6091 +
6092 +/**
6093 * atomic_add - add integer to atomic variable
6094 * @i: integer value to add
6095 * @v: pointer of type atomic_t
6096 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6097 */
6098 static inline void atomic_add(int i, atomic_t *v)
6099 {
6100 - asm volatile(LOCK_PREFIX "addl %1,%0"
6101 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6102 +
6103 +#ifdef CONFIG_PAX_REFCOUNT
6104 + "jno 0f\n"
6105 + LOCK_PREFIX "subl %1,%0\n"
6106 + _ASM_INTO "\n0:\n"
6107 + _ASM_EXTABLE(0b, 0b)
6108 +#endif
6109 +
6110 + : "+m" (v->counter)
6111 + : "ir" (i));
6112 +}
6113 +
6114 +/**
6115 + * atomic_add_unchecked - add integer to atomic variable
6116 + * @i: integer value to add
6117 + * @v: pointer of type atomic_unchecked_t
6118 + *
6119 + * Atomically adds @i to @v.
6120 + */
6121 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6122 +{
6123 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6124 : "+m" (v->counter)
6125 : "ir" (i));
6126 }
6127 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6128 */
6129 static inline void atomic_sub(int i, atomic_t *v)
6130 {
6131 - asm volatile(LOCK_PREFIX "subl %1,%0"
6132 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6133 +
6134 +#ifdef CONFIG_PAX_REFCOUNT
6135 + "jno 0f\n"
6136 + LOCK_PREFIX "addl %1,%0\n"
6137 + _ASM_INTO "\n0:\n"
6138 + _ASM_EXTABLE(0b, 0b)
6139 +#endif
6140 +
6141 + : "+m" (v->counter)
6142 + : "ir" (i));
6143 +}
6144 +
6145 +/**
6146 + * atomic_sub_unchecked - subtract integer from atomic variable
6147 + * @i: integer value to subtract
6148 + * @v: pointer of type atomic_unchecked_t
6149 + *
6150 + * Atomically subtracts @i from @v.
6151 + */
6152 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6153 +{
6154 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6155 : "+m" (v->counter)
6156 : "ir" (i));
6157 }
6158 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6159 {
6160 unsigned char c;
6161
6162 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6163 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6164 +
6165 +#ifdef CONFIG_PAX_REFCOUNT
6166 + "jno 0f\n"
6167 + LOCK_PREFIX "addl %2,%0\n"
6168 + _ASM_INTO "\n0:\n"
6169 + _ASM_EXTABLE(0b, 0b)
6170 +#endif
6171 +
6172 + "sete %1\n"
6173 : "+m" (v->counter), "=qm" (c)
6174 : "ir" (i) : "memory");
6175 return c;
6176 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6177 */
6178 static inline void atomic_inc(atomic_t *v)
6179 {
6180 - asm volatile(LOCK_PREFIX "incl %0"
6181 + asm volatile(LOCK_PREFIX "incl %0\n"
6182 +
6183 +#ifdef CONFIG_PAX_REFCOUNT
6184 + "jno 0f\n"
6185 + LOCK_PREFIX "decl %0\n"
6186 + _ASM_INTO "\n0:\n"
6187 + _ASM_EXTABLE(0b, 0b)
6188 +#endif
6189 +
6190 + : "+m" (v->counter));
6191 +}
6192 +
6193 +/**
6194 + * atomic_inc_unchecked - increment atomic variable
6195 + * @v: pointer of type atomic_unchecked_t
6196 + *
6197 + * Atomically increments @v by 1.
6198 + */
6199 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6200 +{
6201 + asm volatile(LOCK_PREFIX "incl %0\n"
6202 : "+m" (v->counter));
6203 }
6204
6205 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6206 */
6207 static inline void atomic_dec(atomic_t *v)
6208 {
6209 - asm volatile(LOCK_PREFIX "decl %0"
6210 + asm volatile(LOCK_PREFIX "decl %0\n"
6211 +
6212 +#ifdef CONFIG_PAX_REFCOUNT
6213 + "jno 0f\n"
6214 + LOCK_PREFIX "incl %0\n"
6215 + _ASM_INTO "\n0:\n"
6216 + _ASM_EXTABLE(0b, 0b)
6217 +#endif
6218 +
6219 + : "+m" (v->counter));
6220 +}
6221 +
6222 +/**
6223 + * atomic_dec_unchecked - decrement atomic variable
6224 + * @v: pointer of type atomic_unchecked_t
6225 + *
6226 + * Atomically decrements @v by 1.
6227 + */
6228 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6229 +{
6230 + asm volatile(LOCK_PREFIX "decl %0\n"
6231 : "+m" (v->counter));
6232 }
6233
6234 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6235 {
6236 unsigned char c;
6237
6238 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6239 + asm volatile(LOCK_PREFIX "decl %0\n"
6240 +
6241 +#ifdef CONFIG_PAX_REFCOUNT
6242 + "jno 0f\n"
6243 + LOCK_PREFIX "incl %0\n"
6244 + _ASM_INTO "\n0:\n"
6245 + _ASM_EXTABLE(0b, 0b)
6246 +#endif
6247 +
6248 + "sete %1\n"
6249 : "+m" (v->counter), "=qm" (c)
6250 : : "memory");
6251 return c != 0;
6252 @@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(at
6253 {
6254 unsigned char c;
6255
6256 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6257 + asm volatile(LOCK_PREFIX "incl %0\n"
6258 +
6259 +#ifdef CONFIG_PAX_REFCOUNT
6260 + "jno 0f\n"
6261 + LOCK_PREFIX "decl %0\n"
6262 + _ASM_INTO "\n0:\n"
6263 + _ASM_EXTABLE(0b, 0b)
6264 +#endif
6265 +
6266 + "sete %1\n"
6267 : "+m" (v->counter), "=qm" (c)
6268 : : "memory");
6269 return c != 0;
6270 @@ -157,7 +291,16 @@ static inline int atomic_add_negative(in
6271 {
6272 unsigned char c;
6273
6274 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6275 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6276 +
6277 +#ifdef CONFIG_PAX_REFCOUNT
6278 + "jno 0f\n"
6279 + LOCK_PREFIX "subl %2,%0\n"
6280 + _ASM_INTO "\n0:\n"
6281 + _ASM_EXTABLE(0b, 0b)
6282 +#endif
6283 +
6284 + "sets %1\n"
6285 : "+m" (v->counter), "=qm" (c)
6286 : "ir" (i) : "memory");
6287 return c;
6288 @@ -180,6 +323,46 @@ static inline int atomic_add_return(int
6289 #endif
6290 /* Modern 486+ processor */
6291 __i = i;
6292 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6293 +
6294 +#ifdef CONFIG_PAX_REFCOUNT
6295 + "jno 0f\n"
6296 + "movl %0, %1\n"
6297 + _ASM_INTO "\n0:\n"
6298 + _ASM_EXTABLE(0b, 0b)
6299 +#endif
6300 +
6301 + : "+r" (i), "+m" (v->counter)
6302 + : : "memory");
6303 + return i + __i;
6304 +
6305 +#ifdef CONFIG_M386
6306 +no_xadd: /* Legacy 386 processor */
6307 + local_irq_save(flags);
6308 + __i = atomic_read(v);
6309 + atomic_set(v, i + __i);
6310 + local_irq_restore(flags);
6311 + return i + __i;
6312 +#endif
6313 +}
6314 +
6315 +/**
6316 + * atomic_add_return_unchecked - add integer and return
6317 + * @v: pointer of type atomic_unchecked_t
6318 + * @i: integer value to add
6319 + *
6320 + * Atomically adds @i to @v and returns @i + @v
6321 + */
6322 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6323 +{
6324 + int __i;
6325 +#ifdef CONFIG_M386
6326 + unsigned long flags;
6327 + if (unlikely(boot_cpu_data.x86 <= 3))
6328 + goto no_xadd;
6329 +#endif
6330 + /* Modern 486+ processor */
6331 + __i = i;
6332 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6333 : "+r" (i), "+m" (v->counter)
6334 : : "memory");
6335 @@ -208,6 +391,7 @@ static inline int atomic_sub_return(int
6336 }
6337
6338 #define atomic_inc_return(v) (atomic_add_return(1, v))
6339 +#define atomic_inc_return_unchecked(v) (atomic_add_return_unchecked(1, v))
6340 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6341
6342 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6343 @@ -231,17 +415,29 @@ static inline int atomic_xchg(atomic_t *
6344 */
6345 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6346 {
6347 - int c, old;
6348 + int c, old, new;
6349 c = atomic_read(v);
6350 for (;;) {
6351 - if (unlikely(c == (u)))
6352 + if (unlikely(c == u))
6353 break;
6354 - old = atomic_cmpxchg((v), c, c + (a));
6355 +
6356 + asm volatile("addl %2,%0\n"
6357 +
6358 +#ifdef CONFIG_PAX_REFCOUNT
6359 + "jno 0f\n"
6360 + _ASM_INTO "\n0:\n"
6361 + _ASM_EXTABLE(0b, 0b)
6362 +#endif
6363 +
6364 + : "=r" (new)
6365 + : "0" (c), "ir" (a));
6366 +
6367 + old = atomic_cmpxchg(v, c, new);
6368 if (likely(old == c))
6369 break;
6370 c = old;
6371 }
6372 - return c != (u);
6373 + return c != u;
6374 }
6375
6376 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
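
The CONFIG_PAX_REFCOUNT hunks above all follow the same shape: perform the locked operation, branch over a fixup when the overflow flag is clear, otherwise undo the operation and raise trap vector 4. A minimal user-space sketch of that shape follows; it is an illustration rather than kernel code, it assumes x86-64 with GCC or Clang inline asm, and it assumes the resulting trap reaches the process as a signal (typically SIGSEGV).

/* Minimal user-space sketch of the overflow-check pattern added under
 * CONFIG_PAX_REFCOUNT: do the locked operation, and if the CPU's overflow
 * flag is set, undo it and raise #OF (vector 4).  Assumptions of this
 * sketch, not of the patch: x86-64, GCC/Clang inline asm, trap delivered
 * to the process as SIGSEGV. */
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void overflow_trap(int sig)
{
    /* In the kernel the #OF trap goes to the refcount report path; here we
     * only confirm that the rollback already happened. */
    printf("overflow detected, increment was undone\n");
    exit(0);
}

static void checked_inc(int *counter)
{
    asm volatile("lock incl %0\n"
                 "jno 0f\n"        /* no signed overflow: skip the fixup   */
                 "lock decl %0\n"  /* overflow: roll the counter back      */
                 "int $4\n"        /* raise #OF, what _ASM_INTO is on x86-64 */
                 "0:\n"
                 : "+m" (*counter));
}

int main(void)
{
    int counter = INT_MAX;

    signal(SIGSEGV, overflow_trap);  /* #OF typically surfaces as SIGSEGV */
    checked_inc(&counter);           /* INT_MAX + 1 overflows and traps   */
    printf("no overflow, counter = %d\n", counter);
    return 0;
}
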
6377 diff -urNp linux-2.6.34.1/arch/x86/include/asm/atomic64_32.h linux-2.6.34.1/arch/x86/include/asm/atomic64_32.h
6378 --- linux-2.6.34.1/arch/x86/include/asm/atomic64_32.h 2010-07-05 14:24:10.000000000 -0400
6379 +++ linux-2.6.34.1/arch/x86/include/asm/atomic64_32.h 2010-07-07 09:04:45.000000000 -0400
6380 @@ -12,6 +12,14 @@ typedef struct {
6381 u64 __aligned(8) counter;
6382 } atomic64_t;
6383
6384 +#ifdef CONFIG_PAX_REFCOUNT
6385 +typedef struct {
6386 + u64 __aligned(8) counter;
6387 +} atomic64_unchecked_t;
6388 +#else
6389 +typedef atomic64_t atomic64_unchecked_t;
6390 +#endif
6391 +
6392 #define ATOMIC64_INIT(val) { (val) }
6393
6394 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
6395 diff -urNp linux-2.6.34.1/arch/x86/include/asm/atomic64_64.h linux-2.6.34.1/arch/x86/include/asm/atomic64_64.h
6396 --- linux-2.6.34.1/arch/x86/include/asm/atomic64_64.h 2010-07-05 14:24:10.000000000 -0400
6397 +++ linux-2.6.34.1/arch/x86/include/asm/atomic64_64.h 2010-07-07 09:04:45.000000000 -0400
6398 @@ -22,6 +22,18 @@ static inline long atomic64_read(const a
6399 }
6400
6401 /**
6402 + * atomic64_read_unchecked - read atomic64 variable
6403 + * @v: pointer of type atomic64_unchecked_t
6404 + *
6405 + * Atomically reads the value of @v.
6406 + * Doesn't imply a read memory barrier.
6407 + */
6408 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6409 +{
6410 + return v->counter;
6411 +}
6412 +
6413 +/**
6414 * atomic64_set - set atomic64 variable
6415 * @v: pointer to type atomic64_t
6416 * @i: required value
6417 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6418 }
6419
6420 /**
6421 + * atomic64_set_unchecked - set atomic64 variable
6422 + * @v: pointer to type atomic64_unchecked_t
6423 + * @i: required value
6424 + *
6425 + * Atomically sets the value of @v to @i.
6426 + */
6427 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6428 +{
6429 + v->counter = i;
6430 +}
6431 +
6432 +/**
6433 * atomic64_add - add integer to atomic64 variable
6434 * @i: integer value to add
6435 * @v: pointer to type atomic64_t
6436 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6437 */
6438 static inline void atomic64_add(long i, atomic64_t *v)
6439 {
6440 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6441 +
6442 +#ifdef CONFIG_PAX_REFCOUNT
6443 + "jno 0f\n"
6444 + LOCK_PREFIX "subq %1,%0\n"
6445 + "int $4\n0:\n"
6446 + _ASM_EXTABLE(0b, 0b)
6447 +#endif
6448 +
6449 + : "=m" (v->counter)
6450 + : "er" (i), "m" (v->counter));
6451 +}
6452 +
6453 +/**
6454 + * atomic64_add_unchecked - add integer to atomic64 variable
6455 + * @i: integer value to add
6456 + * @v: pointer to type atomic64_unchecked_t
6457 + *
6458 + * Atomically adds @i to @v.
6459 + */
6460 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6461 +{
6462 asm volatile(LOCK_PREFIX "addq %1,%0"
6463 : "=m" (v->counter)
6464 : "er" (i), "m" (v->counter));
6465 @@ -56,7 +102,15 @@ static inline void atomic64_add(long i,
6466 */
6467 static inline void atomic64_sub(long i, atomic64_t *v)
6468 {
6469 - asm volatile(LOCK_PREFIX "subq %1,%0"
6470 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6471 +
6472 +#ifdef CONFIG_PAX_REFCOUNT
6473 + "jno 0f\n"
6474 + LOCK_PREFIX "addq %1,%0\n"
6475 + "int $4\n0:\n"
6476 + _ASM_EXTABLE(0b, 0b)
6477 +#endif
6478 +
6479 : "=m" (v->counter)
6480 : "er" (i), "m" (v->counter));
6481 }
6482 @@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test(
6483 {
6484 unsigned char c;
6485
6486 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6487 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6488 +
6489 +#ifdef CONFIG_PAX_REFCOUNT
6490 + "jno 0f\n"
6491 + LOCK_PREFIX "addq %2,%0\n"
6492 + "int $4\n0:\n"
6493 + _ASM_EXTABLE(0b, 0b)
6494 +#endif
6495 +
6496 + "sete %1\n"
6497 : "=m" (v->counter), "=qm" (c)
6498 : "er" (i), "m" (v->counter) : "memory");
6499 return c;
6500 @@ -88,6 +151,31 @@ static inline int atomic64_sub_and_test(
6501 */
6502 static inline void atomic64_inc(atomic64_t *v)
6503 {
6504 + asm volatile(LOCK_PREFIX "incq %0\n"
6505 +
6506 +#ifdef CONFIG_PAX_REFCOUNT
6507 + "jno 0f\n"
6508 + "int $4\n0:\n"
6509 + ".pushsection .fixup,\"ax\"\n"
6510 + "1:\n"
6511 + LOCK_PREFIX "decq %0\n"
6512 + "jmp 0b\n"
6513 + ".popsection\n"
6514 + _ASM_EXTABLE(0b, 1b)
6515 +#endif
6516 +
6517 + : "=m" (v->counter)
6518 + : "m" (v->counter));
6519 +}
6520 +
6521 +/**
6522 + * atomic64_inc_unchecked - increment atomic64 variable
6523 + * @v: pointer to type atomic64_unchecked_t
6524 + *
6525 + * Atomically increments @v by 1.
6526 + */
6527 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6528 +{
6529 asm volatile(LOCK_PREFIX "incq %0"
6530 : "=m" (v->counter)
6531 : "m" (v->counter));
6532 @@ -101,7 +189,32 @@ static inline void atomic64_inc(atomic64
6533 */
6534 static inline void atomic64_dec(atomic64_t *v)
6535 {
6536 - asm volatile(LOCK_PREFIX "decq %0"
6537 + asm volatile(LOCK_PREFIX "decq %0\n"
6538 +
6539 +#ifdef CONFIG_PAX_REFCOUNT
6540 + "jno 0f\n"
6541 + "int $4\n0:\n"
6542 + ".pushsection .fixup,\"ax\"\n"
6543 + "1: \n"
6544 + LOCK_PREFIX "incq %0\n"
6545 + "jmp 0b\n"
6546 + ".popsection\n"
6547 + _ASM_EXTABLE(0b, 1b)
6548 +#endif
6549 +
6550 + : "=m" (v->counter)
6551 + : "m" (v->counter));
6552 +}
6553 +
6554 +/**
6555 + * atomic64_dec_unchecked - decrement atomic64 variable
6556 + * @v: pointer to type atomic64_unchecked_t
6557 + *
6558 + * Atomically decrements @v by 1.
6559 + */
6560 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6561 +{
6562 + asm volatile(LOCK_PREFIX "decq %0\n"
6563 : "=m" (v->counter)
6564 : "m" (v->counter));
6565 }
6566 @@ -118,7 +231,20 @@ static inline int atomic64_dec_and_test(
6567 {
6568 unsigned char c;
6569
6570 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6571 + asm volatile(LOCK_PREFIX "decq %0\n"
6572 +
6573 +#ifdef CONFIG_PAX_REFCOUNT
6574 + "jno 0f\n"
6575 + "int $4\n0:\n"
6576 + ".pushsection .fixup,\"ax\"\n"
6577 + "1: \n"
6578 + LOCK_PREFIX "incq %0\n"
6579 + "jmp 0b\n"
6580 + ".popsection\n"
6581 + _ASM_EXTABLE(0b, 1b)
6582 +#endif
6583 +
6584 + "sete %1\n"
6585 : "=m" (v->counter), "=qm" (c)
6586 : "m" (v->counter) : "memory");
6587 return c != 0;
6588 @@ -136,7 +262,20 @@ static inline int atomic64_inc_and_test(
6589 {
6590 unsigned char c;
6591
6592 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6593 + asm volatile(LOCK_PREFIX "incq %0\n"
6594 +
6595 +#ifdef CONFIG_PAX_REFCOUNT
6596 + "jno 0f\n"
6597 + "int $4\n0:\n"
6598 + ".pushsection .fixup,\"ax\"\n"
6599 + "1: \n"
6600 + LOCK_PREFIX "decq %0\n"
6601 + "jmp 0b\n"
6602 + ".popsection\n"
6603 + _ASM_EXTABLE(0b, 1b)
6604 +#endif
6605 +
6606 + "sete %1\n"
6607 : "=m" (v->counter), "=qm" (c)
6608 : "m" (v->counter) : "memory");
6609 return c != 0;
6610 @@ -155,7 +294,16 @@ static inline int atomic64_add_negative(
6611 {
6612 unsigned char c;
6613
6614 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6615 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6616 +
6617 +#ifdef CONFIG_PAX_REFCOUNT
6618 + "jno 0f\n"
6619 + LOCK_PREFIX "subq %2,%0\n"
6620 + "int $4\n0:\n"
6621 + _ASM_EXTABLE(0b, 0b)
6622 +#endif
6623 +
6624 + "sets %1\n"
6625 : "=m" (v->counter), "=qm" (c)
6626 : "er" (i), "m" (v->counter) : "memory");
6627 return c;
6628 @@ -171,7 +319,31 @@ static inline int atomic64_add_negative(
6629 static inline long atomic64_add_return(long i, atomic64_t *v)
6630 {
6631 long __i = i;
6632 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6633 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6634 +
6635 +#ifdef CONFIG_PAX_REFCOUNT
6636 + "jno 0f\n"
6637 + "movq %0, %1\n"
6638 + "int $4\n0:\n"
6639 + _ASM_EXTABLE(0b, 0b)
6640 +#endif
6641 +
6642 + : "+r" (i), "+m" (v->counter)
6643 + : : "memory");
6644 + return i + __i;
6645 +}
6646 +
6647 +/**
6648 + * atomic64_add_return_unchecked - add and return
6649 + * @i: integer value to add
6650 + * @v: pointer to type atomic64_unchecked_t
6651 + *
6652 + * Atomically adds @i to @v and returns @i + @v
6653 + */
6654 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6655 +{
6656 + long __i = i;
6657 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6658 : "+r" (i), "+m" (v->counter)
6659 : : "memory");
6660 return i + __i;
6661 @@ -183,6 +355,7 @@ static inline long atomic64_sub_return(l
6662 }
6663
6664 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6665 +#define atomic64_inc_return_unchecked(v) (atomic64_add_return_unchecked(1, (v)))
6666 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6667
6668 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6669 @@ -206,17 +379,29 @@ static inline long atomic64_xchg(atomic6
6670 */
6671 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6672 {
6673 - long c, old;
6674 + long c, old, new;
6675 c = atomic64_read(v);
6676 for (;;) {
6677 - if (unlikely(c == (u)))
6678 + if (unlikely(c == u))
6679 break;
6680 - old = atomic64_cmpxchg((v), c, c + (a));
6681 +
6682 + asm volatile("add %2,%0\n"
6683 +
6684 +#ifdef CONFIG_PAX_REFCOUNT
6685 + "jno 0f\n"
6686 + "int $4\n0:\n"
6687 + _ASM_EXTABLE(0b, 0b)
6688 +#endif
6689 +
6690 + : "=r" (new)
6691 + : "0" (c), "ir" (a));
6692 +
6693 + old = atomic64_cmpxchg(v, c, new);
6694 if (likely(old == c))
6695 break;
6696 c = old;
6697 }
6698 - return c != (u);
6699 + return c != u;
6700 }
6701
6702 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
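
The 64-bit inc/dec variants above appear to route the correction through a .fixup stub and an exception-table entry rather than undoing inline. The portable C sketch below only mirrors the observable effect (an increment that would overflow is rolled back and reported); it is not the in-kernel mechanism, it is not atomic, and it assumes the GCC/Clang __builtin_add_overflow builtin.

/* Behavioural sketch only: the real code uses jno/int $4 plus a
 * .fixup/__ex_table pair; this version just shows the net effect, with
 * atomicity omitted for brevity. */
#include <stdint.h>
#include <stdio.h>

static int64_t counter = INT64_MAX;

static void refcount_report(const char *what)
{
    fprintf(stderr, "refcount %s would overflow, value left unchanged\n", what);
}

static void checked_inc64(int64_t *v)
{
    int64_t next;

    if (__builtin_add_overflow(*v, 1, &next)) {
        refcount_report("increment");   /* stand-in for the #OF trap path */
        return;                         /* stand-in for the .fixup undo   */
    }
    *v = next;
}

int main(void)
{
    checked_inc64(&counter);
    printf("counter = %lld\n", (long long)counter);  /* still INT64_MAX */
    return 0;
}
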
6703 diff -urNp linux-2.6.34.1/arch/x86/include/asm/boot.h linux-2.6.34.1/arch/x86/include/asm/boot.h
6704 --- linux-2.6.34.1/arch/x86/include/asm/boot.h 2010-07-05 14:24:10.000000000 -0400
6705 +++ linux-2.6.34.1/arch/x86/include/asm/boot.h 2010-07-07 09:04:45.000000000 -0400
6706 @@ -11,10 +11,15 @@
6707 #include <asm/pgtable_types.h>
6708
6709 /* Physical address where kernel should be loaded. */
6710 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6711 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6712 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6713 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6714
6715 +#ifndef __ASSEMBLY__
6716 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6717 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6718 +#endif
6719 +
6720 /* Minimum kernel alignment, as a power of two */
6721 #ifdef CONFIG_X86_64
6722 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6723 diff -urNp linux-2.6.34.1/arch/x86/include/asm/cache.h linux-2.6.34.1/arch/x86/include/asm/cache.h
6724 --- linux-2.6.34.1/arch/x86/include/asm/cache.h 2010-07-05 14:24:10.000000000 -0400
6725 +++ linux-2.6.34.1/arch/x86/include/asm/cache.h 2010-07-07 09:04:45.000000000 -0400
6726 @@ -8,6 +8,7 @@
6727 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6728
6729 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
6730 +#define __read_only __attribute__((__section__(".data.read_only")))
6731
6732 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6733 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6734 diff -urNp linux-2.6.34.1/arch/x86/include/asm/cacheflush.h linux-2.6.34.1/arch/x86/include/asm/cacheflush.h
6735 --- linux-2.6.34.1/arch/x86/include/asm/cacheflush.h 2010-07-05 14:24:10.000000000 -0400
6736 +++ linux-2.6.34.1/arch/x86/include/asm/cacheflush.h 2010-07-07 09:04:45.000000000 -0400
6737 @@ -61,7 +61,7 @@ PAGEFLAG(WC, WC)
6738 static inline unsigned long get_page_memtype(struct page *pg)
6739 {
6740 if (!PageUncached(pg) && !PageWC(pg))
6741 - return -1;
6742 + return ~0UL;
6743 else if (!PageUncached(pg) && PageWC(pg))
6744 return _PAGE_CACHE_WC;
6745 else if (PageUncached(pg) && !PageWC(pg))
6746 @@ -86,7 +86,7 @@ static inline void set_page_memtype(stru
6747 SetPageWC(pg);
6748 break;
6749 default:
6750 - case -1:
6751 + case ~0UL:
6752 ClearPageUncached(pg);
6753 ClearPageWC(pg);
6754 break;
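
The get_page_memtype() hunks replace -1 with ~0UL because the function returns unsigned long; the two spell the same value, so this is a type-cleanliness change rather than a behavioural one. A quick stand-alone check, assuming nothing beyond standard C:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned long memtype = (unsigned long)-1;  /* what "return -1" produced */

    assert(memtype == ~0UL);      /* same bit pattern, now spelled explicitly */
    assert(memtype == ULONG_MAX);
    printf("~0UL == (unsigned long)-1 == 0x%lx\n", memtype);
    return 0;
}
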
6755 diff -urNp linux-2.6.34.1/arch/x86/include/asm/checksum_32.h linux-2.6.34.1/arch/x86/include/asm/checksum_32.h
6756 --- linux-2.6.34.1/arch/x86/include/asm/checksum_32.h 2010-07-05 14:24:10.000000000 -0400
6757 +++ linux-2.6.34.1/arch/x86/include/asm/checksum_32.h 2010-07-07 09:04:45.000000000 -0400
6758 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6759 int len, __wsum sum,
6760 int *src_err_ptr, int *dst_err_ptr);
6761
6762 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6763 + int len, __wsum sum,
6764 + int *src_err_ptr, int *dst_err_ptr);
6765 +
6766 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6767 + int len, __wsum sum,
6768 + int *src_err_ptr, int *dst_err_ptr);
6769 +
6770 /*
6771 * Note: when you get a NULL pointer exception here this means someone
6772 * passed in an incorrect kernel address to one of these functions.
6773 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6774 int *err_ptr)
6775 {
6776 might_sleep();
6777 - return csum_partial_copy_generic((__force void *)src, dst,
6778 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6779 len, sum, err_ptr, NULL);
6780 }
6781
6782 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6783 {
6784 might_sleep();
6785 if (access_ok(VERIFY_WRITE, dst, len))
6786 - return csum_partial_copy_generic(src, (__force void *)dst,
6787 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6788 len, sum, NULL, err_ptr);
6789
6790 if (len)
6791 diff -urNp linux-2.6.34.1/arch/x86/include/asm/desc.h linux-2.6.34.1/arch/x86/include/asm/desc.h
6792 --- linux-2.6.34.1/arch/x86/include/asm/desc.h 2010-07-05 14:24:10.000000000 -0400
6793 +++ linux-2.6.34.1/arch/x86/include/asm/desc.h 2010-07-07 09:04:45.000000000 -0400
6794 @@ -4,6 +4,7 @@
6795 #include <asm/desc_defs.h>
6796 #include <asm/ldt.h>
6797 #include <asm/mmu.h>
6798 +#include <asm/pgtable.h>
6799 #include <linux/smp.h>
6800
6801 static inline void fill_ldt(struct desc_struct *desc,
6802 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6803 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6804 desc->type = (info->read_exec_only ^ 1) << 1;
6805 desc->type |= info->contents << 2;
6806 + desc->type |= info->seg_not_present ^ 1;
6807 desc->s = 1;
6808 desc->dpl = 0x3;
6809 desc->p = info->seg_not_present ^ 1;
6810 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6811 }
6812
6813 extern struct desc_ptr idt_descr;
6814 -extern gate_desc idt_table[];
6815 -
6816 -struct gdt_page {
6817 - struct desc_struct gdt[GDT_ENTRIES];
6818 -} __attribute__((aligned(PAGE_SIZE)));
6819 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6820 +extern gate_desc idt_table[256];
6821
6822 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6823 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6824 {
6825 - return per_cpu(gdt_page, cpu).gdt;
6826 + return cpu_gdt_table[cpu];
6827 }
6828
6829 #ifdef CONFIG_X86_64
6830 @@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str
6831 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6832 const gate_desc *gate)
6833 {
6834 + pax_open_kernel();
6835 memcpy(&idt[entry], gate, sizeof(*gate));
6836 + pax_close_kernel();
6837 }
6838
6839 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6840 const void *desc)
6841 {
6842 + pax_open_kernel();
6843 memcpy(&ldt[entry], desc, 8);
6844 + pax_close_kernel();
6845 }
6846
6847 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
6848 const void *desc, int type)
6849 {
6850 unsigned int size;
6851 +
6852 switch (type) {
6853 case DESC_TSS:
6854 size = sizeof(tss_desc);
6855 @@ -139,7 +142,10 @@ static inline void native_write_gdt_entr
6856 size = sizeof(struct desc_struct);
6857 break;
6858 }
6859 +
6860 + pax_open_kernel();
6861 memcpy(&gdt[entry], desc, size);
6862 + pax_close_kernel();
6863 }
6864
6865 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6866 @@ -211,7 +217,9 @@ static inline void native_set_ldt(const
6867
6868 static inline void native_load_tr_desc(void)
6869 {
6870 + pax_open_kernel();
6871 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6872 + pax_close_kernel();
6873 }
6874
6875 static inline void native_load_gdt(const struct desc_ptr *dtr)
6876 @@ -246,8 +254,10 @@ static inline void native_load_tls(struc
6877 unsigned int i;
6878 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6879
6880 + pax_open_kernel();
6881 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6882 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6883 + pax_close_kernel();
6884 }
6885
6886 #define _LDT_empty(info) \
6887 @@ -309,7 +319,7 @@ static inline void set_desc_limit(struct
6888 desc->limit = (limit >> 16) & 0xf;
6889 }
6890
6891 -static inline void _set_gate(int gate, unsigned type, void *addr,
6892 +static inline void _set_gate(int gate, unsigned type, const void *addr,
6893 unsigned dpl, unsigned ist, unsigned seg)
6894 {
6895 gate_desc s;
6896 @@ -327,7 +337,7 @@ static inline void _set_gate(int gate, u
6897 * Pentium F0 0F bugfix can have resulted in the mapped
6898 * IDT being write-protected.
6899 */
6900 -static inline void set_intr_gate(unsigned int n, void *addr)
6901 +static inline void set_intr_gate(unsigned int n, const void *addr)
6902 {
6903 BUG_ON((unsigned)n > 0xFF);
6904 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
6905 @@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsig
6906 /*
6907 * This routine sets up an interrupt gate at directory privilege level 3.
6908 */
6909 -static inline void set_system_intr_gate(unsigned int n, void *addr)
6910 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
6911 {
6912 BUG_ON((unsigned)n > 0xFF);
6913 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
6914 }
6915
6916 -static inline void set_system_trap_gate(unsigned int n, void *addr)
6917 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
6918 {
6919 BUG_ON((unsigned)n > 0xFF);
6920 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
6921 }
6922
6923 -static inline void set_trap_gate(unsigned int n, void *addr)
6924 +static inline void set_trap_gate(unsigned int n, const void *addr)
6925 {
6926 BUG_ON((unsigned)n > 0xFF);
6927 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
6928 @@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigne
6929 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
6930 {
6931 BUG_ON((unsigned)n > 0xFF);
6932 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
6933 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
6934 }
6935
6936 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
6937 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
6938 {
6939 BUG_ON((unsigned)n > 0xFF);
6940 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
6941 }
6942
6943 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
6944 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
6945 {
6946 BUG_ON((unsigned)n > 0xFF);
6947 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
6948 }
6949
6950 +#ifdef CONFIG_X86_32
6951 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
6952 +{
6953 + struct desc_struct d;
6954 +
6955 + if (likely(limit))
6956 + limit = (limit - 1UL) >> PAGE_SHIFT;
6957 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
6958 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
6959 +}
6960 +#endif
6961 +
6962 #endif /* _ASM_X86_DESC_H */
6963 diff -urNp linux-2.6.34.1/arch/x86/include/asm/device.h linux-2.6.34.1/arch/x86/include/asm/device.h
6964 --- linux-2.6.34.1/arch/x86/include/asm/device.h 2010-07-05 14:24:10.000000000 -0400
6965 +++ linux-2.6.34.1/arch/x86/include/asm/device.h 2010-07-07 09:04:45.000000000 -0400
6966 @@ -6,7 +6,7 @@ struct dev_archdata {
6967 void *acpi_handle;
6968 #endif
6969 #ifdef CONFIG_X86_64
6970 -struct dma_map_ops *dma_ops;
6971 + const struct dma_map_ops *dma_ops;
6972 #endif
6973 #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
6974 void *iommu; /* hook for IOMMU specific extension */
6975 diff -urNp linux-2.6.34.1/arch/x86/include/asm/dma-mapping.h linux-2.6.34.1/arch/x86/include/asm/dma-mapping.h
6976 --- linux-2.6.34.1/arch/x86/include/asm/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
6977 +++ linux-2.6.34.1/arch/x86/include/asm/dma-mapping.h 2010-07-07 09:04:45.000000000 -0400
6978 @@ -26,9 +26,9 @@ extern int iommu_merge;
6979 extern struct device x86_dma_fallback_dev;
6980 extern int panic_on_overflow;
6981
6982 -extern struct dma_map_ops *dma_ops;
6983 +extern const struct dma_map_ops *dma_ops;
6984
6985 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
6986 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
6987 {
6988 #ifdef CONFIG_X86_32
6989 return dma_ops;
6990 @@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm
6991 /* Make sure we keep the same behaviour */
6992 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
6993 {
6994 - struct dma_map_ops *ops = get_dma_ops(dev);
6995 + const struct dma_map_ops *ops = get_dma_ops(dev);
6996 if (ops->mapping_error)
6997 return ops->mapping_error(dev, dma_addr);
6998
6999 @@ -123,7 +123,7 @@ static inline void *
7000 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
7001 gfp_t gfp)
7002 {
7003 - struct dma_map_ops *ops = get_dma_ops(dev);
7004 + const struct dma_map_ops *ops = get_dma_ops(dev);
7005 void *memory;
7006
7007 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
7008 @@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, s
7009 static inline void dma_free_coherent(struct device *dev, size_t size,
7010 void *vaddr, dma_addr_t bus)
7011 {
7012 - struct dma_map_ops *ops = get_dma_ops(dev);
7013 + const struct dma_map_ops *ops = get_dma_ops(dev);
7014
7015 WARN_ON(irqs_disabled()); /* for portability */
7016
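
Several hunks in this area constify ops tables and the pointers that reach them (dma_map_ops here, similar structures elsewhere in the patch), so the function-pointer slots cannot be retargeted through those handles and fully const objects become eligible for read-only placement. The struct and function names in the sketch below are made up for illustration:

#include <stdio.h>

struct ops {
    int (*do_thing)(int);
};

static int real_do_thing(int x) { return x * 2; }

/* Const-qualified table: eligible for .rodata / write-protected memory. */
static const struct ops good_ops = { .do_thing = real_do_thing };

static int call_through(const struct ops *ops, int x)
{
    /* ops->do_thing = NULL;   <-- rejected at compile time: assignment of
     *                             a member of a read-only object */
    return ops->do_thing(x);
}

int main(void)
{
    printf("%d\n", call_through(&good_ops, 21));
    return 0;
}
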
7017 diff -urNp linux-2.6.34.1/arch/x86/include/asm/e820.h linux-2.6.34.1/arch/x86/include/asm/e820.h
7018 --- linux-2.6.34.1/arch/x86/include/asm/e820.h 2010-07-05 14:24:10.000000000 -0400
7019 +++ linux-2.6.34.1/arch/x86/include/asm/e820.h 2010-07-07 09:04:45.000000000 -0400
7020 @@ -64,7 +64,7 @@ struct e820map {
7021 #define ISA_START_ADDRESS 0xa0000
7022 #define ISA_END_ADDRESS 0x100000
7023
7024 -#define BIOS_BEGIN 0x000a0000
7025 +#define BIOS_BEGIN 0x000c0000
7026 #define BIOS_END 0x00100000
7027
7028 #ifdef __KERNEL__
7029 diff -urNp linux-2.6.34.1/arch/x86/include/asm/elf.h linux-2.6.34.1/arch/x86/include/asm/elf.h
7030 --- linux-2.6.34.1/arch/x86/include/asm/elf.h 2010-07-05 14:24:10.000000000 -0400
7031 +++ linux-2.6.34.1/arch/x86/include/asm/elf.h 2010-07-07 09:04:45.000000000 -0400
7032 @@ -237,7 +237,25 @@ extern int force_personality32;
7033 the loader. We need to make sure that it is out of the way of the program
7034 that it will "exec", and that there is sufficient room for the brk. */
7035
7036 +#ifdef CONFIG_PAX_SEGMEXEC
7037 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7038 +#else
7039 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7040 +#endif
7041 +
7042 +#ifdef CONFIG_PAX_ASLR
7043 +#ifdef CONFIG_X86_32
7044 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7045 +
7046 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7047 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7048 +#else
7049 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7050 +
7051 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7052 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7053 +#endif
7054 +#endif
7055
7056 /* This yields a mask that user programs can use to figure out what
7057 instruction set this CPU supports. This could be done in user space,
7058 @@ -291,8 +309,7 @@ do { \
7059 #define ARCH_DLINFO \
7060 do { \
7061 if (vdso_enabled) \
7062 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7063 - (unsigned long)current->mm->context.vdso); \
7064 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7065 } while (0)
7066
7067 #define AT_SYSINFO 32
7068 @@ -303,7 +320,7 @@ do { \
7069
7070 #endif /* !CONFIG_X86_32 */
7071
7072 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7073 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7074
7075 #define VDSO_ENTRY \
7076 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7077 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7078 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7079 #define compat_arch_setup_additional_pages syscall32_setup_pages
7080
7081 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7082 -#define arch_randomize_brk arch_randomize_brk
7083 -
7084 #endif /* _ASM_X86_ELF_H */
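
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are counts of random bits applied at page granularity, so the randomisation window is (1 << bits) * PAGE_SIZE. A quick arithmetic check, assuming 4 KiB pages and the 16-bit i386 (non-SEGMEXEC) value from the hunk above:

#include <stdio.h>

int main(void)
{
    const unsigned long page_size = 4096;    /* assumed 4 KiB pages          */
    const unsigned int delta_mmap_len = 16;  /* i386 value without SEGMEXEC  */
    unsigned long span = (1UL << delta_mmap_len) * page_size;

    printf("mmap randomisation window: %lu MiB\n", span >> 20);  /* 256 MiB */
    return 0;
}
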
7085 diff -urNp linux-2.6.34.1/arch/x86/include/asm/futex.h linux-2.6.34.1/arch/x86/include/asm/futex.h
7086 --- linux-2.6.34.1/arch/x86/include/asm/futex.h 2010-07-05 14:24:10.000000000 -0400
7087 +++ linux-2.6.34.1/arch/x86/include/asm/futex.h 2010-07-07 09:04:45.000000000 -0400
7088 @@ -11,17 +11,54 @@
7089 #include <asm/processor.h>
7090 #include <asm/system.h>
7091
7092 +#ifdef CONFIG_X86_32
7093 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7094 + asm volatile( \
7095 + "movw\t%w6, %%ds\n" \
7096 + "1:\t" insn "\n" \
7097 + "2:\tpushl\t%%ss\n" \
7098 + "\tpopl\t%%ds\n" \
7099 + "\t.section .fixup,\"ax\"\n" \
7100 + "3:\tmov\t%3, %1\n" \
7101 + "\tjmp\t2b\n" \
7102 + "\t.previous\n" \
7103 + _ASM_EXTABLE(1b, 3b) \
7104 + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7105 + : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
7106 +
7107 +#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7108 + asm volatile("movw\t%w7, %%es\n" \
7109 + "1:\tmovl\t%%es:%2, %0\n" \
7110 + "\tmovl\t%0, %3\n" \
7111 + "\t" insn "\n" \
7112 + "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
7113 + "\tjnz\t1b\n" \
7114 + "3:\tpushl\t%%ss\n" \
7115 + "\tpopl\t%%es\n" \
7116 + "\t.section .fixup,\"ax\"\n" \
7117 + "4:\tmov\t%5, %1\n" \
7118 + "\tjmp\t3b\n" \
7119 + "\t.previous\n" \
7120 + _ASM_EXTABLE(1b, 4b) \
7121 + _ASM_EXTABLE(2b, 4b) \
7122 + : "=&a" (oldval), "=&r" (ret), \
7123 + "+m" (*uaddr), "=&r" (tem) \
7124 + : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
7125 +#else
7126 +#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7127 + typecheck(u32 *, uaddr); \
7128 asm volatile("1:\t" insn "\n" \
7129 "2:\t.section .fixup,\"ax\"\n" \
7130 "3:\tmov\t%3, %1\n" \
7131 "\tjmp\t2b\n" \
7132 "\t.previous\n" \
7133 _ASM_EXTABLE(1b, 3b) \
7134 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7135 + : "=r" (oldval), "=r" (ret), \
7136 + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\
7137 : "i" (-EFAULT), "0" (oparg), "1" (0))
7138
7139 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7140 + typecheck(u32 *, uaddr); \
7141 asm volatile("1:\tmovl %2, %0\n" \
7142 "\tmovl\t%0, %3\n" \
7143 "\t" insn "\n" \
7144 @@ -34,10 +71,12 @@
7145 _ASM_EXTABLE(1b, 4b) \
7146 _ASM_EXTABLE(2b, 4b) \
7147 : "=&a" (oldval), "=&r" (ret), \
7148 - "+m" (*uaddr), "=&r" (tem) \
7149 + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\
7150 + "=&r" (tem) \
7151 : "r" (oparg), "i" (-EFAULT), "1" (0))
7152 +#endif
7153
7154 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
7155 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7156 {
7157 int op = (encoded_op >> 28) & 7;
7158 int cmp = (encoded_op >> 24) & 15;
7159 @@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser
7160
7161 switch (op) {
7162 case FUTEX_OP_SET:
7163 +#ifdef CONFIG_X86_32
7164 + __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
7165 +#else
7166 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7167 +#endif
7168 break;
7169 case FUTEX_OP_ADD:
7170 +#ifdef CONFIG_X86_32
7171 + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
7172 + uaddr, oparg);
7173 +#else
7174 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7175 uaddr, oparg);
7176 +#endif
7177 break;
7178 case FUTEX_OP_OR:
7179 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
7180 @@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser
7181 return ret;
7182 }
7183
7184 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
7185 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
7186 int newval)
7187 {
7188
7189 @@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_i
7190 return -ENOSYS;
7191 #endif
7192
7193 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
7194 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7195 return -EFAULT;
7196
7197 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
7198 - "2:\t.section .fixup, \"ax\"\n"
7199 + asm volatile(
7200 +#ifdef CONFIG_X86_32
7201 + "\tmovw %w5, %%ds\n"
7202 + "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n"
7203 + "2:\tpushl %%ss\n"
7204 + "\tpopl %%ds\n"
7205 +#else
7206 + "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
7207 + "2:\n"
7208 +#endif
7209 + "\t.section .fixup, \"ax\"\n"
7210 "3:\tmov %2, %0\n"
7211 "\tjmp 2b\n"
7212 "\t.previous\n"
7213 _ASM_EXTABLE(1b, 3b)
7214 +#ifdef CONFIG_X86_32
7215 : "=a" (oldval), "+m" (*uaddr)
7216 + : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
7217 +#else
7218 + : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))
7219 : "i" (-EFAULT), "r" (newval), "0" (oldval)
7220 +#endif
7221 : "memory"
7222 );
7223
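
On the amd64 side the futex word appears to be accessed through the shadow mapping that UDEREF maintains for userland, and because uaddr is a u32 pointer the hunk adds PAX_USER_SHADOW_BASE / 4 array elements, which works out to PAX_USER_SHADOW_BASE bytes. The check below uses a stand-in constant, not the real base address:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE 0x100000UL            /* stand-in, not the real constant */

int main(void)
{
    size_t elements = SHADOW_BASE / sizeof(uint32_t);  /* what the hunk adds */
    size_t bytes    = elements * sizeof(uint32_t);

    printf("u32 elements added: %zu -> byte displacement: %zu\n",
           elements, bytes);              /* byte displacement == SHADOW_BASE */
    return 0;
}
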
7224 diff -urNp linux-2.6.34.1/arch/x86/include/asm/i387.h linux-2.6.34.1/arch/x86/include/asm/i387.h
7225 --- linux-2.6.34.1/arch/x86/include/asm/i387.h 2010-07-05 14:24:10.000000000 -0400
7226 +++ linux-2.6.34.1/arch/x86/include/asm/i387.h 2010-07-07 09:04:45.000000000 -0400
7227 @@ -70,6 +70,11 @@ static inline int fxrstor_checking(struc
7228 {
7229 int err;
7230
7231 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7232 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7233 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7234 +#endif
7235 +
7236 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
7237 "2:\n"
7238 ".section .fixup,\"ax\"\n"
7239 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i38
7240 {
7241 int err;
7242
7243 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7244 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7245 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7246 +#endif
7247 +
7248 asm volatile("1: rex64/fxsave (%[fx])\n\t"
7249 "2:\n"
7250 ".section .fixup,\"ax\"\n"
7251 @@ -205,13 +215,8 @@ static inline int fxrstor_checking(struc
7252 }
7253
7254 /* We need a safe address that is cheap to find and that is already
7255 - in L1 during context switch. The best choices are unfortunately
7256 - different for UP and SMP */
7257 -#ifdef CONFIG_SMP
7258 -#define safe_address (__per_cpu_offset[0])
7259 -#else
7260 -#define safe_address (kstat_cpu(0).cpustat.user)
7261 -#endif
7262 + in L1 during context switch. */
7263 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7264
7265 /*
7266 * These must be called with preempt disabled
7267 diff -urNp linux-2.6.34.1/arch/x86/include/asm/io.h linux-2.6.34.1/arch/x86/include/asm/io.h
7268 --- linux-2.6.34.1/arch/x86/include/asm/io.h 2010-07-05 14:24:10.000000000 -0400
7269 +++ linux-2.6.34.1/arch/x86/include/asm/io.h 2010-07-07 09:04:45.000000000 -0400
7270 @@ -213,6 +213,17 @@ extern void iounmap(volatile void __iome
7271
7272 #include <linux/vmalloc.h>
7273
7274 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7275 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7276 +{
7277 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7278 +}
7279 +
7280 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7281 +{
7282 + return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7283 +}
7284 +
7285 /*
7286 * Convert a virtual cached pointer to an uncached pointer
7287 */
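
The added valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers reject ranges whose last page frame is at or beyond 1 << (x86_phys_bits - PAGE_SHIFT). The stand-alone sketch below reproduces the same comparison with example numbers (36 physical address bits, 4 KiB pages) and assumes a 64-bit build:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int range_is_valid(unsigned long addr, unsigned long count,
                          unsigned int phys_bits)
{
    /* Same comparison as the added helper, with phys_bits as a parameter. */
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
           (1UL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    unsigned int phys_bits = 36;  /* example CPU: 64 GiB of physical space */

    printf("%d\n", range_is_valid(0x100000, 4096, phys_bits));   /* 1: accepted */
    printf("%d\n", range_is_valid(1UL << 40, 4096, phys_bits));  /* 0: rejected */
    return 0;
}
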
7288 diff -urNp linux-2.6.34.1/arch/x86/include/asm/iommu.h linux-2.6.34.1/arch/x86/include/asm/iommu.h
7289 --- linux-2.6.34.1/arch/x86/include/asm/iommu.h 2010-07-05 14:24:10.000000000 -0400
7290 +++ linux-2.6.34.1/arch/x86/include/asm/iommu.h 2010-07-07 09:04:45.000000000 -0400
7291 @@ -1,7 +1,7 @@
7292 #ifndef _ASM_X86_IOMMU_H
7293 #define _ASM_X86_IOMMU_H
7294
7295 -extern struct dma_map_ops nommu_dma_ops;
7296 +extern const struct dma_map_ops nommu_dma_ops;
7297 extern int force_iommu, no_iommu;
7298 extern int iommu_detected;
7299 extern int iommu_pass_through;
7300 diff -urNp linux-2.6.34.1/arch/x86/include/asm/irqflags.h linux-2.6.34.1/arch/x86/include/asm/irqflags.h
7301 --- linux-2.6.34.1/arch/x86/include/asm/irqflags.h 2010-07-05 14:24:10.000000000 -0400
7302 +++ linux-2.6.34.1/arch/x86/include/asm/irqflags.h 2010-07-07 09:04:45.000000000 -0400
7303 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
7304 sti; \
7305 sysexit
7306
7307 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7308 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7309 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7310 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7311 +
7312 #else
7313 #define INTERRUPT_RETURN iret
7314 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7315 diff -urNp linux-2.6.34.1/arch/x86/include/asm/kvm_host.h linux-2.6.34.1/arch/x86/include/asm/kvm_host.h
7316 --- linux-2.6.34.1/arch/x86/include/asm/kvm_host.h 2010-07-05 14:24:10.000000000 -0400
7317 +++ linux-2.6.34.1/arch/x86/include/asm/kvm_host.h 2010-07-07 09:04:45.000000000 -0400
7318 @@ -547,7 +547,7 @@ struct kvm_x86_ops {
7319 const struct trace_print_flags *exit_reasons_str;
7320 };
7321
7322 -extern struct kvm_x86_ops *kvm_x86_ops;
7323 +extern const struct kvm_x86_ops *kvm_x86_ops;
7324
7325 int kvm_mmu_module_init(void);
7326 void kvm_mmu_module_exit(void);
7327 diff -urNp linux-2.6.34.1/arch/x86/include/asm/local.h linux-2.6.34.1/arch/x86/include/asm/local.h
7328 --- linux-2.6.34.1/arch/x86/include/asm/local.h 2010-07-05 14:24:10.000000000 -0400
7329 +++ linux-2.6.34.1/arch/x86/include/asm/local.h 2010-07-07 09:04:45.000000000 -0400
7330 @@ -18,26 +18,90 @@ typedef struct {
7331
7332 static inline void local_inc(local_t *l)
7333 {
7334 - asm volatile(_ASM_INC "%0"
7335 + asm volatile(_ASM_INC "%0\n"
7336 +
7337 +#ifdef CONFIG_PAX_REFCOUNT
7338 +#ifdef CONFIG_X86_32
7339 + "into\n0:\n"
7340 +#else
7341 + "jno 0f\n"
7342 + "int $4\n0:\n"
7343 +#endif
7344 + ".pushsection .fixup,\"ax\"\n"
7345 + "1:\n"
7346 + _ASM_DEC "%0\n"
7347 + "jmp 0b\n"
7348 + ".popsection\n"
7349 + _ASM_EXTABLE(0b, 1b)
7350 +#endif
7351 +
7352 : "+m" (l->a.counter));
7353 }
7354
7355 static inline void local_dec(local_t *l)
7356 {
7357 - asm volatile(_ASM_DEC "%0"
7358 + asm volatile(_ASM_DEC "%0\n"
7359 +
7360 +#ifdef CONFIG_PAX_REFCOUNT
7361 +#ifdef CONFIG_X86_32
7362 + "into\n0:\n"
7363 +#else
7364 + "jno 0f\n"
7365 + "int $4\n0:\n"
7366 +#endif
7367 + ".pushsection .fixup,\"ax\"\n"
7368 + "1:\n"
7369 + _ASM_INC "%0\n"
7370 + "jmp 0b\n"
7371 + ".popsection\n"
7372 + _ASM_EXTABLE(0b, 1b)
7373 +#endif
7374 +
7375 : "+m" (l->a.counter));
7376 }
7377
7378 static inline void local_add(long i, local_t *l)
7379 {
7380 - asm volatile(_ASM_ADD "%1,%0"
7381 + asm volatile(_ASM_ADD "%1,%0\n"
7382 +
7383 +#ifdef CONFIG_PAX_REFCOUNT
7384 +#ifdef CONFIG_X86_32
7385 + "into\n0:\n"
7386 +#else
7387 + "jno 0f\n"
7388 + "int $4\n0:\n"
7389 +#endif
7390 + ".pushsection .fixup,\"ax\"\n"
7391 + "1:\n"
7392 + _ASM_SUB "%1,%0\n"
7393 + "jmp 0b\n"
7394 + ".popsection\n"
7395 + _ASM_EXTABLE(0b, 1b)
7396 +#endif
7397 +
7398 : "+m" (l->a.counter)
7399 : "ir" (i));
7400 }
7401
7402 static inline void local_sub(long i, local_t *l)
7403 {
7404 - asm volatile(_ASM_SUB "%1,%0"
7405 + asm volatile(_ASM_SUB "%1,%0\n"
7406 +
7407 +#ifdef CONFIG_PAX_REFCOUNT
7408 +#ifdef CONFIG_X86_32
7409 + "into\n0:\n"
7410 +#else
7411 + "jno 0f\n"
7412 + "int $4\n0:\n"
7413 +#endif
7414 + ".pushsection .fixup,\"ax\"\n"
7415 + "1:\n"
7416 + _ASM_ADD "%1,%0\n"
7417 + "jmp 0b\n"
7418 + ".popsection\n"
7419 + _ASM_EXTABLE(0b, 1b)
7420 +#endif
7421 +
7422 : "+m" (l->a.counter)
7423 : "ir" (i));
7424 }
7425 @@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon
7426 {
7427 unsigned char c;
7428
7429 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7430 + asm volatile(_ASM_SUB "%2,%0\n"
7431 +
7432 +#ifdef CONFIG_PAX_REFCOUNT
7433 +#ifdef CONFIG_X86_32
7434 + "into\n0:\n"
7435 +#else
7436 + "jno 0f\n"
7437 + "int $4\n0:\n"
7438 +#endif
7439 + ".pushsection .fixup,\"ax\"\n"
7440 + "1:\n"
7441 + _ASM_ADD "%2,%0\n"
7442 + "jmp 0b\n"
7443 + ".popsection\n"
7444 + _ASM_EXTABLE(0b, 1b)
7445 +#endif
7446 +
7447 + "sete %1\n"
7448 : "+m" (l->a.counter), "=qm" (c)
7449 : "ir" (i) : "memory");
7450 return c;
7451 @@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc
7452 {
7453 unsigned char c;
7454
7455 - asm volatile(_ASM_DEC "%0; sete %1"
7456 + asm volatile(_ASM_DEC "%0\n"
7457 +
7458 +#ifdef CONFIG_PAX_REFCOUNT
7459 +#ifdef CONFIG_X86_32
7460 + "into\n0:\n"
7461 +#else
7462 + "jno 0f\n"
7463 + "int $4\n0:\n"
7464 +#endif
7465 + ".pushsection .fixup,\"ax\"\n"
7466 + "1:\n"
7467 + _ASM_INC "%0\n"
7468 + "jmp 0b\n"
7469 + ".popsection\n"
7470 + _ASM_EXTABLE(0b, 1b)
7471 +#endif
7472 +
7473 + "sete %1\n"
7474 : "+m" (l->a.counter), "=qm" (c)
7475 : : "memory");
7476 return c != 0;
7477 @@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc
7478 {
7479 unsigned char c;
7480
7481 - asm volatile(_ASM_INC "%0; sete %1"
7482 + asm volatile(_ASM_INC "%0\n"
7483 +
7484 +#ifdef CONFIG_PAX_REFCOUNT
7485 +#ifdef CONFIG_X86_32
7486 + "into\n0:\n"
7487 +#else
7488 + "jno 0f\n"
7489 + "int $4\n0:\n"
7490 +#endif
7491 + ".pushsection .fixup,\"ax\"\n"
7492 + "1:\n"
7493 + _ASM_DEC "%0\n"
7494 + "jmp 0b\n"
7495 + ".popsection\n"
7496 + _ASM_EXTABLE(0b, 1b)
7497 +#endif
7498 +
7499 + "sete %1\n"
7500 : "+m" (l->a.counter), "=qm" (c)
7501 : : "memory");
7502 return c != 0;
7503 @@ -110,7 +225,24 @@ static inline int local_add_negative(lon
7504 {
7505 unsigned char c;
7506
7507 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7508 + asm volatile(_ASM_ADD "%2,%0\n"
7509 +
7510 +#ifdef CONFIG_PAX_REFCOUNT
7511 +#ifdef CONFIG_X86_32
7512 + "into\n0:\n"
7513 +#else
7514 + "jno 0f\n"
7515 + "int $4\n0:\n"
7516 +#endif
7517 + ".pushsection .fixup,\"ax\"\n"
7518 + "1:\n"
7519 + _ASM_SUB "%2,%0\n"
7520 + "jmp 0b\n"
7521 + ".popsection\n"
7522 + _ASM_EXTABLE(0b, 1b)
7523 +#endif
7524 +
7525 + "sets %1\n"
7526 : "+m" (l->a.counter), "=qm" (c)
7527 : "ir" (i) : "memory");
7528 return c;
7529 @@ -133,7 +265,23 @@ static inline long local_add_return(long
7530 #endif
7531 /* Modern 486+ processor */
7532 __i = i;
7533 - asm volatile(_ASM_XADD "%0, %1;"
7534 + asm volatile(_ASM_XADD "%0, %1\n"
7535 +
7536 +#ifdef CONFIG_PAX_REFCOUNT
7537 +#ifdef CONFIG_X86_32
7538 + "into\n0:\n"
7539 +#else
7540 + "jno 0f\n"
7541 + "int $4\n0:\n"
7542 +#endif
7543 + ".pushsection .fixup,\"ax\"\n"
7544 + "1:\n"
7545 + _ASM_MOV "%0,%1\n"
7546 + "jmp 0b\n"
7547 + ".popsection\n"
7548 + _ASM_EXTABLE(0b, 1b)
7549 +#endif
7550 +
7551 : "+r" (i), "+m" (l->a.counter)
7552 : : "memory");
7553 return i + __i;
7554 diff -urNp linux-2.6.34.1/arch/x86/include/asm/mc146818rtc.h linux-2.6.34.1/arch/x86/include/asm/mc146818rtc.h
7555 --- linux-2.6.34.1/arch/x86/include/asm/mc146818rtc.h 2010-07-05 14:24:10.000000000 -0400
7556 +++ linux-2.6.34.1/arch/x86/include/asm/mc146818rtc.h 2010-07-07 09:04:45.000000000 -0400
7557 @@ -81,8 +81,8 @@ static inline unsigned char current_lock
7558 #else
7559 #define lock_cmos_prefix(reg) do {} while (0)
7560 #define lock_cmos_suffix(reg) do {} while (0)
7561 -#define lock_cmos(reg)
7562 -#define unlock_cmos()
7563 +#define lock_cmos(reg) do {} while (0)
7564 +#define unlock_cmos() do {} while (0)
7565 #define do_i_have_lock_cmos() 0
7566 #define current_lock_cmos_reg() 0
7567 #endif
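
Turning the empty lock_cmos()/unlock_cmos() macros into do {} while (0) makes the no-op variants behave like real single statements: every call site needs the trailing semicolon, empty-body warnings go away, and UP and SMP builds accept exactly the same syntax. A small illustration with hypothetical macro names:

#include <stdio.h>

#define unlock_cmos_empty()                  /* old style: expands to nothing */
#define unlock_cmos_stmt()  do {} while (0)  /* new style: a single statement */

int main(void)
{
    int restore = 1;

    if (restore)
        unlock_cmos_stmt();          /* one real statement; the ';' is required */
    else
        printf("nothing to restore\n");

    /* Writing "unlock_cmos_stmt()" without the trailing ';' fails to compile,
     * while the empty-expansion form would silently accept it, so different
     * configurations could end up accepting different call-site syntax. */
    return 0;
}
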
7568 diff -urNp linux-2.6.34.1/arch/x86/include/asm/microcode.h linux-2.6.34.1/arch/x86/include/asm/microcode.h
7569 --- linux-2.6.34.1/arch/x86/include/asm/microcode.h 2010-07-05 14:24:10.000000000 -0400
7570 +++ linux-2.6.34.1/arch/x86/include/asm/microcode.h 2010-07-07 09:04:45.000000000 -0400
7571 @@ -12,13 +12,13 @@ struct device;
7572 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
7573
7574 struct microcode_ops {
7575 - enum ucode_state (*request_microcode_user) (int cpu,
7576 + enum ucode_state (* const request_microcode_user) (int cpu,
7577 const void __user *buf, size_t size);
7578
7579 - enum ucode_state (*request_microcode_fw) (int cpu,
7580 + enum ucode_state (* const request_microcode_fw) (int cpu,
7581 struct device *device);
7582
7583 - void (*microcode_fini_cpu) (int cpu);
7584 + void (* const microcode_fini_cpu) (int cpu);
7585
7586 /*
7587 * The generic 'microcode_core' part guarantees that
7588 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
7589 extern struct ucode_cpu_info ucode_cpu_info[];
7590
7591 #ifdef CONFIG_MICROCODE_INTEL
7592 -extern struct microcode_ops * __init init_intel_microcode(void);
7593 +extern const struct microcode_ops * __init init_intel_microcode(void);
7594 #else
7595 -static inline struct microcode_ops * __init init_intel_microcode(void)
7596 +static inline const struct microcode_ops * __init init_intel_microcode(void)
7597 {
7598 return NULL;
7599 }
7600 #endif /* CONFIG_MICROCODE_INTEL */
7601
7602 #ifdef CONFIG_MICROCODE_AMD
7603 -extern struct microcode_ops * __init init_amd_microcode(void);
7604 +extern const struct microcode_ops * __init init_amd_microcode(void);
7605 #else
7606 -static inline struct microcode_ops * __init init_amd_microcode(void)
7607 +static inline const struct microcode_ops * __init init_amd_microcode(void)
7608 {
7609 return NULL;
7610 }
7611 diff -urNp linux-2.6.34.1/arch/x86/include/asm/mman.h linux-2.6.34.1/arch/x86/include/asm/mman.h
7612 --- linux-2.6.34.1/arch/x86/include/asm/mman.h 2010-07-05 14:24:10.000000000 -0400
7613 +++ linux-2.6.34.1/arch/x86/include/asm/mman.h 2010-07-07 09:04:45.000000000 -0400
7614 @@ -5,4 +5,14 @@
7615
7616 #include <asm-generic/mman.h>
7617
7618 +#ifdef __KERNEL__
7619 +#ifndef __ASSEMBLY__
7620 +#ifdef CONFIG_X86_32
7621 +#define arch_mmap_check i386_mmap_check
7622 +int i386_mmap_check(unsigned long addr, unsigned long len,
7623 + unsigned long flags);
7624 +#endif
7625 +#endif
7626 +#endif
7627 +
7628 #endif /* _ASM_X86_MMAN_H */
7629 diff -urNp linux-2.6.34.1/arch/x86/include/asm/mmu.h linux-2.6.34.1/arch/x86/include/asm/mmu.h
7630 --- linux-2.6.34.1/arch/x86/include/asm/mmu.h 2010-07-05 14:24:10.000000000 -0400
7631 +++ linux-2.6.34.1/arch/x86/include/asm/mmu.h 2010-07-07 09:04:45.000000000 -0400
7632 @@ -9,10 +9,23 @@
7633 * we put the segment information here.
7634 */
7635 typedef struct {
7636 - void *ldt;
7637 + struct desc_struct *ldt;
7638 int size;
7639 struct mutex lock;
7640 - void *vdso;
7641 + unsigned long vdso;
7642 +
7643 +#ifdef CONFIG_X86_32
7644 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7645 + unsigned long user_cs_base;
7646 + unsigned long user_cs_limit;
7647 +
7648 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7649 + cpumask_t cpu_user_cs_mask;
7650 +#endif
7651 +
7652 +#endif
7653 +#endif
7654 +
7655 } mm_context_t;
7656
7657 #ifdef CONFIG_SMP
7658 diff -urNp linux-2.6.34.1/arch/x86/include/asm/mmu_context.h linux-2.6.34.1/arch/x86/include/asm/mmu_context.h
7659 --- linux-2.6.34.1/arch/x86/include/asm/mmu_context.h 2010-07-05 14:24:10.000000000 -0400
7660 +++ linux-2.6.34.1/arch/x86/include/asm/mmu_context.h 2010-07-07 09:04:45.000000000 -0400
7661 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7662
7663 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7664 {
7665 +
7666 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7667 + unsigned int i;
7668 + pgd_t *pgd;
7669 +
7670 + pax_open_kernel();
7671 + pgd = get_cpu_pgd(smp_processor_id());
7672 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7673 + if (paravirt_enabled())
7674 + set_pgd(pgd+i, native_make_pgd(0));
7675 + else
7676 + pgd[i] = native_make_pgd(0);
7677 + pax_close_kernel();
7678 +#endif
7679 +
7680 #ifdef CONFIG_SMP
7681 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7682 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7683 @@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_s
7684 struct task_struct *tsk)
7685 {
7686 unsigned cpu = smp_processor_id();
7687 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7688 + int tlbstate = TLBSTATE_OK;
7689 +#endif
7690
7691 if (likely(prev != next)) {
7692 /* stop flush ipis for the previous mm */
7693 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7694 #ifdef CONFIG_SMP
7695 +#ifdef CONFIG_X86_32
7696 + tlbstate = percpu_read(cpu_tlbstate.state);
7697 +#endif
7698 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7699 percpu_write(cpu_tlbstate.active_mm, next);
7700 #endif
7701 cpumask_set_cpu(cpu, mm_cpumask(next));
7702
7703 /* Re-load page tables */
7704 +#ifdef CONFIG_PAX_PER_CPU_PGD
7705 + pax_open_kernel();
7706 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7707 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7708 + pax_close_kernel();
7709 + load_cr3(get_cpu_pgd(cpu));
7710 +#else
7711 load_cr3(next->pgd);
7712 +#endif
7713
7714 /*
7715 * load the LDT, if the LDT is different:
7716 */
7717 if (unlikely(prev->context.ldt != next->context.ldt))
7718 load_LDT_nolock(&next->context);
7719 - }
7720 +
7721 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7722 + if (!(__supported_pte_mask & _PAGE_NX)) {
7723 + smp_mb__before_clear_bit();
7724 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7725 + smp_mb__after_clear_bit();
7726 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7727 + }
7728 +#endif
7729 +
7730 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7731 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7732 + prev->context.user_cs_limit != next->context.user_cs_limit))
7733 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7734 #ifdef CONFIG_SMP
7735 + else if (unlikely(tlbstate != TLBSTATE_OK))
7736 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7737 +#endif
7738 +#endif
7739 +
7740 + }
7741 else {
7742 +
7743 +#ifdef CONFIG_PAX_PER_CPU_PGD
7744 + pax_open_kernel();
7745 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7746 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7747 + pax_close_kernel();
7748 + load_cr3(get_cpu_pgd(cpu));
7749 +#endif
7750 +
7751 +#ifdef CONFIG_SMP
7752 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7753 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7754
7755 @@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_s
7756 * tlb flush IPI delivery. We must reload CR3
7757 * to make sure to use no freed page tables.
7758 */
7759 +
7760 +#ifndef CONFIG_PAX_PER_CPU_PGD
7761 load_cr3(next->pgd);
7762 +#endif
7763 +
7764 load_LDT_nolock(&next->context);
7765 +
7766 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7767 + if (!(__supported_pte_mask & _PAGE_NX))
7768 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7769 +#endif
7770 +
7771 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7772 +#ifdef CONFIG_PAX_PAGEEXEC
7773 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7774 +#endif
7775 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7776 +#endif
7777 +
7778 }
7779 - }
7780 #endif
7781 + }
7782 }
7783
7784 #define activate_mm(prev, next) \
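The switch_mm() hunk above is the heart of PAX_PER_CPU_PGD: instead of loading CR3 with the incoming task's own page directory, the user half of that directory is copied (and, with UDEREF, shadowed) into a directory private to the current CPU, and only the private copy is ever installed. Below is a minimal userspace sketch of that idea, not kernel code; the array sizes, the cpu_pgd layout and the helper names are illustrative stand-ins for the real get_cpu_pgd()/__clone_user_pgds() machinery.

/* Toy model: on a "context switch" the user-space entries of the incoming
 * mm's page directory are copied into this CPU's private directory. */
#include <stdio.h>

#define PTRS_PER_PGD   8                    /* toy size; real x86 uses 512/1024 */
#define USER_PGD_PTRS  (PTRS_PER_PGD / 2)   /* lower half belongs to userland */
#define NR_CPUS        2

typedef unsigned long pgd_t;

static pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];    /* per-CPU directories */

static void clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
{
        while (count--)
                *dst++ = *src++;            /* copy user-space entries only */
}

static void switch_mm_model(const pgd_t *next_pgd, int cpu)
{
        clone_user_pgds(cpu_pgd[cpu], next_pgd, USER_PGD_PTRS);
        /* the kernel half of cpu_pgd[cpu] stays as set up at boot;
         * load_cr3(cpu_pgd[cpu]) would follow in the real code */
}

int main(void)
{
        pgd_t task_pgd[PTRS_PER_PGD] = { 0x101, 0x102, 0x103, 0x104 };

        switch_mm_model(task_pgd, 0);
        printf("cpu0 user pgd[0] = %#lx\n", cpu_pgd[0][0]);
        return 0;
}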
7785 diff -urNp linux-2.6.34.1/arch/x86/include/asm/module.h linux-2.6.34.1/arch/x86/include/asm/module.h
7786 --- linux-2.6.34.1/arch/x86/include/asm/module.h 2010-07-05 14:24:10.000000000 -0400
7787 +++ linux-2.6.34.1/arch/x86/include/asm/module.h 2010-07-07 09:04:45.000000000 -0400
7788 @@ -59,13 +59,31 @@
7789 #error unknown processor family
7790 #endif
7791
7792 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7793 +#define MODULE_PAX_UDEREF "UDEREF "
7794 +#else
7795 +#define MODULE_PAX_UDEREF ""
7796 +#endif
7797 +
7798 #ifdef CONFIG_X86_32
7799 # ifdef CONFIG_4KSTACKS
7800 # define MODULE_STACKSIZE "4KSTACKS "
7801 # else
7802 # define MODULE_STACKSIZE ""
7803 # endif
7804 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
7805 +# ifdef CONFIG_PAX_KERNEXEC
7806 +# define MODULE_PAX_KERNEXEC "KERNEXEC "
7807 +# else
7808 +# define MODULE_PAX_KERNEXEC ""
7809 +# endif
7810 +# ifdef CONFIG_GRKERNSEC
7811 +# define MODULE_GRSEC "GRSECURITY "
7812 +# else
7813 +# define MODULE_GRSEC ""
7814 +# endif
7815 +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7816 +#else
7817 +# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF
7818 #endif
7819
7820 #endif /* _ASM_X86_MODULE_H */
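The module.h hunk extends the vermagic string so that modules built without matching GRSECURITY/KERNEXEC/UDEREF options refuse to load. The mechanism is plain adjacent string-literal concatenation at preprocessing time; the short program below demonstrates it with an assumed MODULE_PROC_FAMILY value of "586 " (the other fragment values are taken from the hunk).

/* Each enabled option contributes one token that must match between
 * kernel and module; each fragment keeps its own trailing space. */
#include <stdio.h>

#define MODULE_PROC_FAMILY   "586 "          /* assumed for illustration */
#define MODULE_STACKSIZE     "4KSTACKS "
#define MODULE_GRSEC         "GRSECURITY "
#define MODULE_PAX_KERNEXEC  "KERNEXEC "
#define MODULE_PAX_UDEREF    "UDEREF "

#define MODULE_ARCH_VERMAGIC \
        MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
        /* prints: 586 4KSTACKS GRSECURITY KERNEXEC UDEREF */
        printf("%s\n", MODULE_ARCH_VERMAGIC);
        return 0;
}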
7821 diff -urNp linux-2.6.34.1/arch/x86/include/asm/page_32_types.h linux-2.6.34.1/arch/x86/include/asm/page_32_types.h
7822 --- linux-2.6.34.1/arch/x86/include/asm/page_32_types.h 2010-07-05 14:24:10.000000000 -0400
7823 +++ linux-2.6.34.1/arch/x86/include/asm/page_32_types.h 2010-07-07 09:04:45.000000000 -0400
7824 @@ -15,6 +15,10 @@
7825 */
7826 #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
7827
7828 +#ifdef CONFIG_PAX_PAGEEXEC
7829 +#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
7830 +#endif
7831 +
7832 #ifdef CONFIG_4KSTACKS
7833 #define THREAD_ORDER 0
7834 #else
7835 diff -urNp linux-2.6.34.1/arch/x86/include/asm/page_64_types.h linux-2.6.34.1/arch/x86/include/asm/page_64_types.h
7836 --- linux-2.6.34.1/arch/x86/include/asm/page_64_types.h 2010-07-05 14:24:10.000000000 -0400
7837 +++ linux-2.6.34.1/arch/x86/include/asm/page_64_types.h 2010-07-07 09:04:45.000000000 -0400
7838 @@ -39,6 +39,9 @@
7839 #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
7840 #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
7841
7842 +#define ktla_ktva(addr) (addr)
7843 +#define ktva_ktla(addr) (addr)
7844 +
7845 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
7846 #define __PHYSICAL_MASK_SHIFT 46
7847 #define __VIRTUAL_MASK_SHIFT 47
7848 diff -urNp linux-2.6.34.1/arch/x86/include/asm/paravirt.h linux-2.6.34.1/arch/x86/include/asm/paravirt.h
7849 --- linux-2.6.34.1/arch/x86/include/asm/paravirt.h 2010-07-05 14:24:10.000000000 -0400
7850 +++ linux-2.6.34.1/arch/x86/include/asm/paravirt.h 2010-07-07 09:04:45.000000000 -0400
7851 @@ -720,6 +720,21 @@ static inline void __set_fixmap(unsigned
7852 pv_mmu_ops.set_fixmap(idx, phys, flags);
7853 }
7854
7855 +#ifdef CONFIG_PAX_KERNEXEC
7856 +static inline unsigned long pax_open_kernel(void)
7857 +{
7858 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7859 +}
7860 +
7861 +static inline unsigned long pax_close_kernel(void)
7862 +{
7863 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7864 +}
7865 +#else
7866 +static inline unsigned long pax_open_kernel(void) { return 0; }
7867 +static inline unsigned long pax_close_kernel(void) { return 0; }
7868 +#endif
7869 +
7870 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7871
7872 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7873 @@ -936,7 +951,7 @@ extern void default_banner(void);
7874
7875 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7876 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7877 -#define PARA_INDIRECT(addr) *%cs:addr
7878 +#define PARA_INDIRECT(addr) *%ss:addr
7879 #endif
7880
7881 #define INTERRUPT_RETURN \
7882 @@ -1013,6 +1028,21 @@ extern void default_banner(void);
7883 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7884 CLBR_NONE, \
7885 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7886 +
7887 +#define GET_CR0_INTO_RDI \
7888 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7889 + mov %rax,%rdi
7890 +
7891 +#define SET_RDI_INTO_CR0 \
7892 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7893 +
7894 +#define GET_CR3_INTO_RDI \
7895 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7896 + mov %rax,%rdi
7897 +
7898 +#define SET_RDI_INTO_CR3 \
7899 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7900 +
7901 #endif /* CONFIG_X86_32 */
7902
7903 #endif /* __ASSEMBLY__ */
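Under paravirtualization, pax_open_kernel()/pax_close_kernel() cannot simply flip CR0 themselves, so the hunk above routes them through pv_mmu_ops like any other MMU operation. The sketch below models that indirection in plain C; the struct, the wrapper names and the direct function-pointer call are simplified stand-ins for the real pv_mmu_ops table and the PVOP_CALL0() macro.

/* Dispatch through an ops table so a hypervisor could substitute its own
 * open/close handlers; the defaults are the "native" implementations. */
#include <stdio.h>

struct mmu_ops {
        unsigned long (*pax_open_kernel)(void);
        unsigned long (*pax_close_kernel)(void);
};

static unsigned long native_open(void)  { puts("native open");  return 0; }
static unsigned long native_close(void) { puts("native close"); return 0; }

static struct mmu_ops pv_mmu_ops = { native_open, native_close };

static inline unsigned long pax_open_kernel(void)
{
        return pv_mmu_ops.pax_open_kernel();    /* PVOP_CALL0() equivalent */
}

static inline unsigned long pax_close_kernel(void)
{
        return pv_mmu_ops.pax_close_kernel();
}

int main(void)
{
        pax_open_kernel();
        /* ...modify otherwise read-only kernel data here... */
        pax_close_kernel();
        return 0;
}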
7904 diff -urNp linux-2.6.34.1/arch/x86/include/asm/paravirt_types.h linux-2.6.34.1/arch/x86/include/asm/paravirt_types.h
7905 --- linux-2.6.34.1/arch/x86/include/asm/paravirt_types.h 2010-07-05 14:24:10.000000000 -0400
7906 +++ linux-2.6.34.1/arch/x86/include/asm/paravirt_types.h 2010-07-07 09:04:45.000000000 -0400
7907 @@ -312,6 +312,12 @@ struct pv_mmu_ops {
7908 an mfn. We can tell which is which from the index. */
7909 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7910 phys_addr_t phys, pgprot_t flags);
7911 +
7912 +#ifdef CONFIG_PAX_KERNEXEC
7913 + unsigned long (*pax_open_kernel)(void);
7914 + unsigned long (*pax_close_kernel)(void);
7915 +#endif
7916 +
7917 };
7918
7919 struct arch_spinlock;
7920 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pci_x86.h linux-2.6.34.1/arch/x86/include/asm/pci_x86.h
7921 --- linux-2.6.34.1/arch/x86/include/asm/pci_x86.h 2010-07-05 14:24:10.000000000 -0400
7922 +++ linux-2.6.34.1/arch/x86/include/asm/pci_x86.h 2010-07-07 09:04:45.000000000 -0400
7923 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
7924 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
7925
7926 struct pci_raw_ops {
7927 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
7928 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
7929 int reg, int len, u32 *val);
7930 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
7931 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
7932 int reg, int len, u32 val);
7933 };
7934
7935 -extern struct pci_raw_ops *raw_pci_ops;
7936 -extern struct pci_raw_ops *raw_pci_ext_ops;
7937 +extern const struct pci_raw_ops *raw_pci_ops;
7938 +extern const struct pci_raw_ops *raw_pci_ext_ops;
7939
7940 -extern struct pci_raw_ops pci_direct_conf1;
7941 +extern const struct pci_raw_ops pci_direct_conf1;
7942 extern bool port_cf9_safe;
7943
7944 /* arch_initcall level */
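The pci_x86.h hunk const-qualifies the raw-ops table and its function-pointer members so the pointers end up in read-only data and cannot be retargeted at run time, which complements KERNEXEC's write protection of kernel pages. A minimal sketch of the same pattern follows; the bus/reg parameters and the fake config-space value are illustrative only.

/* A const ops table: neither the object nor its pointer members can be
 * reassigned after initialization. */
#include <stdio.h>

struct pci_raw_ops {
        int (* const read)(unsigned int bus, int reg, unsigned int *val);
};

static int conf1_read(unsigned int bus, int reg, unsigned int *val)
{
        *val = 0xdeadbeef;      /* pretend config-space read */
        return 0;
}

static const struct pci_raw_ops pci_direct_conf1 = { conf1_read };

int main(void)
{
        unsigned int val;

        pci_direct_conf1.read(0, 0, &val);
        /* pci_direct_conf1.read = conf1_read;  would not compile */
        printf("%#x\n", val);
        return 0;
}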
7945 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgalloc.h linux-2.6.34.1/arch/x86/include/asm/pgalloc.h
7946 --- linux-2.6.34.1/arch/x86/include/asm/pgalloc.h 2010-07-05 14:24:10.000000000 -0400
7947 +++ linux-2.6.34.1/arch/x86/include/asm/pgalloc.h 2010-07-07 09:04:45.000000000 -0400
7948 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7949 pmd_t *pmd, pte_t *pte)
7950 {
7951 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7952 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7953 +}
7954 +
7955 +static inline void pmd_populate_user(struct mm_struct *mm,
7956 + pmd_t *pmd, pte_t *pte)
7957 +{
7958 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7959 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7960 }
7961
7962 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable-2level.h linux-2.6.34.1/arch/x86/include/asm/pgtable-2level.h
7963 --- linux-2.6.34.1/arch/x86/include/asm/pgtable-2level.h 2010-07-05 14:24:10.000000000 -0400
7964 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable-2level.h 2010-07-07 09:04:45.000000000 -0400
7965 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7966
7967 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7968 {
7969 + pax_open_kernel();
7970 *pmdp = pmd;
7971 + pax_close_kernel();
7972 }
7973
7974 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7975 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable-3level.h linux-2.6.34.1/arch/x86/include/asm/pgtable-3level.h
7976 --- linux-2.6.34.1/arch/x86/include/asm/pgtable-3level.h 2010-07-05 14:24:10.000000000 -0400
7977 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable-3level.h 2010-07-07 09:04:45.000000000 -0400
7978 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
7979
7980 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7981 {
7982 + pax_open_kernel();
7983 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
7984 + pax_close_kernel();
7985 }
7986
7987 static inline void native_set_pud(pud_t *pudp, pud_t pud)
7988 {
7989 + pax_open_kernel();
7990 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
7991 + pax_close_kernel();
7992 }
7993
7994 /*
7995 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable.h linux-2.6.34.1/arch/x86/include/asm/pgtable.h
7996 --- linux-2.6.34.1/arch/x86/include/asm/pgtable.h 2010-07-05 14:24:10.000000000 -0400
7997 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable.h 2010-07-07 09:04:45.000000000 -0400
7998 @@ -76,12 +76,51 @@ extern struct list_head pgd_list;
7999
8000 #define arch_end_context_switch(prev) do {} while(0)
8001
8002 +#define pax_open_kernel() native_pax_open_kernel()
8003 +#define pax_close_kernel() native_pax_close_kernel()
8004 #endif /* CONFIG_PARAVIRT */
8005
8006 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8007 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8008 +
8009 +#ifdef CONFIG_PAX_KERNEXEC
8010 +static inline unsigned long native_pax_open_kernel(void)
8011 +{
8012 + unsigned long cr0;
8013 +
8014 + preempt_disable();
8015 + barrier();
8016 + cr0 = read_cr0() ^ X86_CR0_WP;
8017 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8018 + write_cr0(cr0);
8019 + return cr0 ^ X86_CR0_WP;
8020 +}
8021 +
8022 +static inline unsigned long native_pax_close_kernel(void)
8023 +{
8024 + unsigned long cr0;
8025 +
8026 + cr0 = read_cr0() ^ X86_CR0_WP;
8027 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8028 + write_cr0(cr0);
8029 + barrier();
8030 + preempt_enable_no_resched();
8031 + return cr0 ^ X86_CR0_WP;
8032 +}
8033 +#else
8034 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8035 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8036 +#endif
8037 +
8038 /*
8039 * The following only work if pte_present() is true.
8040 * Undefined behaviour if not..
8041 */
8042 +static inline int pte_user(pte_t pte)
8043 +{
8044 + return pte_val(pte) & _PAGE_USER;
8045 +}
8046 +
8047 static inline int pte_dirty(pte_t pte)
8048 {
8049 return pte_flags(pte) & _PAGE_DIRTY;
8050 @@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t
8051 return pte_clear_flags(pte, _PAGE_RW);
8052 }
8053
8054 +static inline pte_t pte_mkread(pte_t pte)
8055 +{
8056 + return __pte(pte_val(pte) | _PAGE_USER);
8057 +}
8058 +
8059 static inline pte_t pte_mkexec(pte_t pte)
8060 {
8061 - return pte_clear_flags(pte, _PAGE_NX);
8062 +#ifdef CONFIG_X86_PAE
8063 + if (__supported_pte_mask & _PAGE_NX)
8064 + return pte_clear_flags(pte, _PAGE_NX);
8065 + else
8066 +#endif
8067 + return pte_set_flags(pte, _PAGE_USER);
8068 +}
8069 +
8070 +static inline pte_t pte_exprotect(pte_t pte)
8071 +{
8072 +#ifdef CONFIG_X86_PAE
8073 + if (__supported_pte_mask & _PAGE_NX)
8074 + return pte_set_flags(pte, _PAGE_NX);
8075 + else
8076 +#endif
8077 + return pte_clear_flags(pte, _PAGE_USER);
8078 }
8079
8080 static inline pte_t pte_mkdirty(pte_t pte)
8081 @@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long
8082 #endif
8083
8084 #ifndef __ASSEMBLY__
8085 +
8086 +#ifdef CONFIG_PAX_PER_CPU_PGD
8087 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8088 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8089 +{
8090 + return cpu_pgd[cpu];
8091 +}
8092 +#endif
8093 +
8094 #include <linux/mm_types.h>
8095
8096 static inline int pte_none(pte_t pte)
8097 @@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *p
8098
8099 static inline int pgd_bad(pgd_t pgd)
8100 {
8101 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8102 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8103 }
8104
8105 static inline int pgd_none(pgd_t pgd)
8106 @@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd)
8107 * pgd_offset() returns a (pgd_t *)
8108 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8109 */
8110 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8111 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8112 +
8113 +#ifdef CONFIG_PAX_PER_CPU_PGD
8114 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8115 +#endif
8116 +
8117 /*
8118 * a shortcut which implies the use of the kernel's pgd, instead
8119 * of a process's
8120 @@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd)
8121 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8122 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8123
8124 +#ifdef CONFIG_X86_32
8125 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8126 +#else
8127 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8128 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8129 +
8130 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8131 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8132 +#else
8133 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8134 +#endif
8135 +
8136 +#endif
8137 +
8138 #ifndef __ASSEMBLY__
8139
8140 extern int direct_gbpages;
8141 @@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(st
8142 * dst and src can be on the same page, but the range must not overlap,
8143 * and must not cross a page boundary.
8144 */
8145 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8146 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8147 {
8148 - memcpy(dst, src, count * sizeof(pgd_t));
8149 + pax_open_kernel();
8150 + while (count--)
8151 + *dst++ = *src++;
8152 + pax_close_kernel();
8153 }
8154
8155 +#ifdef CONFIG_PAX_PER_CPU_PGD
8156 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8157 +#endif
8158 +
8159 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8160 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8161 +#else
8162 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8163 +#endif
8164
8165 #include <asm-generic/pgtable.h>
8166 #endif /* __ASSEMBLY__ */
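The native_pax_open_kernel()/native_pax_close_kernel() pair above implements KERNEXEC's write window by toggling the CR0.WP bit with preemption disabled, so page-table and other normally read-only kernel data can be modified only inside an explicit open/close bracket. The userspace analogy below uses mprotect() instead of CR0.WP; it is only a model of the open/close discipline, with no error handling and no preemption concerns.

/* Data lives read-only by default and is made writable only for the
 * duration of an explicit window. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static long *ro_page;
static long page_size;

static void open_window(void)  { mprotect(ro_page, page_size, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(ro_page, page_size, PROT_READ); }

int main(void)
{
        page_size = sysconf(_SC_PAGESIZE);
        ro_page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *ro_page = 42;
        mprotect(ro_page, page_size, PROT_READ);   /* normally read-only */

        open_window();                             /* pax_open_kernel() analogue */
        *ro_page = 1337;                           /* allowed inside the window */
        close_window();                            /* pax_close_kernel() analogue */

        printf("%ld\n", *ro_page);                 /* reads are always fine */
        return 0;
}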
8167 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable_32.h linux-2.6.34.1/arch/x86/include/asm/pgtable_32.h
8168 --- linux-2.6.34.1/arch/x86/include/asm/pgtable_32.h 2010-07-05 14:24:10.000000000 -0400
8169 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable_32.h 2010-07-07 09:04:45.000000000 -0400
8170 @@ -25,8 +25,6 @@
8171 struct mm_struct;
8172 struct vm_area_struct;
8173
8174 -extern pgd_t swapper_pg_dir[1024];
8175 -
8176 static inline void pgtable_cache_init(void) { }
8177 static inline void check_pgt_cache(void) { }
8178 void paging_init(void);
8179 @@ -47,6 +45,11 @@ extern void set_pmd_pfn(unsigned long, u
8180 # include <asm/pgtable-2level.h>
8181 #endif
8182
8183 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8184 +#ifdef CONFIG_X86_PAE
8185 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8186 +#endif
8187 +
8188 #if defined(CONFIG_HIGHPTE)
8189 #define __KM_PTE \
8190 (in_nmi() ? KM_NMI_PTE : \
8191 @@ -71,7 +74,9 @@ extern void set_pmd_pfn(unsigned long, u
8192 /* Clear a kernel PTE and flush it from the TLB */
8193 #define kpte_clear_flush(ptep, vaddr) \
8194 do { \
8195 + pax_open_kernel(); \
8196 pte_clear(&init_mm, (vaddr), (ptep)); \
8197 + pax_close_kernel(); \
8198 __flush_tlb_one((vaddr)); \
8199 } while (0)
8200
8201 @@ -83,6 +88,9 @@ do { \
8202
8203 #endif /* !__ASSEMBLY__ */
8204
8205 +#define HAVE_ARCH_UNMAPPED_AREA
8206 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8207 +
8208 /*
8209 * kern_addr_valid() is (1) for FLATMEM and (0) for
8210 * SPARSEMEM and DISCONTIGMEM
8211 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable_32_types.h linux-2.6.34.1/arch/x86/include/asm/pgtable_32_types.h
8212 --- linux-2.6.34.1/arch/x86/include/asm/pgtable_32_types.h 2010-07-05 14:24:10.000000000 -0400
8213 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable_32_types.h 2010-07-07 09:04:45.000000000 -0400
8214 @@ -8,7 +8,7 @@
8215 */
8216 #ifdef CONFIG_X86_PAE
8217 # include <asm/pgtable-3level_types.h>
8218 -# define PMD_SIZE (1UL << PMD_SHIFT)
8219 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8220 # define PMD_MASK (~(PMD_SIZE - 1))
8221 #else
8222 # include <asm/pgtable-2level_types.h>
8223 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8224 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8225 #endif
8226
8227 +#ifdef CONFIG_PAX_KERNEXEC
8228 +#ifndef __ASSEMBLY__
8229 +extern unsigned char MODULES_EXEC_VADDR[];
8230 +extern unsigned char MODULES_EXEC_END[];
8231 +#endif
8232 +#include <asm/boot.h>
8233 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8234 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8235 +#else
8236 +#define ktla_ktva(addr) (addr)
8237 +#define ktva_ktla(addr) (addr)
8238 +#endif
8239 +
8240 #define MODULES_VADDR VMALLOC_START
8241 #define MODULES_END VMALLOC_END
8242 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
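With KERNEXEC on 32-bit, kernel text is reachable through two mappings, and the ktla_ktva()/ktva_ktla() macros added above translate between them by adding or subtracting a fixed offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET. The check below just exercises that inverse pair; the two constant values are the common 32-bit defaults and stand in for whatever the configuration actually selects.

/* ktla_ktva() and ktva_ktla() are exact inverses: a constant offset
 * added one way and removed the other. */
#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR  0x1000000UL      /* illustrative default */
#define PAGE_OFFSET         0xc0000000UL     /* classic 3G/1G split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
        unsigned long la = 0x00200000UL;             /* some text address */
        unsigned long va = ktla_ktva(la);

        assert(ktva_ktla(va) == la);                 /* round-trips exactly */
        printf("%#lx <-> %#lx\n", la, va);
        return 0;
}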
8243 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable_64.h linux-2.6.34.1/arch/x86/include/asm/pgtable_64.h
8244 --- linux-2.6.34.1/arch/x86/include/asm/pgtable_64.h 2010-07-05 14:24:10.000000000 -0400
8245 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable_64.h 2010-07-07 09:04:45.000000000 -0400
8246 @@ -16,10 +16,13 @@
8247
8248 extern pud_t level3_kernel_pgt[512];
8249 extern pud_t level3_ident_pgt[512];
8250 +extern pud_t level3_vmalloc_pgt[512];
8251 +extern pud_t level3_vmemmap_pgt[512];
8252 +extern pud_t level2_vmemmap_pgt[512];
8253 extern pmd_t level2_kernel_pgt[512];
8254 extern pmd_t level2_fixmap_pgt[512];
8255 -extern pmd_t level2_ident_pgt[512];
8256 -extern pgd_t init_level4_pgt[];
8257 +extern pmd_t level2_ident_pgt[512*2];
8258 +extern pgd_t init_level4_pgt[512];
8259
8260 #define swapper_pg_dir init_level4_pgt
8261
8262 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
8263
8264 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8265 {
8266 + pax_open_kernel();
8267 *pmdp = pmd;
8268 + pax_close_kernel();
8269 }
8270
8271 static inline void native_pmd_clear(pmd_t *pmd)
8272 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
8273
8274 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8275 {
8276 + pax_open_kernel();
8277 *pgdp = pgd;
8278 + pax_close_kernel();
8279 }
8280
8281 static inline void native_pgd_clear(pgd_t *pgd)
8282 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable_64_types.h linux-2.6.34.1/arch/x86/include/asm/pgtable_64_types.h
8283 --- linux-2.6.34.1/arch/x86/include/asm/pgtable_64_types.h 2010-07-05 14:24:10.000000000 -0400
8284 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable_64_types.h 2010-07-07 09:04:45.000000000 -0400
8285 @@ -59,5 +59,7 @@ typedef struct { pteval_t pte; } pte_t;
8286 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8287 #define MODULES_END _AC(0xffffffffff000000, UL)
8288 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8289 +#define MODULES_EXEC_VADDR MODULES_VADDR
8290 +#define MODULES_EXEC_END MODULES_END
8291
8292 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8293 diff -urNp linux-2.6.34.1/arch/x86/include/asm/pgtable_types.h linux-2.6.34.1/arch/x86/include/asm/pgtable_types.h
8294 --- linux-2.6.34.1/arch/x86/include/asm/pgtable_types.h 2010-07-05 14:24:10.000000000 -0400
8295 +++ linux-2.6.34.1/arch/x86/include/asm/pgtable_types.h 2010-07-07 09:04:46.000000000 -0400
8296 @@ -16,12 +16,11 @@
8297 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8298 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8299 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8300 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8301 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8302 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8303 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8304 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8305 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8306 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8307 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8308 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8309
8310 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8311 @@ -39,7 +38,6 @@
8312 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8313 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8314 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8315 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8316 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8317 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8318 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8319 @@ -55,8 +53,10 @@
8320
8321 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8322 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8323 -#else
8324 +#elif defined(CONFIG_KMEMCHECK)
8325 #define _PAGE_NX (_AT(pteval_t, 0))
8326 +#else
8327 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8328 #endif
8329
8330 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8331 @@ -93,6 +93,9 @@
8332 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8333 _PAGE_ACCESSED)
8334
8335 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8336 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8337 +
8338 #define __PAGE_KERNEL_EXEC \
8339 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8340 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8341 @@ -103,8 +106,8 @@
8342 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8343 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8344 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8345 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8346 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8347 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8348 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8349 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8350 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8351 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8352 @@ -163,8 +166,8 @@
8353 * bits are combined, this will alow user to access the high address mapped
8354 * VDSO in the presence of CONFIG_COMPAT_VDSO
8355 */
8356 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8357 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8358 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8359 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8360 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8361 #endif
8362
8363 @@ -278,7 +281,6 @@ typedef struct page *pgtable_t;
8364
8365 extern pteval_t __supported_pte_mask;
8366 extern void set_nx(void);
8367 -extern int nx_enabled;
8368
8369 #define pgprot_writecombine pgprot_writecombine
8370 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8371 diff -urNp linux-2.6.34.1/arch/x86/include/asm/processor.h linux-2.6.34.1/arch/x86/include/asm/processor.h
8372 --- linux-2.6.34.1/arch/x86/include/asm/processor.h 2010-07-05 14:24:10.000000000 -0400
8373 +++ linux-2.6.34.1/arch/x86/include/asm/processor.h 2010-07-07 09:04:46.000000000 -0400
8374 @@ -273,7 +273,7 @@ struct tss_struct {
8375
8376 } ____cacheline_aligned;
8377
8378 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8379 +extern struct tss_struct init_tss[NR_CPUS];
8380
8381 /*
8382 * Save the original ist values for checking stack pointers during debugging
8383 @@ -913,8 +913,15 @@ static inline void spin_lock_prefetch(co
8384 */
8385 #define TASK_SIZE PAGE_OFFSET
8386 #define TASK_SIZE_MAX TASK_SIZE
8387 +
8388 +#ifdef CONFIG_PAX_SEGMEXEC
8389 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8390 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8391 +#else
8392 #define STACK_TOP TASK_SIZE
8393 -#define STACK_TOP_MAX STACK_TOP
8394 +#endif
8395 +
8396 +#define STACK_TOP_MAX TASK_SIZE
8397
8398 #define INIT_THREAD { \
8399 .sp0 = sizeof(init_stack) + (long)&init_stack, \
8400 @@ -931,7 +938,7 @@ static inline void spin_lock_prefetch(co
8401 */
8402 #define INIT_TSS { \
8403 .x86_tss = { \
8404 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8405 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8406 .ss0 = __KERNEL_DS, \
8407 .ss1 = __KERNEL_CS, \
8408 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8409 @@ -942,11 +949,7 @@ static inline void spin_lock_prefetch(co
8410 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8411
8412 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8413 -#define KSTK_TOP(info) \
8414 -({ \
8415 - unsigned long *__ptr = (unsigned long *)(info); \
8416 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8417 -})
8418 +#define KSTK_TOP(info) ((info)->task.thread.sp0)
8419
8420 /*
8421 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8422 @@ -961,7 +964,7 @@ extern unsigned long thread_saved_pc(str
8423 #define task_pt_regs(task) \
8424 ({ \
8425 struct pt_regs *__regs__; \
8426 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8427 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8428 __regs__ - 1; \
8429 })
8430
8431 @@ -971,13 +974,13 @@ extern unsigned long thread_saved_pc(str
8432 /*
8433 * User space process size. 47bits minus one guard page.
8434 */
8435 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8436 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8437
8438 /* This decides where the kernel will search for a free chunk of vm
8439 * space during mmap's.
8440 */
8441 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8442 - 0xc0000000 : 0xFFFFe000)
8443 + 0xc0000000 : 0xFFFFf000)
8444
8445 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8446 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8447 @@ -1014,6 +1017,10 @@ extern void start_thread(struct pt_regs
8448 */
8449 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8450
8451 +#ifdef CONFIG_PAX_SEGMEXEC
8452 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8453 +#endif
8454 +
8455 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8456
8457 /* Get/set a process' ability to use the timestamp counter instruction */
8458 diff -urNp linux-2.6.34.1/arch/x86/include/asm/ptrace.h linux-2.6.34.1/arch/x86/include/asm/ptrace.h
8459 --- linux-2.6.34.1/arch/x86/include/asm/ptrace.h 2010-07-05 14:24:10.000000000 -0400
8460 +++ linux-2.6.34.1/arch/x86/include/asm/ptrace.h 2010-07-07 09:04:46.000000000 -0400
8461 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8462 }
8463
8464 /*
8465 - * user_mode_vm(regs) determines whether a register set came from user mode.
8466 + * user_mode(regs) determines whether a register set came from user mode.
8467 * This is true if V8086 mode was enabled OR if the register set was from
8468 * protected mode with RPL-3 CS value. This tricky test checks that with
8469 * one comparison. Many places in the kernel can bypass this full check
8470 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8471 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8472 + * be used.
8473 */
8474 -static inline int user_mode(struct pt_regs *regs)
8475 +static inline int user_mode_novm(struct pt_regs *regs)
8476 {
8477 #ifdef CONFIG_X86_32
8478 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8479 #else
8480 - return !!(regs->cs & 3);
8481 + return !!(regs->cs & SEGMENT_RPL_MASK);
8482 #endif
8483 }
8484
8485 -static inline int user_mode_vm(struct pt_regs *regs)
8486 +static inline int user_mode(struct pt_regs *regs)
8487 {
8488 #ifdef CONFIG_X86_32
8489 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8490 USER_RPL;
8491 #else
8492 - return user_mode(regs);
8493 + return user_mode_novm(regs);
8494 #endif
8495 }
8496
8497 diff -urNp linux-2.6.34.1/arch/x86/include/asm/reboot.h linux-2.6.34.1/arch/x86/include/asm/reboot.h
8498 --- linux-2.6.34.1/arch/x86/include/asm/reboot.h 2010-07-05 14:24:10.000000000 -0400
8499 +++ linux-2.6.34.1/arch/x86/include/asm/reboot.h 2010-07-07 09:04:46.000000000 -0400
8500 @@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
8501
8502 void native_machine_crash_shutdown(struct pt_regs *regs);
8503 void native_machine_shutdown(void);
8504 -void machine_real_restart(const unsigned char *code, int length);
8505 +void machine_real_restart(const unsigned char *code, unsigned int length);
8506
8507 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
8508 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
8509 diff -urNp linux-2.6.34.1/arch/x86/include/asm/rwsem.h linux-2.6.34.1/arch/x86/include/asm/rwsem.h
8510 --- linux-2.6.34.1/arch/x86/include/asm/rwsem.h 2010-07-05 14:24:10.000000000 -0400
8511 +++ linux-2.6.34.1/arch/x86/include/asm/rwsem.h 2010-07-07 09:04:46.000000000 -0400
8512 @@ -118,10 +118,26 @@ static inline void __down_read(struct rw
8513 {
8514 asm volatile("# beginning down_read\n\t"
8515 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8516 +
8517 +#ifdef CONFIG_PAX_REFCOUNT
8518 +#ifdef CONFIG_X86_32
8519 + "into\n0:\n"
8520 +#else
8521 + "jno 0f\n"
8522 + "int $4\n0:\n"
8523 +#endif
8524 + ".pushsection .fixup,\"ax\"\n"
8525 + "1:\n"
8526 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8527 + "jmp 0b\n"
8528 + ".popsection\n"
8529 + _ASM_EXTABLE(0b, 1b)
8530 +#endif
8531 +
8532 /* adds 0x00000001, returns the old value */
8533 - " jns 1f\n"
8534 + " jns 2f\n"
8535 " call call_rwsem_down_read_failed\n"
8536 - "1:\n\t"
8537 + "2:\n\t"
8538 "# ending down_read\n\t"
8539 : "+m" (sem->count)
8540 : "a" (sem)
8541 @@ -136,13 +152,29 @@ static inline int __down_read_trylock(st
8542 rwsem_count_t result, tmp;
8543 asm volatile("# beginning __down_read_trylock\n\t"
8544 " mov %0,%1\n\t"
8545 - "1:\n\t"
8546 + "2:\n\t"
8547 " mov %1,%2\n\t"
8548 " add %3,%2\n\t"
8549 - " jle 2f\n\t"
8550 +
8551 +#ifdef CONFIG_PAX_REFCOUNT
8552 +#ifdef CONFIG_X86_32
8553 + "into\n0:\n"
8554 +#else
8555 + "jno 0f\n"
8556 + "int $4\n0:\n"
8557 +#endif
8558 + ".pushsection .fixup,\"ax\"\n"
8559 + "1:\n"
8560 + "sub %3,%2\n"
8561 + "jmp 0b\n"
8562 + ".popsection\n"
8563 + _ASM_EXTABLE(0b, 1b)
8564 +#endif
8565 +
8566 + " jle 3f\n\t"
8567 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8568 - " jnz 1b\n\t"
8569 - "2:\n\t"
8570 + " jnz 2b\n\t"
8571 + "3:\n\t"
8572 "# ending __down_read_trylock\n\t"
8573 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
8574 : "i" (RWSEM_ACTIVE_READ_BIAS)
8575 @@ -160,12 +192,28 @@ static inline void __down_write_nested(s
8576 tmp = RWSEM_ACTIVE_WRITE_BIAS;
8577 asm volatile("# beginning down_write\n\t"
8578 LOCK_PREFIX " xadd %1,(%2)\n\t"
8579 +
8580 +#ifdef CONFIG_PAX_REFCOUNT
8581 +#ifdef CONFIG_X86_32
8582 + "into\n0:\n"
8583 +#else
8584 + "jno 0f\n"
8585 + "int $4\n0:\n"
8586 +#endif
8587 + ".pushsection .fixup,\"ax\"\n"
8588 + "1:\n"
8589 + "mov %1,(%2)\n"
8590 + "jmp 0b\n"
8591 + ".popsection\n"
8592 + _ASM_EXTABLE(0b, 1b)
8593 +#endif
8594 +
8595 /* subtract 0x0000ffff, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598 - " jz 1f\n"
8599 + " jz 2f\n"
8600 " call call_rwsem_down_write_failed\n"
8601 - "1:\n"
8602 + "2:\n"
8603 "# ending down_write"
8604 : "+m" (sem->count), "=d" (tmp)
8605 : "a" (sem), "1" (tmp)
8606 @@ -198,10 +246,26 @@ static inline void __up_read(struct rw_s
8607 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
8608 asm volatile("# beginning __up_read\n\t"
8609 LOCK_PREFIX " xadd %1,(%2)\n\t"
8610 +
8611 +#ifdef CONFIG_PAX_REFCOUNT
8612 +#ifdef CONFIG_X86_32
8613 + "into\n0:\n"
8614 +#else
8615 + "jno 0f\n"
8616 + "int $4\n0:\n"
8617 +#endif
8618 + ".pushsection .fixup,\"ax\"\n"
8619 + "1:\n"
8620 + "mov %1,(%2)\n"
8621 + "jmp 0b\n"
8622 + ".popsection\n"
8623 + _ASM_EXTABLE(0b, 1b)
8624 +#endif
8625 +
8626 /* subtracts 1, returns the old value */
8627 - " jns 1f\n\t"
8628 + " jns 2f\n\t"
8629 " call call_rwsem_wake\n"
8630 - "1:\n"
8631 + "2:\n"
8632 "# ending __up_read\n"
8633 : "+m" (sem->count), "=d" (tmp)
8634 : "a" (sem), "1" (tmp)
8635 @@ -216,11 +280,27 @@ static inline void __up_write(struct rw_
8636 rwsem_count_t tmp;
8637 asm volatile("# beginning __up_write\n\t"
8638 LOCK_PREFIX " xadd %1,(%2)\n\t"
8639 +
8640 +#ifdef CONFIG_PAX_REFCOUNT
8641 +#ifdef CONFIG_X86_32
8642 + "into\n0:\n"
8643 +#else
8644 + "jno 0f\n"
8645 + "int $4\n0:\n"
8646 +#endif
8647 + ".pushsection .fixup,\"ax\"\n"
8648 + "1:\n"
8649 + "mov %1,(%2)\n"
8650 + "jmp 0b\n"
8651 + ".popsection\n"
8652 + _ASM_EXTABLE(0b, 1b)
8653 +#endif
8654 +
8655 /* tries to transition
8656 0xffff0001 -> 0x00000000 */
8657 - " jz 1f\n"
8658 + " jz 2f\n"
8659 " call call_rwsem_wake\n"
8660 - "1:\n\t"
8661 + "2:\n\t"
8662 "# ending __up_write\n"
8663 : "+m" (sem->count), "=d" (tmp)
8664 : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
8665 @@ -234,13 +314,29 @@ static inline void __downgrade_write(str
8666 {
8667 asm volatile("# beginning __downgrade_write\n\t"
8668 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8669 +
8670 +#ifdef CONFIG_PAX_REFCOUNT
8671 +#ifdef CONFIG_X86_32
8672 + "into\n0:\n"
8673 +#else
8674 + "jno 0f\n"
8675 + "int $4\n0:\n"
8676 +#endif
8677 + ".pushsection .fixup,\"ax\"\n"
8678 + "1:\n"
8679 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8680 + "jmp 0b\n"
8681 + ".popsection\n"
8682 + _ASM_EXTABLE(0b, 1b)
8683 +#endif
8684 +
8685 /*
8686 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8687 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8688 */
8689 - " jns 1f\n\t"
8690 + " jns 2f\n\t"
8691 " call call_rwsem_downgrade_wake\n"
8692 - "1:\n\t"
8693 + "2:\n\t"
8694 "# ending __downgrade_write\n"
8695 : "+m" (sem->count)
8696 : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
8697 @@ -253,7 +349,23 @@ static inline void __downgrade_write(str
8698 static inline void rwsem_atomic_add(rwsem_count_t delta,
8699 struct rw_semaphore *sem)
8700 {
8701 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8702 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8703 +
8704 +#ifdef CONFIG_PAX_REFCOUNT
8705 +#ifdef CONFIG_X86_32
8706 + "into\n0:\n"
8707 +#else
8708 + "jno 0f\n"
8709 + "int $4\n0:\n"
8710 +#endif
8711 + ".pushsection .fixup,\"ax\"\n"
8712 + "1:\n"
8713 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8714 + "jmp 0b\n"
8715 + ".popsection\n"
8716 + _ASM_EXTABLE(0b, 1b)
8717 +#endif
8718 +
8719 : "+m" (sem->count)
8720 : "er" (delta));
8721 }
8722 @@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic
8723 {
8724 rwsem_count_t tmp = delta;
8725
8726 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8727 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8728 +
8729 +#ifdef CONFIG_PAX_REFCOUNT
8730 +#ifdef CONFIG_X86_32
8731 + "into\n0:\n"
8732 +#else
8733 + "jno 0f\n"
8734 + "int $4\n0:\n"
8735 +#endif
8736 + ".pushsection .fixup,\"ax\"\n"
8737 + "1:\n"
8738 + "mov %0,%1\n"
8739 + "jmp 0b\n"
8740 + ".popsection\n"
8741 + _ASM_EXTABLE(0b, 1b)
8742 +#endif
8743 +
8744 : "+r" (tmp), "+m" (sem->count)
8745 : : "memory");
8746
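The PAX_REFCOUNT blocks added throughout rwsem.h follow one pattern: immediately after the locked arithmetic, "into" (32-bit) or "jno 0f; int $4" (64-bit) raises an overflow trap if the signed result wrapped, and the .fixup branch undoes the update before the handler runs. The C-level analogy below expresses the same intent with the GCC/Clang __builtin_add_overflow() builtin; the function and the abort policy are illustrative, not what the kernel handler actually does.

/* Detect signed wrap-around right at the arithmetic and refuse to let the
 * corrupted count become visible. */
#include <stdio.h>
#include <stdlib.h>

static long checked_add(long count, long delta)
{
        long result;

        if (__builtin_add_overflow(count, delta, &result)) {
                /* corresponds to the trap + .fixup undo in the asm above */
                fprintf(stderr, "refcount overflow detected\n");
                exit(EXIT_FAILURE);
        }
        return result;
}

int main(void)
{
        long sem_count = 0;

        sem_count = checked_add(sem_count, 1);      /* down_read-style bias */
        printf("count = %ld\n", sem_count);
        return 0;
}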
8747 diff -urNp linux-2.6.34.1/arch/x86/include/asm/segment.h linux-2.6.34.1/arch/x86/include/asm/segment.h
8748 --- linux-2.6.34.1/arch/x86/include/asm/segment.h 2010-07-05 14:24:10.000000000 -0400
8749 +++ linux-2.6.34.1/arch/x86/include/asm/segment.h 2010-07-07 09:04:46.000000000 -0400
8750 @@ -62,8 +62,8 @@
8751 * 26 - ESPFIX small SS
8752 * 27 - per-cpu [ offset to per-cpu data area ]
8753 * 28 - stack_canary-20 [ for stack protector ]
8754 - * 29 - unused
8755 - * 30 - unused
8756 + * 29 - PCI BIOS CS
8757 + * 30 - PCI BIOS DS
8758 * 31 - TSS for double fault handler
8759 */
8760 #define GDT_ENTRY_TLS_MIN 6
8761 @@ -77,6 +77,8 @@
8762
8763 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
8764
8765 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8766 +
8767 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
8768
8769 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
8770 @@ -88,7 +90,7 @@
8771 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
8772 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
8773
8774 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
8775 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
8776 #ifdef CONFIG_SMP
8777 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
8778 #else
8779 @@ -102,6 +104,12 @@
8780 #define __KERNEL_STACK_CANARY 0
8781 #endif
8782
8783 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
8784 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8785 +
8786 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
8787 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8788 +
8789 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8790
8791 /*
8792 @@ -139,7 +147,7 @@
8793 */
8794
8795 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8796 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8797 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8798
8799
8800 #else
8801 @@ -163,6 +171,8 @@
8802 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8803 #define __USER32_DS __USER_DS
8804
8805 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8806 +
8807 #define GDT_ENTRY_TSS 8 /* needs two entries */
8808 #define GDT_ENTRY_LDT 10 /* needs two entries */
8809 #define GDT_ENTRY_TLS_MIN 12
8810 @@ -183,6 +193,7 @@
8811 #endif
8812
8813 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
8814 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
8815 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
8816 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
8817 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
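The new GDT entries above (KERNEXEC kernel CS, PCI BIOS CS/DS) all turn into segment selectors the same way: descriptor index times 8, with the requested privilege level in the low two bits for user segments. The snippet below just evaluates that arithmetic; the index values are illustrative placeholders rather than the exact configuration-dependent ones.

/* selector = index * 8 (+ RPL for user segments) */
#include <stdio.h>

#define GDT_ENTRY_KERNEL_CS           12             /* illustrative indices */
#define GDT_ENTRY_KERNEXEC_KERNEL_CS   4
#define GDT_ENTRY_DEFAULT_USER_CS     14

#define __KERNEL_CS          (GDT_ENTRY_KERNEL_CS * 8)
#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
#define __USER_CS            (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)   /* RPL 3 */

int main(void)
{
        printf("__KERNEL_CS          = %#x\n", __KERNEL_CS);
        printf("__KERNEXEC_KERNEL_CS = %#x\n", __KERNEXEC_KERNEL_CS);
        printf("__USER_CS            = %#x (RPL %d)\n", __USER_CS, __USER_CS & 3);
        return 0;
}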
8818 diff -urNp linux-2.6.34.1/arch/x86/include/asm/spinlock.h linux-2.6.34.1/arch/x86/include/asm/spinlock.h
8819 --- linux-2.6.34.1/arch/x86/include/asm/spinlock.h 2010-07-05 14:24:10.000000000 -0400
8820 +++ linux-2.6.34.1/arch/x86/include/asm/spinlock.h 2010-07-07 09:04:46.000000000 -0400
8821 @@ -249,18 +249,50 @@ static inline int arch_write_can_lock(ar
8822 static inline void arch_read_lock(arch_rwlock_t *rw)
8823 {
8824 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8825 - "jns 1f\n"
8826 - "call __read_lock_failed\n\t"
8827 +
8828 +#ifdef CONFIG_PAX_REFCOUNT
8829 +#ifdef CONFIG_X86_32
8830 + "into\n0:\n"
8831 +#else
8832 + "jno 0f\n"
8833 + "int $4\n0:\n"
8834 +#endif
8835 + ".pushsection .fixup,\"ax\"\n"
8836 "1:\n"
8837 + LOCK_PREFIX " addl $1,(%0)\n"
8838 + "jmp 0b\n"
8839 + ".popsection\n"
8840 + _ASM_EXTABLE(0b, 1b)
8841 +#endif
8842 +
8843 + "jns 2f\n"
8844 + "call __read_lock_failed\n\t"
8845 + "2:\n"
8846 ::LOCK_PTR_REG (rw) : "memory");
8847 }
8848
8849 static inline void arch_write_lock(arch_rwlock_t *rw)
8850 {
8851 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8852 - "jz 1f\n"
8853 - "call __write_lock_failed\n\t"
8854 +
8855 +#ifdef CONFIG_PAX_REFCOUNT
8856 +#ifdef CONFIG_X86_32
8857 + "into\n0:\n"
8858 +#else
8859 + "jno 0f\n"
8860 + "int $4\n0:\n"
8861 +#endif
8862 + ".pushsection .fixup,\"ax\"\n"
8863 "1:\n"
8864 + LOCK_PREFIX " addl %1,(%0)\n"
8865 + "jmp 0b\n"
8866 + ".popsection\n"
8867 + _ASM_EXTABLE(0b, 1b)
8868 +#endif
8869 +
8870 + "jz 2f\n"
8871 + "call __write_lock_failed\n\t"
8872 + "2:\n"
8873 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
8874 }
8875
8876 @@ -286,12 +318,45 @@ static inline int arch_write_trylock(arc
8877
8878 static inline void arch_read_unlock(arch_rwlock_t *rw)
8879 {
8880 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8881 + asm volatile(LOCK_PREFIX "incl %0\n"
8882 +
8883 +#ifdef CONFIG_PAX_REFCOUNT
8884 +#ifdef CONFIG_X86_32
8885 + "into\n0:\n"
8886 +#else
8887 + "jno 0f\n"
8888 + "int $4\n0:\n"
8889 +#endif
8890 + ".pushsection .fixup,\"ax\"\n"
8891 + "1:\n"
8892 + LOCK_PREFIX "decl %0\n"
8893 + "jmp 0b\n"
8894 + ".popsection\n"
8895 + _ASM_EXTABLE(0b, 1b)
8896 +#endif
8897 +
8898 + :"+m" (rw->lock) : : "memory");
8899 }
8900
8901 static inline void arch_write_unlock(arch_rwlock_t *rw)
8902 {
8903 - asm volatile(LOCK_PREFIX "addl %1, %0"
8904 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8905 +
8906 +#ifdef CONFIG_PAX_REFCOUNT
8907 +#ifdef CONFIG_X86_32
8908 + "into\n0:\n"
8909 +#else
8910 + "jno 0f\n"
8911 + "int $4\n0:\n"
8912 +#endif
8913 + ".pushsection .fixup,\"ax\"\n"
8914 + "1:\n"
8915 + LOCK_PREFIX "subl %1,%0\n"
8916 + "jmp 0b\n"
8917 + ".popsection\n"
8918 + _ASM_EXTABLE(0b, 1b)
8919 +#endif
8920 +
8921 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8922 }
8923
8924 diff -urNp linux-2.6.34.1/arch/x86/include/asm/system.h linux-2.6.34.1/arch/x86/include/asm/system.h
8925 --- linux-2.6.34.1/arch/x86/include/asm/system.h 2010-07-05 14:24:10.000000000 -0400
8926 +++ linux-2.6.34.1/arch/x86/include/asm/system.h 2010-07-07 09:04:46.000000000 -0400
8927 @@ -202,7 +202,7 @@ static inline unsigned long get_limit(un
8928 {
8929 unsigned long __limit;
8930 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8931 - return __limit + 1;
8932 + return __limit;
8933 }
8934
8935 static inline void native_clts(void)
8936 @@ -342,7 +342,7 @@ void enable_hlt(void);
8937
8938 void cpu_idle_wait(void);
8939
8940 -extern unsigned long arch_align_stack(unsigned long sp);
8941 +#define arch_align_stack(x) ((x) & ~0xfUL)
8942 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8943
8944 void default_idle(void);
8945 diff -urNp linux-2.6.34.1/arch/x86/include/asm/uaccess.h linux-2.6.34.1/arch/x86/include/asm/uaccess.h
8946 --- linux-2.6.34.1/arch/x86/include/asm/uaccess.h 2010-07-05 14:24:10.000000000 -0400
8947 +++ linux-2.6.34.1/arch/x86/include/asm/uaccess.h 2010-07-07 09:04:46.000000000 -0400
8948 @@ -8,12 +8,15 @@
8949 #include <linux/thread_info.h>
8950 #include <linux/prefetch.h>
8951 #include <linux/string.h>
8952 +#include <linux/sched.h>
8953 #include <asm/asm.h>
8954 #include <asm/page.h>
8955
8956 #define VERIFY_READ 0
8957 #define VERIFY_WRITE 1
8958
8959 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
8960 +
8961 /*
8962 * The fs value determines whether argument validity checking should be
8963 * performed or not. If get_fs() == USER_DS, checking is performed, with
8964 @@ -29,7 +32,12 @@
8965
8966 #define get_ds() (KERNEL_DS)
8967 #define get_fs() (current_thread_info()->addr_limit)
8968 +#ifdef CONFIG_X86_32
8969 +void __set_fs(mm_segment_t x, int cpu);
8970 +void set_fs(mm_segment_t x);
8971 +#else
8972 #define set_fs(x) (current_thread_info()->addr_limit = (x))
8973 +#endif
8974
8975 #define segment_eq(a, b) ((a).seg == (b).seg)
8976
8977 @@ -77,7 +85,33 @@
8978 * checks that the pointer is in the user space range - after calling
8979 * this function, memory access functions may still return -EFAULT.
8980 */
8981 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
8982 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
8983 +#define access_ok(type, addr, size) \
8984 +({ \
8985 + long __size = size; \
8986 + unsigned long __addr = (unsigned long)addr; \
8987 + unsigned long __addr_ao = __addr & PAGE_MASK; \
8988 + unsigned long __end_ao = __addr + __size - 1; \
8989 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
8990 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
8991 + while(__addr_ao <= __end_ao) { \
8992 + char __c_ao; \
8993 + __addr_ao += PAGE_SIZE; \
8994 + if (__size > PAGE_SIZE) \
8995 + cond_resched(); \
8996 + if (__get_user(__c_ao, (char __user *)__addr)) \
8997 + break; \
8998 + if (type != VERIFY_WRITE) { \
8999 + __addr = __addr_ao; \
9000 + continue; \
9001 + } \
9002 + if (__put_user(__c_ao, (char __user *)__addr)) \
9003 + break; \
9004 + __addr = __addr_ao; \
9005 + } \
9006 + } \
9007 + __ret_ao; \
9008 +})
9009
9010 /*
9011 * The exception table consists of pairs of addresses: the first is the
9012 @@ -183,13 +217,21 @@ extern int __get_user_bad(void);
9013 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9014 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9015
9016 -
9017 +#ifdef CONFIG_X86_32
9018 +#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
9019 +#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
9020 +#else
9021 +#define _ASM_LOAD_USER_DS(ds)
9022 +#define _ASM_LOAD_KERNEL_DS
9023 +#endif
9024
9025 #ifdef CONFIG_X86_32
9026 #define __put_user_asm_u64(x, addr, err, errret) \
9027 - asm volatile("1: movl %%eax,0(%2)\n" \
9028 - "2: movl %%edx,4(%2)\n" \
9029 + asm volatile(_ASM_LOAD_USER_DS(5) \
9030 + "1: movl %%eax,%%ds:0(%2)\n" \
9031 + "2: movl %%edx,%%ds:4(%2)\n" \
9032 "3:\n" \
9033 + _ASM_LOAD_KERNEL_DS \
9034 ".section .fixup,\"ax\"\n" \
9035 "4: movl %3,%0\n" \
9036 " jmp 3b\n" \
9037 @@ -197,15 +239,18 @@ extern int __get_user_bad(void);
9038 _ASM_EXTABLE(1b, 4b) \
9039 _ASM_EXTABLE(2b, 4b) \
9040 : "=r" (err) \
9041 - : "A" (x), "r" (addr), "i" (errret), "0" (err))
9042 + : "A" (x), "r" (addr), "i" (errret), "0" (err), \
9043 + "r"(__USER_DS))
9044
9045 #define __put_user_asm_ex_u64(x, addr) \
9046 - asm volatile("1: movl %%eax,0(%1)\n" \
9047 - "2: movl %%edx,4(%1)\n" \
9048 + asm volatile(_ASM_LOAD_USER_DS(2) \
9049 + "1: movl %%eax,%%ds:0(%1)\n" \
9050 + "2: movl %%edx,%%ds:4(%1)\n" \
9051 "3:\n" \
9052 + _ASM_LOAD_KERNEL_DS \
9053 _ASM_EXTABLE(1b, 2b - 1b) \
9054 _ASM_EXTABLE(2b, 3b - 2b) \
9055 - : : "A" (x), "r" (addr))
9056 + : : "A" (x), "r" (addr), "r"(__USER_DS))
9057
9058 #define __put_user_x8(x, ptr, __ret_pu) \
9059 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
9060 @@ -374,16 +419,18 @@ do { \
9061 } while (0)
9062
9063 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9064 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9065 + asm volatile(_ASM_LOAD_USER_DS(5) \
9066 + "1: mov"itype" %%ds:%2,%"rtype"1\n" \
9067 "2:\n" \
9068 + _ASM_LOAD_KERNEL_DS \
9069 ".section .fixup,\"ax\"\n" \
9070 "3: mov %3,%0\n" \
9071 " xor"itype" %"rtype"1,%"rtype"1\n" \
9072 " jmp 2b\n" \
9073 ".previous\n" \
9074 _ASM_EXTABLE(1b, 3b) \
9075 - : "=r" (err), ltype(x) \
9076 - : "m" (__m(addr)), "i" (errret), "0" (err))
9077 + : "=r" (err), ltype (x) \
9078 + : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
9079
9080 #define __get_user_size_ex(x, ptr, size) \
9081 do { \
9082 @@ -407,10 +454,12 @@ do { \
9083 } while (0)
9084
9085 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9086 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9087 + asm volatile(_ASM_LOAD_USER_DS(2) \
9088 + "1: mov"itype" %%ds:%1,%"rtype"0\n" \
9089 "2:\n" \
9090 + _ASM_LOAD_KERNEL_DS \
9091 _ASM_EXTABLE(1b, 2b - 1b) \
9092 - : ltype(x) : "m" (__m(addr)))
9093 + : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
9094
9095 #define __put_user_nocheck(x, ptr, size) \
9096 ({ \
9097 @@ -424,13 +473,24 @@ do { \
9098 int __gu_err; \
9099 unsigned long __gu_val; \
9100 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9101 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9102 + (x) = (__typeof__(*(ptr)))__gu_val; \
9103 __gu_err; \
9104 })
9105
9106 /* FIXME: this hack is definitely wrong -AK */
9107 struct __large_struct { unsigned long buf[100]; };
9108 -#define __m(x) (*(struct __large_struct __user *)(x))
9109 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9110 +#define ____m(x) \
9111 +({ \
9112 + unsigned long ____x = (unsigned long)(x); \
9113 + if (____x < PAX_USER_SHADOW_BASE) \
9114 + ____x += PAX_USER_SHADOW_BASE; \
9115 + (void __user *)____x; \
9116 +})
9117 +#else
9118 +#define ____m(x) (x)
9119 +#endif
9120 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9121
9122 /*
9123 * Tell gcc we read from memory instead of writing: this is because
9124 @@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
9125 * aliasing issues.
9126 */
9127 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9128 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9129 + asm volatile(_ASM_LOAD_USER_DS(5) \
9130 + "1: mov"itype" %"rtype"1,%%ds:%2\n" \
9131 "2:\n" \
9132 + _ASM_LOAD_KERNEL_DS \
9133 ".section .fixup,\"ax\"\n" \
9134 "3: mov %3,%0\n" \
9135 " jmp 2b\n" \
9136 ".previous\n" \
9137 _ASM_EXTABLE(1b, 3b) \
9138 : "=r"(err) \
9139 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9140 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
9141 + "r"(__USER_DS))
9142
9143 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9144 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9145 + asm volatile(_ASM_LOAD_USER_DS(2) \
9146 + "1: mov"itype" %"rtype"0,%%ds:%1\n" \
9147 "2:\n" \
9148 + _ASM_LOAD_KERNEL_DS \
9149 _ASM_EXTABLE(1b, 2b - 1b) \
9150 - : : ltype(x), "m" (__m(addr)))
9151 + : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
9152
9153 /*
9154 * uaccess_try and catch
9155 @@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
9156 #define get_user_ex(x, ptr) do { \
9157 unsigned long __gue_val; \
9158 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9159 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9160 + (x) = (__typeof__(*(ptr)))__gue_val; \
9161 } while (0)
9162
9163 #ifdef CONFIG_X86_WP_WORKS_OK
9164 @@ -567,6 +632,7 @@ extern struct movsl_mask {
9165
9166 #define ARCH_HAS_NOCACHE_UACCESS 1
9167
9168 +#define ARCH_HAS_SORT_EXTABLE
9169 #ifdef CONFIG_X86_32
9170 # include "uaccess_32.h"
9171 #else
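The ____m() macro introduced above is the UDEREF half of the uaccess changes on amd64: userland is also mapped at a high shadow offset, and the accessors rebase any address below PAX_USER_SHADOW_BASE into that shadow region before dereferencing it, so a raw userland pointer is never touched directly from kernel context. The sketch below models only the rebasing arithmetic; the shadow base of 1UL << 47 and the sample addresses are assumptions for illustration.

/* Rebase low (userland) addresses into the shadow mapping, leave already
 * high (kernel) addresses alone. */
#include <stdio.h>

#define PAX_USER_SHADOW_BASE  (1UL << 47)       /* illustrative */

static unsigned long shadow_rebase(unsigned long addr)
{
        if (addr < PAX_USER_SHADOW_BASE)
                addr += PAX_USER_SHADOW_BASE;   /* ____m() equivalent */
        return addr;
}

int main(void)
{
        unsigned long user_ptr   = 0x00007fff12345000UL;     /* typical user VA */
        unsigned long kernel_ptr = 0xffff880000000000UL;     /* already high */

        printf("%#lx -> %#lx\n", user_ptr, shadow_rebase(user_ptr));
        printf("%#lx -> %#lx\n", kernel_ptr, shadow_rebase(kernel_ptr));
        return 0;
}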
9172 diff -urNp linux-2.6.34.1/arch/x86/include/asm/uaccess_32.h linux-2.6.34.1/arch/x86/include/asm/uaccess_32.h
9173 --- linux-2.6.34.1/arch/x86/include/asm/uaccess_32.h 2010-07-05 14:24:10.000000000 -0400
9174 +++ linux-2.6.34.1/arch/x86/include/asm/uaccess_32.h 2010-07-07 09:04:46.000000000 -0400
9175 @@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
9176 static __always_inline unsigned long __must_check
9177 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9178 {
9179 + if ((long)n < 0)
9180 + return n;
9181 +
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185 @@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
9186 return ret;
9187 }
9188 }
9189 + if (!__builtin_constant_p(n))
9190 + check_object_size(from, n, true);
9191 return __copy_to_user_ll(to, from, n);
9192 }
9193
9194 @@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
9195 static __always_inline unsigned long
9196 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9197 {
9198 + if ((long)n < 0)
9199 + return n;
9200 +
9201 /* Avoid zeroing the tail if the copy fails..
9202 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9203 * but as the zeroing behaviour is only significant when n is not
9204 @@ -138,6 +146,10 @@ static __always_inline unsigned long
9205 __copy_from_user(void *to, const void __user *from, unsigned long n)
9206 {
9207 might_fault();
9208 +
9209 + if ((long)n < 0)
9210 + return n;
9211 +
9212 if (__builtin_constant_p(n)) {
9213 unsigned long ret;
9214
9215 @@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
9216 return ret;
9217 }
9218 }
9219 + if (!__builtin_constant_p(n))
9220 + check_object_size(to, n, false);
9221 return __copy_from_user_ll(to, from, n);
9222 }
9223
9224 @@ -160,6 +174,10 @@ static __always_inline unsigned long __c
9225 const void __user *from, unsigned long n)
9226 {
9227 might_fault();
9228 +
9229 + if ((long)n < 0)
9230 + return n;
9231 +
9232 if (__builtin_constant_p(n)) {
9233 unsigned long ret;
9234
9235 @@ -182,15 +200,19 @@ static __always_inline unsigned long
9236 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9237 unsigned long n)
9238 {
9239 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9240 -}
9241 + if ((long)n < 0)
9242 + return n;
9243
9244 -unsigned long __must_check copy_to_user(void __user *to,
9245 - const void *from, unsigned long n);
9246 -unsigned long __must_check _copy_from_user(void *to,
9247 - const void __user *from,
9248 - unsigned long n);
9249 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9250 +}
9251
9252 +extern void copy_to_user_overflow(void)
9253 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9254 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9255 +#else
9256 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9257 +#endif
9258 +;
9259
9260 extern void copy_from_user_overflow(void)
9261 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9262 @@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void
9263 #endif
9264 ;
9265
9266 -static inline unsigned long __must_check copy_from_user(void *to,
9267 - const void __user *from,
9268 - unsigned long n)
9269 +/**
9270 + * copy_to_user: - Copy a block of data into user space.
9271 + * @to: Destination address, in user space.
9272 + * @from: Source address, in kernel space.
9273 + * @n: Number of bytes to copy.
9274 + *
9275 + * Context: User context only. This function may sleep.
9276 + *
9277 + * Copy data from kernel space to user space.
9278 + *
9279 + * Returns number of bytes that could not be copied.
9280 + * On success, this will be zero.
9281 + */
9282 +static inline unsigned long __must_check
9283 +copy_to_user(void __user *to, const void *from, unsigned long n)
9284 +{
9285 + int sz = __compiletime_object_size(from);
9286 +
9287 + if (unlikely(sz != -1 && sz < n))
9288 + copy_to_user_overflow();
9289 + else if (access_ok(VERIFY_WRITE, to, n))
9290 + n = __copy_to_user(to, from, n);
9291 + return n;
9292 +}
9293 +
9294 +/**
9295 + * copy_from_user: - Copy a block of data from user space.
9296 + * @to: Destination address, in kernel space.
9297 + * @from: Source address, in user space.
9298 + * @n: Number of bytes to copy.
9299 + *
9300 + * Context: User context only. This function may sleep.
9301 + *
9302 + * Copy data from user space to kernel space.
9303 + *
9304 + * Returns number of bytes that could not be copied.
9305 + * On success, this will be zero.
9306 + *
9307 + * If some data could not be copied, this function will pad the copied
9308 + * data to the requested size using zero bytes.
9309 + */
9310 +static inline unsigned long __must_check
9311 +copy_from_user(void *to, const void __user *from, unsigned long n)
9312 {
9313 int sz = __compiletime_object_size(to);
9314
9315 - if (likely(sz == -1 || sz >= n))
9316 - n = _copy_from_user(to, from, n);
9317 - else
9318 + if (unlikely(sz != -1 && sz < n))
9319 copy_from_user_overflow();
9320 -
9321 + else if (access_ok(VERIFY_READ, from, n))
9322 + n = __copy_from_user(to, from, n);
9323 + else if ((long)n > 0) {
9324 + if (!__builtin_constant_p(n))
9325 + check_object_size(to, n, false);
9326 + memset(to, 0, n);
9327 + }
9328 return n;
9329 }
9330
9331 diff -urNp linux-2.6.34.1/arch/x86/include/asm/uaccess_64.h linux-2.6.34.1/arch/x86/include/asm/uaccess_64.h
9332 --- linux-2.6.34.1/arch/x86/include/asm/uaccess_64.h 2010-07-05 14:24:10.000000000 -0400
9333 +++ linux-2.6.34.1/arch/x86/include/asm/uaccess_64.h 2010-07-07 09:04:46.000000000 -0400
9334 @@ -11,6 +11,11 @@
9335 #include <asm/alternative.h>
9336 #include <asm/cpufeature.h>
9337 #include <asm/page.h>
9338 +#include <asm/pgtable.h>
9339 +
9340 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9341 +
9342 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9343
9344 /*
9345 * Copy To/From Userspace
9346 @@ -37,26 +42,26 @@ copy_user_generic(void *to, const void *
9347 return ret;
9348 }
9349
9350 -__must_check unsigned long
9351 -_copy_to_user(void __user *to, const void *from, unsigned len);
9352 -__must_check unsigned long
9353 -_copy_from_user(void *to, const void __user *from, unsigned len);
9354 +static __always_inline __must_check unsigned long
9355 +__copy_to_user(void __user *to, const void *from, unsigned len);
9356 +static __always_inline __must_check unsigned long
9357 +__copy_from_user(void *to, const void __user *from, unsigned len);
9358 __must_check unsigned long
9359 copy_in_user(void __user *to, const void __user *from, unsigned len);
9360
9361 static inline unsigned long __must_check copy_from_user(void *to,
9362 const void __user *from,
9363 - unsigned long n)
9364 + unsigned n)
9365 {
9366 - int sz = __compiletime_object_size(to);
9367 -
9368 might_fault();
9369 - if (likely(sz == -1 || sz >= n))
9370 - n = _copy_from_user(to, from, n);
9371 -#ifdef CONFIG_DEBUG_VM
9372 - else
9373 - WARN(1, "Buffer overflow detected!\n");
9374 -#endif
9375 +
9376 + if (access_ok(VERIFY_READ, from, n))
9377 + n = __copy_from_user(to, from, n);
9378 + else if ((int)n > 0) {
9379 + if (!__builtin_constant_p(n))
9380 + check_object_size(to, n, false);
9381 + memset(to, 0, n);
9382 + }
9383 return n;
9384 }
9385
9386 @@ -65,17 +70,35 @@ int copy_to_user(void __user *dst, const
9387 {
9388 might_fault();
9389
9390 - return _copy_to_user(dst, src, size);
9391 + if (access_ok(VERIFY_WRITE, dst, size))
9392 + size = __copy_to_user(dst, src, size);
9393 + return size;
9394 }
9395
9396 static __always_inline __must_check
9397 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9398 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9399 {
9400 - int ret = 0;
9401 + int sz = __compiletime_object_size(dst);
9402 + unsigned ret = 0;
9403
9404 might_fault();
9405 - if (!__builtin_constant_p(size))
9406 +
9407 + if ((int)size < 0)
9408 + return size;
9409 +
9410 + if (unlikely(sz != -1 && sz < size)) {
9411 +#ifdef CONFIG_DEBUG_VM
9412 + WARN(1, "Buffer overflow detected!\n");
9413 +#endif
9414 + return size;
9415 + }
9416 +
9417 + if (!__builtin_constant_p(size)) {
9418 + check_object_size(dst, size, false);
9419 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9420 + src += PAX_USER_SHADOW_BASE;
9421 return copy_user_generic(dst, (__force void *)src, size);
9422 + }
9423 switch (size) {
9424 case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9425 ret, "b", "b", "=q", 1);
9426 @@ -108,18 +131,36 @@ int __copy_from_user(void *dst, const vo
9427 ret, "q", "", "=r", 8);
9428 return ret;
9429 default:
9430 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9431 + src += PAX_USER_SHADOW_BASE;
9432 return copy_user_generic(dst, (__force void *)src, size);
9433 }
9434 }
9435
9436 static __always_inline __must_check
9437 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9438 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9439 {
9440 - int ret = 0;
9441 + int sz = __compiletime_object_size(src);
9442 + unsigned ret = 0;
9443
9444 might_fault();
9445 - if (!__builtin_constant_p(size))
9446 +
9447 + if ((int)size < 0)
9448 + return size;
9449 +
9450 + if (unlikely(sz != -1 && sz < size)) {
9451 +#ifdef CONFIG_DEBUG_VM
9452 + WARN(1, "Buffer overflow detected!\n");
9453 +#endif
9454 + return size;
9455 + }
9456 +
9457 + if (!__builtin_constant_p(size)) {
9458 + check_object_size(src, size, true);
9459 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9460 + dst += PAX_USER_SHADOW_BASE;
9461 return copy_user_generic((__force void *)dst, src, size);
9462 + }
9463 switch (size) {
9464 case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9465 ret, "b", "b", "iq", 1);
9466 @@ -152,19 +193,30 @@ int __copy_to_user(void __user *dst, con
9467 ret, "q", "", "er", 8);
9468 return ret;
9469 default:
9470 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9471 + dst += PAX_USER_SHADOW_BASE;
9472 return copy_user_generic((__force void *)dst, src, size);
9473 }
9474 }
9475
9476 static __always_inline __must_check
9477 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9478 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9479 {
9480 - int ret = 0;
9481 + unsigned ret = 0;
9482
9483 might_fault();
9484 - if (!__builtin_constant_p(size))
9485 +
9486 + if ((int)size < 0)
9487 + return size;
9488 +
9489 + if (!__builtin_constant_p(size)) {
9490 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9491 + src += PAX_USER_SHADOW_BASE;
9492 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9493 + dst += PAX_USER_SHADOW_BASE;
9494 return copy_user_generic((__force void *)dst,
9495 (__force void *)src, size);
9496 + }
9497 switch (size) {
9498 case 1: {
9499 u8 tmp;
9500 @@ -204,6 +256,10 @@ int __copy_in_user(void __user *dst, con
9501 return ret;
9502 }
9503 default:
9504 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9505 + src += PAX_USER_SHADOW_BASE;
9506 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9507 + dst += PAX_USER_SHADOW_BASE;
9508 return copy_user_generic((__force void *)dst,
9509 (__force void *)src, size);
9510 }
9511 @@ -222,33 +278,45 @@ __must_check unsigned long __clear_user(
9512 static __must_check __always_inline int
9513 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9514 {
9515 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9516 + src += PAX_USER_SHADOW_BASE;
9517 return copy_user_generic(dst, (__force const void *)src, size);
9518 }
9519
9520 -static __must_check __always_inline int
9521 +static __must_check __always_inline unsigned long
9522 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9523 {
9524 + if ((int)size < 0)
9525 + return size;
9526 +
9527 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9528 + dst += PAX_USER_SHADOW_BASE;
9529 return copy_user_generic((__force void *)dst, src, size);
9530 }
9531
9532 -extern long __copy_user_nocache(void *dst, const void __user *src,
9533 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9534 unsigned size, int zerorest);
9535
9536 -static inline int
9537 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9538 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9539 {
9540 might_sleep();
9541 +
9542 + if ((int)size < 0)
9543 + return size;
9544 +
9545 return __copy_user_nocache(dst, src, size, 1);
9546 }
9547
9548 -static inline int
9549 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9550 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9551 unsigned size)
9552 {
9553 + if ((int)size < 0)
9554 + return size;
9555 +
9556 return __copy_user_nocache(dst, src, size, 0);
9557 }
9558
9559 -unsigned long
9560 +extern unsigned long
9561 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9562
9563 #endif /* _ASM_X86_UACCESS_64_H */
9564 diff -urNp linux-2.6.34.1/arch/x86/include/asm/vgtod.h linux-2.6.34.1/arch/x86/include/asm/vgtod.h
9565 --- linux-2.6.34.1/arch/x86/include/asm/vgtod.h 2010-07-05 14:24:10.000000000 -0400
9566 +++ linux-2.6.34.1/arch/x86/include/asm/vgtod.h 2010-07-07 09:04:46.000000000 -0400
9567 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9568 int sysctl_enabled;
9569 struct timezone sys_tz;
9570 struct { /* extract of a clocksource struct */
9571 + char name[8];
9572 cycle_t (*vread)(void);
9573 cycle_t cycle_last;
9574 cycle_t mask;
9575 diff -urNp linux-2.6.34.1/arch/x86/include/asm/vmi.h linux-2.6.34.1/arch/x86/include/asm/vmi.h
9576 --- linux-2.6.34.1/arch/x86/include/asm/vmi.h 2010-07-05 14:24:10.000000000 -0400
9577 +++ linux-2.6.34.1/arch/x86/include/asm/vmi.h 2010-07-07 09:04:46.000000000 -0400
9578 @@ -191,6 +191,7 @@ struct vrom_header {
9579 u8 reserved[96]; /* Reserved for headers */
9580 char vmi_init[8]; /* VMI_Init jump point */
9581 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
9582 + char rom_data[8048]; /* rest of the option ROM */
9583 } __attribute__((packed));
9584
9585 struct pnp_header {
9586 diff -urNp linux-2.6.34.1/arch/x86/include/asm/vsyscall.h linux-2.6.34.1/arch/x86/include/asm/vsyscall.h
9587 --- linux-2.6.34.1/arch/x86/include/asm/vsyscall.h 2010-07-05 14:24:10.000000000 -0400
9588 +++ linux-2.6.34.1/arch/x86/include/asm/vsyscall.h 2010-07-07 09:04:46.000000000 -0400
9589 @@ -15,9 +15,10 @@ enum vsyscall_num {
9590
9591 #ifdef __KERNEL__
9592 #include <linux/seqlock.h>
9593 +#include <linux/getcpu.h>
9594 +#include <linux/time.h>
9595
9596 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9597 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9598
9599 /* Definitions for CONFIG_GENERIC_TIME definitions */
9600 #define __section_vsyscall_gtod_data __attribute__ \
9601 @@ -31,7 +32,6 @@ enum vsyscall_num {
9602 #define VGETCPU_LSL 2
9603
9604 extern int __vgetcpu_mode;
9605 -extern volatile unsigned long __jiffies;
9606
9607 /* kernel space (writeable) */
9608 extern int vgetcpu_mode;
9609 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9610
9611 extern void map_vsyscall(void);
9612
9613 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9614 +extern time_t vtime(time_t *t);
9615 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9616 #endif /* __KERNEL__ */
9617
9618 #endif /* _ASM_X86_VSYSCALL_H */
9619 diff -urNp linux-2.6.34.1/arch/x86/include/asm/xsave.h linux-2.6.34.1/arch/x86/include/asm/xsave.h
9620 --- linux-2.6.34.1/arch/x86/include/asm/xsave.h 2010-07-05 14:24:10.000000000 -0400
9621 +++ linux-2.6.34.1/arch/x86/include/asm/xsave.h 2010-07-07 09:04:46.000000000 -0400
9622 @@ -58,6 +58,12 @@ static inline int xrstor_checking(struct
9623 static inline int xsave_user(struct xsave_struct __user *buf)
9624 {
9625 int err;
9626 +
9627 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9628 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
9629 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
9630 +#endif
9631 +
9632 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
9633 "2:\n"
9634 ".section .fixup,\"ax\"\n"
9635 @@ -84,6 +90,11 @@ static inline int xrestore_user(struct x
9636 u32 lmask = mask;
9637 u32 hmask = mask >> 32;
9638
9639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9640 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
9641 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
9642 +#endif
9643 +
9644 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
9645 "2:\n"
9646 ".section .fixup,\"ax\"\n"
9647 diff -urNp linux-2.6.34.1/arch/x86/kernel/acpi/boot.c linux-2.6.34.1/arch/x86/kernel/acpi/boot.c
9648 --- linux-2.6.34.1/arch/x86/kernel/acpi/boot.c 2010-07-05 14:24:10.000000000 -0400
9649 +++ linux-2.6.34.1/arch/x86/kernel/acpi/boot.c 2010-07-07 09:04:46.000000000 -0400
9650 @@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata a
9651 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
9652 },
9653 },
9654 - {}
9655 + { NULL, NULL, {{0, {0}}}, NULL}
9656 };
9657
9658 /*
9659 diff -urNp linux-2.6.34.1/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.34.1/arch/x86/kernel/acpi/realmode/wakeup.S
9660 --- linux-2.6.34.1/arch/x86/kernel/acpi/realmode/wakeup.S 2010-07-05 14:24:10.000000000 -0400
9661 +++ linux-2.6.34.1/arch/x86/kernel/acpi/realmode/wakeup.S 2010-07-07 09:04:46.000000000 -0400
9662 @@ -104,7 +104,7 @@ _start:
9663 movl %eax, %ecx
9664 orl %edx, %ecx
9665 jz 1f
9666 - movl $0xc0000080, %ecx
9667 + mov $MSR_EFER, %ecx
9668 wrmsr
9669 1:
9670
9671 diff -urNp linux-2.6.34.1/arch/x86/kernel/acpi/sleep.c linux-2.6.34.1/arch/x86/kernel/acpi/sleep.c
9672 --- linux-2.6.34.1/arch/x86/kernel/acpi/sleep.c 2010-07-05 14:24:10.000000000 -0400
9673 +++ linux-2.6.34.1/arch/x86/kernel/acpi/sleep.c 2010-07-07 09:04:46.000000000 -0400
9674 @@ -11,11 +11,12 @@
9675 #include <linux/cpumask.h>
9676 #include <asm/segment.h>
9677 #include <asm/desc.h>
9678 +#include <asm/e820.h>
9679
9680 #include "realmode/wakeup.h"
9681 #include "sleep.h"
9682
9683 -unsigned long acpi_wakeup_address;
9684 +unsigned long acpi_wakeup_address = 0x2000;
9685 unsigned long acpi_realmode_flags;
9686
9687 /* address in low memory of the wakeup routine. */
9688 @@ -96,8 +97,12 @@ int acpi_save_state_mem(void)
9689 header->trampoline_segment = setup_trampoline() >> 4;
9690 #ifdef CONFIG_SMP
9691 stack_start.sp = temp_stack + sizeof(temp_stack);
9692 +
9693 + pax_open_kernel();
9694 early_gdt_descr.address =
9695 (unsigned long)get_cpu_gdt_table(smp_processor_id());
9696 + pax_close_kernel();
9697 +
9698 initial_gs = per_cpu_offset(smp_processor_id());
9699 #endif
9700 initial_code = (unsigned long)wakeup_long64;
9701 diff -urNp linux-2.6.34.1/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.34.1/arch/x86/kernel/acpi/wakeup_32.S
9702 --- linux-2.6.34.1/arch/x86/kernel/acpi/wakeup_32.S 2010-07-05 14:24:10.000000000 -0400
9703 +++ linux-2.6.34.1/arch/x86/kernel/acpi/wakeup_32.S 2010-07-07 09:04:46.000000000 -0400
9704 @@ -30,13 +30,11 @@ wakeup_pmode_return:
9705 # and restore the stack ... but you need gdt for this to work
9706 movl saved_context_esp, %esp
9707
9708 - movl %cs:saved_magic, %eax
9709 - cmpl $0x12345678, %eax
9710 + cmpl $0x12345678, saved_magic
9711 jne bogus_magic
9712
9713 # jump to place where we left off
9714 - movl saved_eip, %eax
9715 - jmp *%eax
9716 + jmp *(saved_eip)
9717
9718 bogus_magic:
9719 jmp bogus_magic
9720 diff -urNp linux-2.6.34.1/arch/x86/kernel/alternative.c linux-2.6.34.1/arch/x86/kernel/alternative.c
9721 --- linux-2.6.34.1/arch/x86/kernel/alternative.c 2010-07-05 14:24:10.000000000 -0400
9722 +++ linux-2.6.34.1/arch/x86/kernel/alternative.c 2010-07-07 09:04:46.000000000 -0400
9723 @@ -429,7 +429,7 @@ void __init_or_module apply_paravirt(str
9724
9725 BUG_ON(p->len > MAX_PATCH_LEN);
9726 /* prep the buffer with the original instructions */
9727 - memcpy(insnbuf, p->instr, p->len);
9728 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
9729 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
9730 (unsigned long)p->instr, p->len);
9731
9732 @@ -497,7 +497,7 @@ void __init alternative_instructions(voi
9733 if (smp_alt_once)
9734 free_init_pages("SMP alternatives",
9735 (unsigned long)__smp_locks,
9736 - (unsigned long)__smp_locks_end);
9737 + PAGE_ALIGN((unsigned long)__smp_locks_end));
9738
9739 restart_nmi();
9740 }
9741 @@ -514,12 +514,16 @@ void __init alternative_instructions(voi
9742 * instructions. And on the local CPU you need to be protected again NMI or MCE
9743 * handlers seeing an inconsistent instruction while you patch.
9744 */
9745 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
9746 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
9747 size_t len)
9748 {
9749 unsigned long flags;
9750 local_irq_save(flags);
9751 - memcpy(addr, opcode, len);
9752 +
9753 + pax_open_kernel();
9754 + memcpy(ktla_ktva(addr), opcode, len);
9755 + pax_close_kernel();
9756 +
9757 sync_core();
9758 local_irq_restore(flags);
9759 /* Could also do a CLFLUSH here to speed up CPU recovery; but
9760 @@ -542,36 +546,22 @@ static void *__init_or_module text_poke_
9761 */
9762 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
9763 {
9764 - unsigned long flags;
9765 - char *vaddr;
9766 + unsigned char *vaddr = ktla_ktva(addr);
9767 struct page *pages[2];
9768 - int i;
9769 + size_t i;
9770
9771 if (!core_kernel_text((unsigned long)addr)) {
9772 - pages[0] = vmalloc_to_page(addr);
9773 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
9774 + pages[0] = vmalloc_to_page(vaddr);
9775 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
9776 } else {
9777 - pages[0] = virt_to_page(addr);
9778 + pages[0] = virt_to_page(vaddr);
9779 WARN_ON(!PageReserved(pages[0]));
9780 - pages[1] = virt_to_page(addr + PAGE_SIZE);
9781 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
9782 }
9783 BUG_ON(!pages[0]);
9784 - local_irq_save(flags);
9785 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
9786 - if (pages[1])
9787 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
9788 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
9789 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
9790 - clear_fixmap(FIX_TEXT_POKE0);
9791 - if (pages[1])
9792 - clear_fixmap(FIX_TEXT_POKE1);
9793 - local_flush_tlb();
9794 - sync_core();
9795 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
9796 - that causes hangs on some VIA CPUs. */
9797 + text_poke_early(addr, opcode, len);
9798 for (i = 0; i < len; i++)
9799 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
9800 - local_irq_restore(flags);
9801 + BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
9802 return addr;
9803 }
9804
9805 diff -urNp linux-2.6.34.1/arch/x86/kernel/amd_iommu.c linux-2.6.34.1/arch/x86/kernel/amd_iommu.c
9806 --- linux-2.6.34.1/arch/x86/kernel/amd_iommu.c 2010-07-05 14:24:10.000000000 -0400
9807 +++ linux-2.6.34.1/arch/x86/kernel/amd_iommu.c 2010-07-07 09:04:46.000000000 -0400
9808 @@ -2217,7 +2217,7 @@ static void prealloc_protection_domains(
9809 }
9810 }
9811
9812 -static struct dma_map_ops amd_iommu_dma_ops = {
9813 +static const struct dma_map_ops amd_iommu_dma_ops = {
9814 .alloc_coherent = alloc_coherent,
9815 .free_coherent = free_coherent,
9816 .map_page = map_page,
9817 diff -urNp linux-2.6.34.1/arch/x86/kernel/apic/io_apic.c linux-2.6.34.1/arch/x86/kernel/apic/io_apic.c
9818 --- linux-2.6.34.1/arch/x86/kernel/apic/io_apic.c 2010-07-05 14:24:10.000000000 -0400
9819 +++ linux-2.6.34.1/arch/x86/kernel/apic/io_apic.c 2010-07-07 09:04:46.000000000 -0400
9820 @@ -688,7 +688,7 @@ struct IO_APIC_route_entry **alloc_ioapi
9821 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
9822 GFP_ATOMIC);
9823 if (!ioapic_entries)
9824 - return 0;
9825 + return NULL;
9826
9827 for (apic = 0; apic < nr_ioapics; apic++) {
9828 ioapic_entries[apic] =
9829 @@ -705,7 +705,7 @@ nomem:
9830 kfree(ioapic_entries[apic]);
9831 kfree(ioapic_entries);
9832
9833 - return 0;
9834 + return NULL;
9835 }
9836
9837 /*
9838 @@ -1122,7 +1122,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
9839 }
9840 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
9841
9842 -void lock_vector_lock(void)
9843 +void lock_vector_lock(void) __acquires(vector_lock)
9844 {
9845 /* Used to the online set of cpus does not change
9846 * during assign_irq_vector.
9847 @@ -1130,7 +1130,7 @@ void lock_vector_lock(void)
9848 raw_spin_lock(&vector_lock);
9849 }
9850
9851 -void unlock_vector_lock(void)
9852 +void unlock_vector_lock(void) __releases(vector_lock)
9853 {
9854 raw_spin_unlock(&vector_lock);
9855 }
9856 diff -urNp linux-2.6.34.1/arch/x86/kernel/apm_32.c linux-2.6.34.1/arch/x86/kernel/apm_32.c
9857 --- linux-2.6.34.1/arch/x86/kernel/apm_32.c 2010-07-05 14:24:10.000000000 -0400
9858 +++ linux-2.6.34.1/arch/x86/kernel/apm_32.c 2010-07-07 09:04:47.000000000 -0400
9859 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
9860 * This is for buggy BIOS's that refer to (real mode) segment 0x40
9861 * even though they are called in protected mode.
9862 */
9863 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
9864 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
9865 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
9866
9867 static const char driver_version[] = "1.16ac"; /* no spaces */
9868 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
9869 BUG_ON(cpu != 0);
9870 gdt = get_cpu_gdt_table(cpu);
9871 save_desc_40 = gdt[0x40 / 8];
9872 +
9873 + pax_open_kernel();
9874 gdt[0x40 / 8] = bad_bios_desc;
9875 + pax_close_kernel();
9876
9877 apm_irq_save(flags);
9878 APM_DO_SAVE_SEGS;
9879 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
9880 &call->esi);
9881 APM_DO_RESTORE_SEGS;
9882 apm_irq_restore(flags);
9883 +
9884 + pax_open_kernel();
9885 gdt[0x40 / 8] = save_desc_40;
9886 + pax_close_kernel();
9887 +
9888 put_cpu();
9889
9890 return call->eax & 0xff;
9891 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
9892 BUG_ON(cpu != 0);
9893 gdt = get_cpu_gdt_table(cpu);
9894 save_desc_40 = gdt[0x40 / 8];
9895 +
9896 + pax_open_kernel();
9897 gdt[0x40 / 8] = bad_bios_desc;
9898 + pax_close_kernel();
9899
9900 apm_irq_save(flags);
9901 APM_DO_SAVE_SEGS;
9902 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
9903 &call->eax);
9904 APM_DO_RESTORE_SEGS;
9905 apm_irq_restore(flags);
9906 +
9907 + pax_open_kernel();
9908 gdt[0x40 / 8] = save_desc_40;
9909 + pax_close_kernel();
9910 +
9911 put_cpu();
9912 return error;
9913 }
9914 @@ -975,7 +989,7 @@ recalc:
9915
9916 static void apm_power_off(void)
9917 {
9918 - unsigned char po_bios_call[] = {
9919 + const unsigned char po_bios_call[] = {
9920 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
9921 0x8e, 0xd0, /* movw ax,ss */
9922 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
9923 @@ -1931,7 +1945,10 @@ static const struct file_operations apm_
9924 static struct miscdevice apm_device = {
9925 APM_MINOR_DEV,
9926 "apm_bios",
9927 - &apm_bios_fops
9928 + &apm_bios_fops,
9929 + {NULL, NULL},
9930 + NULL,
9931 + NULL
9932 };
9933
9934
9935 @@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata a
9936 { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
9937 },
9938
9939 - { }
9940 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
9941 };
9942
9943 /*
9944 @@ -2355,12 +2372,15 @@ static int __init apm_init(void)
9945 * code to that CPU.
9946 */
9947 gdt = get_cpu_gdt_table(0);
9948 +
9949 + pax_open_kernel();
9950 set_desc_base(&gdt[APM_CS >> 3],
9951 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
9952 set_desc_base(&gdt[APM_CS_16 >> 3],
9953 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
9954 set_desc_base(&gdt[APM_DS >> 3],
9955 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
9956 + pax_close_kernel();
9957
9958 proc_create("apm", 0, NULL, &apm_file_ops);
9959
9960 diff -urNp linux-2.6.34.1/arch/x86/kernel/asm-offsets_32.c linux-2.6.34.1/arch/x86/kernel/asm-offsets_32.c
9961 --- linux-2.6.34.1/arch/x86/kernel/asm-offsets_32.c 2010-07-05 14:24:10.000000000 -0400
9962 +++ linux-2.6.34.1/arch/x86/kernel/asm-offsets_32.c 2010-07-07 09:04:48.000000000 -0400
9963 @@ -115,6 +115,11 @@ void foo(void)
9964 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
9965 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
9966 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
9967 +
9968 +#ifdef CONFIG_PAX_KERNEXEC
9969 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
9970 +#endif
9971 +
9972 #endif
9973
9974 #ifdef CONFIG_XEN
9975 diff -urNp linux-2.6.34.1/arch/x86/kernel/asm-offsets_64.c linux-2.6.34.1/arch/x86/kernel/asm-offsets_64.c
9976 --- linux-2.6.34.1/arch/x86/kernel/asm-offsets_64.c 2010-07-05 14:24:10.000000000 -0400
9977 +++ linux-2.6.34.1/arch/x86/kernel/asm-offsets_64.c 2010-07-07 09:04:48.000000000 -0400
9978 @@ -63,6 +63,18 @@ int main(void)
9979 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
9980 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
9981 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
9982 +
9983 +#ifdef CONFIG_PAX_KERNEXEC
9984 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
9985 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
9986 +#endif
9987 +
9988 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9989 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
9990 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
9991 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
9992 +#endif
9993 +
9994 #endif
9995
9996
9997 @@ -115,6 +127,7 @@ int main(void)
9998 ENTRY(cr8);
9999 BLANK();
10000 #undef ENTRY
10001 + DEFINE(TSS_size, sizeof(struct tss_struct));
10002 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
10003 BLANK();
10004 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
10005 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/Makefile linux-2.6.34.1/arch/x86/kernel/cpu/Makefile
10006 --- linux-2.6.34.1/arch/x86/kernel/cpu/Makefile 2010-07-05 14:24:10.000000000 -0400
10007 +++ linux-2.6.34.1/arch/x86/kernel/cpu/Makefile 2010-07-07 09:04:48.000000000 -0400
10008 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10009 CFLAGS_REMOVE_perf_event.o = -pg
10010 endif
10011
10012 -# Make sure load_percpu_segment has no stackprotector
10013 -nostackp := $(call cc-option, -fno-stack-protector)
10014 -CFLAGS_common.o := $(nostackp)
10015 -
10016 obj-y := intel_cacheinfo.o addon_cpuid_features.o
10017 obj-y += proc.o capflags.o powerflags.o common.o
10018 obj-y += vmware.o hypervisor.o sched.o
10019 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/common.c linux-2.6.34.1/arch/x86/kernel/cpu/common.c
10020 --- linux-2.6.34.1/arch/x86/kernel/cpu/common.c 2010-07-05 14:24:10.000000000 -0400
10021 +++ linux-2.6.34.1/arch/x86/kernel/cpu/common.c 2010-07-07 09:04:48.000000000 -0400
10022 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10023
10024 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10025
10026 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10027 -#ifdef CONFIG_X86_64
10028 - /*
10029 - * We need valid kernel segments for data and code in long mode too
10030 - * IRET will check the segment types kkeil 2000/10/28
10031 - * Also sysret mandates a special GDT layout
10032 - *
10033 - * TLS descriptors are currently at a different place compared to i386.
10034 - * Hopefully nobody expects them at a fixed place (Wine?)
10035 - */
10036 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10037 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10038 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10039 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10040 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10041 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10042 -#else
10043 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10044 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10045 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10046 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10047 - /*
10048 - * Segments used for calling PnP BIOS have byte granularity.
10049 - * They code segments and data segments have fixed 64k limits,
10050 - * the transfer segment sizes are set at run time.
10051 - */
10052 - /* 32-bit code */
10053 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10054 - /* 16-bit code */
10055 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10056 - /* 16-bit data */
10057 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10058 - /* 16-bit data */
10059 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10060 - /* 16-bit data */
10061 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10062 - /*
10063 - * The APM segments have byte granularity and their bases
10064 - * are set at run time. All have 64k limits.
10065 - */
10066 - /* 32-bit code */
10067 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10068 - /* 16-bit code */
10069 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10070 - /* data */
10071 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10072 -
10073 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10074 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10075 - GDT_STACK_CANARY_INIT
10076 -#endif
10077 -} };
10078 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10079 -
10080 static int __init x86_xsave_setup(char *s)
10081 {
10082 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10083 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
10084 {
10085 struct desc_ptr gdt_descr;
10086
10087 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10088 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10089 gdt_descr.size = GDT_SIZE - 1;
10090 load_gdt(&gdt_descr);
10091 /* Reload the per-cpu base */
10092 @@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struc
10093 /* Filter out anything that depends on CPUID levels we don't have */
10094 filter_cpuid_features(c, true);
10095
10096 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10097 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10098 +#endif
10099 +
10100 /* If the model name is still unset, do table lookup. */
10101 if (!c->x86_model_id[0]) {
10102 const char *p;
10103 @@ -1103,7 +1053,7 @@ void __cpuinit cpu_init(void)
10104 int i;
10105
10106 cpu = stack_smp_processor_id();
10107 - t = &per_cpu(init_tss, cpu);
10108 + t = init_tss + cpu;
10109 oist = &per_cpu(orig_ist, cpu);
10110
10111 #ifdef CONFIG_NUMA
10112 @@ -1129,7 +1079,7 @@ void __cpuinit cpu_init(void)
10113 switch_to_new_gdt(cpu);
10114 loadsegment(fs, 0);
10115
10116 - load_idt((const struct desc_ptr *)&idt_descr);
10117 + load_idt(&idt_descr);
10118
10119 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10120 syscall_init();
10121 @@ -1201,7 +1151,7 @@ void __cpuinit cpu_init(void)
10122 {
10123 int cpu = smp_processor_id();
10124 struct task_struct *curr = current;
10125 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10126 + struct tss_struct *t = init_tss + cpu;
10127 struct thread_struct *thread = &curr->thread;
10128
10129 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10130 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
10131 --- linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-07-05 14:24:10.000000000 -0400
10132 +++ linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-07-07 09:04:48.000000000 -0400
10133 @@ -524,7 +524,7 @@ static const struct dmi_system_id sw_any
10134 DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
10135 },
10136 },
10137 - { }
10138 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
10139 };
10140
10141 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
10142 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
10143 --- linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-07-05 14:24:10.000000000 -0400
10144 +++ linux-2.6.34.1/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-07-07 09:04:48.000000000 -0400
10145 @@ -226,7 +226,7 @@ static struct cpu_model models[] =
10146 { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
10147 { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
10148
10149 - { NULL, }
10150 + { NULL, NULL, 0, NULL}
10151 };
10152 #undef _BANIAS
10153 #undef BANIAS
10154 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/intel.c linux-2.6.34.1/arch/x86/kernel/cpu/intel.c
10155 --- linux-2.6.34.1/arch/x86/kernel/cpu/intel.c 2010-07-05 14:24:10.000000000 -0400
10156 +++ linux-2.6.34.1/arch/x86/kernel/cpu/intel.c 2010-07-07 09:04:48.000000000 -0400
10157 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10158 * Update the IDT descriptor and reload the IDT so that
10159 * it uses the read-only mapped virtual address.
10160 */
10161 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10162 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10163 load_idt(&idt_descr);
10164 }
10165 #endif
10166 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.34.1/arch/x86/kernel/cpu/mcheck/mce.c
10167 --- linux-2.6.34.1/arch/x86/kernel/cpu/mcheck/mce.c 2010-07-05 14:24:10.000000000 -0400
10168 +++ linux-2.6.34.1/arch/x86/kernel/cpu/mcheck/mce.c 2010-07-07 09:04:48.000000000 -0400
10169 @@ -209,7 +209,7 @@ static void print_mce(struct mce *m)
10170 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10171 m->cs, m->ip);
10172
10173 - if (m->cs == __KERNEL_CS)
10174 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10175 print_symbol("{%s}", m->ip);
10176 pr_cont("\n");
10177 }
10178 @@ -1452,14 +1452,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10179 */
10180
10181 static DEFINE_SPINLOCK(mce_state_lock);
10182 -static int open_count; /* #times opened */
10183 +static atomic_t open_count; /* #times opened */
10184 static int open_exclu; /* already open exclusive? */
10185
10186 static int mce_open(struct inode *inode, struct file *file)
10187 {
10188 spin_lock(&mce_state_lock);
10189
10190 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10191 + if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) {
10192 spin_unlock(&mce_state_lock);
10193
10194 return -EBUSY;
10195 @@ -1467,7 +1467,7 @@ static int mce_open(struct inode *inode,
10196
10197 if (file->f_flags & O_EXCL)
10198 open_exclu = 1;
10199 - open_count++;
10200 + atomic_inc(&open_count);
10201
10202 spin_unlock(&mce_state_lock);
10203
10204 @@ -1478,7 +1478,7 @@ static int mce_release(struct inode *ino
10205 {
10206 spin_lock(&mce_state_lock);
10207
10208 - open_count--;
10209 + atomic_dec(&open_count);
10210 open_exclu = 0;
10211
10212 spin_unlock(&mce_state_lock);
10213 @@ -1616,6 +1616,7 @@ static struct miscdevice mce_log_device
10214 MISC_MCELOG_MINOR,
10215 "mcelog",
10216 &mce_chrdev_ops,
10217 + {NULL, NULL}, NULL, NULL
10218 };
10219
10220 /*
10221 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/generic.c
10222 --- linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/generic.c 2010-07-05 14:24:10.000000000 -0400
10223 +++ linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/generic.c 2010-07-07 09:04:48.000000000 -0400
10224 @@ -28,7 +28,7 @@ static struct fixed_range_block fixed_ra
10225 { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
10226 { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
10227 { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
10228 - {}
10229 + { 0, 0 }
10230 };
10231
10232 static unsigned long smp_changes_mask;
10233 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/main.c
10234 --- linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/main.c 2010-07-05 14:24:10.000000000 -0400
10235 +++ linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/main.c 2010-07-07 09:04:48.000000000 -0400
10236 @@ -60,7 +60,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10237 u64 size_or_mask, size_and_mask;
10238 static bool mtrr_aps_delayed_init;
10239
10240 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10241 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10242
10243 const struct mtrr_ops *mtrr_if;
10244
10245 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/mtrr.h
10246 --- linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-07-05 14:24:10.000000000 -0400
10247 +++ linux-2.6.34.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-07-07 09:04:48.000000000 -0400
10248 @@ -12,19 +12,19 @@
10249 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10250
10251 struct mtrr_ops {
10252 - u32 vendor;
10253 - u32 use_intel_if;
10254 - void (*set)(unsigned int reg, unsigned long base,
10255 + const u32 vendor;
10256 + const u32 use_intel_if;
10257 + void (* const set)(unsigned int reg, unsigned long base,
10258 unsigned long size, mtrr_type type);
10259 - void (*set_all)(void);
10260 + void (* const set_all)(void);
10261
10262 - void (*get)(unsigned int reg, unsigned long *base,
10263 + void (* const get)(unsigned int reg, unsigned long *base,
10264 unsigned long *size, mtrr_type *type);
10265 - int (*get_free_region)(unsigned long base, unsigned long size,
10266 + int (* const get_free_region)(unsigned long base, unsigned long size,
10267 int replace_reg);
10268 - int (*validate_add_page)(unsigned long base, unsigned long size,
10269 + int (* const validate_add_page)(unsigned long base, unsigned long size,
10270 unsigned int type);
10271 - int (*have_wrcomb)(void);
10272 + int (* const have_wrcomb)(void);
10273 };
10274
10275 extern int generic_get_free_region(unsigned long base, unsigned long size,
10276 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/perf_event.c linux-2.6.34.1/arch/x86/kernel/cpu/perf_event.c
10277 --- linux-2.6.34.1/arch/x86/kernel/cpu/perf_event.c 2010-07-05 14:24:10.000000000 -0400
10278 +++ linux-2.6.34.1/arch/x86/kernel/cpu/perf_event.c 2010-07-07 09:04:48.000000000 -0400
10279 @@ -1702,7 +1702,7 @@ perf_callchain_user(struct pt_regs *regs
10280 break;
10281
10282 callchain_store(entry, frame.return_address);
10283 - fp = frame.next_frame;
10284 + fp = (__force const void __user *)frame.next_frame;
10285 }
10286 }
10287
10288 diff -urNp linux-2.6.34.1/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.34.1/arch/x86/kernel/cpu/perfctr-watchdog.c
10289 --- linux-2.6.34.1/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-07-05 14:24:10.000000000 -0400
10290 +++ linux-2.6.34.1/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-07-07 09:04:48.000000000 -0400
10291 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
10292
10293 /* Interface defining a CPU specific perfctr watchdog */
10294 struct wd_ops {
10295 - int (*reserve)(void);
10296 - void (*unreserve)(void);
10297 - int (*setup)(unsigned nmi_hz);
10298 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
10299 - void (*stop)(void);
10300 + int (* const reserve)(void);
10301 + void (* const unreserve)(void);
10302 + int (* const setup)(unsigned nmi_hz);
10303 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
10304 + void (* const stop)(void);
10305 unsigned perfctr;
10306 unsigned evntsel;
10307 u64 checkbit;
10308 @@ -634,6 +634,7 @@ static const struct wd_ops p4_wd_ops = {
10309 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
10310 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
10311
10312 +/* cannot be const, see probe_nmi_watchdog */
10313 static struct wd_ops intel_arch_wd_ops;
10314
10315 static int setup_intel_arch_watchdog(unsigned nmi_hz)
10316 @@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(uns
10317 return 1;
10318 }
10319
10320 +/* cannot be const */
10321 static struct wd_ops intel_arch_wd_ops __read_mostly = {
10322 .reserve = single_msr_reserve,
10323 .unreserve = single_msr_unreserve,
10324 diff -urNp linux-2.6.34.1/arch/x86/kernel/crash.c linux-2.6.34.1/arch/x86/kernel/crash.c
10325 --- linux-2.6.34.1/arch/x86/kernel/crash.c 2010-07-05 14:24:10.000000000 -0400
10326 +++ linux-2.6.34.1/arch/x86/kernel/crash.c 2010-07-07 09:04:48.000000000 -0400
10327 @@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu,
10328 regs = args->regs;
10329
10330 #ifdef CONFIG_X86_32
10331 - if (!user_mode_vm(regs)) {
10332 + if (!user_mode(regs)) {
10333 crash_fixup_ss_esp(&fixed_regs, regs);
10334 regs = &fixed_regs;
10335 }
10336 diff -urNp linux-2.6.34.1/arch/x86/kernel/doublefault_32.c linux-2.6.34.1/arch/x86/kernel/doublefault_32.c
10337 --- linux-2.6.34.1/arch/x86/kernel/doublefault_32.c 2010-07-05 14:24:10.000000000 -0400
10338 +++ linux-2.6.34.1/arch/x86/kernel/doublefault_32.c 2010-07-07 09:04:48.000000000 -0400
10339 @@ -11,7 +11,7 @@
10340
10341 #define DOUBLEFAULT_STACKSIZE (1024)
10342 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10343 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10344 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10345
10346 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10347
10348 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10349 unsigned long gdt, tss;
10350
10351 store_gdt(&gdt_desc);
10352 - gdt = gdt_desc.address;
10353 + gdt = (unsigned long)gdt_desc.address;
10354
10355 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10356
10357 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10358 /* 0x2 bit is always set */
10359 .flags = X86_EFLAGS_SF | 0x2,
10360 .sp = STACK_START,
10361 - .es = __USER_DS,
10362 + .es = __KERNEL_DS,
10363 .cs = __KERNEL_CS,
10364 .ss = __KERNEL_DS,
10365 - .ds = __USER_DS,
10366 + .ds = __KERNEL_DS,
10367 .fs = __KERNEL_PERCPU,
10368
10369 .__cr3 = __pa_nodebug(swapper_pg_dir),
10370 diff -urNp linux-2.6.34.1/arch/x86/kernel/dumpstack.c linux-2.6.34.1/arch/x86/kernel/dumpstack.c
10371 --- linux-2.6.34.1/arch/x86/kernel/dumpstack.c 2010-07-05 14:24:10.000000000 -0400
10372 +++ linux-2.6.34.1/arch/x86/kernel/dumpstack.c 2010-07-07 09:04:48.000000000 -0400
10373 @@ -207,7 +207,7 @@ void dump_stack(void)
10374 #endif
10375
10376 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
10377 - current->pid, current->comm, print_tainted(),
10378 + task_pid_nr(current), current->comm, print_tainted(),
10379 init_utsname()->release,
10380 (int)strcspn(init_utsname()->version, " "),
10381 init_utsname()->version);
10382 @@ -268,7 +268,7 @@ void __kprobes oops_end(unsigned long fl
10383 panic("Fatal exception in interrupt");
10384 if (panic_on_oops)
10385 panic("Fatal exception");
10386 - do_exit(signr);
10387 + do_group_exit(signr);
10388 }
10389
10390 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
10391 @@ -295,7 +295,7 @@ int __kprobes __die(const char *str, str
10392
10393 show_registers(regs);
10394 #ifdef CONFIG_X86_32
10395 - if (user_mode_vm(regs)) {
10396 + if (user_mode(regs)) {
10397 sp = regs->sp;
10398 ss = regs->ss & 0xffff;
10399 } else {
10400 @@ -323,7 +323,7 @@ void die(const char *str, struct pt_regs
10401 unsigned long flags = oops_begin();
10402 int sig = SIGSEGV;
10403
10404 - if (!user_mode_vm(regs))
10405 + if (!user_mode(regs))
10406 report_bug(regs->ip, regs);
10407
10408 if (__die(str, regs, err))
10409 diff -urNp linux-2.6.34.1/arch/x86/kernel/dumpstack_32.c linux-2.6.34.1/arch/x86/kernel/dumpstack_32.c
10410 --- linux-2.6.34.1/arch/x86/kernel/dumpstack_32.c 2010-07-05 14:24:10.000000000 -0400
10411 +++ linux-2.6.34.1/arch/x86/kernel/dumpstack_32.c 2010-07-07 09:04:48.000000000 -0400
10412 @@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs
10413 * When in-kernel, we also print out the stack and code at the
10414 * time of the fault..
10415 */
10416 - if (!user_mode_vm(regs)) {
10417 + if (!user_mode(regs)) {
10418 unsigned int code_prologue = code_bytes * 43 / 64;
10419 unsigned int code_len = code_bytes;
10420 unsigned char c;
10421 u8 *ip;
10422 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10423
10424 printk(KERN_EMERG "Stack:\n");
10425 show_stack_log_lvl(NULL, regs, &regs->sp,
10426 @@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs
10427
10428 printk(KERN_EMERG "Code: ");
10429
10430 - ip = (u8 *)regs->ip - code_prologue;
10431 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10432 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10433 /* try starting at IP */
10434 - ip = (u8 *)regs->ip;
10435 + ip = (u8 *)regs->ip + cs_base;
10436 code_len = code_len - code_prologue + 1;
10437 }
10438 for (i = 0; i < code_len; i++, ip++) {
10439 @@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs
10440 printk(" Bad EIP value.");
10441 break;
10442 }
10443 - if (ip == (u8 *)regs->ip)
10444 + if (ip == (u8 *)regs->ip + cs_base)
10445 printk("<%02x> ", c);
10446 else
10447 printk("%02x ", c);
10448 @@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip)
10449 {
10450 unsigned short ud2;
10451
10452 + ip = ktla_ktva(ip);
10453 if (ip < PAGE_OFFSET)
10454 return 0;
10455 if (probe_kernel_address((unsigned short *)ip, ud2))
10456 diff -urNp linux-2.6.34.1/arch/x86/kernel/efi_32.c linux-2.6.34.1/arch/x86/kernel/efi_32.c
10457 --- linux-2.6.34.1/arch/x86/kernel/efi_32.c 2010-07-05 14:24:10.000000000 -0400
10458 +++ linux-2.6.34.1/arch/x86/kernel/efi_32.c 2010-07-07 09:04:48.000000000 -0400
10459 @@ -38,70 +38,38 @@
10460 */
10461
10462 static unsigned long efi_rt_eflags;
10463 -static pgd_t efi_bak_pg_dir_pointer[2];
10464 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
10465
10466 -void efi_call_phys_prelog(void)
10467 +void __init efi_call_phys_prelog(void)
10468 {
10469 - unsigned long cr4;
10470 - unsigned long temp;
10471 struct desc_ptr gdt_descr;
10472
10473 local_irq_save(efi_rt_eflags);
10474
10475 - /*
10476 - * If I don't have PAE, I should just duplicate two entries in page
10477 - * directory. If I have PAE, I just need to duplicate one entry in
10478 - * page directory.
10479 - */
10480 - cr4 = read_cr4_safe();
10481
10482 - if (cr4 & X86_CR4_PAE) {
10483 - efi_bak_pg_dir_pointer[0].pgd =
10484 - swapper_pg_dir[pgd_index(0)].pgd;
10485 - swapper_pg_dir[0].pgd =
10486 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
10487 - } else {
10488 - efi_bak_pg_dir_pointer[0].pgd =
10489 - swapper_pg_dir[pgd_index(0)].pgd;
10490 - efi_bak_pg_dir_pointer[1].pgd =
10491 - swapper_pg_dir[pgd_index(0x400000)].pgd;
10492 - swapper_pg_dir[pgd_index(0)].pgd =
10493 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
10494 - temp = PAGE_OFFSET + 0x400000;
10495 - swapper_pg_dir[pgd_index(0x400000)].pgd =
10496 - swapper_pg_dir[pgd_index(temp)].pgd;
10497 - }
10498 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
10499 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
10500 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
10501
10502 /*
10503 * After the lock is released, the original page table is restored.
10504 */
10505 __flush_tlb_all();
10506
10507 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
10508 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
10509 gdt_descr.size = GDT_SIZE - 1;
10510 load_gdt(&gdt_descr);
10511 }
10512
10513 -void efi_call_phys_epilog(void)
10514 +void __init efi_call_phys_epilog(void)
10515 {
10516 - unsigned long cr4;
10517 struct desc_ptr gdt_descr;
10518
10519 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
10520 + gdt_descr.address = get_cpu_gdt_table(0);
10521 gdt_descr.size = GDT_SIZE - 1;
10522 load_gdt(&gdt_descr);
10523
10524 - cr4 = read_cr4_safe();
10525 -
10526 - if (cr4 & X86_CR4_PAE) {
10527 - swapper_pg_dir[pgd_index(0)].pgd =
10528 - efi_bak_pg_dir_pointer[0].pgd;
10529 - } else {
10530 - swapper_pg_dir[pgd_index(0)].pgd =
10531 - efi_bak_pg_dir_pointer[0].pgd;
10532 - swapper_pg_dir[pgd_index(0x400000)].pgd =
10533 - efi_bak_pg_dir_pointer[1].pgd;
10534 - }
10535 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
10536
10537 /*
10538 * After the lock is released, the original page table is restored.
10539 diff -urNp linux-2.6.34.1/arch/x86/kernel/efi_stub_32.S linux-2.6.34.1/arch/x86/kernel/efi_stub_32.S
10540 --- linux-2.6.34.1/arch/x86/kernel/efi_stub_32.S 2010-07-05 14:24:10.000000000 -0400
10541 +++ linux-2.6.34.1/arch/x86/kernel/efi_stub_32.S 2010-07-07 09:04:48.000000000 -0400
10542 @@ -6,6 +6,7 @@
10543 */
10544
10545 #include <linux/linkage.h>
10546 +#include <linux/init.h>
10547 #include <asm/page_types.h>
10548
10549 /*
10550 @@ -20,7 +21,7 @@
10551 * service functions will comply with gcc calling convention, too.
10552 */
10553
10554 -.text
10555 +__INIT
10556 ENTRY(efi_call_phys)
10557 /*
10558 * 0. The function can only be called in Linux kernel. So CS has been
10559 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
10560 * The mapping of lower virtual memory has been created in prelog and
10561 * epilog.
10562 */
10563 - movl $1f, %edx
10564 - subl $__PAGE_OFFSET, %edx
10565 - jmp *%edx
10566 + jmp 1f-__PAGE_OFFSET
10567 1:
10568
10569 /*
10570 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
10571 * parameter 2, ..., param n. To make things easy, we save the return
10572 * address of efi_call_phys in a global variable.
10573 */
10574 - popl %edx
10575 - movl %edx, saved_return_addr
10576 - /* get the function pointer into ECX*/
10577 - popl %ecx
10578 - movl %ecx, efi_rt_function_ptr
10579 - movl $2f, %edx
10580 - subl $__PAGE_OFFSET, %edx
10581 - pushl %edx
10582 + popl (saved_return_addr)
10583 + popl (efi_rt_function_ptr)
10584
10585 /*
10586 * 3. Clear PG bit in %CR0.
10587 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
10588 /*
10589 * 5. Call the physical function.
10590 */
10591 - jmp *%ecx
10592 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
10593
10594 -2:
10595 /*
10596 * 6. After EFI runtime service returns, control will return to
10597 * following instruction. We'd better readjust stack pointer first.
10598 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
10599 movl %cr0, %edx
10600 orl $0x80000000, %edx
10601 movl %edx, %cr0
10602 - jmp 1f
10603 -1:
10604 +
10605 /*
10606 * 8. Now restore the virtual mode from flat mode by
10607 * adding EIP with PAGE_OFFSET.
10608 */
10609 - movl $1f, %edx
10610 - jmp *%edx
10611 + jmp 1f+__PAGE_OFFSET
10612 1:
10613
10614 /*
10615 * 9. Balance the stack. And because EAX contain the return value,
10616 * we'd better not clobber it.
10617 */
10618 - leal efi_rt_function_ptr, %edx
10619 - movl (%edx), %ecx
10620 - pushl %ecx
10621 + pushl (efi_rt_function_ptr)
10622
10623 /*
10624 - * 10. Push the saved return address onto the stack and return.
10625 + * 10. Return to the saved return address.
10626 */
10627 - leal saved_return_addr, %edx
10628 - movl (%edx), %ecx
10629 - pushl %ecx
10630 - ret
10631 + jmpl *(saved_return_addr)
10632 ENDPROC(efi_call_phys)
10633 .previous
10634
10635 -.data
10636 +__INITDATA
10637 saved_return_addr:
10638 .long 0
10639 efi_rt_function_ptr:
10640 diff -urNp linux-2.6.34.1/arch/x86/kernel/entry_32.S linux-2.6.34.1/arch/x86/kernel/entry_32.S
10641 --- linux-2.6.34.1/arch/x86/kernel/entry_32.S 2010-07-05 14:24:10.000000000 -0400
10642 +++ linux-2.6.34.1/arch/x86/kernel/entry_32.S 2010-07-07 09:04:48.000000000 -0400
10643 @@ -191,7 +191,67 @@
10644
10645 #endif /* CONFIG_X86_32_LAZY_GS */
10646
10647 -.macro SAVE_ALL
10648 +.macro PAX_EXIT_KERNEL
10649 +#ifdef CONFIG_PAX_KERNEXEC
10650 +#ifdef CONFIG_PARAVIRT
10651 + push %eax; push %ecx;
10652 +#endif
10653 + mov %cs, %esi
10654 + cmp $__KERNEXEC_KERNEL_CS, %esi
10655 + jnz 2f
10656 +#ifdef CONFIG_PARAVIRT
10657 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
10658 + mov %eax, %esi
10659 +#else
10660 + mov %cr0, %esi
10661 +#endif
10662 + btr $16, %esi
10663 + ljmp $__KERNEL_CS, $1f
10664 +1:
10665 +#ifdef CONFIG_PARAVIRT
10666 + mov %esi, %eax
10667 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
10668 +#else
10669 + mov %esi, %cr0
10670 +#endif
10671 +2:
10672 +#ifdef CONFIG_PARAVIRT
10673 + pop %ecx; pop %eax
10674 +#endif
10675 +#endif
10676 +.endm
10677 +
10678 +.macro PAX_ENTER_KERNEL
10679 +#ifdef CONFIG_PAX_KERNEXEC
10680 +#ifdef CONFIG_PARAVIRT
10681 + push %eax; push %ecx;
10682 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
10683 + mov %eax, %esi
10684 +#else
10685 + mov %cr0, %esi
10686 +#endif
10687 + bts $16, %esi
10688 + jnc 1f
10689 + mov %cs, %esi
10690 + cmp $__KERNEL_CS, %esi
10691 + jz 3f
10692 + ljmp $__KERNEL_CS, $3f
10693 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
10694 +2:
10695 +#ifdef CONFIG_PARAVIRT
10696 + mov %esi, %eax
10697 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10698 +#else
10699 + mov %esi, %cr0
10700 +#endif
10701 +3:
10702 +#ifdef CONFIG_PARAVIRT
10703 + pop %ecx; pop %eax
10704 +#endif
10705 +#endif
10706 +.endm
10707 +
10708 +.macro __SAVE_ALL _DS
10709 cld
10710 PUSH_GS
10711 pushl %fs
10712 @@ -224,7 +284,7 @@
10713 pushl %ebx
10714 CFI_ADJUST_CFA_OFFSET 4
10715 CFI_REL_OFFSET ebx, 0
10716 - movl $(__USER_DS), %edx
10717 + movl $\_DS, %edx
10718 movl %edx, %ds
10719 movl %edx, %es
10720 movl $(__KERNEL_PERCPU), %edx
10721 @@ -232,6 +292,15 @@
10722 SET_KERNEL_GS %edx
10723 .endm
10724
10725 +.macro SAVE_ALL
10726 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
10727 + __SAVE_ALL __KERNEL_DS
10728 + PAX_ENTER_KERNEL
10729 +#else
10730 + __SAVE_ALL __USER_DS
10731 +#endif
10732 +.endm
10733 +
10734 .macro RESTORE_INT_REGS
10735 popl %ebx
10736 CFI_ADJUST_CFA_OFFSET -4
10737 @@ -356,7 +425,15 @@ check_userspace:
10738 movb PT_CS(%esp), %al
10739 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
10740 cmpl $USER_RPL, %eax
10741 +
10742 +#ifdef CONFIG_PAX_KERNEXEC
10743 + jae resume_userspace
10744 +
10745 + PAX_EXIT_KERNEL
10746 + jmp resume_kernel
10747 +#else
10748 jb resume_kernel # not returning to v8086 or userspace
10749 +#endif
10750
10751 ENTRY(resume_userspace)
10752 LOCKDEP_SYS_EXIT
10753 @@ -422,10 +499,9 @@ sysenter_past_esp:
10754 /*CFI_REL_OFFSET cs, 0*/
10755 /*
10756 * Push current_thread_info()->sysenter_return to the stack.
10757 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
10758 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
10759 */
10760 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
10761 + GET_THREAD_INFO(%ebp)
10762 + pushl TI_sysenter_return(%ebp)
10763 CFI_ADJUST_CFA_OFFSET 4
10764 CFI_REL_OFFSET eip, 0
10765
10766 @@ -438,9 +514,19 @@ sysenter_past_esp:
10767 * Load the potential sixth argument from user stack.
10768 * Careful about security.
10769 */
10770 + movl PT_OLDESP(%esp),%ebp
10771 +
10772 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10773 + mov PT_OLDSS(%esp),%ds
10774 +1: movl %ds:(%ebp),%ebp
10775 + push %ss
10776 + pop %ds
10777 +#else
10778 cmpl $__PAGE_OFFSET-3,%ebp
10779 jae syscall_fault
10780 1: movl (%ebp),%ebp
10781 +#endif
10782 +
10783 movl %ebp,PT_EBP(%esp)
10784 .section __ex_table,"a"
10785 .align 4
10786 @@ -463,12 +549,23 @@ sysenter_do_call:
10787 testl $_TIF_ALLWORK_MASK, %ecx
10788 jne sysexit_audit
10789 sysenter_exit:
10790 +
10791 +#ifdef CONFIG_PAX_RANDKSTACK
10792 + pushl %eax
10793 + CFI_ADJUST_CFA_OFFSET 4
10794 + call pax_randomize_kstack
10795 + popl %eax
10796 + CFI_ADJUST_CFA_OFFSET -4
10797 +#endif
10798 +
10799 /* if something modifies registers it must also disable sysexit */
10800 movl PT_EIP(%esp), %edx
10801 movl PT_OLDESP(%esp), %ecx
10802 xorl %ebp,%ebp
10803 TRACE_IRQS_ON
10804 1: mov PT_FS(%esp), %fs
10805 +2: mov PT_DS(%esp), %ds
10806 +3: mov PT_ES(%esp), %es
10807 PTGS_TO_GS
10808 ENABLE_INTERRUPTS_SYSEXIT
10809
10810 @@ -512,11 +609,17 @@ sysexit_audit:
10811
10812 CFI_ENDPROC
10813 .pushsection .fixup,"ax"
10814 -2: movl $0,PT_FS(%esp)
10815 +4: movl $0,PT_FS(%esp)
10816 + jmp 1b
10817 +5: movl $0,PT_DS(%esp)
10818 + jmp 1b
10819 +6: movl $0,PT_ES(%esp)
10820 jmp 1b
10821 .section __ex_table,"a"
10822 .align 4
10823 - .long 1b,2b
10824 + .long 1b,4b
10825 + .long 2b,5b
10826 + .long 3b,6b
10827 .popsection
10828 PTGS_TO_GS_EX
10829 ENDPROC(ia32_sysenter_target)
10830 @@ -550,6 +653,10 @@ syscall_exit:
10831 testl $_TIF_ALLWORK_MASK, %ecx # current->work
10832 jne syscall_exit_work
10833
10834 +#ifdef CONFIG_PAX_RANDKSTACK
10835 + call pax_randomize_kstack
10836 +#endif
10837 +
10838 restore_all:
10839 TRACE_IRQS_IRET
10840 restore_all_notrace:
10841 @@ -614,7 +721,13 @@ ldt_ss:
10842 mov PT_OLDESP(%esp), %eax /* load userspace esp */
10843 mov %dx, %ax /* eax: new kernel esp */
10844 sub %eax, %edx /* offset (low word is 0) */
10845 - PER_CPU(gdt_page, %ebx)
10846 +#ifdef CONFIG_SMP
10847 + movl PER_CPU_VAR(cpu_number), %ebx
10848 + shll $PAGE_SHIFT_asm, %ebx
10849 + addl $cpu_gdt_table, %ebx
10850 +#else
10851 + movl $cpu_gdt_table, %ebx
10852 +#endif
10853 shr $16, %edx
10854 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
10855 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
10856 @@ -654,25 +767,19 @@ work_resched:
10857
10858 work_notifysig: # deal with pending signals and
10859 # notify-resume requests
10860 + movl %esp, %eax
10861 #ifdef CONFIG_VM86
10862 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
10863 - movl %esp, %eax
10864 - jne work_notifysig_v86 # returning to kernel-space or
10865 + jz 1f # returning to kernel-space or
10866 # vm86-space
10867 - xorl %edx, %edx
10868 - call do_notify_resume
10869 - jmp resume_userspace_sig
10870
10871 - ALIGN
10872 -work_notifysig_v86:
10873 pushl %ecx # save ti_flags for do_notify_resume
10874 CFI_ADJUST_CFA_OFFSET 4
10875 call save_v86_state # %eax contains pt_regs pointer
10876 popl %ecx
10877 CFI_ADJUST_CFA_OFFSET -4
10878 movl %eax, %esp
10879 -#else
10880 - movl %esp, %eax
10881 +1:
10882 #endif
10883 xorl %edx, %edx
10884 call do_notify_resume
10885 @@ -707,6 +814,10 @@ END(syscall_exit_work)
10886
10887 RING0_INT_FRAME # can't unwind into user space anyway
10888 syscall_fault:
10889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10890 + push %ss
10891 + pop %ds
10892 +#endif
10893 GET_THREAD_INFO(%ebp)
10894 movl $-EFAULT,PT_EAX(%esp)
10895 jmp resume_userspace
10896 @@ -790,7 +901,13 @@ ptregs_clone:
10897 * normal stack and adjusts ESP with the matching offset.
10898 */
10899 /* fixup the stack */
10900 - PER_CPU(gdt_page, %ebx)
10901 +#ifdef CONFIG_SMP
10902 + movl PER_CPU_VAR(cpu_number), %ebx
10903 + shll $PAGE_SHIFT_asm, %ebx
10904 + addl $cpu_gdt_table, %ebx
10905 +#else
10906 + movl $cpu_gdt_table, %ebx
10907 +#endif
10908 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
10909 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
10910 shl $16, %eax
10911 @@ -1254,7 +1371,6 @@ return_to_handler:
10912 jmp *%ecx
10913 #endif
10914
10915 -.section .rodata,"a"
10916 #include "syscall_table_32.S"
10917
10918 syscall_table_size=(.-sys_call_table)
10919 @@ -1311,9 +1427,12 @@ error_code:
10920 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
10921 REG_TO_PTGS %ecx
10922 SET_KERNEL_GS %ecx
10923 - movl $(__USER_DS), %ecx
10924 + movl $(__KERNEL_DS), %ecx
10925 movl %ecx, %ds
10926 movl %ecx, %es
10927 +
10928 + PAX_ENTER_KERNEL
10929 +
10930 TRACE_IRQS_OFF
10931 movl %esp,%eax # pt_regs pointer
10932 call *%edi
10933 @@ -1407,6 +1526,9 @@ nmi_stack_correct:
10934 xorl %edx,%edx # zero error code
10935 movl %esp,%eax # pt_regs pointer
10936 call do_nmi
10937 +
10938 + PAX_EXIT_KERNEL
10939 +
10940 jmp restore_all_notrace
10941 CFI_ENDPROC
10942
10943 @@ -1447,6 +1569,9 @@ nmi_espfix_stack:
10944 FIXUP_ESPFIX_STACK # %eax == %esp
10945 xorl %edx,%edx # zero error code
10946 call do_nmi
10947 +
10948 + PAX_EXIT_KERNEL
10949 +
10950 RESTORE_REGS
10951 lss 12+4(%esp), %esp # back to espfix stack
10952 CFI_ADJUST_CFA_OFFSET -24
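The two ldt_ss/ptregs_clone hunks above drop the PER_CPU(gdt_page, %ebx) lookup and open-code the per-CPU GDT address as cpu_gdt_table + (cpu_number << PAGE_SHIFT); this only works because the cpu_gdt_table introduced further down in this patch (head_32.S) reserves one full page per CPU, and on !SMP builds the code simply loads $cpu_gdt_table, i.e. the CPU 0 case. A minimal C sketch of the same address computation, purely for illustration (no such helper exists in the patch; the standard 4 KiB PAGE_SHIFT is assumed):

#include <stdint.h>

#define PAGE_SHIFT 12	/* one page-sized GDT per CPU, as laid out by cpu_gdt_table */

/* address of CPU n's GDT inside a cpu_gdt_table-style array of per-CPU pages */
static inline uint64_t *per_cpu_gdt(uint64_t *cpu_gdt_table, unsigned int cpu)
{
	return (uint64_t *)((char *)cpu_gdt_table + ((unsigned long)cpu << PAGE_SHIFT));
}
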
10953 diff -urNp linux-2.6.34.1/arch/x86/kernel/entry_64.S linux-2.6.34.1/arch/x86/kernel/entry_64.S
10954 --- linux-2.6.34.1/arch/x86/kernel/entry_64.S 2010-07-05 14:24:10.000000000 -0400
10955 +++ linux-2.6.34.1/arch/x86/kernel/entry_64.S 2010-07-07 09:04:48.000000000 -0400
10956 @@ -53,6 +53,7 @@
10957 #include <asm/paravirt.h>
10958 #include <asm/ftrace.h>
10959 #include <asm/percpu.h>
10960 +#include <asm/pgtable.h>
10961
10962 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
10963 #include <linux/elf-em.h>
10964 @@ -174,6 +175,189 @@ ENTRY(native_usergs_sysret64)
10965 ENDPROC(native_usergs_sysret64)
10966 #endif /* CONFIG_PARAVIRT */
10967
10968 + .macro ljmpq sel, off
10969 +#if defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
10970 + .byte 0x48; ljmp *1234f(%rip)
10971 + .pushsection .rodata
10972 + .align 16
10973 + 1234: .quad \off; .word \sel
10974 + .popsection
10975 +#else
10976 + push $\sel
10977 + push $\off
10978 + lretq
10979 +#endif
10980 + .endm
10981 +
10982 +ENTRY(pax_enter_kernel)
10983 +
10984 +#ifdef CONFIG_PAX_KERNEXEC
10985 + push %rdi
10986 +
10987 +#ifdef CONFIG_PARAVIRT
10988 + PV_SAVE_REGS(CLBR_RDI)
10989 +#endif
10990 +
10991 + GET_CR0_INTO_RDI
10992 + bts $16,%rdi
10993 + jnc 1f
10994 + mov %cs,%edi
10995 + cmp $__KERNEL_CS,%edi
10996 + jz 3f
10997 + ljmpq __KERNEL_CS,3f
10998 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
10999 +2: SET_RDI_INTO_CR0
11000 +3:
11001 +
11002 +#ifdef CONFIG_PARAVIRT
11003 + PV_RESTORE_REGS(CLBR_RDI)
11004 +#endif
11005 +
11006 + pop %rdi
11007 +#endif
11008 +
11009 + retq
11010 +ENDPROC(pax_enter_kernel)
11011 +
11012 +ENTRY(pax_exit_kernel)
11013 +
11014 +#ifdef CONFIG_PAX_KERNEXEC
11015 + push %rdi
11016 +
11017 +#ifdef CONFIG_PARAVIRT
11018 + PV_SAVE_REGS(CLBR_RDI)
11019 +#endif
11020 +
11021 + mov %cs,%rdi
11022 + cmp $__KERNEXEC_KERNEL_CS,%edi
11023 + jnz 2f
11024 + GET_CR0_INTO_RDI
11025 + btr $16,%rdi
11026 + ljmpq __KERNEL_CS,1f
11027 +1: SET_RDI_INTO_CR0
11028 +2:
11029 +
11030 +#ifdef CONFIG_PARAVIRT
11031 + PV_RESTORE_REGS(CLBR_RDI);
11032 +#endif
11033 +
11034 + pop %rdi
11035 +#endif
11036 +
11037 + retq
11038 +ENDPROC(pax_exit_kernel)
11039 +
11040 +ENTRY(pax_enter_kernel_user)
11041 +
11042 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11043 + push %rdi
11044 + push %rbx
11045 +
11046 +#ifdef CONFIG_PARAVIRT
11047 + PV_SAVE_REGS(CLBR_RDI)
11048 +#endif
11049 +
11050 + GET_CR3_INTO_RDI
11051 + mov %rdi,%rbx
11052 + add $__START_KERNEL_map,%rbx
11053 + sub phys_base(%rip),%rbx
11054 +
11055 +#ifdef CONFIG_PARAVIRT
11056 + push %rdi
11057 + cmpl $0, pv_info+PARAVIRT_enabled
11058 + jz 1f
11059 + i = 0
11060 + .rept USER_PGD_PTRS
11061 + mov i*8(%rbx),%rsi
11062 + mov $0,%sil
11063 + lea i*8(%rbx),%rdi
11064 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11065 + i = i + 1
11066 + .endr
11067 + jmp 2f
11068 +1:
11069 +#endif
11070 +
11071 + i = 0
11072 + .rept USER_PGD_PTRS
11073 + movb $0,i*8(%rbx)
11074 + i = i + 1
11075 + .endr
11076 +
11077 +#ifdef CONFIG_PARAVIRT
11078 +2: pop %rdi
11079 +#endif
11080 + SET_RDI_INTO_CR3
11081 +
11082 +#ifdef CONFIG_PAX_KERNEXEC
11083 + GET_CR0_INTO_RDI
11084 + bts $16,%rdi
11085 + SET_RDI_INTO_CR0
11086 +#endif
11087 +
11088 +#ifdef CONFIG_PARAVIRT
11089 + PV_RESTORE_REGS(CLBR_RDI)
11090 +#endif
11091 +
11092 + pop %rbx
11093 + pop %rdi
11094 +#endif
11095 +
11096 + retq
11097 +ENDPROC(pax_enter_kernel_user)
11098 +
11099 +ENTRY(pax_exit_kernel_user)
11100 +
11101 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11102 + push %rdi
11103 +
11104 +#ifdef CONFIG_PARAVIRT
11105 + push %rbx
11106 + PV_SAVE_REGS(CLBR_RDI)
11107 +#endif
11108 +
11109 +#ifdef CONFIG_PAX_KERNEXEC
11110 + GET_CR0_INTO_RDI
11111 + btr $16,%rdi
11112 + SET_RDI_INTO_CR0
11113 +#endif
11114 +
11115 + GET_CR3_INTO_RDI
11116 + add $__START_KERNEL_map,%rdi
11117 + sub phys_base(%rip),%rdi
11118 +
11119 +#ifdef CONFIG_PARAVIRT
11120 + cmpl $0, pv_info+PARAVIRT_enabled
11121 + jz 1f
11122 + mov %rdi,%rbx
11123 + i = 0
11124 + .rept USER_PGD_PTRS
11125 + mov i*8(%rbx),%rsi
11126 + mov $0x67,%sil
11127 + lea i*8(%rbx),%rdi
11128 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11129 + i = i + 1
11130 + .endr
11131 + jmp 2f
11132 +1:
11133 +#endif
11134 +
11135 + i = 0
11136 + .rept USER_PGD_PTRS
11137 + movb $0x67,i*8(%rdi)
11138 + i = i + 1
11139 + .endr
11140 +
11141 +#ifdef CONFIG_PARAVIRT
11142 +2: PV_RESTORE_REGS(CLBR_RDI)
11143 + pop %rbx
11144 +#endif
11145 +
11146 + pop %rdi
11147 +#endif
11148 +
11149 + retq
11150 +ENDPROC(pax_exit_kernel_user)
11151
11152 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
11153 #ifdef CONFIG_TRACE_IRQFLAGS
11154 @@ -317,7 +501,7 @@ ENTRY(save_args)
11155 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
11156 movq_cfi rbp, 8 /* push %rbp */
11157 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
11158 - testl $3, CS(%rdi)
11159 + testb $3, CS(%rdi)
11160 je 1f
11161 SWAPGS
11162 /*
11163 @@ -409,7 +593,7 @@ ENTRY(ret_from_fork)
11164
11165 RESTORE_REST
11166
11167 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
11168 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
11169 je int_ret_from_sys_call
11170
11171 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
11172 @@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs)
11173
11174 movq %rsp,PER_CPU_VAR(old_rsp)
11175 movq PER_CPU_VAR(kernel_stack),%rsp
11176 +
11177 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11178 + call pax_enter_kernel_user
11179 +#endif
11180 +
11181 /*
11182 * No need to follow this irqs off/on section - it's straight
11183 * and short:
11184 @@ -502,6 +691,11 @@ sysret_check:
11185 andl %edi,%edx
11186 jnz sysret_careful
11187 CFI_REMEMBER_STATE
11188 +
11189 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11190 + call pax_exit_kernel_user
11191 +#endif
11192 +
11193 /*
11194 * sysretq will re-enable interrupts:
11195 */
11196 @@ -613,7 +807,7 @@ tracesys:
11197 GLOBAL(int_ret_from_sys_call)
11198 DISABLE_INTERRUPTS(CLBR_NONE)
11199 TRACE_IRQS_OFF
11200 - testl $3,CS-ARGOFFSET(%rsp)
11201 + testb $3,CS-ARGOFFSET(%rsp)
11202 je retint_restore_args
11203 movl $_TIF_ALLWORK_MASK,%edi
11204 /* edi: mask to check */
11205 @@ -800,6 +994,16 @@ END(interrupt)
11206 CFI_ADJUST_CFA_OFFSET 10*8
11207 call save_args
11208 PARTIAL_FRAME 0
11209 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11210 + testb $3, CS(%rdi)
11211 + jnz 1f
11212 + call pax_enter_kernel
11213 + jmp 2f
11214 +1: call pax_enter_kernel_user
11215 +2:
11216 +#else
11217 + call pax_enter_kernel
11218 +#endif
11219 call \func
11220 .endm
11221
11222 @@ -826,7 +1030,7 @@ ret_from_intr:
11223 CFI_ADJUST_CFA_OFFSET -8
11224 exit_intr:
11225 GET_THREAD_INFO(%rcx)
11226 - testl $3,CS-ARGOFFSET(%rsp)
11227 + testb $3,CS-ARGOFFSET(%rsp)
11228 je retint_kernel
11229
11230 /* Interrupt came from user space */
11231 @@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space
11232 * The iretq could re-enable interrupts:
11233 */
11234 DISABLE_INTERRUPTS(CLBR_ANY)
11235 +
11236 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11237 + call pax_exit_kernel_user
11238 +#endif
11239 +
11240 TRACE_IRQS_IRETQ
11241 SWAPGS
11242 jmp restore_args
11243
11244 retint_restore_args: /* return to kernel space */
11245 DISABLE_INTERRUPTS(CLBR_ANY)
11246 + call pax_exit_kernel
11247 /*
11248 * The iretq could re-enable interrupts:
11249 */
11250 @@ -1040,6 +1250,16 @@ ENTRY(\sym)
11251 CFI_ADJUST_CFA_OFFSET 15*8
11252 call error_entry
11253 DEFAULT_FRAME 0
11254 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11255 + testb $3, CS(%rsp)
11256 + jnz 1f
11257 + call pax_enter_kernel
11258 + jmp 2f
11259 +1: call pax_enter_kernel_user
11260 +2:
11261 +#else
11262 + call pax_enter_kernel
11263 +#endif
11264 movq %rsp,%rdi /* pt_regs pointer */
11265 xorl %esi,%esi /* no error code */
11266 call \do_sym
11267 @@ -1057,6 +1277,16 @@ ENTRY(\sym)
11268 subq $15*8, %rsp
11269 call save_paranoid
11270 TRACE_IRQS_OFF
11271 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11272 + testb $3, CS(%rsp)
11273 + jnz 1f
11274 + call pax_enter_kernel
11275 + jmp 2f
11276 +1: call pax_enter_kernel_user
11277 +2:
11278 +#else
11279 + call pax_enter_kernel
11280 +#endif
11281 movq %rsp,%rdi /* pt_regs pointer */
11282 xorl %esi,%esi /* no error code */
11283 call \do_sym
11284 @@ -1074,9 +1304,24 @@ ENTRY(\sym)
11285 subq $15*8, %rsp
11286 call save_paranoid
11287 TRACE_IRQS_OFF
11288 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11289 + testb $3, CS(%rsp)
11290 + jnz 1f
11291 + call pax_enter_kernel
11292 + jmp 2f
11293 +1: call pax_enter_kernel_user
11294 +2:
11295 +#else
11296 + call pax_enter_kernel
11297 +#endif
11298 movq %rsp,%rdi /* pt_regs pointer */
11299 xorl %esi,%esi /* no error code */
11300 - PER_CPU(init_tss, %r12)
11301 +#ifdef CONFIG_SMP
11302 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
11303 + lea init_tss(%r12), %r12
11304 +#else
11305 + lea init_tss(%rip), %r12
11306 +#endif
11307 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
11308 call \do_sym
11309 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
11310 @@ -1093,6 +1338,16 @@ ENTRY(\sym)
11311 CFI_ADJUST_CFA_OFFSET 15*8
11312 call error_entry
11313 DEFAULT_FRAME 0
11314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11315 + testb $3, CS(%rsp)
11316 + jnz 1f
11317 + call pax_enter_kernel
11318 + jmp 2f
11319 +1: call pax_enter_kernel_user
11320 +2:
11321 +#else
11322 + call pax_enter_kernel
11323 +#endif
11324 movq %rsp,%rdi /* pt_regs pointer */
11325 movq ORIG_RAX(%rsp),%rsi /* get error code */
11326 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
11327 @@ -1112,6 +1367,16 @@ ENTRY(\sym)
11328 call save_paranoid
11329 DEFAULT_FRAME 0
11330 TRACE_IRQS_OFF
11331 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11332 + testb $3, CS(%rsp)
11333 + jnz 1f
11334 + call pax_enter_kernel
11335 + jmp 2f
11336 +1: call pax_enter_kernel_user
11337 +2:
11338 +#else
11339 + call pax_enter_kernel
11340 +#endif
11341 movq %rsp,%rdi /* pt_regs pointer */
11342 movq ORIG_RAX(%rsp),%rsi /* get error code */
11343 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
11344 @@ -1370,14 +1635,27 @@ ENTRY(paranoid_exit)
11345 TRACE_IRQS_OFF
11346 testl %ebx,%ebx /* swapgs needed? */
11347 jnz paranoid_restore
11348 - testl $3,CS(%rsp)
11349 + testb $3,CS(%rsp)
11350 jnz paranoid_userspace
11351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11352 + call pax_exit_kernel
11353 + TRACE_IRQS_IRETQ 0
11354 + SWAPGS_UNSAFE_STACK
11355 + RESTORE_ALL 8
11356 + jmp irq_return
11357 +#endif
11358 paranoid_swapgs:
11359 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11360 + call pax_exit_kernel_user
11361 +#else
11362 + call pax_exit_kernel
11363 +#endif
11364 TRACE_IRQS_IRETQ 0
11365 SWAPGS_UNSAFE_STACK
11366 RESTORE_ALL 8
11367 jmp irq_return
11368 paranoid_restore:
11369 + call pax_exit_kernel
11370 TRACE_IRQS_IRETQ 0
11371 RESTORE_ALL 8
11372 jmp irq_return
11373 @@ -1435,7 +1713,7 @@ ENTRY(error_entry)
11374 movq_cfi r14, R14+8
11375 movq_cfi r15, R15+8
11376 xorl %ebx,%ebx
11377 - testl $3,CS+8(%rsp)
11378 + testb $3,CS+8(%rsp)
11379 je error_kernelspace
11380 error_swapgs:
11381 SWAPGS
11382 @@ -1499,6 +1777,16 @@ ENTRY(nmi)
11383 CFI_ADJUST_CFA_OFFSET 15*8
11384 call save_paranoid
11385 DEFAULT_FRAME 0
11386 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11387 + testb $3, CS(%rsp)
11388 + jnz 1f
11389 + call pax_enter_kernel
11390 + jmp 2f
11391 +1: call pax_enter_kernel_user
11392 +2:
11393 +#else
11394 + call pax_enter_kernel
11395 +#endif
11396 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
11397 movq %rsp,%rdi
11398 movq $-1,%rsi
11399 @@ -1509,11 +1797,12 @@ ENTRY(nmi)
11400 DISABLE_INTERRUPTS(CLBR_NONE)
11401 testl %ebx,%ebx /* swapgs needed? */
11402 jnz nmi_restore
11403 - testl $3,CS(%rsp)
11404 + testb $3,CS(%rsp)
11405 jnz nmi_userspace
11406 nmi_swapgs:
11407 SWAPGS_UNSAFE_STACK
11408 nmi_restore:
11409 + call pax_exit_kernel
11410 RESTORE_ALL 8
11411 jmp irq_return
11412 nmi_userspace:
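pax_enter_kernel_user/pax_exit_kernel_user above implement UDEREF on amd64: the PGD is located through CR3 (cr3 + __START_KERNEL_map - phys_base), the low byte of every userland PGD entry is rewritten — 0 on kernel entry, which clears the Present bit so a stray kernel dereference of a userland pointer faults, and 0x67 (P|RW|USER|A|D) on the way back out — and CR3 is reloaded on entry so stale user TLB entries are flushed. A minimal C sketch of just the byte-rewriting step, for illustration only (the PGD lookup, the paravirt set_pgd path and the KERNEXEC CR0 handling are left out):

/* pgd points at the current top-level page table; user_pgd_ptrs is the
 * number of entries mapping userland (USER_PGD_PTRS in the patch) */
static void uderef_enter(unsigned char *pgd, unsigned int user_pgd_ptrs)
{
	unsigned int i;

	for (i = 0; i < user_pgd_ptrs; i++)
		pgd[i * 8] = 0;		/* clear Present and the other low flag bits */
}

static void uderef_exit(unsigned char *pgd, unsigned int user_pgd_ptrs)
{
	unsigned int i;

	for (i = 0; i < user_pgd_ptrs; i++)
		pgd[i * 8] = 0x67;	/* restore Present|RW|USER|Accessed|Dirty */
}
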
11413 diff -urNp linux-2.6.34.1/arch/x86/kernel/ftrace.c linux-2.6.34.1/arch/x86/kernel/ftrace.c
11414 --- linux-2.6.34.1/arch/x86/kernel/ftrace.c 2010-07-05 14:24:10.000000000 -0400
11415 +++ linux-2.6.34.1/arch/x86/kernel/ftrace.c 2010-07-07 09:04:48.000000000 -0400
11416 @@ -174,7 +174,9 @@ void ftrace_nmi_enter(void)
11417
11418 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
11419 smp_rmb();
11420 + pax_open_kernel();
11421 ftrace_mod_code();
11422 + pax_close_kernel();
11423 atomic_inc(&nmi_update_count);
11424 }
11425 /* Must have previous changes seen before executions */
11426 @@ -260,7 +262,7 @@ do_ftrace_mod_code(unsigned long ip, voi
11427
11428
11429
11430 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
11431 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
11432
11433 static unsigned char *ftrace_nop_replace(void)
11434 {
11435 @@ -273,6 +275,8 @@ ftrace_modify_code(unsigned long ip, uns
11436 {
11437 unsigned char replaced[MCOUNT_INSN_SIZE];
11438
11439 + ip = ktla_ktva(ip);
11440 +
11441 /*
11442 * Note: Due to modules and __init, code can
11443 * disappear and change, we need to protect against faulting
11444 @@ -329,7 +333,7 @@ int ftrace_update_ftrace_func(ftrace_fun
11445 unsigned char old[MCOUNT_INSN_SIZE], *new;
11446 int ret;
11447
11448 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
11449 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
11450 new = ftrace_call_replace(ip, (unsigned long)func);
11451 ret = ftrace_modify_code(ip, old, new);
11452
11453 @@ -382,15 +386,15 @@ int __init ftrace_dyn_arch_init(void *da
11454 switch (faulted) {
11455 case 0:
11456 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
11457 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
11458 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
11459 break;
11460 case 1:
11461 pr_info("converting mcount calls to 66 66 66 66 90\n");
11462 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
11463 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
11464 break;
11465 case 2:
11466 pr_info("converting mcount calls to jmp . + 5\n");
11467 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
11468 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
11469 break;
11470 }
11471
11472 @@ -411,6 +415,8 @@ static int ftrace_mod_jmp(unsigned long
11473 {
11474 unsigned char code[MCOUNT_INSN_SIZE];
11475
11476 + ip = ktla_ktva(ip);
11477 +
11478 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
11479 return -EFAULT;
11480
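The ftrace.c hunks above, like the later kprobes.c and module.c ones, follow one recurring pattern whenever kernel text has to be modified under KERNEXEC: translate the text address with ktla_ktva() (roughly, the address through which those bytes are actually writable; an identity mapping when KERNEXEC does not relocate the text) and bracket the write with pax_open_kernel()/pax_close_kernel(), which temporarily allow writes to otherwise read-only kernel memory. A minimal sketch of that call pattern, assuming a tree with this patch applied (the helper name itself is made up for illustration):

/* illustrative helper only -- shows the translate/open/write/close pattern */
static void patch_kernel_text(void *text_addr, const void *insn, size_t len)
{
	void *writable = (void *)ktla_ktva((unsigned long)text_addr);

	pax_open_kernel();		/* make kernel text temporarily writable */
	memcpy(writable, insn, len);
	pax_close_kernel();		/* re-arm the protection */
}
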
11481 diff -urNp linux-2.6.34.1/arch/x86/kernel/head32.c linux-2.6.34.1/arch/x86/kernel/head32.c
11482 --- linux-2.6.34.1/arch/x86/kernel/head32.c 2010-07-05 14:24:10.000000000 -0400
11483 +++ linux-2.6.34.1/arch/x86/kernel/head32.c 2010-07-07 09:04:48.000000000 -0400
11484 @@ -17,6 +17,7 @@
11485 #include <asm/apic.h>
11486 #include <asm/io_apic.h>
11487 #include <asm/bios_ebda.h>
11488 +#include <asm/boot.h>
11489
11490 static void __init i386_default_early_setup(void)
11491 {
11492 @@ -40,7 +41,7 @@ void __init i386_start_kernel(void)
11493 "EX TRAMPOLINE");
11494 #endif
11495
11496 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
11497 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
11498
11499 #ifdef CONFIG_BLK_DEV_INITRD
11500 /* Reserve INITRD */
11501 diff -urNp linux-2.6.34.1/arch/x86/kernel/head_32.S linux-2.6.34.1/arch/x86/kernel/head_32.S
11502 --- linux-2.6.34.1/arch/x86/kernel/head_32.S 2010-07-05 14:24:10.000000000 -0400
11503 +++ linux-2.6.34.1/arch/x86/kernel/head_32.S 2010-07-07 09:04:48.000000000 -0400
11504 @@ -25,6 +25,12 @@
11505 /* Physical address */
11506 #define pa(X) ((X) - __PAGE_OFFSET)
11507
11508 +#ifdef CONFIG_PAX_KERNEXEC
11509 +#define ta(X) (X)
11510 +#else
11511 +#define ta(X) ((X) - __PAGE_OFFSET)
11512 +#endif
11513 +
11514 /*
11515 * References to members of the new_cpu_data structure.
11516 */
11517 @@ -54,11 +60,7 @@
11518 * and small than max_low_pfn, otherwise will waste some page table entries
11519 */
11520
11521 -#if PTRS_PER_PMD > 1
11522 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
11523 -#else
11524 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
11525 -#endif
11526 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
11527
11528 /* Enough space to fit pagetables for the low memory linear map */
11529 MAPPING_BEYOND_END = \
11530 @@ -75,6 +77,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
11531 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
11532
11533 /*
11534 + * Real beginning of normal "text" segment
11535 + */
11536 +ENTRY(stext)
11537 +ENTRY(_stext)
11538 +
11539 +/*
11540 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
11541 * %esi points to the real-mode code as a 32-bit pointer.
11542 * CS and DS must be 4 GB flat segments, but we don't depend on
11543 @@ -82,6 +90,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
11544 * can.
11545 */
11546 __HEAD
11547 +
11548 +#ifdef CONFIG_PAX_KERNEXEC
11549 + jmp startup_32
11550 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
11551 +.fill PAGE_SIZE-5,1,0xcc
11552 +#endif
11553 +
11554 ENTRY(startup_32)
11555 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
11556 us to not reload segments */
11557 @@ -99,6 +114,55 @@ ENTRY(startup_32)
11558 movl %eax,%gs
11559 2:
11560
11561 +#ifdef CONFIG_SMP
11562 + movl $pa(cpu_gdt_table),%edi
11563 + movl $__per_cpu_load,%eax
11564 + movw %ax,__KERNEL_PERCPU + 2(%edi)
11565 + rorl $16,%eax
11566 + movb %al,__KERNEL_PERCPU + 4(%edi)
11567 + movb %ah,__KERNEL_PERCPU + 7(%edi)
11568 + movl $__per_cpu_end - 1,%eax
11569 + subl $__per_cpu_start,%eax
11570 + movw %ax,__KERNEL_PERCPU + 0(%edi)
11571 +#endif
11572 +
11573 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11574 + movl $NR_CPUS,%ecx
11575 + movl $pa(cpu_gdt_table),%edi
11576 +1:
11577 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
11578 + addl $PAGE_SIZE_asm,%edi
11579 + loop 1b
11580 +#endif
11581 +
11582 +#ifdef CONFIG_PAX_KERNEXEC
11583 + movl $pa(boot_gdt),%edi
11584 + movl $__LOAD_PHYSICAL_ADDR,%eax
11585 + movw %ax,__BOOT_CS + 2(%edi)
11586 + rorl $16,%eax
11587 + movb %al,__BOOT_CS + 4(%edi)
11588 + movb %ah,__BOOT_CS + 7(%edi)
11589 + rorl $16,%eax
11590 +
11591 + ljmp $(__BOOT_CS),$1f
11592 +1:
11593 +
11594 + movl $NR_CPUS,%ecx
11595 + movl $pa(cpu_gdt_table),%edi
11596 + addl $__PAGE_OFFSET,%eax
11597 +1:
11598 + movw %ax,__KERNEL_CS + 2(%edi)
11599 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
11600 + rorl $16,%eax
11601 + movb %al,__KERNEL_CS + 4(%edi)
11602 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
11603 + movb %ah,__KERNEL_CS + 7(%edi)
11604 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
11605 + rorl $16,%eax
11606 + addl $PAGE_SIZE_asm,%edi
11607 + loop 1b
11608 +#endif
11609 +
11610 /*
11611 * Clear BSS first so that there are no surprises...
11612 */
11613 @@ -142,9 +206,7 @@ ENTRY(startup_32)
11614 cmpl $num_subarch_entries, %eax
11615 jae bad_subarch
11616
11617 - movl pa(subarch_entries)(,%eax,4), %eax
11618 - subl $__PAGE_OFFSET, %eax
11619 - jmp *%eax
11620 + jmp *pa(subarch_entries)(,%eax,4)
11621
11622 bad_subarch:
11623 WEAK(lguest_entry)
11624 @@ -156,10 +218,10 @@ WEAK(xen_entry)
11625 __INITDATA
11626
11627 subarch_entries:
11628 - .long default_entry /* normal x86/PC */
11629 - .long lguest_entry /* lguest hypervisor */
11630 - .long xen_entry /* Xen hypervisor */
11631 - .long default_entry /* Moorestown MID */
11632 + .long ta(default_entry) /* normal x86/PC */
11633 + .long ta(lguest_entry) /* lguest hypervisor */
11634 + .long ta(xen_entry) /* Xen hypervisor */
11635 + .long ta(default_entry) /* Moorestown MID */
11636 num_subarch_entries = (. - subarch_entries) / 4
11637 .previous
11638 #endif /* CONFIG_PARAVIRT */
11639 @@ -220,8 +282,11 @@ default_entry:
11640 movl %eax, pa(max_pfn_mapped)
11641
11642 /* Do early initialization of the fixmap area */
11643 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
11644 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
11645 +#ifdef CONFIG_COMPAT_VDSO
11646 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
11647 +#else
11648 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
11649 +#endif
11650 #else /* Not PAE */
11651
11652 page_pde_offset = (__PAGE_OFFSET >> 20);
11653 @@ -251,8 +316,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
11654 movl %eax, pa(max_pfn_mapped)
11655
11656 /* Do early initialization of the fixmap area */
11657 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
11658 - movl %eax,pa(swapper_pg_dir+0xffc)
11659 +#ifdef CONFIG_COMPAT_VDSO
11660 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
11661 +#else
11662 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
11663 +#endif
11664 #endif
11665 jmp 3f
11666 /*
11667 @@ -299,6 +367,7 @@ ENTRY(startup_32_smp)
11668 orl %edx,%eax
11669 movl %eax,%cr4
11670
11671 +#ifdef CONFIG_X86_PAE
11672 testb $X86_CR4_PAE, %al # check if PAE is enabled
11673 jz 6f
11674
11675 @@ -323,6 +392,9 @@ ENTRY(startup_32_smp)
11676 /* Make changes effective */
11677 wrmsr
11678
11679 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
11680 +#endif
11681 +
11682 6:
11683
11684 /*
11685 @@ -348,9 +420,7 @@ ENTRY(startup_32_smp)
11686
11687 #ifdef CONFIG_SMP
11688 cmpb $0, ready
11689 - jz 1f /* Initial CPU cleans BSS */
11690 - jmp checkCPUtype
11691 -1:
11692 + jnz checkCPUtype /* Initial CPU cleans BSS */
11693 #endif /* CONFIG_SMP */
11694
11695 /*
11696 @@ -428,7 +498,7 @@ is386: movl $2,%ecx # set MP
11697 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
11698 movl %eax,%ss # after changing gdt.
11699
11700 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
11701 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
11702 movl %eax,%ds
11703 movl %eax,%es
11704
11705 @@ -442,8 +512,11 @@ is386: movl $2,%ecx # set MP
11706 */
11707 cmpb $0,ready
11708 jne 1f
11709 - movl $gdt_page,%eax
11710 + movl $cpu_gdt_table,%eax
11711 movl $stack_canary,%ecx
11712 +#ifdef CONFIG_SMP
11713 + addl $__per_cpu_load,%ecx
11714 +#endif
11715 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
11716 shrl $16, %ecx
11717 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
11718 @@ -461,10 +534,6 @@ is386: movl $2,%ecx # set MP
11719 #ifdef CONFIG_SMP
11720 movb ready, %cl
11721 movb $1, ready
11722 - cmpb $0,%cl # the first CPU calls start_kernel
11723 - je 1f
11724 - movl (stack_start), %esp
11725 -1:
11726 #endif /* CONFIG_SMP */
11727 jmp *(initial_code)
11728
11729 @@ -550,22 +619,22 @@ early_page_fault:
11730 jmp early_fault
11731
11732 early_fault:
11733 - cld
11734 #ifdef CONFIG_PRINTK
11735 + cmpl $1,%ss:early_recursion_flag
11736 + je hlt_loop
11737 + incl %ss:early_recursion_flag
11738 + cld
11739 pusha
11740 movl $(__KERNEL_DS),%eax
11741 movl %eax,%ds
11742 movl %eax,%es
11743 - cmpl $2,early_recursion_flag
11744 - je hlt_loop
11745 - incl early_recursion_flag
11746 movl %cr2,%eax
11747 pushl %eax
11748 pushl %edx /* trapno */
11749 pushl $fault_msg
11750 call printk
11751 +; call dump_stack
11752 #endif
11753 - call dump_stack
11754 hlt_loop:
11755 hlt
11756 jmp hlt_loop
11757 @@ -573,8 +642,11 @@ hlt_loop:
11758 /* This is the default interrupt "handler" :-) */
11759 ALIGN
11760 ignore_int:
11761 - cld
11762 #ifdef CONFIG_PRINTK
11763 + cmpl $2,%ss:early_recursion_flag
11764 + je hlt_loop
11765 + incl %ss:early_recursion_flag
11766 + cld
11767 pushl %eax
11768 pushl %ecx
11769 pushl %edx
11770 @@ -583,9 +655,6 @@ ignore_int:
11771 movl $(__KERNEL_DS),%eax
11772 movl %eax,%ds
11773 movl %eax,%es
11774 - cmpl $2,early_recursion_flag
11775 - je hlt_loop
11776 - incl early_recursion_flag
11777 pushl 16(%esp)
11778 pushl 24(%esp)
11779 pushl 32(%esp)
11780 @@ -612,27 +681,38 @@ ENTRY(initial_code)
11781 /*
11782 * BSS section
11783 */
11784 -__PAGE_ALIGNED_BSS
11785 - .align PAGE_SIZE_asm
11786 #ifdef CONFIG_X86_PAE
11787 +.section .swapper_pg_pmd,"a",@progbits
11788 swapper_pg_pmd:
11789 .fill 1024*KPMDS,4,0
11790 #else
11791 +.section .swapper_pg_dir,"a",@progbits
11792 ENTRY(swapper_pg_dir)
11793 .fill 1024,4,0
11794 #endif
11795 +
11796 swapper_pg_fixmap:
11797 .fill 1024,4,0
11798 +
11799 +.section .empty_zero_page,"a",@progbits
11800 ENTRY(empty_zero_page)
11801 .fill 4096,1,0
11802
11803 /*
11804 + * The IDT has to be page-aligned to simplify the Pentium
11805 + * F0 0F bug workaround.. We have a special link segment
11806 + * for this.
11807 + */
11808 +.section .idt,"a",@progbits
11809 +ENTRY(idt_table)
11810 + .fill 256,8,0
11811 +
11812 +/*
11813 * This starts the data section.
11814 */
11815 #ifdef CONFIG_X86_PAE
11816 -__PAGE_ALIGNED_DATA
11817 - /* Page-aligned for the benefit of paravirt? */
11818 - .align PAGE_SIZE_asm
11819 +.section .swapper_pg_dir,"a",@progbits
11820 +
11821 ENTRY(swapper_pg_dir)
11822 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
11823 # if KPMDS == 3
11824 @@ -651,15 +731,24 @@ ENTRY(swapper_pg_dir)
11825 # error "Kernel PMDs should be 1, 2 or 3"
11826 # endif
11827 .align PAGE_SIZE_asm /* needs to be page-sized too */
11828 +
11829 +#ifdef CONFIG_PAX_PER_CPU_PGD
11830 +ENTRY(cpu_pgd)
11831 + .rept NR_CPUS
11832 + .fill 4,8,0
11833 + .endr
11834 +#endif
11835 +
11836 #endif
11837
11838 .data
11839 ENTRY(stack_start)
11840 - .long init_thread_union+THREAD_SIZE
11841 + .long init_thread_union+THREAD_SIZE-8
11842 .long __BOOT_DS
11843
11844 ready: .byte 0
11845
11846 +.section .rodata,"a",@progbits
11847 early_recursion_flag:
11848 .long 0
11849
11850 @@ -695,7 +784,7 @@ fault_msg:
11851 .word 0 # 32 bit align gdt_desc.address
11852 boot_gdt_descr:
11853 .word __BOOT_DS+7
11854 - .long boot_gdt - __PAGE_OFFSET
11855 + .long pa(boot_gdt)
11856
11857 .word 0 # 32-bit align idt_desc.address
11858 idt_descr:
11859 @@ -706,7 +795,7 @@ idt_descr:
11860 .word 0 # 32 bit align gdt_desc.address
11861 ENTRY(early_gdt_descr)
11862 .word GDT_ENTRIES*8-1
11863 - .long gdt_page /* Overwritten for secondary CPUs */
11864 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
11865
11866 /*
11867 * The boot_gdt must mirror the equivalent in setup.S and is
11868 @@ -715,5 +804,65 @@ ENTRY(early_gdt_descr)
11869 .align L1_CACHE_BYTES
11870 ENTRY(boot_gdt)
11871 .fill GDT_ENTRY_BOOT_CS,8,0
11872 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
11873 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
11874 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
11875 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
11876 +
11877 + .align PAGE_SIZE_asm
11878 +ENTRY(cpu_gdt_table)
11879 + .rept NR_CPUS
11880 + .quad 0x0000000000000000 /* NULL descriptor */
11881 + .quad 0x0000000000000000 /* 0x0b reserved */
11882 + .quad 0x0000000000000000 /* 0x13 reserved */
11883 + .quad 0x0000000000000000 /* 0x1b reserved */
11884 +
11885 +#ifdef CONFIG_PAX_KERNEXEC
11886 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
11887 +#else
11888 + .quad 0x0000000000000000 /* 0x20 unused */
11889 +#endif
11890 +
11891 + .quad 0x0000000000000000 /* 0x28 unused */
11892 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
11893 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
11894 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
11895 + .quad 0x0000000000000000 /* 0x4b reserved */
11896 + .quad 0x0000000000000000 /* 0x53 reserved */
11897 + .quad 0x0000000000000000 /* 0x5b reserved */
11898 +
11899 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
11900 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
11901 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
11902 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
11903 +
11904 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
11905 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
11906 +
11907 + /*
11908 + * Segments used for calling PnP BIOS have byte granularity.
11909 + * The code segments and data segments have fixed 64k limits,
11910 + * the transfer segment sizes are set at run time.
11911 + */
11912 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
11913 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
11914 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
11915 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
11916 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
11917 +
11918 + /*
11919 + * The APM segments have byte granularity and their bases
11920 + * are set at run time. All have 64k limits.
11921 + */
11922 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
11923 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
11924 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
11925 +
11926 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
11927 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
11928 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
11929 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
11930 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
11931 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
11932 +
11933 + /* Be sure this is zeroed to avoid false validations in Xen */
11934 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
11935 + .endr
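The cpu_gdt_table added above is built from raw descriptor quadwords; note that the kernel code/data entries (and the patched boot_gdt) use access bytes 0x9b/0x93 — the accessed bit pre-set — where the stock descriptors used 0x9a/0x92. A small standalone C decoder for such constants, handy for checking entries like 0x00cf9b000000ffff (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* decode one legacy 8-byte GDT descriptor, e.g. the constants above */
static void decode_gdt_desc(uint64_t d)
{
	uint32_t base   = (uint32_t)(((d >> 16) & 0xffffffULL) | (((d >> 56) & 0xffULL) << 24));
	uint32_t limit  = (uint32_t)((d & 0xffffULL) | (((d >> 48) & 0xfULL) << 16));
	unsigned access = (unsigned)((d >> 40) & 0xff);
	unsigned flags  = (unsigned)((d >> 52) & 0xf);

	if (flags & 0x8)	/* G=1: limit counts 4 KiB pages */
		limit = (limit << 12) | 0xfff;

	printf("base=%#010x limit=%#010x access=%#04x dpl=%u %s%s\n",
	       base, limit, access, (access >> 5) & 3,
	       (access & 0x08) ? "code" : "data",
	       (access & 0x01) ? " (accessed)" : "");
}

int main(void)
{
	decode_gdt_desc(0x00cf9b000000ffffULL);	/* 0x60 kernel 4GB code */
	decode_gdt_desc(0x00cff3000000ffffULL);	/* 0x7b user 4GB data  */
	return 0;
}

For 0x00cf9b000000ffff this prints base 0, a 4 GiB limit and access 0x9b (DPL 0, code, accessed), matching the comment in the hunk.
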
11936 diff -urNp linux-2.6.34.1/arch/x86/kernel/head_64.S linux-2.6.34.1/arch/x86/kernel/head_64.S
11937 --- linux-2.6.34.1/arch/x86/kernel/head_64.S 2010-07-05 14:24:10.000000000 -0400
11938 +++ linux-2.6.34.1/arch/x86/kernel/head_64.S 2010-07-07 09:04:48.000000000 -0400
11939 @@ -19,6 +19,7 @@
11940 #include <asm/cache.h>
11941 #include <asm/processor-flags.h>
11942 #include <asm/percpu.h>
11943 +#include <asm/cpufeature.h>
11944
11945 #ifdef CONFIG_PARAVIRT
11946 #include <asm/asm-offsets.h>
11947 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
11948 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
11949 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
11950 L3_START_KERNEL = pud_index(__START_KERNEL_map)
11951 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
11952 +L3_VMALLOC_START = pud_index(VMALLOC_START)
11953 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
11954 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
11955
11956 .text
11957 __HEAD
11958 @@ -85,35 +90,22 @@ startup_64:
11959 */
11960 addq %rbp, init_level4_pgt + 0(%rip)
11961 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
11962 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
11963 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
11964 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
11965
11966 addq %rbp, level3_ident_pgt + 0(%rip)
11967 +#ifndef CONFIG_XEN
11968 + addq %rbp, level3_ident_pgt + 8(%rip)
11969 +#endif
11970
11971 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
11972 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
11973 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
11974
11975 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
11976 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
11977 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
11978
11979 - /* Add an Identity mapping if I am above 1G */
11980 - leaq _text(%rip), %rdi
11981 - andq $PMD_PAGE_MASK, %rdi
11982 -
11983 - movq %rdi, %rax
11984 - shrq $PUD_SHIFT, %rax
11985 - andq $(PTRS_PER_PUD - 1), %rax
11986 - jz ident_complete
11987 -
11988 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
11989 - leaq level3_ident_pgt(%rip), %rbx
11990 - movq %rdx, 0(%rbx, %rax, 8)
11991 -
11992 - movq %rdi, %rax
11993 - shrq $PMD_SHIFT, %rax
11994 - andq $(PTRS_PER_PMD - 1), %rax
11995 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
11996 - leaq level2_spare_pgt(%rip), %rbx
11997 - movq %rdx, 0(%rbx, %rax, 8)
11998 -ident_complete:
11999 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12000 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12001
12002 /*
12003 * Fixup the kernel text+data virtual addresses. Note that
12004 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
12005 * after the boot processor executes this code.
12006 */
12007
12008 - /* Enable PAE mode and PGE */
12009 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12010 + /* Enable PAE mode and PSE/PGE */
12011 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12012 movq %rax, %cr4
12013
12014 /* Setup early boot stage 4 level pagetables. */
12015 @@ -184,9 +176,14 @@ ENTRY(secondary_startup_64)
12016 movl $MSR_EFER, %ecx
12017 rdmsr
12018 btsl $_EFER_SCE, %eax /* Enable System Call */
12019 - btl $20,%edi /* No Execute supported? */
12020 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12021 jnc 1f
12022 btsl $_EFER_NX, %eax
12023 + leaq init_level4_pgt(%rip), %rdi
12024 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12025 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12026 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12027 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12028 1: wrmsr /* Make changes effective */
12029
12030 /* Setup cr0 */
12031 @@ -271,7 +268,7 @@ ENTRY(secondary_startup_64)
12032 bad_address:
12033 jmp bad_address
12034
12035 - .section ".init.text","ax"
12036 + __INIT
12037 #ifdef CONFIG_EARLY_PRINTK
12038 .globl early_idt_handlers
12039 early_idt_handlers:
12040 @@ -316,18 +313,23 @@ ENTRY(early_idt_handler)
12041 #endif /* EARLY_PRINTK */
12042 1: hlt
12043 jmp 1b
12044 + .previous
12045
12046 #ifdef CONFIG_EARLY_PRINTK
12047 + __INITDATA
12048 early_recursion_flag:
12049 .long 0
12050 + .previous
12051
12052 + .section .rodata,"a",@progbits
12053 early_idt_msg:
12054 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12055 early_idt_ripmsg:
12056 .asciz "RIP %s\n"
12057 -#endif /* CONFIG_EARLY_PRINTK */
12058 .previous
12059 +#endif /* CONFIG_EARLY_PRINTK */
12060
12061 + .section .rodata,"a",@progbits
12062 #define NEXT_PAGE(name) \
12063 .balign PAGE_SIZE; \
12064 ENTRY(name)
12065 @@ -351,13 +353,36 @@ NEXT_PAGE(init_level4_pgt)
12066 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12067 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12068 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12069 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12070 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12071 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12072 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12073 .org init_level4_pgt + L4_START_KERNEL*8, 0
12074 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12075 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12076
12077 +#ifdef CONFIG_PAX_PER_CPU_PGD
12078 +NEXT_PAGE(cpu_pgd)
12079 + .rept NR_CPUS
12080 + .fill 512,8,0
12081 + .endr
12082 +#endif
12083 +
12084 NEXT_PAGE(level3_ident_pgt)
12085 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12086 +#ifdef CONFIG_XEN
12087 .fill 511,8,0
12088 +#else
12089 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12090 + .fill 510,8,0
12091 +#endif
12092 +
12093 +NEXT_PAGE(level3_vmalloc_pgt)
12094 + .fill 512,8,0
12095 +
12096 +NEXT_PAGE(level3_vmemmap_pgt)
12097 + .fill L3_VMEMMAP_START,8,0
12098 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12099
12100 NEXT_PAGE(level3_kernel_pgt)
12101 .fill L3_START_KERNEL,8,0
12102 @@ -365,20 +390,23 @@ NEXT_PAGE(level3_kernel_pgt)
12103 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
12104 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
12105
12106 +NEXT_PAGE(level2_vmemmap_pgt)
12107 + .fill 512,8,0
12108 +
12109 NEXT_PAGE(level2_fixmap_pgt)
12110 - .fill 506,8,0
12111 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
12112 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
12113 - .fill 5,8,0
12114 + .fill 507,8,0
12115 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
12116 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
12117 + .fill 4,8,0
12118
12119 -NEXT_PAGE(level1_fixmap_pgt)
12120 +NEXT_PAGE(level1_vsyscall_pgt)
12121 .fill 512,8,0
12122
12123 -NEXT_PAGE(level2_ident_pgt)
12124 - /* Since I easily can, map the first 1G.
12125 + /* Since I easily can, map the first 2G.
12126 * Don't set NX because code runs from these pages.
12127 */
12128 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
12129 +NEXT_PAGE(level2_ident_pgt)
12130 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
12131
12132 NEXT_PAGE(level2_kernel_pgt)
12133 /*
12134 @@ -391,33 +419,55 @@ NEXT_PAGE(level2_kernel_pgt)
12135 * If you want to increase this then increase MODULES_VADDR
12136 * too.)
12137 */
12138 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
12139 - KERNEL_IMAGE_SIZE/PMD_SIZE)
12140 -
12141 -NEXT_PAGE(level2_spare_pgt)
12142 - .fill 512, 8, 0
12143 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
12144
12145 #undef PMDS
12146 #undef NEXT_PAGE
12147
12148 - .data
12149 + .align PAGE_SIZE
12150 +ENTRY(cpu_gdt_table)
12151 + .rept NR_CPUS
12152 + .quad 0x0000000000000000 /* NULL descriptor */
12153 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
12154 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
12155 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
12156 + .quad 0x00cffb000000ffff /* __USER32_CS */
12157 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
12158 + .quad 0x00affb000000ffff /* __USER_CS */
12159 +
12160 +#ifdef CONFIG_PAX_KERNEXEC
12161 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
12162 +#else
12163 + .quad 0x0 /* unused */
12164 +#endif
12165 +
12166 + .quad 0,0 /* TSS */
12167 + .quad 0,0 /* LDT */
12168 + .quad 0,0,0 /* three TLS descriptors */
12169 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
12170 + /* asm/segment.h:GDT_ENTRIES must match this */
12171 +
12172 + /* zero the remaining page */
12173 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
12174 + .endr
12175 +
12176 .align 16
12177 .globl early_gdt_descr
12178 early_gdt_descr:
12179 .word GDT_ENTRIES*8-1
12180 early_gdt_descr_base:
12181 - .quad INIT_PER_CPU_VAR(gdt_page)
12182 + .quad cpu_gdt_table
12183
12184 ENTRY(phys_base)
12185 /* This must match the first entry in level2_kernel_pgt */
12186 .quad 0x0000000000000000
12187
12188 #include "../../x86/xen/xen-head.S"
12189 -
12190 - .section .bss, "aw", @nobits
12191 +
12192 + .section .rodata,"a",@progbits
12193 .align L1_CACHE_BYTES
12194 ENTRY(idt_table)
12195 - .skip IDT_ENTRIES * 16
12196 + .fill 512,8,0
12197
12198 __PAGE_ALIGNED_BSS
12199 .align PAGE_SIZE
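Two details in the head_64.S hunk are easy to miss: the magic "btl $20,%edi" NX probe becomes "btl $(X86_FEATURE_NX & 31),%edi", the same bit spelled symbolically (hence the new <asm/cpufeature.h> include at the top of the file), and when NX is present the non-code top-level entries of init_level4_pgt plus __supported_pte_mask immediately get their NX bit set. A one-line compile-time check of the bit-number claim, for illustration:

/* value as defined in <asm/cpufeature.h>: word 1, bit 20 of CPUID 0x80000001 EDX */
#define X86_FEATURE_NX (1*32 + 20)

_Static_assert((X86_FEATURE_NX & 31) == 20,
	       "btl $(X86_FEATURE_NX & 31) tests the same bit as the old btl $20");
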
12200 diff -urNp linux-2.6.34.1/arch/x86/kernel/i386_ksyms_32.c linux-2.6.34.1/arch/x86/kernel/i386_ksyms_32.c
12201 --- linux-2.6.34.1/arch/x86/kernel/i386_ksyms_32.c 2010-07-05 14:24:10.000000000 -0400
12202 +++ linux-2.6.34.1/arch/x86/kernel/i386_ksyms_32.c 2010-07-07 09:04:48.000000000 -0400
12203 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
12204 EXPORT_SYMBOL(cmpxchg8b_emu);
12205 #endif
12206
12207 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
12208 +
12209 /* Networking helper routines. */
12210 EXPORT_SYMBOL(csum_partial_copy_generic);
12211 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
12212 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
12213
12214 EXPORT_SYMBOL(__get_user_1);
12215 EXPORT_SYMBOL(__get_user_2);
12216 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
12217
12218 EXPORT_SYMBOL(csum_partial);
12219 EXPORT_SYMBOL(empty_zero_page);
12220 +
12221 +#ifdef CONFIG_PAX_KERNEXEC
12222 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
12223 +#endif
12224 diff -urNp linux-2.6.34.1/arch/x86/kernel/init_task.c linux-2.6.34.1/arch/x86/kernel/init_task.c
12225 --- linux-2.6.34.1/arch/x86/kernel/init_task.c 2010-07-05 14:24:10.000000000 -0400
12226 +++ linux-2.6.34.1/arch/x86/kernel/init_task.c 2010-07-07 09:04:48.000000000 -0400
12227 @@ -38,5 +38,5 @@ EXPORT_SYMBOL(init_task);
12228 * section. Since TSS's are completely CPU-local, we want them
12229 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
12230 */
12231 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
12232 -
12233 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
12234 +EXPORT_SYMBOL(init_tss);
12235 diff -urNp linux-2.6.34.1/arch/x86/kernel/ioport.c linux-2.6.34.1/arch/x86/kernel/ioport.c
12236 --- linux-2.6.34.1/arch/x86/kernel/ioport.c 2010-07-05 14:24:10.000000000 -0400
12237 +++ linux-2.6.34.1/arch/x86/kernel/ioport.c 2010-07-07 09:04:48.000000000 -0400
12238 @@ -6,6 +6,7 @@
12239 #include <linux/sched.h>
12240 #include <linux/kernel.h>
12241 #include <linux/capability.h>
12242 +#include <linux/security.h>
12243 #include <linux/errno.h>
12244 #include <linux/types.h>
12245 #include <linux/ioport.h>
12246 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
12247
12248 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
12249 return -EINVAL;
12250 +#ifdef CONFIG_GRKERNSEC_IO
12251 + if (turn_on && grsec_disable_privio) {
12252 + gr_handle_ioperm();
12253 + return -EPERM;
12254 + }
12255 +#endif
12256 if (turn_on && !capable(CAP_SYS_RAWIO))
12257 return -EPERM;
12258
12259 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
12260 * because the ->io_bitmap_max value must match the bitmap
12261 * contents:
12262 */
12263 - tss = &per_cpu(init_tss, get_cpu());
12264 + tss = init_tss + get_cpu();
12265
12266 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
12267
12268 @@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct
12269 return -EINVAL;
12270 /* Trying to gain more privileges? */
12271 if (level > old) {
12272 +#ifdef CONFIG_GRKERNSEC_IO
12273 + if (grsec_disable_privio) {
12274 + gr_handle_iopl();
12275 + return -EPERM;
12276 + }
12277 +#endif
12278 if (!capable(CAP_SYS_RAWIO))
12279 return -EPERM;
12280 }
12281 diff -urNp linux-2.6.34.1/arch/x86/kernel/irq_32.c linux-2.6.34.1/arch/x86/kernel/irq_32.c
12282 --- linux-2.6.34.1/arch/x86/kernel/irq_32.c 2010-07-05 14:24:10.000000000 -0400
12283 +++ linux-2.6.34.1/arch/x86/kernel/irq_32.c 2010-07-07 09:04:48.000000000 -0400
12284 @@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struc
12285 return 0;
12286
12287 /* build the stack frame on the IRQ stack */
12288 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
12289 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
12290 irqctx->tinfo.task = curctx->tinfo.task;
12291 irqctx->tinfo.previous_esp = current_stack_pointer;
12292
12293 @@ -175,7 +175,7 @@ asmlinkage void do_softirq(void)
12294 irqctx->tinfo.previous_esp = current_stack_pointer;
12295
12296 /* build the stack frame on the softirq stack */
12297 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
12298 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
12299
12300 call_on_stack(__do_softirq, isp);
12301 /*
12302 diff -urNp linux-2.6.34.1/arch/x86/kernel/kgdb.c linux-2.6.34.1/arch/x86/kernel/kgdb.c
12303 --- linux-2.6.34.1/arch/x86/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
12304 +++ linux-2.6.34.1/arch/x86/kernel/kgdb.c 2010-07-07 09:04:48.000000000 -0400
12305 @@ -89,7 +89,7 @@ void pt_regs_to_gdb_regs(unsigned long *
12306 gdb_regs[GDB_CS] = regs->cs;
12307 gdb_regs[GDB_FS] = 0xFFFF;
12308 gdb_regs[GDB_GS] = 0xFFFF;
12309 - if (user_mode_vm(regs)) {
12310 + if (user_mode(regs)) {
12311 gdb_regs[GDB_SS] = regs->ss;
12312 gdb_regs[GDB_SP] = regs->sp;
12313 } else {
12314 @@ -690,7 +690,7 @@ unsigned long kgdb_arch_pc(int exception
12315 return instruction_pointer(regs);
12316 }
12317
12318 -struct kgdb_arch arch_kgdb_ops = {
12319 +const struct kgdb_arch arch_kgdb_ops = {
12320 /* Breakpoint instruction: */
12321 .gdb_bpt_instr = { 0xcc },
12322 .flags = KGDB_HW_BREAKPOINT,
12323 diff -urNp linux-2.6.34.1/arch/x86/kernel/kprobes.c linux-2.6.34.1/arch/x86/kernel/kprobes.c
12324 --- linux-2.6.34.1/arch/x86/kernel/kprobes.c 2010-07-05 14:24:10.000000000 -0400
12325 +++ linux-2.6.34.1/arch/x86/kernel/kprobes.c 2010-07-07 09:04:48.000000000 -0400
12326 @@ -114,9 +114,12 @@ static void __kprobes __synthesize_relat
12327 s32 raddr;
12328 } __attribute__((packed)) *insn;
12329
12330 - insn = (struct __arch_relative_insn *)from;
12331 + insn = (struct __arch_relative_insn *)(ktla_ktva(from));
12332 +
12333 + pax_open_kernel();
12334 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
12335 insn->op = op;
12336 + pax_close_kernel();
12337 }
12338
12339 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
12340 @@ -315,7 +318,9 @@ static int __kprobes __copy_instruction(
12341 }
12342 }
12343 insn_get_length(&insn);
12344 + pax_open_kernel();
12345 memcpy(dest, insn.kaddr, insn.length);
12346 + pax_close_kernel();
12347
12348 #ifdef CONFIG_X86_64
12349 if (insn_rip_relative(&insn)) {
12350 @@ -339,7 +344,9 @@ static int __kprobes __copy_instruction(
12351 (u8 *) dest;
12352 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
12353 disp = (u8 *) dest + insn_offset_displacement(&insn);
12354 + pax_open_kernel();
12355 *(s32 *) disp = (s32) newdisp;
12356 + pax_close_kernel();
12357 }
12358 #endif
12359 return insn.length;
12360 @@ -353,12 +360,12 @@ static void __kprobes arch_copy_kprobe(s
12361 */
12362 __copy_instruction(p->ainsn.insn, p->addr, 0);
12363
12364 - if (can_boost(p->addr))
12365 + if (can_boost(ktla_ktva(p->addr)))
12366 p->ainsn.boostable = 0;
12367 else
12368 p->ainsn.boostable = -1;
12369
12370 - p->opcode = *p->addr;
12371 + p->opcode = *(ktla_ktva(p->addr));
12372 }
12373
12374 int __kprobes arch_prepare_kprobe(struct kprobe *p)
12375 @@ -467,7 +474,7 @@ static void __kprobes setup_singlestep(s
12376 * nor set current_kprobe, because it doesn't use single
12377 * stepping.
12378 */
12379 - regs->ip = (unsigned long)p->ainsn.insn;
12380 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
12381 preempt_enable_no_resched();
12382 return;
12383 }
12384 @@ -486,7 +493,7 @@ static void __kprobes setup_singlestep(s
12385 if (p->opcode == BREAKPOINT_INSTRUCTION)
12386 regs->ip = (unsigned long)p->addr;
12387 else
12388 - regs->ip = (unsigned long)p->ainsn.insn;
12389 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
12390 }
12391
12392 /*
12393 @@ -565,7 +572,7 @@ static int __kprobes kprobe_handler(stru
12394 setup_singlestep(p, regs, kcb, 0);
12395 return 1;
12396 }
12397 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
12398 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
12399 /*
12400 * The breakpoint instruction was removed right
12401 * after we hit it. Another cpu has removed
12402 @@ -791,7 +798,7 @@ static void __kprobes resume_execution(s
12403 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
12404 {
12405 unsigned long *tos = stack_addr(regs);
12406 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
12407 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
12408 unsigned long orig_ip = (unsigned long)p->addr;
12409 kprobe_opcode_t *insn = p->ainsn.insn;
12410
12411 @@ -974,7 +981,7 @@ int __kprobes kprobe_exceptions_notify(s
12412 struct die_args *args = data;
12413 int ret = NOTIFY_DONE;
12414
12415 - if (args->regs && user_mode_vm(args->regs))
12416 + if (args->regs && user_mode(args->regs))
12417 return ret;
12418
12419 switch (val) {
12420 diff -urNp linux-2.6.34.1/arch/x86/kernel/ldt.c linux-2.6.34.1/arch/x86/kernel/ldt.c
12421 --- linux-2.6.34.1/arch/x86/kernel/ldt.c 2010-07-05 14:24:10.000000000 -0400
12422 +++ linux-2.6.34.1/arch/x86/kernel/ldt.c 2010-07-07 09:04:48.000000000 -0400
12423 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
12424 if (reload) {
12425 #ifdef CONFIG_SMP
12426 preempt_disable();
12427 - load_LDT(pc);
12428 + load_LDT_nolock(pc);
12429 if (!cpumask_equal(mm_cpumask(current->mm),
12430 cpumask_of(smp_processor_id())))
12431 smp_call_function(flush_ldt, current->mm, 1);
12432 preempt_enable();
12433 #else
12434 - load_LDT(pc);
12435 + load_LDT_nolock(pc);
12436 #endif
12437 }
12438 if (oldsize) {
12439 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
12440 return err;
12441
12442 for (i = 0; i < old->size; i++)
12443 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
12444 + write_ldt_entry(new->ldt, i, old->ldt + i);
12445 return 0;
12446 }
12447
12448 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
12449 retval = copy_ldt(&mm->context, &old_mm->context);
12450 mutex_unlock(&old_mm->context.lock);
12451 }
12452 +
12453 + if (tsk == current) {
12454 + mm->context.vdso = ~0UL;
12455 +
12456 +#ifdef CONFIG_X86_32
12457 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
12458 + mm->context.user_cs_base = 0UL;
12459 + mm->context.user_cs_limit = ~0UL;
12460 +
12461 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12462 + cpus_clear(mm->context.cpu_user_cs_mask);
12463 +#endif
12464 +
12465 +#endif
12466 +#endif
12467 +
12468 + }
12469 +
12470 return retval;
12471 }
12472
12473 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
12474 }
12475 }
12476
12477 +#ifdef CONFIG_PAX_SEGMEXEC
12478 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
12479 + error = -EINVAL;
12480 + goto out_unlock;
12481 + }
12482 +#endif
12483 +
12484 fill_ldt(&ldt, &ldt_info);
12485 if (oldmode)
12486 ldt.avl = 0;
12487 diff -urNp linux-2.6.34.1/arch/x86/kernel/machine_kexec_32.c linux-2.6.34.1/arch/x86/kernel/machine_kexec_32.c
12488 --- linux-2.6.34.1/arch/x86/kernel/machine_kexec_32.c 2010-07-05 14:24:10.000000000 -0400
12489 +++ linux-2.6.34.1/arch/x86/kernel/machine_kexec_32.c 2010-07-07 09:04:48.000000000 -0400
12490 @@ -27,7 +27,7 @@
12491 #include <asm/cacheflush.h>
12492 #include <asm/debugreg.h>
12493
12494 -static void set_idt(void *newidt, __u16 limit)
12495 +static void set_idt(struct desc_struct *newidt, __u16 limit)
12496 {
12497 struct desc_ptr curidt;
12498
12499 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
12500 }
12501
12502
12503 -static void set_gdt(void *newgdt, __u16 limit)
12504 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
12505 {
12506 struct desc_ptr curgdt;
12507
12508 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
12509 }
12510
12511 control_page = page_address(image->control_code_page);
12512 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
12513 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
12514
12515 relocate_kernel_ptr = control_page;
12516 page_list[PA_CONTROL_PAGE] = __pa(control_page);
12517 diff -urNp linux-2.6.34.1/arch/x86/kernel/microcode_amd.c linux-2.6.34.1/arch/x86/kernel/microcode_amd.c
12518 --- linux-2.6.34.1/arch/x86/kernel/microcode_amd.c 2010-07-05 14:24:10.000000000 -0400
12519 +++ linux-2.6.34.1/arch/x86/kernel/microcode_amd.c 2010-07-07 09:04:48.000000000 -0400
12520 @@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int c
12521 uci->mc = NULL;
12522 }
12523
12524 -static struct microcode_ops microcode_amd_ops = {
12525 +static const struct microcode_ops microcode_amd_ops = {
12526 .request_microcode_user = request_microcode_user,
12527 .request_microcode_fw = request_microcode_fw,
12528 .collect_cpu_info = collect_cpu_info_amd,
12529 @@ -339,7 +339,7 @@ static struct microcode_ops microcode_am
12530 .microcode_fini_cpu = microcode_fini_cpu_amd,
12531 };
12532
12533 -struct microcode_ops * __init init_amd_microcode(void)
12534 +const struct microcode_ops * __init init_amd_microcode(void)
12535 {
12536 return &microcode_amd_ops;
12537 }
12538 diff -urNp linux-2.6.34.1/arch/x86/kernel/microcode_core.c linux-2.6.34.1/arch/x86/kernel/microcode_core.c
12539 --- linux-2.6.34.1/arch/x86/kernel/microcode_core.c 2010-07-05 14:24:10.000000000 -0400
12540 +++ linux-2.6.34.1/arch/x86/kernel/microcode_core.c 2010-07-07 09:04:48.000000000 -0400
12541 @@ -92,7 +92,7 @@ MODULE_LICENSE("GPL");
12542
12543 #define MICROCODE_VERSION "2.00"
12544
12545 -static struct microcode_ops *microcode_ops;
12546 +static const struct microcode_ops *microcode_ops;
12547
12548 /*
12549 * Synchronization.
12550 diff -urNp linux-2.6.34.1/arch/x86/kernel/microcode_intel.c linux-2.6.34.1/arch/x86/kernel/microcode_intel.c
12551 --- linux-2.6.34.1/arch/x86/kernel/microcode_intel.c 2010-07-05 14:24:10.000000000 -0400
12552 +++ linux-2.6.34.1/arch/x86/kernel/microcode_intel.c 2010-07-07 09:04:48.000000000 -0400
12553 @@ -436,13 +436,13 @@ static enum ucode_state request_microcod
12554
12555 static int get_ucode_user(void *to, const void *from, size_t n)
12556 {
12557 - return copy_from_user(to, from, n);
12558 + return copy_from_user(to, (__force const void __user *)from, n);
12559 }
12560
12561 static enum ucode_state
12562 request_microcode_user(int cpu, const void __user *buf, size_t size)
12563 {
12564 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
12565 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
12566 }
12567
12568 static void microcode_fini_cpu(int cpu)
12569 @@ -453,7 +453,7 @@ static void microcode_fini_cpu(int cpu)
12570 uci->mc = NULL;
12571 }
12572
12573 -static struct microcode_ops microcode_intel_ops = {
12574 +static const struct microcode_ops microcode_intel_ops = {
12575 .request_microcode_user = request_microcode_user,
12576 .request_microcode_fw = request_microcode_fw,
12577 .collect_cpu_info = collect_cpu_info,
12578 @@ -461,7 +461,7 @@ static struct microcode_ops microcode_in
12579 .microcode_fini_cpu = microcode_fini_cpu,
12580 };
12581
12582 -struct microcode_ops * __init init_intel_microcode(void)
12583 +const struct microcode_ops * __init init_intel_microcode(void)
12584 {
12585 return &microcode_intel_ops;
12586 }
12587 diff -urNp linux-2.6.34.1/arch/x86/kernel/module.c linux-2.6.34.1/arch/x86/kernel/module.c
12588 --- linux-2.6.34.1/arch/x86/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
12589 +++ linux-2.6.34.1/arch/x86/kernel/module.c 2010-07-07 09:04:48.000000000 -0400
12590 @@ -35,7 +35,7 @@
12591 #define DEBUGP(fmt...)
12592 #endif
12593
12594 -void *module_alloc(unsigned long size)
12595 +static void *__module_alloc(unsigned long size, pgprot_t prot)
12596 {
12597 struct vm_struct *area;
12598
12599 @@ -49,8 +49,18 @@ void *module_alloc(unsigned long size)
12600 if (!area)
12601 return NULL;
12602
12603 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
12604 - PAGE_KERNEL_EXEC);
12605 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
12606 +}
12607 +
12608 +void *module_alloc(unsigned long size)
12609 +{
12610 +
12611 +#ifdef CONFIG_PAX_KERNEXEC
12612 + return __module_alloc(size, PAGE_KERNEL);
12613 +#else
12614 + return __module_alloc(size, PAGE_KERNEL_EXEC);
12615 +#endif
12616 +
12617 }
12618
12619 /* Free memory returned from module_alloc */
12620 @@ -59,6 +69,40 @@ void module_free(struct module *mod, voi
12621 vfree(module_region);
12622 }
12623
12624 +#ifdef CONFIG_PAX_KERNEXEC
12625 +#ifdef CONFIG_X86_32
12626 +void *module_alloc_exec(unsigned long size)
12627 +{
12628 + struct vm_struct *area;
12629 +
12630 + if (size == 0)
12631 + return NULL;
12632 +
12633 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
12634 + return area ? area->addr : NULL;
12635 +}
12636 +EXPORT_SYMBOL(module_alloc_exec);
12637 +
12638 +void module_free_exec(struct module *mod, void *module_region)
12639 +{
12640 + vunmap(module_region);
12641 +}
12642 +EXPORT_SYMBOL(module_free_exec);
12643 +#else
12644 +void module_free_exec(struct module *mod, void *module_region)
12645 +{
12646 + module_free(mod, module_region);
12647 +}
12648 +EXPORT_SYMBOL(module_free_exec);
12649 +
12650 +void *module_alloc_exec(unsigned long size)
12651 +{
12652 + return __module_alloc(size, PAGE_KERNEL_RX);
12653 +}
12654 +EXPORT_SYMBOL(module_alloc_exec);
12655 +#endif
12656 +#endif
12657 +
12658 /* We don't need anything special. */
12659 int module_frob_arch_sections(Elf_Ehdr *hdr,
12660 Elf_Shdr *sechdrs,
12661 @@ -78,14 +122,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
12662 unsigned int i;
12663 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
12664 Elf32_Sym *sym;
12665 - uint32_t *location;
12666 + uint32_t *plocation, location;
12667
12668 DEBUGP("Applying relocate section %u to %u\n", relsec,
12669 sechdrs[relsec].sh_info);
12670 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
12671 /* This is where to make the change */
12672 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
12673 - + rel[i].r_offset;
12674 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
12675 + location = (uint32_t)plocation;
12676 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
12677 + plocation = ktla_ktva((void *)plocation);
12678 /* This is the symbol it is referring to. Note that all
12679 undefined symbols have been resolved. */
12680 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
12681 @@ -94,11 +140,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
12682 switch (ELF32_R_TYPE(rel[i].r_info)) {
12683 case R_386_32:
12684 /* We add the value into the location given */
12685 - *location += sym->st_value;
12686 + pax_open_kernel();
12687 + *plocation += sym->st_value;
12688 + pax_close_kernel();
12689 break;
12690 case R_386_PC32:
12691 /* Add the value, subtract its postition */
12692 - *location += sym->st_value - (uint32_t)location;
12693 + pax_open_kernel();
12694 + *plocation += sym->st_value - location;
12695 + pax_close_kernel();
12696 break;
12697 default:
12698 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
12699 @@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
12700 case R_X86_64_NONE:
12701 break;
12702 case R_X86_64_64:
12703 + pax_open_kernel();
12704 *(u64 *)loc = val;
12705 + pax_close_kernel();
12706 break;
12707 case R_X86_64_32:
12708 + pax_open_kernel();
12709 *(u32 *)loc = val;
12710 + pax_close_kernel();
12711 if (val != *(u32 *)loc)
12712 goto overflow;
12713 break;
12714 case R_X86_64_32S:
12715 + pax_open_kernel();
12716 *(s32 *)loc = val;
12717 + pax_close_kernel();
12718 if ((s64)val != *(s32 *)loc)
12719 goto overflow;
12720 break;
12721 case R_X86_64_PC32:
12722 val -= (u64)loc;
12723 + pax_open_kernel();
12724 *(u32 *)loc = val;
12725 + pax_close_kernel();
12726 +
12727 #if 0
12728 if ((s64)val != *(s32 *)loc)
12729 goto overflow;
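
Note on the module.c hunks above: under CONFIG_PAX_KERNEXEC they apply a W^X discipline to module loading — plain module_alloc() hands out non-executable memory, a separate module_alloc_exec()/module_free_exec() pair manages the executable region, and every relocation write is bracketed by pax_open_kernel()/pax_close_kernel() (presumably lifting the kernel's write protection for the duration; that implementation is not part of these hunks). The following userspace sketch shows the same idea with mmap/mprotect on an x86-64 Linux host; all names and the machine-code stub are illustrative only, not taken from the patch.

/* Minimal W^X sketch: memory is writable while the "relocations" are
 * applied, and only becomes executable after write permission is dropped. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* x86-64 machine code for: mov eax, 42; ret */
    static const unsigned char stub[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
    size_t len = 4096;

    /* Step 1: allocate writable, non-executable memory (the analogue of
     * __module_alloc(size, PAGE_KERNEL) in the hunk above). */
    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    /* Step 2: write the code while the mapping is still writable. */
    memcpy(p, stub, sizeof(stub));

    /* Step 3: drop write permission before allowing execution, the
     * userspace analogue of closing the kernel after the relocations. */
    if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0)
        return 1;

    int (*fn)(void) = (int (*)(void))p;
    printf("stub returned %d\n", fn());
    return 0;
}
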
12730 diff -urNp linux-2.6.34.1/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.34.1/arch/x86/kernel/paravirt-spinlocks.c
12731 --- linux-2.6.34.1/arch/x86/kernel/paravirt-spinlocks.c 2010-07-05 14:24:10.000000000 -0400
12732 +++ linux-2.6.34.1/arch/x86/kernel/paravirt-spinlocks.c 2010-07-07 09:04:48.000000000 -0400
12733 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
12734 arch_spin_lock(lock);
12735 }
12736
12737 -struct pv_lock_ops pv_lock_ops = {
12738 +struct pv_lock_ops pv_lock_ops __read_only = {
12739 #ifdef CONFIG_SMP
12740 .spin_is_locked = __ticket_spin_is_locked,
12741 .spin_is_contended = __ticket_spin_is_contended,
12742 diff -urNp linux-2.6.34.1/arch/x86/kernel/paravirt.c linux-2.6.34.1/arch/x86/kernel/paravirt.c
12743 --- linux-2.6.34.1/arch/x86/kernel/paravirt.c 2010-07-05 14:24:10.000000000 -0400
12744 +++ linux-2.6.34.1/arch/x86/kernel/paravirt.c 2010-07-07 09:04:48.000000000 -0400
12745 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
12746 * corresponding structure. */
12747 static void *get_call_destination(u8 type)
12748 {
12749 - struct paravirt_patch_template tmpl = {
12750 + const struct paravirt_patch_template tmpl = {
12751 .pv_init_ops = pv_init_ops,
12752 .pv_time_ops = pv_time_ops,
12753 .pv_cpu_ops = pv_cpu_ops,
12754 @@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type,
12755 if (opfunc == NULL)
12756 /* If there's no function, patch it with a ud2a (BUG) */
12757 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
12758 - else if (opfunc == _paravirt_nop)
12759 + else if (opfunc == (void *)_paravirt_nop)
12760 /* If the operation is a nop, then nop the callsite */
12761 ret = paravirt_patch_nop();
12762
12763 /* identity functions just return their single argument */
12764 - else if (opfunc == _paravirt_ident_32)
12765 + else if (opfunc == (void *)_paravirt_ident_32)
12766 ret = paravirt_patch_ident_32(insnbuf, len);
12767 - else if (opfunc == _paravirt_ident_64)
12768 + else if (opfunc == (void *)_paravirt_ident_64)
12769 ret = paravirt_patch_ident_64(insnbuf, len);
12770
12771 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
12772 @@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insn
12773 if (insn_len > len || start == NULL)
12774 insn_len = len;
12775 else
12776 - memcpy(insnbuf, start, insn_len);
12777 + memcpy(insnbuf, ktla_ktva(start), insn_len);
12778
12779 return insn_len;
12780 }
12781 @@ -294,22 +294,22 @@ void arch_flush_lazy_mmu_mode(void)
12782 preempt_enable();
12783 }
12784
12785 -struct pv_info pv_info = {
12786 +struct pv_info pv_info __read_only = {
12787 .name = "bare hardware",
12788 .paravirt_enabled = 0,
12789 .kernel_rpl = 0,
12790 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
12791 };
12792
12793 -struct pv_init_ops pv_init_ops = {
12794 +struct pv_init_ops pv_init_ops __read_only = {
12795 .patch = native_patch,
12796 };
12797
12798 -struct pv_time_ops pv_time_ops = {
12799 +struct pv_time_ops pv_time_ops __read_only = {
12800 .sched_clock = native_sched_clock,
12801 };
12802
12803 -struct pv_irq_ops pv_irq_ops = {
12804 +struct pv_irq_ops pv_irq_ops __read_only = {
12805 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
12806 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
12807 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
12808 @@ -321,7 +321,7 @@ struct pv_irq_ops pv_irq_ops = {
12809 #endif
12810 };
12811
12812 -struct pv_cpu_ops pv_cpu_ops = {
12813 +struct pv_cpu_ops pv_cpu_ops __read_only = {
12814 .cpuid = native_cpuid,
12815 .get_debugreg = native_get_debugreg,
12816 .set_debugreg = native_set_debugreg,
12817 @@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = {
12818 .end_context_switch = paravirt_nop,
12819 };
12820
12821 -struct pv_apic_ops pv_apic_ops = {
12822 +struct pv_apic_ops pv_apic_ops __read_only = {
12823 #ifdef CONFIG_X86_LOCAL_APIC
12824 .startup_ipi_hook = paravirt_nop,
12825 #endif
12826 @@ -396,7 +396,7 @@ struct pv_apic_ops pv_apic_ops = {
12827 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
12828 #endif
12829
12830 -struct pv_mmu_ops pv_mmu_ops = {
12831 +struct pv_mmu_ops pv_mmu_ops __read_only = {
12832
12833 .read_cr2 = native_read_cr2,
12834 .write_cr2 = native_write_cr2,
12835 @@ -463,6 +463,12 @@ struct pv_mmu_ops pv_mmu_ops = {
12836 },
12837
12838 .set_fixmap = native_set_fixmap,
12839 +
12840 +#ifdef CONFIG_PAX_KERNEXEC
12841 + .pax_open_kernel = native_pax_open_kernel,
12842 + .pax_close_kernel = native_pax_close_kernel,
12843 +#endif
12844 +
12845 };
12846
12847 EXPORT_SYMBOL_GPL(pv_time_ops);
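
Note on the paravirt.c hunks: the pv_*_ops tables are marked __read_only and the patch template is constified. These are function-pointer dispatch tables filled once at boot, so moving them out of writable data removes an easy control-flow hijack target; __read_only itself is defined elsewhere in the patch (most likely as a section attribute — an assumption here, not shown in these hunks). A small hypothetical C illustration of the const function-pointer-table idea:

#include <stdio.h>

struct ops {
    int (*compute)(int);
};

static int double_it(int x) { return 2 * x; }

/* A const table of function pointers is placed in a read-only section
 * (.rodata), so a stray or attacker-controlled write to it faults instead
 * of silently redirecting control flow. */
static const struct ops default_ops = {
    .compute = double_it,
};

int main(void)
{
    printf("%d\n", default_ops.compute(21));
    /* default_ops.compute = other_fn;   rejected at compile time */
    return 0;
}
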
12848 diff -urNp linux-2.6.34.1/arch/x86/kernel/pci-calgary_64.c linux-2.6.34.1/arch/x86/kernel/pci-calgary_64.c
12849 --- linux-2.6.34.1/arch/x86/kernel/pci-calgary_64.c 2010-07-05 14:24:10.000000000 -0400
12850 +++ linux-2.6.34.1/arch/x86/kernel/pci-calgary_64.c 2010-07-07 09:04:48.000000000 -0400
12851 @@ -470,7 +470,7 @@ static void calgary_free_coherent(struct
12852 free_pages((unsigned long)vaddr, get_order(size));
12853 }
12854
12855 -static struct dma_map_ops calgary_dma_ops = {
12856 +static const struct dma_map_ops calgary_dma_ops = {
12857 .alloc_coherent = calgary_alloc_coherent,
12858 .free_coherent = calgary_free_coherent,
12859 .map_sg = calgary_map_sg,
12860 diff -urNp linux-2.6.34.1/arch/x86/kernel/pci-dma.c linux-2.6.34.1/arch/x86/kernel/pci-dma.c
12861 --- linux-2.6.34.1/arch/x86/kernel/pci-dma.c 2010-07-05 14:24:10.000000000 -0400
12862 +++ linux-2.6.34.1/arch/x86/kernel/pci-dma.c 2010-07-07 09:04:48.000000000 -0400
12863 @@ -16,7 +16,7 @@
12864
12865 static int forbid_dac __read_mostly;
12866
12867 -struct dma_map_ops *dma_ops = &nommu_dma_ops;
12868 +const struct dma_map_ops *dma_ops = &nommu_dma_ops;
12869 EXPORT_SYMBOL(dma_ops);
12870
12871 static int iommu_sac_force __read_mostly;
12872 @@ -248,7 +248,7 @@ early_param("iommu", iommu_setup);
12873
12874 int dma_supported(struct device *dev, u64 mask)
12875 {
12876 - struct dma_map_ops *ops = get_dma_ops(dev);
12877 + const struct dma_map_ops *ops = get_dma_ops(dev);
12878
12879 #ifdef CONFIG_PCI
12880 if (mask > 0xffffffff && forbid_dac > 0) {
12881 diff -urNp linux-2.6.34.1/arch/x86/kernel/pci-gart_64.c linux-2.6.34.1/arch/x86/kernel/pci-gart_64.c
12882 --- linux-2.6.34.1/arch/x86/kernel/pci-gart_64.c 2010-07-05 14:24:10.000000000 -0400
12883 +++ linux-2.6.34.1/arch/x86/kernel/pci-gart_64.c 2010-07-07 09:04:48.000000000 -0400
12884 @@ -699,7 +699,7 @@ static __init int init_k8_gatt(struct ag
12885 return -1;
12886 }
12887
12888 -static struct dma_map_ops gart_dma_ops = {
12889 +static const struct dma_map_ops gart_dma_ops = {
12890 .map_sg = gart_map_sg,
12891 .unmap_sg = gart_unmap_sg,
12892 .map_page = gart_map_page,
12893 diff -urNp linux-2.6.34.1/arch/x86/kernel/pci-nommu.c linux-2.6.34.1/arch/x86/kernel/pci-nommu.c
12894 --- linux-2.6.34.1/arch/x86/kernel/pci-nommu.c 2010-07-05 14:24:10.000000000 -0400
12895 +++ linux-2.6.34.1/arch/x86/kernel/pci-nommu.c 2010-07-07 09:04:48.000000000 -0400
12896 @@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str
12897 flush_write_buffers();
12898 }
12899
12900 -struct dma_map_ops nommu_dma_ops = {
12901 +const struct dma_map_ops nommu_dma_ops = {
12902 .alloc_coherent = dma_generic_alloc_coherent,
12903 .free_coherent = nommu_free_coherent,
12904 .map_sg = nommu_map_sg,
12905 diff -urNp linux-2.6.34.1/arch/x86/kernel/pci-swiotlb.c linux-2.6.34.1/arch/x86/kernel/pci-swiotlb.c
12906 --- linux-2.6.34.1/arch/x86/kernel/pci-swiotlb.c 2010-07-05 14:24:10.000000000 -0400
12907 +++ linux-2.6.34.1/arch/x86/kernel/pci-swiotlb.c 2010-07-07 09:04:48.000000000 -0400
12908 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
12909 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
12910 }
12911
12912 -static struct dma_map_ops swiotlb_dma_ops = {
12913 +static const struct dma_map_ops swiotlb_dma_ops = {
12914 .mapping_error = swiotlb_dma_mapping_error,
12915 .alloc_coherent = x86_swiotlb_alloc_coherent,
12916 .free_coherent = swiotlb_free_coherent,
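
The calgary, gart, nommu and swiotlb hunks follow the same constification pattern, with one twist visible in pci-dma.c: the individual dma_map_ops tables become const while the global dma_ops stays a non-const pointer to const, so a backend can still be selected at init time while the tables themselves are immutable. Hypothetical sketch of that distinction (names modelled on the patched code, logic invented for illustration):

#include <stdio.h>

struct dma_map_ops { const char *(*name)(void); };

static const char *nommu_name(void)   { return "nommu"; }
static const char *swiotlb_name(void) { return "swiotlb"; }

static const struct dma_map_ops nommu_dma_ops   = { .name = nommu_name };
static const struct dma_map_ops swiotlb_dma_ops = { .name = swiotlb_name };

/* Pointer to const, as in the patched pci-dma.c: the pointer may be
 * switched between backends, the pointed-to tables may not be written. */
static const struct dma_map_ops *dma_ops = &nommu_dma_ops;

int main(void)
{
    printf("%s\n", dma_ops->name());
    dma_ops = &swiotlb_dma_ops;        /* allowed: the pointer is mutable */
    printf("%s\n", dma_ops->name());
    /* dma_ops->name = ...;               not allowed: the pointee is const */
    return 0;
}
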
12917 diff -urNp linux-2.6.34.1/arch/x86/kernel/process.c linux-2.6.34.1/arch/x86/kernel/process.c
12918 --- linux-2.6.34.1/arch/x86/kernel/process.c 2010-07-05 14:24:10.000000000 -0400
12919 +++ linux-2.6.34.1/arch/x86/kernel/process.c 2010-07-07 09:04:48.000000000 -0400
12920 @@ -78,7 +78,7 @@ void exit_thread(void)
12921 unsigned long *bp = t->io_bitmap_ptr;
12922
12923 if (bp) {
12924 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
12925 + struct tss_struct *tss = init_tss + get_cpu();
12926
12927 t->io_bitmap_ptr = NULL;
12928 clear_thread_flag(TIF_IO_BITMAP);
12929 @@ -112,7 +112,7 @@ void show_regs_common(void)
12930
12931 printk(KERN_CONT "\n");
12932 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
12933 - current->pid, current->comm, print_tainted(),
12934 + task_pid_nr(current), current->comm, print_tainted(),
12935 init_utsname()->release,
12936 (int)strcspn(init_utsname()->version, " "),
12937 init_utsname()->version, board, product);
12938 @@ -122,6 +122,9 @@ void flush_thread(void)
12939 {
12940 struct task_struct *tsk = current;
12941
12942 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
12943 + loadsegment(gs, 0);
12944 +#endif
12945 flush_ptrace_hw_breakpoint(tsk);
12946 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
12947 /*
12948 @@ -279,8 +282,8 @@ int kernel_thread(int (*fn)(void *), voi
12949 regs.di = (unsigned long) arg;
12950
12951 #ifdef CONFIG_X86_32
12952 - regs.ds = __USER_DS;
12953 - regs.es = __USER_DS;
12954 + regs.ds = __KERNEL_DS;
12955 + regs.es = __KERNEL_DS;
12956 regs.fs = __KERNEL_PERCPU;
12957 regs.gs = __KERNEL_STACK_CANARY;
12958 #else
12959 @@ -689,17 +692,3 @@ static int __init idle_setup(char *str)
12960 return 0;
12961 }
12962 early_param("idle", idle_setup);
12963 -
12964 -unsigned long arch_align_stack(unsigned long sp)
12965 -{
12966 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12967 - sp -= get_random_int() % 8192;
12968 - return sp & ~0xf;
12969 -}
12970 -
12971 -unsigned long arch_randomize_brk(struct mm_struct *mm)
12972 -{
12973 - unsigned long range_end = mm->brk + 0x02000000;
12974 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
12975 -}
12976 -
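
The process.c hunk (and the vm86_32.c one later in this patch) replaces &per_cpu(init_tss, get_cpu()) with init_tss + get_cpu(), which presumes that init_tss is turned into an ordinary per-CPU-indexed array elsewhere in the patch. Under that assumption the expression is plain C pointer arithmetic, as this illustrative check shows (the struct contents and array size here are made up):

#include <assert.h>
#include <stdio.h>

struct tss_struct { unsigned long sp0; };

static struct tss_struct init_tss[4];   /* stand-in for an NR_CPUS array */

int main(void)
{
    int cpu = 2;

    /* init_tss + cpu and &init_tss[cpu] denote the same element. */
    assert(init_tss + cpu == &init_tss[cpu]);
    printf("entry %d at %p\n", cpu, (void *)(init_tss + cpu));
    return 0;
}
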
12977 diff -urNp linux-2.6.34.1/arch/x86/kernel/process_32.c linux-2.6.34.1/arch/x86/kernel/process_32.c
12978 --- linux-2.6.34.1/arch/x86/kernel/process_32.c 2010-07-05 14:24:10.000000000 -0400
12979 +++ linux-2.6.34.1/arch/x86/kernel/process_32.c 2010-07-07 09:04:48.000000000 -0400
12980 @@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
12981 unsigned long thread_saved_pc(struct task_struct *tsk)
12982 {
12983 return ((unsigned long *)tsk->thread.sp)[3];
12984 +//XXX return tsk->thread.eip;
12985 }
12986
12987 #ifndef CONFIG_SMP
12988 @@ -127,7 +128,7 @@ void __show_regs(struct pt_regs *regs, i
12989 unsigned long sp;
12990 unsigned short ss, gs;
12991
12992 - if (user_mode_vm(regs)) {
12993 + if (user_mode(regs)) {
12994 sp = regs->sp;
12995 ss = regs->ss & 0xffff;
12996 gs = get_user_gs(regs);
12997 @@ -197,7 +198,7 @@ int copy_thread(unsigned long clone_flag
12998 struct task_struct *tsk;
12999 int err;
13000
13001 - childregs = task_pt_regs(p);
13002 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
13003 *childregs = *regs;
13004 childregs->ax = 0;
13005 childregs->sp = sp;
13006 @@ -231,6 +232,7 @@ int copy_thread(unsigned long clone_flag
13007 * Set a new TLS for the child thread?
13008 */
13009 if (clone_flags & CLONE_SETTLS)
13010 +//XXX needs set_fs()?
13011 err = do_set_thread_area(p, -1,
13012 (struct user_desc __user *)childregs->si, 0);
13013
13014 @@ -301,7 +303,7 @@ __switch_to(struct task_struct *prev_p,
13015 struct thread_struct *prev = &prev_p->thread,
13016 *next = &next_p->thread;
13017 int cpu = smp_processor_id();
13018 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
13019 + struct tss_struct *tss = init_tss + cpu;
13020 bool preload_fpu;
13021
13022 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
13023 @@ -336,6 +338,11 @@ __switch_to(struct task_struct *prev_p,
13024 */
13025 lazy_save_gs(prev->gs);
13026
13027 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13028 + if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
13029 + __set_fs(task_thread_info(next_p)->addr_limit, cpu);
13030 +#endif
13031 +
13032 /*
13033 * Load the per-thread Thread-Local Storage descriptor.
13034 */
13035 @@ -412,3 +419,27 @@ unsigned long get_wchan(struct task_stru
13036 return 0;
13037 }
13038
13039 +#ifdef CONFIG_PAX_RANDKSTACK
13040 +asmlinkage void pax_randomize_kstack(void)
13041 +{
13042 + struct thread_struct *thread = &current->thread;
13043 + unsigned long time;
13044 +
13045 + if (!randomize_va_space)
13046 + return;
13047 +
13048 + rdtscl(time);
13049 +
13050 + /* P4 seems to return a 0 LSB, ignore it */
13051 +#ifdef CONFIG_MPENTIUM4
13052 + time &= 0x1EUL;
13053 + time <<= 2;
13054 +#else
13055 + time &= 0xFUL;
13056 + time <<= 3;
13057 +#endif
13058 +
13059 + thread->sp0 ^= time;
13060 + load_sp0(init_tss + smp_processor_id(), thread);
13061 +}
13062 +#endif
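
pax_randomize_kstack() above perturbs the kernel stack top with TSC-derived bits. Both variants produce an offset that is a multiple of 8 in the range 0..120, i.e. 16 possible positions (4 bits of entropy); the CONFIG_MPENTIUM4 variant merely skips the least significant TSC bit, which the in-code comment says that CPU tends to return as 0. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
    for (unsigned long tsc = 0; tsc < 32; tsc++) {
        unsigned long generic = (tsc & 0xFUL)  << 3;   /* non-P4 path */
        unsigned long p4      = (tsc & 0x1EUL) << 2;   /* CONFIG_MPENTIUM4 path */
        printf("tsc=%2lu  generic=%3lu  p4=%3lu\n", tsc, generic, p4);
    }
    return 0;
}
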
13063 diff -urNp linux-2.6.34.1/arch/x86/kernel/process_64.c linux-2.6.34.1/arch/x86/kernel/process_64.c
13064 --- linux-2.6.34.1/arch/x86/kernel/process_64.c 2010-07-05 14:24:10.000000000 -0400
13065 +++ linux-2.6.34.1/arch/x86/kernel/process_64.c 2010-07-07 09:04:48.000000000 -0400
13066 @@ -88,7 +88,7 @@ static void __exit_idle(void)
13067 void exit_idle(void)
13068 {
13069 /* idle loop has pid 0 */
13070 - if (current->pid)
13071 + if (task_pid_nr(current))
13072 return;
13073 __exit_idle();
13074 }
13075 @@ -383,7 +383,7 @@ __switch_to(struct task_struct *prev_p,
13076 struct thread_struct *prev = &prev_p->thread;
13077 struct thread_struct *next = &next_p->thread;
13078 int cpu = smp_processor_id();
13079 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
13080 + struct tss_struct *tss = init_tss + cpu;
13081 unsigned fsindex, gsindex;
13082 bool preload_fpu;
13083
13084 @@ -536,12 +536,11 @@ unsigned long get_wchan(struct task_stru
13085 if (!p || p == current || p->state == TASK_RUNNING)
13086 return 0;
13087 stack = (unsigned long)task_stack_page(p);
13088 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
13089 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64))
13090 return 0;
13091 fp = *(u64 *)(p->thread.sp);
13092 do {
13093 - if (fp < (unsigned long)stack ||
13094 - fp >= (unsigned long)stack+THREAD_SIZE)
13095 + if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64))
13096 return 0;
13097 ip = *(u64 *)(fp+8);
13098 if (!in_sched_functions(ip))
13099 diff -urNp linux-2.6.34.1/arch/x86/kernel/ptrace.c linux-2.6.34.1/arch/x86/kernel/ptrace.c
13100 --- linux-2.6.34.1/arch/x86/kernel/ptrace.c 2010-07-05 14:24:10.000000000 -0400
13101 +++ linux-2.6.34.1/arch/x86/kernel/ptrace.c 2010-07-07 09:04:48.000000000 -0400
13102 @@ -1145,7 +1145,7 @@ static const struct user_regset_view use
13103 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
13104 {
13105 int ret;
13106 - unsigned long __user *datap = (unsigned long __user *)data;
13107 + unsigned long __user *datap = (__force unsigned long __user *)data;
13108
13109 switch (request) {
13110 /* read the word at location addr in the USER area. */
13111 @@ -1232,14 +1232,14 @@ long arch_ptrace(struct task_struct *chi
13112 if (addr < 0)
13113 return -EIO;
13114 ret = do_get_thread_area(child, addr,
13115 - (struct user_desc __user *) data);
13116 + (__force struct user_desc __user *) data);
13117 break;
13118
13119 case PTRACE_SET_THREAD_AREA:
13120 if (addr < 0)
13121 return -EIO;
13122 ret = do_set_thread_area(child, addr,
13123 - (struct user_desc __user *) data, 0);
13124 + (__force struct user_desc __user *) data, 0);
13125 break;
13126 #endif
13127
13128 @@ -1258,12 +1258,12 @@ long arch_ptrace(struct task_struct *chi
13129 #ifdef CONFIG_X86_PTRACE_BTS
13130 case PTRACE_BTS_CONFIG:
13131 ret = ptrace_bts_config
13132 - (child, data, (struct ptrace_bts_config __user *)addr);
13133 + (child, data, (__force struct ptrace_bts_config __user *)addr);
13134 break;
13135
13136 case PTRACE_BTS_STATUS:
13137 ret = ptrace_bts_status
13138 - (child, data, (struct ptrace_bts_config __user *)addr);
13139 + (child, data, (__force struct ptrace_bts_config __user *)addr);
13140 break;
13141
13142 case PTRACE_BTS_SIZE:
13143 @@ -1272,7 +1272,7 @@ long arch_ptrace(struct task_struct *chi
13144
13145 case PTRACE_BTS_GET:
13146 ret = ptrace_bts_read_record
13147 - (child, data, (struct bts_struct __user *) addr);
13148 + (child, data, (__force struct bts_struct __user *) addr);
13149 break;
13150
13151 case PTRACE_BTS_CLEAR:
13152 @@ -1281,7 +1281,7 @@ long arch_ptrace(struct task_struct *chi
13153
13154 case PTRACE_BTS_DRAIN:
13155 ret = ptrace_bts_drain
13156 - (child, data, (struct bts_struct __user *) addr);
13157 + (child, data, (__force struct bts_struct __user *) addr);
13158 break;
13159 #endif /* CONFIG_X86_PTRACE_BTS */
13160
13161 @@ -1697,7 +1697,7 @@ static void fill_sigtrap_info(struct tas
13162 memset(info, 0, sizeof(*info));
13163 info->si_signo = SIGTRAP;
13164 info->si_code = si_code;
13165 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
13166 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
13167 }
13168
13169 void user_single_step_siginfo(struct task_struct *tsk,
13170 diff -urNp linux-2.6.34.1/arch/x86/kernel/reboot.c linux-2.6.34.1/arch/x86/kernel/reboot.c
13171 --- linux-2.6.34.1/arch/x86/kernel/reboot.c 2010-07-05 14:24:10.000000000 -0400
13172 +++ linux-2.6.34.1/arch/x86/kernel/reboot.c 2010-07-07 09:04:48.000000000 -0400
13173 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
13174 EXPORT_SYMBOL(pm_power_off);
13175
13176 static const struct desc_ptr no_idt = {};
13177 -static int reboot_mode;
13178 +static unsigned short reboot_mode;
13179 enum reboot_type reboot_type = BOOT_KBD;
13180 int reboot_force;
13181
13182 @@ -276,7 +276,7 @@ static struct dmi_system_id __initdata r
13183 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
13184 },
13185 },
13186 - { }
13187 + { NULL, NULL, {{0, {0}}}, NULL}
13188 };
13189
13190 static int __init reboot_init(void)
13191 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
13192 controller to pulse the CPU reset line, which is more thorough, but
13193 doesn't work with at least one type of 486 motherboard. It is easy
13194 to stop this code working; hence the copious comments. */
13195 -static const unsigned long long
13196 -real_mode_gdt_entries [3] =
13197 +static struct desc_struct
13198 +real_mode_gdt_entries [3] __read_only =
13199 {
13200 - 0x0000000000000000ULL, /* Null descriptor */
13201 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
13202 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
13203 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
13204 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
13205 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
13206 };
13207
13208 static const struct desc_ptr
13209 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
13210 * specified by the code and length parameters.
13211 * We assume that length will aways be less that 100!
13212 */
13213 -void machine_real_restart(const unsigned char *code, int length)
13214 +void machine_real_restart(const unsigned char *code, unsigned int length)
13215 {
13216 local_irq_disable();
13217
13218 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
13219 /* Remap the kernel at virtual address zero, as well as offset zero
13220 from the kernel segment. This assumes the kernel segment starts at
13221 virtual address PAGE_OFFSET. */
13222 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13223 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
13224 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13225 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13226
13227 /*
13228 * Use `swapper_pg_dir' as our page directory.
13229 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
13230 boot)". This seems like a fairly standard thing that gets set by
13231 REBOOT.COM programs, and the previous reset routine did this
13232 too. */
13233 - *((unsigned short *)0x472) = reboot_mode;
13234 + *(unsigned short *)(__va(0x472)) = reboot_mode;
13235
13236 /* For the switch to real mode, copy some code to low memory. It has
13237 to be in the first 64k because it is running in 16-bit mode, and it
13238 has to have the same physical and virtual address, because it turns
13239 off paging. Copy it near the end of the first page, out of the way
13240 of BIOS variables. */
13241 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
13242 - real_mode_switch, sizeof (real_mode_switch));
13243 - memcpy((void *)(0x1000 - 100), code, length);
13244 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
13245 + memcpy(__va(0x1000 - 100), code, length);
13246
13247 /* Set up the IDT for real mode. */
13248 load_idt(&real_mode_idt);
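
The reboot.c hunk replaces raw 64-bit descriptor constants with GDT_ENTRY_INIT(flags, base, limit). Packing the fields with the usual GDT bit layout (base split across bits 16-39 and 56-63, limit across bits 0-15 and 48-51, access/flags in bits 40-47 and 52-55) reproduces the removed constants, which is a useful sanity check; the packing function below is a standalone re-implementation for illustration only, not the kernel macro itself.

#include <stdio.h>
#include <stdint.h>

static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
    return ((base  & 0xff000000ULL) << 32) |
           ((flags & 0x0000f0ffULL) << 40) |
           ((limit & 0x000f0000ULL) << 32) |
           ((base  & 0x00ffffffULL) << 16) |
            (limit & 0x0000ffffULL);
}

int main(void)
{
    /* 16-bit real-mode 64k code at 0x00000000 and data at 0x00000100 */
    printf("%#018llx\n", (unsigned long long)gdt_entry(0x9b, 0x000, 0xffff));
    printf("%#018llx\n", (unsigned long long)gdt_entry(0x93, 0x100, 0xffff));
    /* prints 0x00009b000000ffff and 0x000093000100ffff, matching the
     * constants the hunk removes */
    return 0;
}
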
13249 diff -urNp linux-2.6.34.1/arch/x86/kernel/setup.c linux-2.6.34.1/arch/x86/kernel/setup.c
13250 --- linux-2.6.34.1/arch/x86/kernel/setup.c 2010-07-05 14:24:10.000000000 -0400
13251 +++ linux-2.6.34.1/arch/x86/kernel/setup.c 2010-07-07 09:04:48.000000000 -0400
13252 @@ -704,7 +704,7 @@ static void __init trim_bios_range(void)
13253 * area (640->1Mb) as ram even though it is not.
13254 * take them out.
13255 */
13256 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
13257 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
13258 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
13259 }
13260
13261 @@ -790,14 +790,14 @@ void __init setup_arch(char **cmdline_p)
13262
13263 if (!boot_params.hdr.root_flags)
13264 root_mountflags &= ~MS_RDONLY;
13265 - init_mm.start_code = (unsigned long) _text;
13266 - init_mm.end_code = (unsigned long) _etext;
13267 + init_mm.start_code = ktla_ktva((unsigned long) _text);
13268 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
13269 init_mm.end_data = (unsigned long) _edata;
13270 init_mm.brk = _brk_end;
13271
13272 - code_resource.start = virt_to_phys(_text);
13273 - code_resource.end = virt_to_phys(_etext)-1;
13274 - data_resource.start = virt_to_phys(_etext);
13275 + code_resource.start = virt_to_phys(ktla_ktva(_text));
13276 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
13277 + data_resource.start = virt_to_phys(_sdata);
13278 data_resource.end = virt_to_phys(_edata)-1;
13279 bss_resource.start = virt_to_phys(&__bss_start);
13280 bss_resource.end = virt_to_phys(&__bss_stop)-1;
13281 diff -urNp linux-2.6.34.1/arch/x86/kernel/setup_percpu.c linux-2.6.34.1/arch/x86/kernel/setup_percpu.c
13282 --- linux-2.6.34.1/arch/x86/kernel/setup_percpu.c 2010-07-05 14:24:10.000000000 -0400
13283 +++ linux-2.6.34.1/arch/x86/kernel/setup_percpu.c 2010-07-07 09:04:48.000000000 -0400
13284 @@ -27,19 +27,17 @@
13285 # define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0)
13286 #endif
13287
13288 +#ifdef CONFIG_SMP
13289 DEFINE_PER_CPU(int, cpu_number);
13290 EXPORT_PER_CPU_SYMBOL(cpu_number);
13291 +#endif
13292
13293 -#ifdef CONFIG_X86_64
13294 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
13295 -#else
13296 -#define BOOT_PERCPU_OFFSET 0
13297 -#endif
13298
13299 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
13300 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
13301
13302 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
13303 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
13304 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
13305 };
13306 EXPORT_SYMBOL(__per_cpu_offset);
13307 @@ -167,10 +165,10 @@ static inline void setup_percpu_segment(
13308 {
13309 #ifdef CONFIG_X86_32
13310 struct desc_struct gdt;
13311 + unsigned long base = per_cpu_offset(cpu);
13312
13313 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
13314 - 0x2 | DESCTYPE_S, 0x8);
13315 - gdt.s = 1;
13316 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
13317 + 0x83 | DESCTYPE_S, 0xC);
13318 write_gdt_entry(get_cpu_gdt_table(cpu),
13319 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
13320 #endif
13321 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
13322 /* alrighty, percpu areas up and running */
13323 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
13324 for_each_possible_cpu(cpu) {
13325 +#ifdef CONFIG_CC_STACKPROTECTOR
13326 +#ifdef CONFIG_X86_32
13327 + unsigned long canary = per_cpu(stack_canary, cpu);
13328 +#endif
13329 +#endif
13330 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
13331 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
13332 per_cpu(cpu_number, cpu) = cpu;
13333 @@ -246,6 +249,12 @@ void __init setup_per_cpu_areas(void)
13334 early_per_cpu_map(x86_cpu_to_node_map, cpu);
13335 #endif
13336 #endif
13337 +#ifdef CONFIG_CC_STACKPROTECTOR
13338 +#ifdef CONFIG_X86_32
13339 + if (cpu == boot_cpu_id)
13340 + per_cpu(stack_canary, cpu) = canary;
13341 +#endif
13342 +#endif
13343 /*
13344 * Up to this point, the boot CPU has been using .data.init
13345 * area. Reload any changed state for the boot CPU.
13346 diff -urNp linux-2.6.34.1/arch/x86/kernel/signal.c linux-2.6.34.1/arch/x86/kernel/signal.c
13347 --- linux-2.6.34.1/arch/x86/kernel/signal.c 2010-07-05 14:24:10.000000000 -0400
13348 +++ linux-2.6.34.1/arch/x86/kernel/signal.c 2010-07-07 09:04:48.000000000 -0400
13349 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
13350 * Align the stack pointer according to the i386 ABI,
13351 * i.e. so that on function entry ((sp + 4) & 15) == 0.
13352 */
13353 - sp = ((sp + 4) & -16ul) - 4;
13354 + sp = ((sp - 12) & -16ul) - 4;
13355 #else /* !CONFIG_X86_32 */
13356 sp = round_down(sp, 16) - 8;
13357 #endif
13358 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
13359 * Return an always-bogus address instead so we will die with SIGSEGV.
13360 */
13361 if (onsigstack && !likely(on_sig_stack(sp)))
13362 - return (void __user *)-1L;
13363 + return (__force void __user *)-1L;
13364
13365 /* save i387 state */
13366 if (used_math() && save_i387_xstate(*fpstate) < 0)
13367 - return (void __user *)-1L;
13368 + return (__force void __user *)-1L;
13369
13370 return (void __user *)sp;
13371 }
13372 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
13373 }
13374
13375 if (current->mm->context.vdso)
13376 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
13377 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
13378 else
13379 - restorer = &frame->retcode;
13380 + restorer = (void __user *)&frame->retcode;
13381 if (ka->sa.sa_flags & SA_RESTORER)
13382 restorer = ka->sa.sa_restorer;
13383
13384 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
13385 * reasons and because gdb uses it as a signature to notice
13386 * signal handler stack frames.
13387 */
13388 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
13389 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
13390
13391 if (err)
13392 return -EFAULT;
13393 @@ -378,7 +378,7 @@ static int __setup_rt_frame(int sig, str
13394 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
13395
13396 /* Set up to return from userspace. */
13397 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13398 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13399 if (ka->sa.sa_flags & SA_RESTORER)
13400 restorer = ka->sa.sa_restorer;
13401 put_user_ex(restorer, &frame->pretcode);
13402 @@ -390,7 +390,7 @@ static int __setup_rt_frame(int sig, str
13403 * reasons and because gdb uses it as a signature to notice
13404 * signal handler stack frames.
13405 */
13406 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
13407 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
13408 } put_user_catch(err);
13409
13410 if (err)
13411 @@ -780,7 +780,7 @@ static void do_signal(struct pt_regs *re
13412 * X86_32: vm86 regs switched out by assembly code before reaching
13413 * here, so testing against kernel CS suffices.
13414 */
13415 - if (!user_mode(regs))
13416 + if (!user_mode_novm(regs))
13417 return;
13418
13419 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
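
In the signal.c hunk, align_sigframe() changes from ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both forms satisfy the i386 ABI rule quoted in the surrounding comment, ((sp + 4) & 15) == 0 at handler entry; the patched form additionally always lands at least 16 bytes below the incoming sp instead of possibly returning sp unchanged. A quick brute-force check of those two properties:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 0x1000; sp < 0x1040; sp++) {
        unsigned long old_sp = ((sp + 4) & -16ul) - 4;    /* pre-patch */
        unsigned long new_sp = ((sp - 12) & -16ul) - 4;   /* patched */

        assert(((old_sp + 4) & 15) == 0);
        assert(((new_sp + 4) & 15) == 0);
        assert(new_sp <= sp - 16);        /* strictly below the incoming sp */
    }
    printf("both variants keep ((sp + 4) & 15) == 0\n");
    return 0;
}
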
13420 diff -urNp linux-2.6.34.1/arch/x86/kernel/smpboot.c linux-2.6.34.1/arch/x86/kernel/smpboot.c
13421 --- linux-2.6.34.1/arch/x86/kernel/smpboot.c 2010-07-05 14:24:10.000000000 -0400
13422 +++ linux-2.6.34.1/arch/x86/kernel/smpboot.c 2010-07-07 09:04:48.000000000 -0400
13423 @@ -761,7 +761,11 @@ do_rest:
13424 (unsigned long)task_stack_page(c_idle.idle) -
13425 KERNEL_STACK_OFFSET + THREAD_SIZE;
13426 #endif
13427 +
13428 + pax_open_kernel();
13429 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13430 + pax_close_kernel();
13431 +
13432 initial_code = (unsigned long)start_secondary;
13433 stack_start.sp = (void *) c_idle.idle->thread.sp;
13434
13435 @@ -894,6 +898,12 @@ int __cpuinit native_cpu_up(unsigned int
13436
13437 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
13438
13439 +#ifdef CONFIG_PAX_PER_CPU_PGD
13440 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
13441 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13442 + KERNEL_PGD_PTRS);
13443 +#endif
13444 +
13445 #ifdef CONFIG_X86_32
13446 /* init low mem mapping */
13447 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13448 diff -urNp linux-2.6.34.1/arch/x86/kernel/step.c linux-2.6.34.1/arch/x86/kernel/step.c
13449 --- linux-2.6.34.1/arch/x86/kernel/step.c 2010-07-05 14:24:10.000000000 -0400
13450 +++ linux-2.6.34.1/arch/x86/kernel/step.c 2010-07-07 09:04:48.000000000 -0400
13451 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
13452 struct desc_struct *desc;
13453 unsigned long base;
13454
13455 - seg &= ~7UL;
13456 + seg >>= 3;
13457
13458 mutex_lock(&child->mm->context.lock);
13459 - if (unlikely((seg >> 3) >= child->mm->context.size))
13460 + if (unlikely(seg >= child->mm->context.size))
13461 addr = -1L; /* bogus selector, access would fault */
13462 else {
13463 desc = child->mm->context.ldt + seg;
13464 @@ -53,6 +53,9 @@ static int is_setting_trap_flag(struct t
13465 unsigned char opcode[15];
13466 unsigned long addr = convert_ip_to_linear(child, regs);
13467
13468 + if (addr == -EINVAL)
13469 + return 0;
13470 +
13471 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
13472 for (i = 0; i < copied; i++) {
13473 switch (opcode[i]) {
13474 @@ -74,7 +77,7 @@ static int is_setting_trap_flag(struct t
13475
13476 #ifdef CONFIG_X86_64
13477 case 0x40 ... 0x4f:
13478 - if (regs->cs != __USER_CS)
13479 + if ((regs->cs & 0xffff) != __USER_CS)
13480 /* 32-bit mode: register increment */
13481 return 0;
13482 /* 64-bit mode: REX prefix */
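
The step.c hunk folds the old "seg &= ~7" plus a later ">> 3" into a single "seg >>= 3". An x86 segment selector is (index << 3) | TI << 2 | RPL, so shifting right by 3 yields the LDT index directly, and the bounds check can then compare the index itself against the LDT size. Small illustration of the selector layout (the sample selectors are arbitrary):

#include <stdio.h>

int main(void)
{
    unsigned int selectors[] = { 0x0007, 0x000f, 0x0017 };   /* TI=1, RPL=3 */

    for (int i = 0; i < 3; i++) {
        unsigned int seg = selectors[i];
        printf("selector %#06x -> index %u, TI=%u, RPL=%u\n",
               seg, seg >> 3, (seg >> 2) & 1, seg & 3);
    }
    return 0;
}
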
13483 diff -urNp linux-2.6.34.1/arch/x86/kernel/sys_i386_32.c linux-2.6.34.1/arch/x86/kernel/sys_i386_32.c
13484 --- linux-2.6.34.1/arch/x86/kernel/sys_i386_32.c 2010-07-05 14:24:10.000000000 -0400
13485 +++ linux-2.6.34.1/arch/x86/kernel/sys_i386_32.c 2010-07-07 09:04:48.000000000 -0400
13486 @@ -24,6 +24,221 @@
13487
13488 #include <asm/syscalls.h>
13489
13490 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
13491 +{
13492 + unsigned long pax_task_size = TASK_SIZE;
13493 +
13494 +#ifdef CONFIG_PAX_SEGMEXEC
13495 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
13496 + pax_task_size = SEGMEXEC_TASK_SIZE;
13497 +#endif
13498 +
13499 + if (len > pax_task_size || addr > pax_task_size - len)
13500 + return -EINVAL;
13501 +
13502 + return 0;
13503 +}
13504 +
13505 +unsigned long
13506 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
13507 + unsigned long len, unsigned long pgoff, unsigned long flags)
13508 +{
13509 + struct mm_struct *mm = current->mm;
13510 + struct vm_area_struct *vma;
13511 + unsigned long start_addr, pax_task_size = TASK_SIZE;
13512 +
13513 +#ifdef CONFIG_PAX_SEGMEXEC
13514 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
13515 + pax_task_size = SEGMEXEC_TASK_SIZE;
13516 +#endif
13517 +
13518 + if (len > pax_task_size)
13519 + return -ENOMEM;
13520 +
13521 + if (flags & MAP_FIXED)
13522 + return addr;
13523 +
13524 +#ifdef CONFIG_PAX_RANDMMAP
13525 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13526 +#endif
13527 +
13528 + if (addr) {
13529 + addr = PAGE_ALIGN(addr);
13530 + vma = find_vma(mm, addr);
13531 + if (pax_task_size - len >= addr &&
13532 + (!vma || addr + len <= vma->vm_start))
13533 + return addr;
13534 + }
13535 + if (len > mm->cached_hole_size) {
13536 + start_addr = addr = mm->free_area_cache;
13537 + } else {
13538 + start_addr = addr = mm->mmap_base;
13539 + mm->cached_hole_size = 0;
13540 + }
13541 +
13542 +#ifdef CONFIG_PAX_PAGEEXEC
13543 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
13544 + start_addr = 0x00110000UL;
13545 +
13546 +#ifdef CONFIG_PAX_RANDMMAP
13547 + if (mm->pax_flags & MF_PAX_RANDMMAP)
13548 + start_addr += mm->delta_mmap & 0x03FFF000UL;
13549 +#endif
13550 +
13551 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
13552 + start_addr = addr = mm->mmap_base;
13553 + else
13554 + addr = start_addr;
13555 + }
13556 +#endif
13557 +
13558 +full_search:
13559 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
13560 + /* At this point: (!vma || addr < vma->vm_end). */
13561 + if (pax_task_size - len < addr) {
13562 + /*
13563 + * Start a new search - just in case we missed
13564 + * some holes.
13565 + */
13566 + if (start_addr != mm->mmap_base) {
13567 + start_addr = addr = mm->mmap_base;
13568 + mm->cached_hole_size = 0;
13569 + goto full_search;
13570 + }
13571 + return -ENOMEM;
13572 + }
13573 + if (!vma || addr + len <= vma->vm_start) {
13574 + /*
13575 + * Remember the place where we stopped the search:
13576 + */
13577 + mm->free_area_cache = addr + len;
13578 + return addr;
13579 + }
13580 + if (addr + mm->cached_hole_size < vma->vm_start)
13581 + mm->cached_hole_size = vma->vm_start - addr;
13582 + addr = vma->vm_end;
13583 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
13584 + start_addr = addr = mm->mmap_base;
13585 + mm->cached_hole_size = 0;
13586 + goto full_search;
13587 + }
13588 + }
13589 +}
13590 +
13591 +unsigned long
13592 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
13593 + const unsigned long len, const unsigned long pgoff,
13594 + const unsigned long flags)
13595 +{
13596 + struct vm_area_struct *vma;
13597 + struct mm_struct *mm = current->mm;
13598 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
13599 +
13600 +#ifdef CONFIG_PAX_SEGMEXEC
13601 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
13602 + pax_task_size = SEGMEXEC_TASK_SIZE;
13603 +#endif
13604 +
13605 + /* requested length too big for entire address space */
13606 + if (len > pax_task_size)
13607 + return -ENOMEM;
13608 +
13609 + if (flags & MAP_FIXED)
13610 + return addr;
13611 +
13612 +#ifdef CONFIG_PAX_PAGEEXEC
13613 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
13614 + goto bottomup;
13615 +#endif
13616 +
13617 +#ifdef CONFIG_PAX_RANDMMAP
13618 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13619 +#endif
13620 +
13621 + /* requesting a specific address */
13622 + if (addr) {
13623 + addr = PAGE_ALIGN(addr);
13624 + vma = find_vma(mm, addr);
13625 + if (pax_task_size - len >= addr &&
13626 + (!vma || addr + len <= vma->vm_start))
13627 + return addr;
13628 + }
13629 +
13630 + /* check if free_area_cache is useful for us */
13631 + if (len <= mm->cached_hole_size) {
13632 + mm->cached_hole_size = 0;
13633 + mm->free_area_cache = mm->mmap_base;
13634 + }
13635 +
13636 + /* either no address requested or can't fit in requested address hole */
13637 + addr = mm->free_area_cache;
13638 +
13639 + /* make sure it can fit in the remaining address space */
13640 + if (addr > len) {
13641 + vma = find_vma(mm, addr-len);
13642 + if (!vma || addr <= vma->vm_start)
13643 + /* remember the address as a hint for next time */
13644 + return (mm->free_area_cache = addr-len);
13645 + }
13646 +
13647 + if (mm->mmap_base < len)
13648 + goto bottomup;
13649 +
13650 + addr = mm->mmap_base-len;
13651 +
13652 + do {
13653 + /*
13654 + * Lookup failure means no vma is above this address,
13655 + * else if new region fits below vma->vm_start,
13656 + * return with success:
13657 + */
13658 + vma = find_vma(mm, addr);
13659 + if (!vma || addr+len <= vma->vm_start)
13660 + /* remember the address as a hint for next time */
13661 + return (mm->free_area_cache = addr);
13662 +
13663 + /* remember the largest hole we saw so far */
13664 + if (addr + mm->cached_hole_size < vma->vm_start)
13665 + mm->cached_hole_size = vma->vm_start - addr;
13666 +
13667 + /* try just below the current vma->vm_start */
13668 + addr = vma->vm_start-len;
13669 + } while (len < vma->vm_start);
13670 +
13671 +bottomup:
13672 + /*
13673 + * A failed mmap() very likely causes application failure,
13674 + * so fall back to the bottom-up function here. This scenario
13675 + * can happen with large stack limits and large mmap()
13676 + * allocations.
13677 + */
13678 +
13679 +#ifdef CONFIG_PAX_SEGMEXEC
13680 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
13681 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
13682 + else
13683 +#endif
13684 +
13685 + mm->mmap_base = TASK_UNMAPPED_BASE;
13686 +
13687 +#ifdef CONFIG_PAX_RANDMMAP
13688 + if (mm->pax_flags & MF_PAX_RANDMMAP)
13689 + mm->mmap_base += mm->delta_mmap;
13690 +#endif
13691 +
13692 + mm->free_area_cache = mm->mmap_base;
13693 + mm->cached_hole_size = ~0UL;
13694 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
13695 + /*
13696 + * Restore the topdown base:
13697 + */
13698 + mm->mmap_base = base;
13699 + mm->free_area_cache = base;
13700 + mm->cached_hole_size = ~0UL;
13701 +
13702 + return addr;
13703 +}
13704 +
13705 /*
13706 * Do a system call from kernel instead of calling sys_execve so we
13707 * end up with proper pt_regs.
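
The arch_get_unmapped_area() added above is, at its core, a first-fit walk over the address-ordered VMA list: starting from a base address, skip past each existing mapping until a gap of the requested length appears. The toy model below shows only that gap-finding logic; it ignores the mmap_base restart, cached_hole_size, and the SEGMEXEC/PAGEEXEC special cases, and every name in it is made up for illustration.

#include <stdio.h>

struct region { unsigned long start, end; };   /* existing mappings, sorted */

static unsigned long first_fit(const struct region *r, int n,
                               unsigned long base, unsigned long len)
{
    unsigned long addr = base;

    for (int i = 0; i < n; i++) {
        if (addr + len <= r[i].start)   /* gap before this mapping fits */
            return addr;
        if (r[i].end > addr)
            addr = r[i].end;            /* otherwise skip past the mapping */
    }
    return addr;                        /* gap after the last mapping */
}

int main(void)
{
    const struct region mapped[] = {
        { 0x08048000, 0x08060000 },
        { 0x40000000, 0x40100000 },
    };

    printf("%#lx\n", first_fit(mapped, 2, 0x10000000, 0x20000));   /* 0x10000000 */
    printf("%#lx\n", first_fit(mapped, 2, 0x3fff0000, 0x200000));  /* 0x40100000 */
    return 0;
}
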
13708 diff -urNp linux-2.6.34.1/arch/x86/kernel/sys_x86_64.c linux-2.6.34.1/arch/x86/kernel/sys_x86_64.c
13709 --- linux-2.6.34.1/arch/x86/kernel/sys_x86_64.c 2010-07-05 14:24:10.000000000 -0400
13710 +++ linux-2.6.34.1/arch/x86/kernel/sys_x86_64.c 2010-07-07 09:04:48.000000000 -0400
13711 @@ -32,8 +32,8 @@ out:
13712 return error;
13713 }
13714
13715 -static void find_start_end(unsigned long flags, unsigned long *begin,
13716 - unsigned long *end)
13717 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
13718 + unsigned long *begin, unsigned long *end)
13719 {
13720 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
13721 unsigned long new_begin;
13722 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
13723 *begin = new_begin;
13724 }
13725 } else {
13726 - *begin = TASK_UNMAPPED_BASE;
13727 + *begin = mm->mmap_base;
13728 *end = TASK_SIZE;
13729 }
13730 }
13731 @@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp
13732 if (flags & MAP_FIXED)
13733 return addr;
13734
13735 - find_start_end(flags, &begin, &end);
13736 + find_start_end(mm, flags, &begin, &end);
13737
13738 if (len > end)
13739 return -ENOMEM;
13740
13741 +#ifdef CONFIG_PAX_RANDMMAP
13742 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13743 +#endif
13744 +
13745 if (addr) {
13746 addr = PAGE_ALIGN(addr);
13747 vma = find_vma(mm, addr);
13748 @@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi
13749 {
13750 struct vm_area_struct *vma;
13751 struct mm_struct *mm = current->mm;
13752 - unsigned long addr = addr0;
13753 + unsigned long base = mm->mmap_base, addr = addr0;
13754
13755 /* requested length too big for entire address space */
13756 if (len > TASK_SIZE)
13757 @@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi
13758 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
13759 goto bottomup;
13760
13761 +#ifdef CONFIG_PAX_RANDMMAP
13762 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13763 +#endif
13764 +
13765 /* requesting a specific address */
13766 if (addr) {
13767 addr = PAGE_ALIGN(addr);
13768 @@ -198,13 +206,21 @@ bottomup:
13769 * can happen with large stack limits and large mmap()
13770 * allocations.
13771 */
13772 + mm->mmap_base = TASK_UNMAPPED_BASE;
13773 +
13774 +#ifdef CONFIG_PAX_RANDMMAP
13775 + if (mm->pax_flags & MF_PAX_RANDMMAP)
13776 + mm->mmap_base += mm->delta_mmap;
13777 +#endif
13778 +
13779 + mm->free_area_cache = mm->mmap_base;
13780 mm->cached_hole_size = ~0UL;
13781 - mm->free_area_cache = TASK_UNMAPPED_BASE;
13782 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
13783 /*
13784 * Restore the topdown base:
13785 */
13786 - mm->free_area_cache = mm->mmap_base;
13787 + mm->mmap_base = base;
13788 + mm->free_area_cache = base;
13789 mm->cached_hole_size = ~0UL;
13790
13791 return addr;
13792 diff -urNp linux-2.6.34.1/arch/x86/kernel/syscall_table_32.S linux-2.6.34.1/arch/x86/kernel/syscall_table_32.S
13793 --- linux-2.6.34.1/arch/x86/kernel/syscall_table_32.S 2010-07-05 14:24:10.000000000 -0400
13794 +++ linux-2.6.34.1/arch/x86/kernel/syscall_table_32.S 2010-07-07 09:04:48.000000000 -0400
13795 @@ -1,3 +1,4 @@
13796 +.section .rodata,"a",@progbits
13797 ENTRY(sys_call_table)
13798 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
13799 .long sys_exit
13800 diff -urNp linux-2.6.34.1/arch/x86/kernel/time.c linux-2.6.34.1/arch/x86/kernel/time.c
13801 --- linux-2.6.34.1/arch/x86/kernel/time.c 2010-07-05 14:24:10.000000000 -0400
13802 +++ linux-2.6.34.1/arch/x86/kernel/time.c 2010-07-07 09:04:48.000000000 -0400
13803 @@ -26,17 +26,13 @@
13804 int timer_ack;
13805 #endif
13806
13807 -#ifdef CONFIG_X86_64
13808 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
13809 -#endif
13810 -
13811 unsigned long profile_pc(struct pt_regs *regs)
13812 {
13813 unsigned long pc = instruction_pointer(regs);
13814
13815 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
13816 + if (!user_mode(regs) && in_lock_functions(pc)) {
13817 #ifdef CONFIG_FRAME_POINTER
13818 - return *(unsigned long *)(regs->bp + sizeof(long));
13819 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
13820 #else
13821 unsigned long *sp =
13822 (unsigned long *)kernel_stack_pointer(regs);
13823 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
13824 * or above a saved flags. Eflags has bits 22-31 zero,
13825 * kernel addresses don't.
13826 */
13827 +
13828 +#ifdef CONFIG_PAX_KERNEXEC
13829 + return ktla_ktva(sp[0]);
13830 +#else
13831 if (sp[0] >> 22)
13832 return sp[0];
13833 if (sp[1] >> 22)
13834 return sp[1];
13835 #endif
13836 +
13837 +#endif
13838 }
13839 return pc;
13840 }
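
The non-KERNEXEC path of profile_pc() above keeps the existing heuristic that a saved EFLAGS word has bits 22-31 clear while an i386 kernel text address (at or above PAGE_OFFSET, conventionally 0xc0000000) does not, so "value >> 22" separates the two; the new KERNEXEC branch instead translates sp[0] through ktla_ktva(). A tiny illustration of the heuristic (the sample values are invented):

#include <stdio.h>

int main(void)
{
    unsigned long eflags = 0x00000246UL;    /* typical saved EFLAGS image */
    unsigned long kaddr  = 0xc04a1b2cUL;    /* hypothetical kernel .text address */

    printf("eflags >> 22 = %lu\n", eflags >> 22);   /* 0: not a return address */
    printf("kaddr  >> 22 = %lu\n", kaddr  >> 22);   /* nonzero: plausible text */
    return 0;
}
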
13841 diff -urNp linux-2.6.34.1/arch/x86/kernel/tls.c linux-2.6.34.1/arch/x86/kernel/tls.c
13842 --- linux-2.6.34.1/arch/x86/kernel/tls.c 2010-07-05 14:24:10.000000000 -0400
13843 +++ linux-2.6.34.1/arch/x86/kernel/tls.c 2010-07-07 09:04:48.000000000 -0400
13844 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
13845 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
13846 return -EINVAL;
13847
13848 +#ifdef CONFIG_PAX_SEGMEXEC
13849 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
13850 + return -EINVAL;
13851 +#endif
13852 +
13853 set_tls_desc(p, idx, &info, 1);
13854
13855 return 0;
13856 diff -urNp linux-2.6.34.1/arch/x86/kernel/trampoline_32.S linux-2.6.34.1/arch/x86/kernel/trampoline_32.S
13857 --- linux-2.6.34.1/arch/x86/kernel/trampoline_32.S 2010-07-05 14:24:10.000000000 -0400
13858 +++ linux-2.6.34.1/arch/x86/kernel/trampoline_32.S 2010-07-07 09:04:48.000000000 -0400
13859 @@ -32,6 +32,12 @@
13860 #include <asm/segment.h>
13861 #include <asm/page_types.h>
13862
13863 +#ifdef CONFIG_PAX_KERNEXEC
13864 +#define ta(X) (X)
13865 +#else
13866 +#define ta(X) ((X) - __PAGE_OFFSET)
13867 +#endif
13868 +
13869 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
13870 __CPUINITRODATA
13871 .code16
13872 @@ -60,7 +66,7 @@ r_base = .
13873 inc %ax # protected mode (PE) bit
13874 lmsw %ax # into protected mode
13875 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
13876 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
13877 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
13878
13879 # These need to be in the same 64K segment as the above;
13880 # hence we don't use the boot_gdt_descr defined in head.S
13881 diff -urNp linux-2.6.34.1/arch/x86/kernel/traps.c linux-2.6.34.1/arch/x86/kernel/traps.c
13882 --- linux-2.6.34.1/arch/x86/kernel/traps.c 2010-07-05 14:24:10.000000000 -0400
13883 +++ linux-2.6.34.1/arch/x86/kernel/traps.c 2010-07-07 09:04:48.000000000 -0400
13884 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
13885
13886 /* Do we ignore FPU interrupts ? */
13887 char ignore_fpu_irq;
13888 -
13889 -/*
13890 - * The IDT has to be page-aligned to simplify the Pentium
13891 - * F0 0F bug workaround.
13892 - */
13893 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
13894 #endif
13895
13896 DECLARE_BITMAP(used_vectors, NR_VECTORS);
13897 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
13898 static inline void
13899 die_if_kernel(const char *str, struct pt_regs *regs, long err)
13900 {
13901 - if (!user_mode_vm(regs))
13902 + if (!user_mode(regs))
13903 die(str, regs, err);
13904 }
13905 #endif
13906
13907 static void __kprobes
13908 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
13909 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
13910 long error_code, siginfo_t *info)
13911 {
13912 struct task_struct *tsk = current;
13913
13914 #ifdef CONFIG_X86_32
13915 - if (regs->flags & X86_VM_MASK) {
13916 + if (v8086_mode(regs)) {
13917 /*
13918 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
13919 * On nmi (interrupt 2), do_trap should not be called.
13920 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
13921 }
13922 #endif
13923
13924 - if (!user_mode(regs))
13925 + if (!user_mode_novm(regs))
13926 goto kernel_trap;
13927
13928 #ifdef CONFIG_X86_32
13929 @@ -158,7 +152,7 @@ trap_signal:
13930 printk_ratelimit()) {
13931 printk(KERN_INFO
13932 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
13933 - tsk->comm, tsk->pid, str,
13934 + tsk->comm, task_pid_nr(tsk), str,
13935 regs->ip, regs->sp, error_code);
13936 print_vma_addr(" in ", regs->ip);
13937 printk("\n");
13938 @@ -175,8 +169,20 @@ kernel_trap:
13939 if (!fixup_exception(regs)) {
13940 tsk->thread.error_code = error_code;
13941 tsk->thread.trap_no = trapnr;
13942 +
13943 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13944 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
13945 + str = "PAX: suspicious stack segment fault";
13946 +#endif
13947 +
13948 die(str, regs, error_code);
13949 }
13950 +
13951 +#ifdef CONFIG_PAX_REFCOUNT
13952 + if (trapnr == 4)
13953 + pax_report_refcount_overflow(regs);
13954 +#endif
13955 +
13956 return;
13957
13958 #ifdef CONFIG_X86_32
13959 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
13960 conditional_sti(regs);
13961
13962 #ifdef CONFIG_X86_32
13963 - if (regs->flags & X86_VM_MASK)
13964 + if (v8086_mode(regs))
13965 goto gp_in_vm86;
13966 #endif
13967
13968 tsk = current;
13969 - if (!user_mode(regs))
13970 + if (!user_mode_novm(regs))
13971 goto gp_in_kernel;
13972
13973 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13974 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
13975 + struct mm_struct *mm = tsk->mm;
13976 + unsigned long limit;
13977 +
13978 + down_write(&mm->mmap_sem);
13979 + limit = mm->context.user_cs_limit;
13980 + if (limit < TASK_SIZE) {
13981 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
13982 + up_write(&mm->mmap_sem);
13983 + return;
13984 + }
13985 + up_write(&mm->mmap_sem);
13986 + }
13987 +#endif
13988 +
13989 tsk->thread.error_code = error_code;
13990 tsk->thread.trap_no = 13;
13991
13992 @@ -305,6 +327,13 @@ gp_in_kernel:
13993 if (notify_die(DIE_GPF, "general protection fault", regs,
13994 error_code, 13, SIGSEGV) == NOTIFY_STOP)
13995 return;
13996 +
13997 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13998 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
13999 + die("PAX: suspicious general protection fault", regs, error_code);
14000 + else
14001 +#endif
14002 +
14003 die("general protection fault", regs, error_code);
14004 }
14005
14006 @@ -559,7 +588,7 @@ dotraplinkage void __kprobes do_debug(st
14007 /* It's safe to allow irq's after DR6 has been saved */
14008 preempt_conditional_sti(regs);
14009
14010 - if (regs->flags & X86_VM_MASK) {
14011 + if (v8086_mode(regs)) {
14012 handle_vm86_trap((struct kernel_vm86_regs *) regs,
14013 error_code, 1);
14014 return;
14015 @@ -572,7 +601,7 @@ dotraplinkage void __kprobes do_debug(st
14016 * We already checked v86 mode above, so we can check for kernel mode
14017 * by just checking the CPL of CS.
14018 */
14019 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
14020 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
14021 tsk->thread.debugreg6 &= ~DR_STEP;
14022 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
14023 regs->flags &= ~X86_EFLAGS_TF;
14024 @@ -739,7 +768,7 @@ do_simd_coprocessor_error(struct pt_regs
14025 * Handle strange cache flush from user space exception
14026 * in all other cases. This is undocumented behaviour.
14027 */
14028 - if (regs->flags & X86_VM_MASK) {
14029 + if (v8086_mode(regs)) {
14030 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
14031 return;
14032 }
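
In the traps.c hunk, CONFIG_PAX_REFCOUNT hooks trap 4 (#OF, the overflow exception) so that an instrumented atomic increment which would wrap a reference counter gets reported instead of silently going negative. The kernel-side instrumentation lives elsewhere in the patch; the sketch below only illustrates the saturate-on-overflow idea in plain C, using the GCC/Clang __builtin_add_overflow builtin, and the function name is hypothetical.

#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *count)
{
    int next;

    if (__builtin_add_overflow(*count, 1, &next)) {
        /* refuse to wrap to INT_MIN; leaking the object is the lesser evil */
        fprintf(stderr, "refcount overflow detected\n");
        return 0;
    }
    *count = next;
    return 1;
}

int main(void)
{
    int count = INT_MAX - 1;
    int ok;

    ok = refcount_inc_checked(&count);
    printf("inc ok=%d count=%d\n", ok, count);

    ok = refcount_inc_checked(&count);   /* would wrap, so it is refused */
    printf("inc ok=%d count=%d\n", ok, count);
    return 0;
}
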
14033 diff -urNp linux-2.6.34.1/arch/x86/kernel/tsc.c linux-2.6.34.1/arch/x86/kernel/tsc.c
14034 --- linux-2.6.34.1/arch/x86/kernel/tsc.c 2010-07-05 14:24:10.000000000 -0400
14035 +++ linux-2.6.34.1/arch/x86/kernel/tsc.c 2010-07-07 09:04:48.000000000 -0400
14036 @@ -795,7 +795,7 @@ static struct dmi_system_id __initdata b
14037 DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
14038 },
14039 },
14040 - {}
14041 + { NULL, NULL, {{0, {0}}}, NULL}
14042 };
14043
14044 static void __init check_system_tsc_reliable(void)
14045 diff -urNp linux-2.6.34.1/arch/x86/kernel/vm86_32.c linux-2.6.34.1/arch/x86/kernel/vm86_32.c
14046 --- linux-2.6.34.1/arch/x86/kernel/vm86_32.c 2010-07-05 14:24:10.000000000 -0400
14047 +++ linux-2.6.34.1/arch/x86/kernel/vm86_32.c 2010-07-07 09:04:48.000000000 -0400
14048 @@ -41,6 +41,7 @@
14049 #include <linux/ptrace.h>
14050 #include <linux/audit.h>
14051 #include <linux/stddef.h>
14052 +#include <linux/grsecurity.h>
14053
14054 #include <asm/uaccess.h>
14055 #include <asm/io.h>
14056 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
14057 do_exit(SIGSEGV);
14058 }
14059
14060 - tss = &per_cpu(init_tss, get_cpu());
14061 + tss = init_tss + get_cpu();
14062 current->thread.sp0 = current->thread.saved_sp0;
14063 current->thread.sysenter_cs = __KERNEL_CS;
14064 load_sp0(tss, &current->thread);
14065 @@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __use
14066 struct task_struct *tsk;
14067 int tmp, ret = -EPERM;
14068
14069 +#ifdef CONFIG_GRKERNSEC_VM86
14070 + if (!capable(CAP_SYS_RAWIO)) {
14071 + gr_handle_vm86();
14072 + goto out;
14073 + }
14074 +#endif
14075 +
14076 tsk = current;
14077 if (tsk->thread.saved_sp0)
14078 goto out;
14079 @@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned
14080 int tmp, ret;
14081 struct vm86plus_struct __user *v86;
14082
14083 +#ifdef CONFIG_GRKERNSEC_VM86
14084 + if (!capable(CAP_SYS_RAWIO)) {
14085 + gr_handle_vm86();
14086 + ret = -EPERM;
14087 + goto out;
14088 + }
14089 +#endif
14090 +
14091 tsk = current;
14092 switch (cmd) {
14093 case VM86_REQUEST_IRQ:
14094 @@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm
14095 tsk->thread.saved_fs = info->regs32->fs;
14096 tsk->thread.saved_gs = get_user_gs(info->regs32);
14097
14098 - tss = &per_cpu(init_tss, get_cpu());
14099 + tss = init_tss + get_cpu();
14100 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
14101 if (cpu_has_sep)
14102 tsk->thread.sysenter_cs = 0;
14103 @@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_re
14104 goto cannot_handle;
14105 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
14106 goto cannot_handle;
14107 - intr_ptr = (unsigned long __user *) (i << 2);
14108 + intr_ptr = (__force unsigned long __user *) (i << 2);
14109 if (get_user(segoffs, intr_ptr))
14110 goto cannot_handle;
14111 if ((segoffs >> 16) == BIOSSEG)
14112 diff -urNp linux-2.6.34.1/arch/x86/kernel/vmi_32.c linux-2.6.34.1/arch/x86/kernel/vmi_32.c
14113 --- linux-2.6.34.1/arch/x86/kernel/vmi_32.c 2010-07-05 14:24:10.000000000 -0400
14114 +++ linux-2.6.34.1/arch/x86/kernel/vmi_32.c 2010-07-07 09:04:48.000000000 -0400
14115 @@ -46,12 +46,17 @@ typedef u32 __attribute__((regparm(1)))
14116 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
14117
14118 #define call_vrom_func(rom,func) \
14119 - (((VROMFUNC *)(rom->func))())
14120 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
14121
14122 #define call_vrom_long_func(rom,func,arg) \
14123 - (((VROMLONGFUNC *)(rom->func)) (arg))
14124 +({\
14125 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
14126 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
14127 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
14128 + __reloc;\
14129 +})
14130
14131 -static struct vrom_header *vmi_rom;
14132 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
14133 static int disable_pge;
14134 static int disable_pse;
14135 static int disable_sep;
14136 @@ -78,10 +83,10 @@ static struct {
14137 void (*set_initial_ap_state)(int, int);
14138 void (*halt)(void);
14139 void (*set_lazy_mode)(int mode);
14140 -} vmi_ops;
14141 +} vmi_ops __read_only;
14142
14143 /* Cached VMI operations */
14144 -struct vmi_timer_ops vmi_timer_ops;
14145 +struct vmi_timer_ops vmi_timer_ops __read_only;
14146
14147 /*
14148 * VMI patching routines.
14149 @@ -96,7 +101,7 @@ struct vmi_timer_ops vmi_timer_ops;
14150 static inline void patch_offset(void *insnbuf,
14151 unsigned long ip, unsigned long dest)
14152 {
14153 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
14154 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
14155 }
14156
14157 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
14158 @@ -104,6 +109,7 @@ static unsigned patch_internal(int call,
14159 {
14160 u64 reloc;
14161 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
14162 +
14163 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
14164 switch(rel->type) {
14165 case VMI_RELOCATION_CALL_REL:
14166 @@ -382,13 +388,13 @@ static void vmi_set_pud(pud_t *pudp, pud
14167
14168 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
14169 {
14170 - const pte_t pte = { .pte = 0 };
14171 + const pte_t pte = __pte(0ULL);
14172 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
14173 }
14174
14175 static void vmi_pmd_clear(pmd_t *pmd)
14176 {
14177 - const pte_t pte = { .pte = 0 };
14178 + const pte_t pte = __pte(0ULL);
14179 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
14180 }
14181 #endif
14182 @@ -416,8 +422,8 @@ vmi_startup_ipi_hook(int phys_apicid, un
14183 ap.ss = __KERNEL_DS;
14184 ap.esp = (unsigned long) start_esp;
14185
14186 - ap.ds = __USER_DS;
14187 - ap.es = __USER_DS;
14188 + ap.ds = __KERNEL_DS;
14189 + ap.es = __KERNEL_DS;
14190 ap.fs = __KERNEL_PERCPU;
14191 ap.gs = __KERNEL_STACK_CANARY;
14192
14193 @@ -464,6 +470,18 @@ static void vmi_leave_lazy_mmu(void)
14194 paravirt_leave_lazy_mmu();
14195 }
14196
14197 +#ifdef CONFIG_PAX_KERNEXEC
14198 +static unsigned long vmi_pax_open_kernel(void)
14199 +{
14200 + return 0;
14201 +}
14202 +
14203 +static unsigned long vmi_pax_close_kernel(void)
14204 +{
14205 + return 0;
14206 +}
14207 +#endif
14208 +
14209 static inline int __init check_vmi_rom(struct vrom_header *rom)
14210 {
14211 struct pci_header *pci;
14212 @@ -476,6 +494,10 @@ static inline int __init check_vmi_rom(s
14213 return 0;
14214 if (rom->vrom_signature != VMI_SIGNATURE)
14215 return 0;
14216 + if (rom->rom_length * 512 > sizeof(*rom)) {
14217 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
14218 + return 0;
14219 + }
14220 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
14221 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
14222 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
14223 @@ -540,7 +562,7 @@ static inline int __init probe_vmi_rom(v
14224 struct vrom_header *romstart;
14225 romstart = (struct vrom_header *)isa_bus_to_virt(base);
14226 if (check_vmi_rom(romstart)) {
14227 - vmi_rom = romstart;
14228 + vmi_rom = *romstart;
14229 return 1;
14230 }
14231 }
14232 @@ -816,6 +838,11 @@ static inline int __init activate_vmi(vo
14233
14234 para_fill(pv_irq_ops.safe_halt, Halt);
14235
14236 +#ifdef CONFIG_PAX_KERNEXEC
14237 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
14238 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
14239 +#endif
14240 +
14241 /*
14242 * Alternative instruction rewriting doesn't happen soon enough
14243 * to convert VMI_IRET to a call instead of a jump; so we have
14244 @@ -833,16 +860,16 @@ static inline int __init activate_vmi(vo
14245
14246 void __init vmi_init(void)
14247 {
14248 - if (!vmi_rom)
14249 + if (!vmi_rom.rom_signature)
14250 probe_vmi_rom();
14251 else
14252 - check_vmi_rom(vmi_rom);
14253 + check_vmi_rom(&vmi_rom);
14254
14255 /* In case probing for or validating the ROM failed, basil */
14256 - if (!vmi_rom)
14257 + if (!vmi_rom.rom_signature)
14258 return;
14259
14260 - reserve_top_address(-vmi_rom->virtual_top);
14261 + reserve_top_address(-vmi_rom.virtual_top);
14262
14263 #ifdef CONFIG_X86_IO_APIC
14264 /* This is virtual hardware; timer routing is wired correctly */
14265 @@ -854,7 +881,7 @@ void __init vmi_activate(void)
14266 {
14267 unsigned long flags;
14268
14269 - if (!vmi_rom)
14270 + if (!vmi_rom.rom_signature)
14271 return;
14272
14273 local_irq_save(flags);
14274 diff -urNp linux-2.6.34.1/arch/x86/kernel/vmlinux.lds.S linux-2.6.34.1/arch/x86/kernel/vmlinux.lds.S
14275 --- linux-2.6.34.1/arch/x86/kernel/vmlinux.lds.S 2010-07-05 14:24:10.000000000 -0400
14276 +++ linux-2.6.34.1/arch/x86/kernel/vmlinux.lds.S 2010-07-07 09:04:48.000000000 -0400
14277 @@ -26,6 +26,22 @@
14278 #include <asm/page_types.h>
14279 #include <asm/cache.h>
14280 #include <asm/boot.h>
14281 +#include <asm/segment.h>
14282 +
14283 +#undef PMD_SIZE
14284 +#undef PMD_SHIFT
14285 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14286 +#define PMD_SHIFT 21
14287 +#else
14288 +#define PMD_SHIFT 22
14289 +#endif
14290 +#define PMD_SIZE (1 << PMD_SHIFT)
14291 +
14292 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14293 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
14294 +#else
14295 +#define __KERNEL_TEXT_OFFSET 0
14296 +#endif
14297
14298 #undef i386 /* in case the preprocessor is a 32bit one */
14299
14300 @@ -34,13 +50,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
14301 #ifdef CONFIG_X86_32
14302 OUTPUT_ARCH(i386)
14303 ENTRY(phys_startup_32)
14304 -jiffies = jiffies_64;
14305 #else
14306 OUTPUT_ARCH(i386:x86-64)
14307 ENTRY(phys_startup_64)
14308 -jiffies_64 = jiffies;
14309 #endif
14310
14311 +jiffies = jiffies_64;
14312 +
14313 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
14314 /*
14315 * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
14316 @@ -69,31 +85,46 @@ jiffies_64 = jiffies;
14317
14318 PHDRS {
14319 text PT_LOAD FLAGS(5); /* R_E */
14320 - data PT_LOAD FLAGS(7); /* RWE */
14321 +#ifdef CONFIG_XEN
14322 + rodata PT_LOAD FLAGS(5); /* R_E */
14323 +#else
14324 + rodata PT_LOAD FLAGS(4); /* R__ */
14325 +#endif
14326 +#ifdef CONFIG_X86_32
14327 + module PT_LOAD FLAGS(5); /* R_E */
14328 +#endif
14329 + data PT_LOAD FLAGS(6); /* RW_ */
14330 #ifdef CONFIG_X86_64
14331 user PT_LOAD FLAGS(5); /* R_E */
14332 +#endif
14333 + init.begin PT_LOAD FLAGS(6); /* RW_ */
14334 #ifdef CONFIG_SMP
14335 percpu PT_LOAD FLAGS(6); /* RW_ */
14336 #endif
14337 + text.init PT_LOAD FLAGS(5); /* R_E */
14338 + text.exit PT_LOAD FLAGS(5); /* R_E */
14339 init PT_LOAD FLAGS(7); /* RWE */
14340 -#endif
14341 note PT_NOTE FLAGS(0); /* ___ */
14342 }
14343
14344 SECTIONS
14345 {
14346 #ifdef CONFIG_X86_32
14347 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
14348 - phys_startup_32 = startup_32 - LOAD_OFFSET;
14349 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
14350 #else
14351 - . = __START_KERNEL;
14352 - phys_startup_64 = startup_64 - LOAD_OFFSET;
14353 + . = __START_KERNEL;
14354 #endif
14355
14356 /* Text and read-only data */
14357 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
14358 - _text = .;
14359 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
14360 /* bootstrapping code */
14361 +#ifdef CONFIG_X86_32
14362 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14363 +#else
14364 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14365 +#endif
14366 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14367 + _text = .;
14368 HEAD_TEXT
14369 #ifdef CONFIG_X86_32
14370 . = ALIGN(PAGE_SIZE);
14371 @@ -108,30 +139,66 @@ SECTIONS
14372 IRQENTRY_TEXT
14373 *(.fixup)
14374 *(.gnu.warning)
14375 - /* End of text section */
14376 - _etext = .;
14377 } :text = 0x9090
14378
14379 - NOTES :text :note
14380 + . += __KERNEL_TEXT_OFFSET;
14381
14382 - EXCEPTION_TABLE(16) :text = 0x9090
14383 + . = ALIGN(PAGE_SIZE);
14384 + NOTES :rodata :note
14385 +
14386 + EXCEPTION_TABLE(16) :rodata
14387
14388 X64_ALIGN_DEBUG_RODATA_BEGIN
14389 RO_DATA(PAGE_SIZE)
14390 X64_ALIGN_DEBUG_RODATA_END
14391
14392 +#ifdef CONFIG_X86_32
14393 + . = ALIGN(PAGE_SIZE);
14394 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
14395 + *(.idt)
14396 + . = ALIGN(PAGE_SIZE);
14397 + *(.empty_zero_page)
14398 + *(.swapper_pg_pmd)
14399 + *(.swapper_pg_dir)
14400 + }
14401 +
14402 + . = ALIGN(PAGE_SIZE);
14403 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
14404 + *(.vmi.rom)
14405 + } :module
14406 +
14407 + . = ALIGN(PAGE_SIZE);
14408 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
14409 +
14410 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
14411 + MODULES_EXEC_VADDR = .;
14412 + BYTE(0)
14413 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
14414 + . = ALIGN(PMD_SIZE);
14415 + MODULES_EXEC_END = . - 1;
14416 +#endif
14417 +
14418 + } :module
14419 +#endif
14420 +
14421 /* Data */
14422 .data : AT(ADDR(.data) - LOAD_OFFSET) {
14423 + /* End of text section */
14424 + _etext = . - __KERNEL_TEXT_OFFSET;
14425 +
14426 +#ifdef CONFIG_PAX_KERNEXEC
14427 + . = ALIGN(PMD_SIZE);
14428 +#else
14429 + . = ALIGN(PAGE_SIZE);
14430 +#endif
14431 +
14432 /* Start of data section */
14433 _sdata = .;
14434
14435 /* init_task */
14436 INIT_TASK_DATA(THREAD_SIZE)
14437
14438 -#ifdef CONFIG_X86_32
14439 - /* 32 bit has nosave before _edata */
14440 NOSAVE_DATA
14441 -#endif
14442
14443 PAGE_ALIGNED_DATA(PAGE_SIZE)
14444
14445 @@ -194,12 +261,6 @@ SECTIONS
14446 }
14447 vgetcpu_mode = VVIRT(.vgetcpu_mode);
14448
14449 - . = ALIGN(L1_CACHE_BYTES);
14450 - .jiffies : AT(VLOAD(.jiffies)) {
14451 - *(.jiffies)
14452 - }
14453 - jiffies = VVIRT(.jiffies);
14454 -
14455 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
14456 *(.vsyscall_3)
14457 }
14458 @@ -215,12 +276,19 @@ SECTIONS
14459 #endif /* CONFIG_X86_64 */
14460
14461 /* Init code and data - will be freed after init */
14462 - . = ALIGN(PAGE_SIZE);
14463 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
14464 + BYTE(0)
14465 +
14466 +#ifdef CONFIG_PAX_KERNEXEC
14467 + . = ALIGN(PMD_SIZE);
14468 +#else
14469 + . = ALIGN(PAGE_SIZE);
14470 +#endif
14471 +
14472 __init_begin = .; /* paired with __init_end */
14473 - }
14474 + } :init.begin
14475
14476 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
14477 +#ifdef CONFIG_SMP
14478 /*
14479 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
14480 * output PHDR, so the next output section - .init.text - should
14481 @@ -229,12 +297,27 @@ SECTIONS
14482 PERCPU_VADDR(0, :percpu)
14483 #endif
14484
14485 - INIT_TEXT_SECTION(PAGE_SIZE)
14486 -#ifdef CONFIG_X86_64
14487 - :init
14488 -#endif
14489 + . = ALIGN(PAGE_SIZE);
14490 + init_begin = .;
14491 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
14492 + VMLINUX_SYMBOL(_sinittext) = .;
14493 + INIT_TEXT
14494 + VMLINUX_SYMBOL(_einittext) = .;
14495 + . = ALIGN(PAGE_SIZE);
14496 + } :text.init
14497
14498 - INIT_DATA_SECTION(16)
14499 + /*
14500 + * .exit.text is discarded at runtime, not link time, to deal with
14501 + * references from .altinstructions and .eh_frame
14502 + */
14503 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
14504 + EXIT_TEXT
14505 + . = ALIGN(16);
14506 + } :text.exit
14507 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
14508 +
14509 + . = ALIGN(PAGE_SIZE);
14510 + INIT_DATA_SECTION(16) :init
14511
14512 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
14513 __x86_cpu_dev_start = .;
14514 @@ -260,19 +343,11 @@ SECTIONS
14515 *(.altinstr_replacement)
14516 }
14517
14518 - /*
14519 - * .exit.text is discard at runtime, not link time, to deal with
14520 - * references from .altinstructions and .eh_frame
14521 - */
14522 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
14523 - EXIT_TEXT
14524 - }
14525 -
14526 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
14527 EXIT_DATA
14528 }
14529
14530 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
14531 +#ifndef CONFIG_SMP
14532 PERCPU(PAGE_SIZE)
14533 #endif
14534
14535 @@ -291,16 +366,10 @@ SECTIONS
14536 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
14537 __smp_locks = .;
14538 *(.smp_locks)
14539 - . = ALIGN(PAGE_SIZE);
14540 __smp_locks_end = .;
14541 + . = ALIGN(PAGE_SIZE);
14542 }
14543
14544 -#ifdef CONFIG_X86_64
14545 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
14546 - NOSAVE_DATA
14547 - }
14548 -#endif
14549 -
14550 /* BSS */
14551 . = ALIGN(PAGE_SIZE);
14552 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
14553 @@ -316,6 +385,7 @@ SECTIONS
14554 __brk_base = .;
14555 . += 64 * 1024; /* 64k alignment slop space */
14556 *(.brk_reservation) /* areas brk users have reserved */
14557 + . = ALIGN(PMD_SIZE);
14558 __brk_limit = .;
14559 }
14560
14561 @@ -342,13 +412,12 @@ SECTIONS
14562 * for the boot processor.
14563 */
14564 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
14565 -INIT_PER_CPU(gdt_page);
14566 INIT_PER_CPU(irq_stack_union);
14567
14568 /*
14569 * Build-time check on the image size:
14570 */
14571 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
14572 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
14573 "kernel image bigger than KERNEL_IMAGE_SIZE");
14574
14575 #ifdef CONFIG_SMP
14576 diff -urNp linux-2.6.34.1/arch/x86/kernel/vsyscall_64.c linux-2.6.34.1/arch/x86/kernel/vsyscall_64.c
14577 --- linux-2.6.34.1/arch/x86/kernel/vsyscall_64.c 2010-07-05 14:24:10.000000000 -0400
14578 +++ linux-2.6.34.1/arch/x86/kernel/vsyscall_64.c 2010-07-07 09:04:49.000000000 -0400
14579 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
14580
14581 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
14582 /* copy vsyscall data */
14583 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
14584 vsyscall_gtod_data.clock.vread = clock->vread;
14585 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
14586 vsyscall_gtod_data.clock.mask = clock->mask;
14587 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
14588 We do this here because otherwise user space would do it on
14589 its own in a likely inferior way (no access to jiffies).
14590 If you don't like it pass NULL. */
14591 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
14592 + if (tcache && tcache->blob[0] == (j = jiffies)) {
14593 p = tcache->blob[1];
14594 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
14595 /* Load per CPU data from RDTSCP */
14596 diff -urNp linux-2.6.34.1/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.34.1/arch/x86/kernel/x8664_ksyms_64.c
14597 --- linux-2.6.34.1/arch/x86/kernel/x8664_ksyms_64.c 2010-07-05 14:24:10.000000000 -0400
14598 +++ linux-2.6.34.1/arch/x86/kernel/x8664_ksyms_64.c 2010-07-07 09:04:49.000000000 -0400
14599 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
14600 EXPORT_SYMBOL(copy_user_generic_string);
14601 EXPORT_SYMBOL(copy_user_generic_unrolled);
14602 EXPORT_SYMBOL(__copy_user_nocache);
14603 -EXPORT_SYMBOL(_copy_from_user);
14604 -EXPORT_SYMBOL(_copy_to_user);
14605
14606 EXPORT_SYMBOL(copy_page);
14607 EXPORT_SYMBOL(clear_page);
14608 diff -urNp linux-2.6.34.1/arch/x86/kernel/xsave.c linux-2.6.34.1/arch/x86/kernel/xsave.c
14609 --- linux-2.6.34.1/arch/x86/kernel/xsave.c 2010-07-05 14:24:10.000000000 -0400
14610 +++ linux-2.6.34.1/arch/x86/kernel/xsave.c 2010-07-07 09:04:49.000000000 -0400
14611 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
14612 fx_sw_user->xstate_size > fx_sw_user->extended_size)
14613 return -1;
14614
14615 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
14616 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
14617 fx_sw_user->extended_size -
14618 FP_XSTATE_MAGIC2_SIZE));
14619 /*
14620 @@ -196,7 +196,7 @@ fx_only:
14621 * the other extended state.
14622 */
14623 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
14624 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
14625 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
14626 }
14627
14628 /*
14629 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
14630 if (task_thread_info(tsk)->status & TS_XSAVE)
14631 err = restore_user_xstate(buf);
14632 else
14633 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
14634 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
14635 buf);
14636 if (unlikely(err)) {
14637 /*
14638 diff -urNp linux-2.6.34.1/arch/x86/kvm/emulate.c linux-2.6.34.1/arch/x86/kvm/emulate.c
14639 --- linux-2.6.34.1/arch/x86/kvm/emulate.c 2010-07-05 14:24:10.000000000 -0400
14640 +++ linux-2.6.34.1/arch/x86/kvm/emulate.c 2010-07-07 09:04:49.000000000 -0400
14641 @@ -84,8 +84,8 @@
14642 #define Src2CL (1<<29)
14643 #define Src2ImmByte (2<<29)
14644 #define Src2One (3<<29)
14645 -#define Src2Imm16 (4<<29)
14646 -#define Src2Mask (7<<29)
14647 +#define Src2Imm16 (4U<<29)
14648 +#define Src2Mask (7U<<29)
14649
14650 enum {
14651 Group1_80, Group1_81, Group1_82, Group1_83,
14652 @@ -438,6 +438,7 @@ static u32 group2_table[] = {
14653
14654 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
14655 do { \
14656 + unsigned long _tmp; \
14657 __asm__ __volatile__ ( \
14658 _PRE_EFLAGS("0", "4", "2") \
14659 _op _suffix " %"_x"3,%1; " \
14660 @@ -451,8 +452,6 @@ static u32 group2_table[] = {
14661 /* Raw emulation: instruction has two explicit operands. */
14662 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
14663 do { \
14664 - unsigned long _tmp; \
14665 - \
14666 switch ((_dst).bytes) { \
14667 case 2: \
14668 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
14669 @@ -468,7 +467,6 @@ static u32 group2_table[] = {
14670
14671 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
14672 do { \
14673 - unsigned long _tmp; \
14674 switch ((_dst).bytes) { \
14675 case 1: \
14676 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
14677 diff -urNp linux-2.6.34.1/arch/x86/kvm/lapic.c linux-2.6.34.1/arch/x86/kvm/lapic.c
14678 --- linux-2.6.34.1/arch/x86/kvm/lapic.c 2010-07-05 14:24:10.000000000 -0400
14679 +++ linux-2.6.34.1/arch/x86/kvm/lapic.c 2010-07-07 09:04:49.000000000 -0400
14680 @@ -52,7 +52,7 @@
14681 #define APIC_BUS_CYCLE_NS 1
14682
14683 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
14684 -#define apic_debug(fmt, arg...)
14685 +#define apic_debug(fmt, arg...) do {} while (0)
14686
14687 #define APIC_LVT_NUM 6
14688 /* 14 is the version for Xeon and Pentium 8.4.8*/
14689 diff -urNp linux-2.6.34.1/arch/x86/kvm/svm.c linux-2.6.34.1/arch/x86/kvm/svm.c
14690 --- linux-2.6.34.1/arch/x86/kvm/svm.c 2010-07-05 14:24:10.000000000 -0400
14691 +++ linux-2.6.34.1/arch/x86/kvm/svm.c 2010-07-07 09:04:49.000000000 -0400
14692 @@ -2481,7 +2481,11 @@ static void reload_tss(struct kvm_vcpu *
14693 int cpu = raw_smp_processor_id();
14694
14695 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
14696 +
14697 + pax_open_kernel();
14698 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
14699 + pax_close_kernel();
14700 +
14701 load_TR_desc();
14702 }
14703
14704 @@ -2995,7 +2999,7 @@ static void svm_fpu_deactivate(struct kv
14705 svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
14706 }
14707
14708 -static struct kvm_x86_ops svm_x86_ops = {
14709 +static const struct kvm_x86_ops svm_x86_ops = {
14710 .cpu_has_kvm_support = has_svm,
14711 .disabled_by_bios = is_disabled,
14712 .hardware_setup = svm_hardware_setup,
14713 diff -urNp linux-2.6.34.1/arch/x86/kvm/vmx.c linux-2.6.34.1/arch/x86/kvm/vmx.c
14714 --- linux-2.6.34.1/arch/x86/kvm/vmx.c 2010-07-05 14:24:10.000000000 -0400
14715 +++ linux-2.6.34.1/arch/x86/kvm/vmx.c 2010-07-07 09:04:49.000000000 -0400
14716 @@ -606,7 +606,11 @@ static void reload_tss(void)
14717
14718 kvm_get_gdt(&gdt);
14719 descs = (void *)gdt.base;
14720 +
14721 + pax_open_kernel();
14722 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
14723 + pax_close_kernel();
14724 +
14725 load_TR_desc();
14726 }
14727
14728 @@ -1454,8 +1458,11 @@ static __init int hardware_setup(void)
14729 if (!cpu_has_vmx_flexpriority())
14730 flexpriority_enabled = 0;
14731
14732 - if (!cpu_has_vmx_tpr_shadow())
14733 - kvm_x86_ops->update_cr8_intercept = NULL;
14734 + if (!cpu_has_vmx_tpr_shadow()) {
14735 + pax_open_kernel();
14736 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
14737 + pax_close_kernel();
14738 + }
14739
14740 if (enable_ept && !cpu_has_vmx_ept_2m_page())
14741 kvm_disable_largepages();
14742 @@ -2429,7 +2436,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
14743 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
14744
14745 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
14746 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
14747 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
14748 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
14749 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
14750 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
14751 @@ -3845,6 +3852,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
14752 "jmp .Lkvm_vmx_return \n\t"
14753 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
14754 ".Lkvm_vmx_return: "
14755 +
14756 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14757 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
14758 + ".Lkvm_vmx_return2: "
14759 +#endif
14760 +
14761 /* Save guest registers, load host registers, keep flags */
14762 "xchg %0, (%%"R"sp) \n\t"
14763 "mov %%"R"ax, %c[rax](%0) \n\t"
14764 @@ -3891,8 +3904,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
14765 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
14766 #endif
14767 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
14768 +
14769 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14770 + ,[cs]"i"(__KERNEL_CS)
14771 +#endif
14772 +
14773 : "cc", "memory"
14774 - , R"bx", R"di", R"si"
14775 + , R"ax", R"bx", R"di", R"si"
14776 #ifdef CONFIG_X86_64
14777 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
14778 #endif
14779 @@ -3906,7 +3924,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
14780 if (vmx->rmode.irq.pending)
14781 fixup_rmode_irq(vmx);
14782
14783 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
14784 + asm("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS));
14785 vmx->launched = 1;
14786
14787 vmx_complete_interrupts(vmx);
14788 @@ -4129,7 +4147,7 @@ static void vmx_set_supported_cpuid(u32
14789 {
14790 }
14791
14792 -static struct kvm_x86_ops vmx_x86_ops = {
14793 +static const struct kvm_x86_ops vmx_x86_ops = {
14794 .cpu_has_kvm_support = cpu_has_kvm_support,
14795 .disabled_by_bios = vmx_disabled_by_bios,
14796 .hardware_setup = hardware_setup,
14797 diff -urNp linux-2.6.34.1/arch/x86/kvm/x86.c linux-2.6.34.1/arch/x86/kvm/x86.c
14798 --- linux-2.6.34.1/arch/x86/kvm/x86.c 2010-07-05 14:24:10.000000000 -0400
14799 +++ linux-2.6.34.1/arch/x86/kvm/x86.c 2010-07-07 09:04:49.000000000 -0400
14800 @@ -85,7 +85,7 @@ static void update_cr8_intercept(struct
14801 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
14802 struct kvm_cpuid_entry2 __user *entries);
14803
14804 -struct kvm_x86_ops *kvm_x86_ops;
14805 +const struct kvm_x86_ops *kvm_x86_ops;
14806 EXPORT_SYMBOL_GPL(kvm_x86_ops);
14807
14808 int ignore_msrs = 0;
14809 @@ -111,38 +111,38 @@ static struct kvm_shared_msrs_global __r
14810 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
14811
14812 struct kvm_stats_debugfs_item debugfs_entries[] = {
14813 - { "pf_fixed", VCPU_STAT(pf_fixed) },
14814 - { "pf_guest", VCPU_STAT(pf_guest) },
14815 - { "tlb_flush", VCPU_STAT(tlb_flush) },
14816 - { "invlpg", VCPU_STAT(invlpg) },
14817 - { "exits", VCPU_STAT(exits) },
14818 - { "io_exits", VCPU_STAT(io_exits) },
14819 - { "mmio_exits", VCPU_STAT(mmio_exits) },
14820 - { "signal_exits", VCPU_STAT(signal_exits) },
14821 - { "irq_window", VCPU_STAT(irq_window_exits) },
14822 - { "nmi_window", VCPU_STAT(nmi_window_exits) },
14823 - { "halt_exits", VCPU_STAT(halt_exits) },
14824 - { "halt_wakeup", VCPU_STAT(halt_wakeup) },
14825 - { "hypercalls", VCPU_STAT(hypercalls) },
14826 - { "request_irq", VCPU_STAT(request_irq_exits) },
14827 - { "irq_exits", VCPU_STAT(irq_exits) },
14828 - { "host_state_reload", VCPU_STAT(host_state_reload) },
14829 - { "efer_reload", VCPU_STAT(efer_reload) },
14830 - { "fpu_reload", VCPU_STAT(fpu_reload) },
14831 - { "insn_emulation", VCPU_STAT(insn_emulation) },
14832 - { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
14833 - { "irq_injections", VCPU_STAT(irq_injections) },
14834 - { "nmi_injections", VCPU_STAT(nmi_injections) },
14835 - { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
14836 - { "mmu_pte_write", VM_STAT(mmu_pte_write) },
14837 - { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
14838 - { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
14839 - { "mmu_flooded", VM_STAT(mmu_flooded) },
14840 - { "mmu_recycled", VM_STAT(mmu_recycled) },
14841 - { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
14842 - { "mmu_unsync", VM_STAT(mmu_unsync) },
14843 - { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
14844 - { "largepages", VM_STAT(lpages) },
14845 + { "pf_fixed", VCPU_STAT(pf_fixed), NULL },
14846 + { "pf_guest", VCPU_STAT(pf_guest), NULL },
14847 + { "tlb_flush", VCPU_STAT(tlb_flush), NULL },
14848 + { "invlpg", VCPU_STAT(invlpg), NULL },
14849 + { "exits", VCPU_STAT(exits), NULL },
14850 + { "io_exits", VCPU_STAT(io_exits), NULL },
14851 + { "mmio_exits", VCPU_STAT(mmio_exits), NULL },
14852 + { "signal_exits", VCPU_STAT(signal_exits), NULL },
14853 + { "irq_window", VCPU_STAT(irq_window_exits), NULL },
14854 + { "nmi_window", VCPU_STAT(nmi_window_exits), NULL },
14855 + { "halt_exits", VCPU_STAT(halt_exits), NULL },
14856 + { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL },
14857 + { "hypercalls", VCPU_STAT(hypercalls), NULL },
14858 + { "request_irq", VCPU_STAT(request_irq_exits), NULL },
14859 + { "irq_exits", VCPU_STAT(irq_exits), NULL },
14860 + { "host_state_reload", VCPU_STAT(host_state_reload), NULL },
14861 + { "efer_reload", VCPU_STAT(efer_reload), NULL },
14862 + { "fpu_reload", VCPU_STAT(fpu_reload), NULL },
14863 + { "insn_emulation", VCPU_STAT(insn_emulation), NULL },
14864 + { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL },
14865 + { "irq_injections", VCPU_STAT(irq_injections), NULL },
14866 + { "nmi_injections", VCPU_STAT(nmi_injections), NULL },
14867 + { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL },
14868 + { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL },
14869 + { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL },
14870 + { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL },
14871 + { "mmu_flooded", VM_STAT(mmu_flooded), NULL },
14872 + { "mmu_recycled", VM_STAT(mmu_recycled), NULL },
14873 + { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL },
14874 + { "mmu_unsync", VM_STAT(mmu_unsync), NULL },
14875 + { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL },
14876 + { "largepages", VM_STAT(lpages), NULL },
14877 { NULL }
14878 };
14879
14880 @@ -1604,6 +1604,8 @@ long kvm_arch_dev_ioctl(struct file *fil
14881 if (n < msr_list.nmsrs)
14882 goto out;
14883 r = -EFAULT;
14884 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
14885 + goto out;
14886 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
14887 num_msrs_to_save * sizeof(u32)))
14888 goto out;
14889 @@ -2000,7 +2002,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
14890 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
14891 struct kvm_interrupt *irq)
14892 {
14893 - if (irq->irq < 0 || irq->irq >= 256)
14894 + if (irq->irq >= 256)
14895 return -EINVAL;
14896 if (irqchip_in_kernel(vcpu->kvm))
14897 return -ENXIO;
14898 @@ -3757,10 +3759,10 @@ static void kvm_timer_init(void)
14899 }
14900 }
14901
14902 -int kvm_arch_init(void *opaque)
14903 +int kvm_arch_init(const void *opaque)
14904 {
14905 int r;
14906 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
14907 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
14908
14909 if (kvm_x86_ops) {
14910 printk(KERN_ERR "kvm: already loaded the other module\n");
14911 diff -urNp linux-2.6.34.1/arch/x86/lib/checksum_32.S linux-2.6.34.1/arch/x86/lib/checksum_32.S
14912 --- linux-2.6.34.1/arch/x86/lib/checksum_32.S 2010-07-05 14:24:10.000000000 -0400
14913 +++ linux-2.6.34.1/arch/x86/lib/checksum_32.S 2010-07-07 09:04:49.000000000 -0400
14914 @@ -28,7 +28,8 @@
14915 #include <linux/linkage.h>
14916 #include <asm/dwarf2.h>
14917 #include <asm/errno.h>
14918 -
14919 +#include <asm/segment.h>
14920 +
14921 /*
14922 * computes a partial checksum, e.g. for TCP/UDP fragments
14923 */
14924 @@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (
14925
14926 #define ARGBASE 16
14927 #define FP 12
14928 -
14929 -ENTRY(csum_partial_copy_generic)
14930 +
14931 +ENTRY(csum_partial_copy_generic_to_user)
14932 CFI_STARTPROC
14933 + pushl $(__USER_DS)
14934 + CFI_ADJUST_CFA_OFFSET 4
14935 + popl %es
14936 + CFI_ADJUST_CFA_OFFSET -4
14937 + jmp csum_partial_copy_generic
14938 +
14939 +ENTRY(csum_partial_copy_generic_from_user)
14940 + pushl $(__USER_DS)
14941 + CFI_ADJUST_CFA_OFFSET 4
14942 + popl %ds
14943 + CFI_ADJUST_CFA_OFFSET -4
14944 +
14945 +ENTRY(csum_partial_copy_generic)
14946 subl $4,%esp
14947 CFI_ADJUST_CFA_OFFSET 4
14948 pushl %edi
14949 @@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
14950 jmp 4f
14951 SRC(1: movw (%esi), %bx )
14952 addl $2, %esi
14953 -DST( movw %bx, (%edi) )
14954 +DST( movw %bx, %es:(%edi) )
14955 addl $2, %edi
14956 addw %bx, %ax
14957 adcl $0, %eax
14958 @@ -343,30 +357,30 @@ DST( movw %bx, (%edi) )
14959 SRC(1: movl (%esi), %ebx )
14960 SRC( movl 4(%esi), %edx )
14961 adcl %ebx, %eax
14962 -DST( movl %ebx, (%edi) )
14963 +DST( movl %ebx, %es:(%edi) )
14964 adcl %edx, %eax
14965 -DST( movl %edx, 4(%edi) )
14966 +DST( movl %edx, %es:4(%edi) )
14967
14968 SRC( movl 8(%esi), %ebx )
14969 SRC( movl 12(%esi), %edx )
14970 adcl %ebx, %eax
14971 -DST( movl %ebx, 8(%edi) )
14972 +DST( movl %ebx, %es:8(%edi) )
14973 adcl %edx, %eax
14974 -DST( movl %edx, 12(%edi) )
14975 +DST( movl %edx, %es:12(%edi) )
14976
14977 SRC( movl 16(%esi), %ebx )
14978 SRC( movl 20(%esi), %edx )
14979 adcl %ebx, %eax
14980 -DST( movl %ebx, 16(%edi) )
14981 +DST( movl %ebx, %es:16(%edi) )
14982 adcl %edx, %eax
14983 -DST( movl %edx, 20(%edi) )
14984 +DST( movl %edx, %es:20(%edi) )
14985
14986 SRC( movl 24(%esi), %ebx )
14987 SRC( movl 28(%esi), %edx )
14988 adcl %ebx, %eax
14989 -DST( movl %ebx, 24(%edi) )
14990 +DST( movl %ebx, %es:24(%edi) )
14991 adcl %edx, %eax
14992 -DST( movl %edx, 28(%edi) )
14993 +DST( movl %edx, %es:28(%edi) )
14994
14995 lea 32(%esi), %esi
14996 lea 32(%edi), %edi
14997 @@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) )
14998 shrl $2, %edx # This clears CF
14999 SRC(3: movl (%esi), %ebx )
15000 adcl %ebx, %eax
15001 -DST( movl %ebx, (%edi) )
15002 +DST( movl %ebx, %es:(%edi) )
15003 lea 4(%esi), %esi
15004 lea 4(%edi), %edi
15005 dec %edx
15006 @@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) )
15007 jb 5f
15008 SRC( movw (%esi), %cx )
15009 leal 2(%esi), %esi
15010 -DST( movw %cx, (%edi) )
15011 +DST( movw %cx, %es:(%edi) )
15012 leal 2(%edi), %edi
15013 je 6f
15014 shll $16,%ecx
15015 SRC(5: movb (%esi), %cl )
15016 -DST( movb %cl, (%edi) )
15017 +DST( movb %cl, %es:(%edi) )
15018 6: addl %ecx, %eax
15019 adcl $0, %eax
15020 7:
15021 @@ -408,7 +422,7 @@ DST( movb %cl, (%edi) )
15022
15023 6001:
15024 movl ARGBASE+20(%esp), %ebx # src_err_ptr
15025 - movl $-EFAULT, (%ebx)
15026 + movl $-EFAULT, %ss:(%ebx)
15027
15028 # zero the complete destination - computing the rest
15029 # is too much work
15030 @@ -421,11 +435,19 @@ DST( movb %cl, (%edi) )
15031
15032 6002:
15033 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
15034 - movl $-EFAULT,(%ebx)
15035 + movl $-EFAULT,%ss:(%ebx)
15036 jmp 5000b
15037
15038 .previous
15039
15040 + pushl %ss
15041 + CFI_ADJUST_CFA_OFFSET 4
15042 + popl %ds
15043 + CFI_ADJUST_CFA_OFFSET -4
15044 + pushl %ss
15045 + CFI_ADJUST_CFA_OFFSET 4
15046 + popl %es
15047 + CFI_ADJUST_CFA_OFFSET -4
15048 popl %ebx
15049 CFI_ADJUST_CFA_OFFSET -4
15050 CFI_RESTORE ebx
15051 @@ -439,26 +461,41 @@ DST( movb %cl, (%edi) )
15052 CFI_ADJUST_CFA_OFFSET -4
15053 ret
15054 CFI_ENDPROC
15055 -ENDPROC(csum_partial_copy_generic)
15056 +ENDPROC(csum_partial_copy_generic_to_user)
15057
15058 #else
15059
15060 /* Version for PentiumII/PPro */
15061
15062 #define ROUND1(x) \
15063 + nop; nop; nop; \
15064 SRC(movl x(%esi), %ebx ) ; \
15065 addl %ebx, %eax ; \
15066 - DST(movl %ebx, x(%edi) ) ;
15067 + DST(movl %ebx, %es:x(%edi)) ;
15068
15069 #define ROUND(x) \
15070 + nop; nop; nop; \
15071 SRC(movl x(%esi), %ebx ) ; \
15072 adcl %ebx, %eax ; \
15073 - DST(movl %ebx, x(%edi) ) ;
15074 + DST(movl %ebx, %es:x(%edi)) ;
15075
15076 #define ARGBASE 12
15077 -
15078 -ENTRY(csum_partial_copy_generic)
15079 +
15080 +ENTRY(csum_partial_copy_generic_to_user)
15081 CFI_STARTPROC
15082 + pushl $(__USER_DS)
15083 + CFI_ADJUST_CFA_OFFSET 4
15084 + popl %es
15085 + CFI_ADJUST_CFA_OFFSET -4
15086 + jmp csum_partial_copy_generic
15087 +
15088 +ENTRY(csum_partial_copy_generic_from_user)
15089 + pushl $(__USER_DS)
15090 + CFI_ADJUST_CFA_OFFSET 4
15091 + popl %ds
15092 + CFI_ADJUST_CFA_OFFSET -4
15093 +
15094 +ENTRY(csum_partial_copy_generic)
15095 pushl %ebx
15096 CFI_ADJUST_CFA_OFFSET 4
15097 CFI_REL_OFFSET ebx, 0
15098 @@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
15099 subl %ebx, %edi
15100 lea -1(%esi),%edx
15101 andl $-32,%edx
15102 - lea 3f(%ebx,%ebx), %ebx
15103 + lea 3f(%ebx,%ebx,2), %ebx
15104 testl %esi, %esi
15105 jmp *%ebx
15106 1: addl $64,%esi
15107 @@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
15108 jb 5f
15109 SRC( movw (%esi), %dx )
15110 leal 2(%esi), %esi
15111 -DST( movw %dx, (%edi) )
15112 +DST( movw %dx, %es:(%edi) )
15113 leal 2(%edi), %edi
15114 je 6f
15115 shll $16,%edx
15116 5:
15117 SRC( movb (%esi), %dl )
15118 -DST( movb %dl, (%edi) )
15119 +DST( movb %dl, %es:(%edi) )
15120 6: addl %edx, %eax
15121 adcl $0, %eax
15122 7:
15123 .section .fixup, "ax"
15124 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
15125 - movl $-EFAULT, (%ebx)
15126 + movl $-EFAULT, %ss:(%ebx)
15127 # zero the complete destination (computing the rest is too much work)
15128 movl ARGBASE+8(%esp),%edi # dst
15129 movl ARGBASE+12(%esp),%ecx # len
15130 @@ -523,10 +560,18 @@ DST( movb %dl, (%edi) )
15131 rep; stosb
15132 jmp 7b
15133 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
15134 - movl $-EFAULT, (%ebx)
15135 + movl $-EFAULT, %ss:(%ebx)
15136 jmp 7b
15137 .previous
15138
15139 + pushl %ss
15140 + CFI_ADJUST_CFA_OFFSET 4
15141 + popl %ds
15142 + CFI_ADJUST_CFA_OFFSET -4
15143 + pushl %ss
15144 + CFI_ADJUST_CFA_OFFSET 4
15145 + popl %es
15146 + CFI_ADJUST_CFA_OFFSET -4
15147 popl %esi
15148 CFI_ADJUST_CFA_OFFSET -4
15149 CFI_RESTORE esi
15150 @@ -538,7 +583,7 @@ DST( movb %dl, (%edi) )
15151 CFI_RESTORE ebx
15152 ret
15153 CFI_ENDPROC
15154 -ENDPROC(csum_partial_copy_generic)
15155 +ENDPROC(csum_partial_copy_generic_to_user)
15156
15157 #undef ROUND
15158 #undef ROUND1
15159 diff -urNp linux-2.6.34.1/arch/x86/lib/clear_page_64.S linux-2.6.34.1/arch/x86/lib/clear_page_64.S
15160 --- linux-2.6.34.1/arch/x86/lib/clear_page_64.S 2010-07-05 14:24:10.000000000 -0400
15161 +++ linux-2.6.34.1/arch/x86/lib/clear_page_64.S 2010-07-07 09:04:49.000000000 -0400
15162 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
15163
15164 #include <asm/cpufeature.h>
15165
15166 - .section .altinstr_replacement,"ax"
15167 + .section .altinstr_replacement,"a"
15168 1: .byte 0xeb /* jmp <disp8> */
15169 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
15170 2:
15171 diff -urNp linux-2.6.34.1/arch/x86/lib/copy_page_64.S linux-2.6.34.1/arch/x86/lib/copy_page_64.S
15172 --- linux-2.6.34.1/arch/x86/lib/copy_page_64.S 2010-07-05 14:24:10.000000000 -0400
15173 +++ linux-2.6.34.1/arch/x86/lib/copy_page_64.S 2010-07-07 09:04:49.000000000 -0400
15174 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
15175
15176 #include <asm/cpufeature.h>
15177
15178 - .section .altinstr_replacement,"ax"
15179 + .section .altinstr_replacement,"a"
15180 1: .byte 0xeb /* jmp <disp8> */
15181 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
15182 2:
15183 diff -urNp linux-2.6.34.1/arch/x86/lib/copy_user_64.S linux-2.6.34.1/arch/x86/lib/copy_user_64.S
15184 --- linux-2.6.34.1/arch/x86/lib/copy_user_64.S 2010-07-05 14:24:10.000000000 -0400
15185 +++ linux-2.6.34.1/arch/x86/lib/copy_user_64.S 2010-07-07 09:04:49.000000000 -0400
15186 @@ -15,13 +15,14 @@
15187 #include <asm/asm-offsets.h>
15188 #include <asm/thread_info.h>
15189 #include <asm/cpufeature.h>
15190 +#include <asm/pgtable.h>
15191
15192 .macro ALTERNATIVE_JUMP feature,orig,alt
15193 0:
15194 .byte 0xe9 /* 32bit jump */
15195 .long \orig-1f /* by default jump to orig */
15196 1:
15197 - .section .altinstr_replacement,"ax"
15198 + .section .altinstr_replacement,"a"
15199 2: .byte 0xe9 /* near jump with 32bit immediate */
15200 .long \alt-1b /* offset */ /* or alternatively to alt */
15201 .previous
15202 @@ -64,37 +65,13 @@
15203 #endif
15204 .endm
15205
15206 -/* Standard copy_to_user with segment limit checking */
15207 -ENTRY(_copy_to_user)
15208 - CFI_STARTPROC
15209 - GET_THREAD_INFO(%rax)
15210 - movq %rdi,%rcx
15211 - addq %rdx,%rcx
15212 - jc bad_to_user
15213 - cmpq TI_addr_limit(%rax),%rcx
15214 - jae bad_to_user
15215 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
15216 - CFI_ENDPROC
15217 -ENDPROC(_copy_to_user)
15218 -
15219 -/* Standard copy_from_user with segment limit checking */
15220 -ENTRY(_copy_from_user)
15221 - CFI_STARTPROC
15222 - GET_THREAD_INFO(%rax)
15223 - movq %rsi,%rcx
15224 - addq %rdx,%rcx
15225 - jc bad_from_user
15226 - cmpq TI_addr_limit(%rax),%rcx
15227 - jae bad_from_user
15228 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
15229 - CFI_ENDPROC
15230 -ENDPROC(_copy_from_user)
15231 -
15232 .section .fixup,"ax"
15233 /* must zero dest */
15234 ENTRY(bad_from_user)
15235 bad_from_user:
15236 CFI_STARTPROC
15237 + testl %edx,%edx
15238 + js bad_to_user
15239 movl %edx,%ecx
15240 xorl %eax,%eax
15241 rep
15242 diff -urNp linux-2.6.34.1/arch/x86/lib/copy_user_nocache_64.S linux-2.6.34.1/arch/x86/lib/copy_user_nocache_64.S
15243 --- linux-2.6.34.1/arch/x86/lib/copy_user_nocache_64.S 2010-07-05 14:24:10.000000000 -0400
15244 +++ linux-2.6.34.1/arch/x86/lib/copy_user_nocache_64.S 2010-07-07 09:04:49.000000000 -0400
15245 @@ -14,6 +14,7 @@
15246 #include <asm/current.h>
15247 #include <asm/asm-offsets.h>
15248 #include <asm/thread_info.h>
15249 +#include <asm/pgtable.h>
15250
15251 .macro ALIGN_DESTINATION
15252 #ifdef FIX_ALIGNMENT
15253 @@ -50,6 +51,15 @@
15254 */
15255 ENTRY(__copy_user_nocache)
15256 CFI_STARTPROC
15257 +
15258 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15259 + mov $PAX_USER_SHADOW_BASE,%rcx
15260 + cmp %rcx,%rsi
15261 + jae 1f
15262 + add %rcx,%rsi
15263 +1:
15264 +#endif
15265 +
15266 cmpl $8,%edx
15267 jb 20f /* less then 8 bytes, go to byte copy loop */
15268 ALIGN_DESTINATION
15269 diff -urNp linux-2.6.34.1/arch/x86/lib/csum-wrappers_64.c linux-2.6.34.1/arch/x86/lib/csum-wrappers_64.c
15270 --- linux-2.6.34.1/arch/x86/lib/csum-wrappers_64.c 2010-07-05 14:24:10.000000000 -0400
15271 +++ linux-2.6.34.1/arch/x86/lib/csum-wrappers_64.c 2010-07-07 09:04:49.000000000 -0400
15272 @@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void _
15273 len -= 2;
15274 }
15275 }
15276 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
15277 + src += PAX_USER_SHADOW_BASE;
15278 isum = csum_partial_copy_generic((__force const void *)src,
15279 dst, len, isum, errp, NULL);
15280 if (unlikely(*errp))
15281 @@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *sr
15282 }
15283
15284 *errp = 0;
15285 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
15286 + dst += PAX_USER_SHADOW_BASE;
15287 return csum_partial_copy_generic(src, (void __force *)dst,
15288 len, isum, NULL, errp);
15289 }
15290 diff -urNp linux-2.6.34.1/arch/x86/lib/getuser.S linux-2.6.34.1/arch/x86/lib/getuser.S
15291 --- linux-2.6.34.1/arch/x86/lib/getuser.S 2010-07-05 14:24:10.000000000 -0400
15292 +++ linux-2.6.34.1/arch/x86/lib/getuser.S 2010-07-07 09:04:49.000000000 -0400
15293 @@ -33,14 +33,38 @@
15294 #include <asm/asm-offsets.h>
15295 #include <asm/thread_info.h>
15296 #include <asm/asm.h>
15297 +#include <asm/segment.h>
15298 +#include <asm/pgtable.h>
15299
15300 .text
15301 ENTRY(__get_user_1)
15302 CFI_STARTPROC
15303 +
15304 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15305 + pushl $(__USER_DS)
15306 + popl %ds
15307 +#else
15308 GET_THREAD_INFO(%_ASM_DX)
15309 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15310 jae bad_get_user
15311 +
15312 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15313 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15314 + cmp %_ASM_DX,%_ASM_AX
15315 + jae 1234f
15316 + add %_ASM_DX,%_ASM_AX
15317 +1234:
15318 +#endif
15319 +
15320 +#endif
15321 +
15322 1: movzb (%_ASM_AX),%edx
15323 +
15324 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15325 + pushl %ss
15326 + pop %ds
15327 +#endif
15328 +
15329 xor %eax,%eax
15330 ret
15331 CFI_ENDPROC
15332 @@ -49,11 +73,33 @@ ENDPROC(__get_user_1)
15333 ENTRY(__get_user_2)
15334 CFI_STARTPROC
15335 add $1,%_ASM_AX
15336 +
15337 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15338 + pushl $(__USER_DS)
15339 + popl %ds
15340 +#else
15341 jc bad_get_user
15342 GET_THREAD_INFO(%_ASM_DX)
15343 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15344 jae bad_get_user
15345 +
15346 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15347 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15348 + cmp %_ASM_DX,%_ASM_AX
15349 + jae 1234f
15350 + add %_ASM_DX,%_ASM_AX
15351 +1234:
15352 +#endif
15353 +
15354 +#endif
15355 +
15356 2: movzwl -1(%_ASM_AX),%edx
15357 +
15358 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15359 + pushl %ss
15360 + pop %ds
15361 +#endif
15362 +
15363 xor %eax,%eax
15364 ret
15365 CFI_ENDPROC
15366 @@ -62,11 +108,33 @@ ENDPROC(__get_user_2)
15367 ENTRY(__get_user_4)
15368 CFI_STARTPROC
15369 add $3,%_ASM_AX
15370 +
15371 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15372 + pushl $(__USER_DS)
15373 + popl %ds
15374 +#else
15375 jc bad_get_user
15376 GET_THREAD_INFO(%_ASM_DX)
15377 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15378 jae bad_get_user
15379 +
15380 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15381 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15382 + cmp %_ASM_DX,%_ASM_AX
15383 + jae 1234f
15384 + add %_ASM_DX,%_ASM_AX
15385 +1234:
15386 +#endif
15387 +
15388 +#endif
15389 +
15390 3: mov -3(%_ASM_AX),%edx
15391 +
15392 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15393 + pushl %ss
15394 + pop %ds
15395 +#endif
15396 +
15397 xor %eax,%eax
15398 ret
15399 CFI_ENDPROC
15400 @@ -80,6 +148,15 @@ ENTRY(__get_user_8)
15401 GET_THREAD_INFO(%_ASM_DX)
15402 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15403 jae bad_get_user
15404 +
15405 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15406 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15407 + cmp %_ASM_DX,%_ASM_AX
15408 + jae 1234f
15409 + add %_ASM_DX,%_ASM_AX
15410 +1234:
15411 +#endif
15412 +
15413 4: movq -7(%_ASM_AX),%_ASM_DX
15414 xor %eax,%eax
15415 ret
15416 @@ -89,6 +166,12 @@ ENDPROC(__get_user_8)
15417
15418 bad_get_user:
15419 CFI_STARTPROC
15420 +
15421 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15422 + pushl %ss
15423 + pop %ds
15424 +#endif
15425 +
15426 xor %edx,%edx
15427 mov $(-EFAULT),%_ASM_AX
15428 ret
15429 diff -urNp linux-2.6.34.1/arch/x86/lib/mmx_32.c linux-2.6.34.1/arch/x86/lib/mmx_32.c
15430 --- linux-2.6.34.1/arch/x86/lib/mmx_32.c 2010-07-05 14:24:10.000000000 -0400
15431 +++ linux-2.6.34.1/arch/x86/lib/mmx_32.c 2010-07-07 09:04:49.000000000 -0400
15432 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
15433 {
15434 void *p;
15435 int i;
15436 + unsigned long cr0;
15437
15438 if (unlikely(in_interrupt()))
15439 return __memcpy(to, from, len);
15440 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
15441 kernel_fpu_begin();
15442
15443 __asm__ __volatile__ (
15444 - "1: prefetch (%0)\n" /* This set is 28 bytes */
15445 - " prefetch 64(%0)\n"
15446 - " prefetch 128(%0)\n"
15447 - " prefetch 192(%0)\n"
15448 - " prefetch 256(%0)\n"
15449 + "1: prefetch (%1)\n" /* This set is 28 bytes */
15450 + " prefetch 64(%1)\n"
15451 + " prefetch 128(%1)\n"
15452 + " prefetch 192(%1)\n"
15453 + " prefetch 256(%1)\n"
15454 "2: \n"
15455 ".section .fixup, \"ax\"\n"
15456 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15457 + "3: \n"
15458 +
15459 +#ifdef CONFIG_PAX_KERNEXEC
15460 + " movl %%cr0, %0\n"
15461 + " movl %0, %%eax\n"
15462 + " andl $0xFFFEFFFF, %%eax\n"
15463 + " movl %%eax, %%cr0\n"
15464 +#endif
15465 +
15466 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15467 +
15468 +#ifdef CONFIG_PAX_KERNEXEC
15469 + " movl %0, %%cr0\n"
15470 +#endif
15471 +
15472 " jmp 2b\n"
15473 ".previous\n"
15474 _ASM_EXTABLE(1b, 3b)
15475 - : : "r" (from));
15476 + : "=&r" (cr0) : "r" (from) : "ax");
15477
15478 for ( ; i > 5; i--) {
15479 __asm__ __volatile__ (
15480 - "1: prefetch 320(%0)\n"
15481 - "2: movq (%0), %%mm0\n"
15482 - " movq 8(%0), %%mm1\n"
15483 - " movq 16(%0), %%mm2\n"
15484 - " movq 24(%0), %%mm3\n"
15485 - " movq %%mm0, (%1)\n"
15486 - " movq %%mm1, 8(%1)\n"
15487 - " movq %%mm2, 16(%1)\n"
15488 - " movq %%mm3, 24(%1)\n"
15489 - " movq 32(%0), %%mm0\n"
15490 - " movq 40(%0), %%mm1\n"
15491 - " movq 48(%0), %%mm2\n"
15492 - " movq 56(%0), %%mm3\n"
15493 - " movq %%mm0, 32(%1)\n"
15494 - " movq %%mm1, 40(%1)\n"
15495 - " movq %%mm2, 48(%1)\n"
15496 - " movq %%mm3, 56(%1)\n"
15497 + "1: prefetch 320(%1)\n"
15498 + "2: movq (%1), %%mm0\n"
15499 + " movq 8(%1), %%mm1\n"
15500 + " movq 16(%1), %%mm2\n"
15501 + " movq 24(%1), %%mm3\n"
15502 + " movq %%mm0, (%2)\n"
15503 + " movq %%mm1, 8(%2)\n"
15504 + " movq %%mm2, 16(%2)\n"
15505 + " movq %%mm3, 24(%2)\n"
15506 + " movq 32(%1), %%mm0\n"
15507 + " movq 40(%1), %%mm1\n"
15508 + " movq 48(%1), %%mm2\n"
15509 + " movq 56(%1), %%mm3\n"
15510 + " movq %%mm0, 32(%2)\n"
15511 + " movq %%mm1, 40(%2)\n"
15512 + " movq %%mm2, 48(%2)\n"
15513 + " movq %%mm3, 56(%2)\n"
15514 ".section .fixup, \"ax\"\n"
15515 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15516 + "3:\n"
15517 +
15518 +#ifdef CONFIG_PAX_KERNEXEC
15519 + " movl %%cr0, %0\n"
15520 + " movl %0, %%eax\n"
15521 + " andl $0xFFFEFFFF, %%eax\n"
15522 + " movl %%eax, %%cr0\n"
15523 +#endif
15524 +
15525 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15526 +
15527 +#ifdef CONFIG_PAX_KERNEXEC
15528 + " movl %0, %%cr0\n"
15529 +#endif
15530 +
15531 " jmp 2b\n"
15532 ".previous\n"
15533 _ASM_EXTABLE(1b, 3b)
15534 - : : "r" (from), "r" (to) : "memory");
15535 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
15536
15537 from += 64;
15538 to += 64;
15539 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
15540 static void fast_copy_page(void *to, void *from)
15541 {
15542 int i;
15543 + unsigned long cr0;
15544
15545 kernel_fpu_begin();
15546
15547 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
15548 * but that is for later. -AV
15549 */
15550 __asm__ __volatile__(
15551 - "1: prefetch (%0)\n"
15552 - " prefetch 64(%0)\n"
15553 - " prefetch 128(%0)\n"
15554 - " prefetch 192(%0)\n"
15555 - " prefetch 256(%0)\n"
15556 + "1: prefetch (%1)\n"
15557 + " prefetch 64(%1)\n"
15558 + " prefetch 128(%1)\n"
15559 + " prefetch 192(%1)\n"
15560 + " prefetch 256(%1)\n"
15561 "2: \n"
15562 ".section .fixup, \"ax\"\n"
15563 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15564 + "3: \n"
15565 +
15566 +#ifdef CONFIG_PAX_KERNEXEC
15567 + " movl %%cr0, %0\n"
15568 + " movl %0, %%eax\n"
15569 + " andl $0xFFFEFFFF, %%eax\n"
15570 + " movl %%eax, %%cr0\n"
15571 +#endif
15572 +
15573 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15574 +
15575 +#ifdef CONFIG_PAX_KERNEXEC
15576 + " movl %0, %%cr0\n"
15577 +#endif
15578 +
15579 " jmp 2b\n"
15580 ".previous\n"
15581 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
15582 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
15583
15584 for (i = 0; i < (4096-320)/64; i++) {
15585 __asm__ __volatile__ (
15586 - "1: prefetch 320(%0)\n"
15587 - "2: movq (%0), %%mm0\n"
15588 - " movntq %%mm0, (%1)\n"
15589 - " movq 8(%0), %%mm1\n"
15590 - " movntq %%mm1, 8(%1)\n"
15591 - " movq 16(%0), %%mm2\n"
15592 - " movntq %%mm2, 16(%1)\n"
15593 - " movq 24(%0), %%mm3\n"
15594 - " movntq %%mm3, 24(%1)\n"
15595 - " movq 32(%0), %%mm4\n"
15596 - " movntq %%mm4, 32(%1)\n"
15597 - " movq 40(%0), %%mm5\n"
15598 - " movntq %%mm5, 40(%1)\n"
15599 - " movq 48(%0), %%mm6\n"
15600 - " movntq %%mm6, 48(%1)\n"
15601 - " movq 56(%0), %%mm7\n"
15602 - " movntq %%mm7, 56(%1)\n"
15603 + "1: prefetch 320(%1)\n"
15604 + "2: movq (%1), %%mm0\n"
15605 + " movntq %%mm0, (%2)\n"
15606 + " movq 8(%1), %%mm1\n"
15607 + " movntq %%mm1, 8(%2)\n"
15608 + " movq 16(%1), %%mm2\n"
15609 + " movntq %%mm2, 16(%2)\n"
15610 + " movq 24(%1), %%mm3\n"
15611 + " movntq %%mm3, 24(%2)\n"
15612 + " movq 32(%1), %%mm4\n"
15613 + " movntq %%mm4, 32(%2)\n"
15614 + " movq 40(%1), %%mm5\n"
15615 + " movntq %%mm5, 40(%2)\n"
15616 + " movq 48(%1), %%mm6\n"
15617 + " movntq %%mm6, 48(%2)\n"
15618 + " movq 56(%1), %%mm7\n"
15619 + " movntq %%mm7, 56(%2)\n"
15620 ".section .fixup, \"ax\"\n"
15621 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15622 + "3:\n"
15623 +
15624 +#ifdef CONFIG_PAX_KERNEXEC
15625 + " movl %%cr0, %0\n"
15626 + " movl %0, %%eax\n"
15627 + " andl $0xFFFEFFFF, %%eax\n"
15628 + " movl %%eax, %%cr0\n"
15629 +#endif
15630 +
15631 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15632 +
15633 +#ifdef CONFIG_PAX_KERNEXEC
15634 + " movl %0, %%cr0\n"
15635 +#endif
15636 +
15637 " jmp 2b\n"
15638 ".previous\n"
15639 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
15640 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
15641
15642 from += 64;
15643 to += 64;
15644 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
15645 static void fast_copy_page(void *to, void *from)
15646 {
15647 int i;
15648 + unsigned long cr0;
15649
15650 kernel_fpu_begin();
15651
15652 __asm__ __volatile__ (
15653 - "1: prefetch (%0)\n"
15654 - " prefetch 64(%0)\n"
15655 - " prefetch 128(%0)\n"
15656 - " prefetch 192(%0)\n"
15657 - " prefetch 256(%0)\n"
15658 + "1: prefetch (%1)\n"
15659 + " prefetch 64(%1)\n"
15660 + " prefetch 128(%1)\n"
15661 + " prefetch 192(%1)\n"
15662 + " prefetch 256(%1)\n"
15663 "2: \n"
15664 ".section .fixup, \"ax\"\n"
15665 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15666 + "3: \n"
15667 +
15668 +#ifdef CONFIG_PAX_KERNEXEC
15669 + " movl %%cr0, %0\n"
15670 + " movl %0, %%eax\n"
15671 + " andl $0xFFFEFFFF, %%eax\n"
15672 + " movl %%eax, %%cr0\n"
15673 +#endif
15674 +
15675 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15676 +
15677 +#ifdef CONFIG_PAX_KERNEXEC
15678 + " movl %0, %%cr0\n"
15679 +#endif
15680 +
15681 " jmp 2b\n"
15682 ".previous\n"
15683 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
15684 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
15685
15686 for (i = 0; i < 4096/64; i++) {
15687 __asm__ __volatile__ (
15688 - "1: prefetch 320(%0)\n"
15689 - "2: movq (%0), %%mm0\n"
15690 - " movq 8(%0), %%mm1\n"
15691 - " movq 16(%0), %%mm2\n"
15692 - " movq 24(%0), %%mm3\n"
15693 - " movq %%mm0, (%1)\n"
15694 - " movq %%mm1, 8(%1)\n"
15695 - " movq %%mm2, 16(%1)\n"
15696 - " movq %%mm3, 24(%1)\n"
15697 - " movq 32(%0), %%mm0\n"
15698 - " movq 40(%0), %%mm1\n"
15699 - " movq 48(%0), %%mm2\n"
15700 - " movq 56(%0), %%mm3\n"
15701 - " movq %%mm0, 32(%1)\n"
15702 - " movq %%mm1, 40(%1)\n"
15703 - " movq %%mm2, 48(%1)\n"
15704 - " movq %%mm3, 56(%1)\n"
15705 + "1: prefetch 320(%1)\n"
15706 + "2: movq (%1), %%mm0\n"
15707 + " movq 8(%1), %%mm1\n"
15708 + " movq 16(%1), %%mm2\n"
15709 + " movq 24(%1), %%mm3\n"
15710 + " movq %%mm0, (%2)\n"
15711 + " movq %%mm1, 8(%2)\n"
15712 + " movq %%mm2, 16(%2)\n"
15713 + " movq %%mm3, 24(%2)\n"
15714 + " movq 32(%1), %%mm0\n"
15715 + " movq 40(%1), %%mm1\n"
15716 + " movq 48(%1), %%mm2\n"
15717 + " movq 56(%1), %%mm3\n"
15718 + " movq %%mm0, 32(%2)\n"
15719 + " movq %%mm1, 40(%2)\n"
15720 + " movq %%mm2, 48(%2)\n"
15721 + " movq %%mm3, 56(%2)\n"
15722 ".section .fixup, \"ax\"\n"
15723 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15724 + "3:\n"
15725 +
15726 +#ifdef CONFIG_PAX_KERNEXEC
15727 + " movl %%cr0, %0\n"
15728 + " movl %0, %%eax\n"
15729 + " andl $0xFFFEFFFF, %%eax\n"
15730 + " movl %%eax, %%cr0\n"
15731 +#endif
15732 +
15733 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15734 +
15735 +#ifdef CONFIG_PAX_KERNEXEC
15736 + " movl %0, %%cr0\n"
15737 +#endif
15738 +
15739 " jmp 2b\n"
15740 ".previous\n"
15741 _ASM_EXTABLE(1b, 3b)
15742 - : : "r" (from), "r" (to) : "memory");
15743 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
15744
15745 from += 64;
15746 to += 64;
15747 diff -urNp linux-2.6.34.1/arch/x86/lib/putuser.S linux-2.6.34.1/arch/x86/lib/putuser.S
15748 --- linux-2.6.34.1/arch/x86/lib/putuser.S 2010-07-05 14:24:10.000000000 -0400
15749 +++ linux-2.6.34.1/arch/x86/lib/putuser.S 2010-07-07 09:04:49.000000000 -0400
15750 @@ -15,7 +15,8 @@
15751 #include <asm/thread_info.h>
15752 #include <asm/errno.h>
15753 #include <asm/asm.h>
15754 -
15755 +#include <asm/segment.h>
15756 +#include <asm/pgtable.h>
15757
15758 /*
15759 * __put_user_X
15760 @@ -29,59 +30,156 @@
15761 * as they get called from within inline assembly.
15762 */
15763
15764 -#define ENTER CFI_STARTPROC ; \
15765 - GET_THREAD_INFO(%_ASM_BX)
15766 +#define ENTER CFI_STARTPROC
15767 #define EXIT ret ; \
15768 CFI_ENDPROC
15769
15770 .text
15771 ENTRY(__put_user_1)
15772 ENTER
15773 +
15774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15775 + pushl $(__USER_DS)
15776 + popl %ds
15777 +#else
15778 + GET_THREAD_INFO(%_ASM_BX)
15779 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
15780 jae bad_put_user
15781 +
15782 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15783 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
15784 + cmp %_ASM_BX,%_ASM_CX
15785 + jae 1234f
15786 + add %_ASM_BX,%_ASM_CX
15787 +1234:
15788 +#endif
15789 +
15790 +#endif
15791 +
15792 1: movb %al,(%_ASM_CX)
15793 +
15794 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15795 + pushl %ss
15796 + popl %ds
15797 +#endif
15798 +
15799 xor %eax,%eax
15800 EXIT
15801 ENDPROC(__put_user_1)
15802
15803 ENTRY(__put_user_2)
15804 ENTER
15805 +
15806 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15807 + pushl $(__USER_DS)
15808 + popl %ds
15809 +#else
15810 + GET_THREAD_INFO(%_ASM_BX)
15811 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
15812 sub $1,%_ASM_BX
15813 cmp %_ASM_BX,%_ASM_CX
15814 jae bad_put_user
15815 +
15816 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15817 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
15818 + cmp %_ASM_BX,%_ASM_CX
15819 + jae 1234f
15820 + add %_ASM_BX,%_ASM_CX
15821 +1234:
15822 +#endif
15823 +
15824 +#endif
15825 +
15826 2: movw %ax,(%_ASM_CX)
15827 +
15828 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15829 + pushl %ss
15830 + popl %ds
15831 +#endif
15832 +
15833 xor %eax,%eax
15834 EXIT
15835 ENDPROC(__put_user_2)
15836
15837 ENTRY(__put_user_4)
15838 ENTER
15839 +
15840 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15841 + pushl $(__USER_DS)
15842 + popl %ds
15843 +#else
15844 + GET_THREAD_INFO(%_ASM_BX)
15845 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
15846 sub $3,%_ASM_BX
15847 cmp %_ASM_BX,%_ASM_CX
15848 jae bad_put_user
15849 +
15850 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15851 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
15852 + cmp %_ASM_BX,%_ASM_CX
15853 + jae 1234f
15854 + add %_ASM_BX,%_ASM_CX
15855 +1234:
15856 +#endif
15857 +
15858 +#endif
15859 +
15860 3: movl %eax,(%_ASM_CX)
15861 +
15862 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15863 + pushl %ss
15864 + popl %ds
15865 +#endif
15866 +
15867 xor %eax,%eax
15868 EXIT
15869 ENDPROC(__put_user_4)
15870
15871 ENTRY(__put_user_8)
15872 ENTER
15873 +
15874 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15875 + pushl $(__USER_DS)
15876 + popl %ds
15877 +#else
15878 + GET_THREAD_INFO(%_ASM_BX)
15879 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
15880 sub $7,%_ASM_BX
15881 cmp %_ASM_BX,%_ASM_CX
15882 jae bad_put_user
15883 +
15884 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15885 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
15886 + cmp %_ASM_BX,%_ASM_CX
15887 + jae 1234f
15888 + add %_ASM_BX,%_ASM_CX
15889 +1234:
15890 +#endif
15891 +
15892 +#endif
15893 +
15894 4: mov %_ASM_AX,(%_ASM_CX)
15895 #ifdef CONFIG_X86_32
15896 5: movl %edx,4(%_ASM_CX)
15897 #endif
15898 +
15899 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15900 + pushl %ss
15901 + popl %ds
15902 +#endif
15903 +
15904 xor %eax,%eax
15905 EXIT
15906 ENDPROC(__put_user_8)
15907
15908 bad_put_user:
15909 CFI_STARTPROC
15910 +
15911 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15912 + pushl %ss
15913 + popl %ds
15914 +#endif
15915 +
15916 movl $-EFAULT,%eax
15917 EXIT
15918 END(bad_put_user)
15919 diff -urNp linux-2.6.34.1/arch/x86/lib/usercopy_32.c linux-2.6.34.1/arch/x86/lib/usercopy_32.c
15920 --- linux-2.6.34.1/arch/x86/lib/usercopy_32.c 2010-07-05 14:24:10.000000000 -0400
15921 +++ linux-2.6.34.1/arch/x86/lib/usercopy_32.c 2010-07-07 09:04:49.000000000 -0400
15922 @@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned
15923 * Copy a null terminated string from userspace.
15924 */
15925
15926 -#define __do_strncpy_from_user(dst, src, count, res) \
15927 -do { \
15928 - int __d0, __d1, __d2; \
15929 - might_fault(); \
15930 - __asm__ __volatile__( \
15931 - " testl %1,%1\n" \
15932 - " jz 2f\n" \
15933 - "0: lodsb\n" \
15934 - " stosb\n" \
15935 - " testb %%al,%%al\n" \
15936 - " jz 1f\n" \
15937 - " decl %1\n" \
15938 - " jnz 0b\n" \
15939 - "1: subl %1,%0\n" \
15940 - "2:\n" \
15941 - ".section .fixup,\"ax\"\n" \
15942 - "3: movl %5,%0\n" \
15943 - " jmp 2b\n" \
15944 - ".previous\n" \
15945 - _ASM_EXTABLE(0b,3b) \
15946 - : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
15947 - "=&D" (__d2) \
15948 - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
15949 - : "memory"); \
15950 -} while (0)
15951 +static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
15952 +{
15953 + int __d0, __d1, __d2;
15954 + long res = -EFAULT;
15955 +
15956 + might_fault();
15957 + __asm__ __volatile__(
15958 + " movw %w10,%%ds\n"
15959 + " testl %1,%1\n"
15960 + " jz 2f\n"
15961 + "0: lodsb\n"
15962 + " stosb\n"
15963 + " testb %%al,%%al\n"
15964 + " jz 1f\n"
15965 + " decl %1\n"
15966 + " jnz 0b\n"
15967 + "1: subl %1,%0\n"
15968 + "2:\n"
15969 + " pushl %%ss\n"
15970 + " popl %%ds\n"
15971 + ".section .fixup,\"ax\"\n"
15972 + "3: movl %5,%0\n"
15973 + " jmp 2b\n"
15974 + ".previous\n"
15975 + _ASM_EXTABLE(0b,3b)
15976 + : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),
15977 + "=&D" (__d2)
15978 + : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
15979 + "r"(__USER_DS)
15980 + : "memory");
15981 + return res;
15982 +}
15983
15984 /**
15985 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
15986 @@ -85,9 +92,7 @@ do { \
15987 long
15988 __strncpy_from_user(char *dst, const char __user *src, long count)
15989 {
15990 - long res;
15991 - __do_strncpy_from_user(dst, src, count, res);
15992 - return res;
15993 + return __do_strncpy_from_user(dst, src, count);
15994 }
15995 EXPORT_SYMBOL(__strncpy_from_user);
15996
15997 @@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char
15998 {
15999 long res = -EFAULT;
16000 if (access_ok(VERIFY_READ, src, 1))
16001 - __do_strncpy_from_user(dst, src, count, res);
16002 + res = __do_strncpy_from_user(dst, src, count);
16003 return res;
16004 }
16005 EXPORT_SYMBOL(strncpy_from_user);
16006 @@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user);
16007 * Zero Userspace
16008 */
16009
16010 -#define __do_clear_user(addr,size) \
16011 -do { \
16012 - int __d0; \
16013 - might_fault(); \
16014 - __asm__ __volatile__( \
16015 - "0: rep; stosl\n" \
16016 - " movl %2,%0\n" \
16017 - "1: rep; stosb\n" \
16018 - "2:\n" \
16019 - ".section .fixup,\"ax\"\n" \
16020 - "3: lea 0(%2,%0,4),%0\n" \
16021 - " jmp 2b\n" \
16022 - ".previous\n" \
16023 - _ASM_EXTABLE(0b,3b) \
16024 - _ASM_EXTABLE(1b,2b) \
16025 - : "=&c"(size), "=&D" (__d0) \
16026 - : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
16027 -} while (0)
16028 +static unsigned long __do_clear_user(void __user *addr, unsigned long size)
16029 +{
16030 + int __d0;
16031 +
16032 + might_fault();
16033 + __asm__ __volatile__(
16034 + " movw %w6,%%es\n"
16035 + "0: rep; stosl\n"
16036 + " movl %2,%0\n"
16037 + "1: rep; stosb\n"
16038 + "2:\n"
16039 + " pushl %%ss\n"
16040 + " popl %%es\n"
16041 + ".section .fixup,\"ax\"\n"
16042 + "3: lea 0(%2,%0,4),%0\n"
16043 + " jmp 2b\n"
16044 + ".previous\n"
16045 + _ASM_EXTABLE(0b,3b)
16046 + _ASM_EXTABLE(1b,2b)
16047 + : "=&c"(size), "=&D" (__d0)
16048 + : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
16049 + "r"(__USER_DS));
16050 + return size;
16051 +}
16052
16053 /**
16054 * clear_user: - Zero a block of memory in user space.
16055 @@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned lon
16056 {
16057 might_fault();
16058 if (access_ok(VERIFY_WRITE, to, n))
16059 - __do_clear_user(to, n);
16060 + n = __do_clear_user(to, n);
16061 return n;
16062 }
16063 EXPORT_SYMBOL(clear_user);
16064 @@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user);
16065 unsigned long
16066 __clear_user(void __user *to, unsigned long n)
16067 {
16068 - __do_clear_user(to, n);
16069 - return n;
16070 + return __do_clear_user(to, n);
16071 }
16072 EXPORT_SYMBOL(__clear_user);
16073
16074 @@ -200,14 +210,17 @@ long strnlen_user(const char __user *s,
16075 might_fault();
16076
16077 __asm__ __volatile__(
16078 + " movw %w8,%%es\n"
16079 " testl %0, %0\n"
16080 " jz 3f\n"
16081 - " andl %0,%%ecx\n"
16082 + " movl %0,%%ecx\n"
16083 "0: repne; scasb\n"
16084 " setne %%al\n"
16085 " subl %%ecx,%0\n"
16086 " addl %0,%%eax\n"
16087 "1:\n"
16088 + " pushl %%ss\n"
16089 + " popl %%es\n"
16090 ".section .fixup,\"ax\"\n"
16091 "2: xorl %%eax,%%eax\n"
16092 " jmp 1b\n"
16093 @@ -219,7 +232,7 @@ long strnlen_user(const char __user *s,
16094 " .long 0b,2b\n"
16095 ".previous"
16096 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
16097 - :"0" (n), "1" (s), "2" (0), "3" (mask)
16098 + :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
16099 :"cc");
16100 return res & mask;
16101 }
16102 @@ -227,10 +240,121 @@ EXPORT_SYMBOL(strnlen_user);
16103
16104 #ifdef CONFIG_X86_INTEL_USERCOPY
16105 static unsigned long
16106 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
16107 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
16108 +{
16109 + int d0, d1;
16110 + __asm__ __volatile__(
16111 + " movw %w6, %%es\n"
16112 + " .align 2,0x90\n"
16113 + "1: movl 32(%4), %%eax\n"
16114 + " cmpl $67, %0\n"
16115 + " jbe 3f\n"
16116 + "2: movl 64(%4), %%eax\n"
16117 + " .align 2,0x90\n"
16118 + "3: movl 0(%4), %%eax\n"
16119 + "4: movl 4(%4), %%edx\n"
16120 + "5: movl %%eax, %%es:0(%3)\n"
16121 + "6: movl %%edx, %%es:4(%3)\n"
16122 + "7: movl 8(%4), %%eax\n"
16123 + "8: movl 12(%4),%%edx\n"
16124 + "9: movl %%eax, %%es:8(%3)\n"
16125 + "10: movl %%edx, %%es:12(%3)\n"
16126 + "11: movl 16(%4), %%eax\n"
16127 + "12: movl 20(%4), %%edx\n"
16128 + "13: movl %%eax, %%es:16(%3)\n"
16129 + "14: movl %%edx, %%es:20(%3)\n"
16130 + "15: movl 24(%4), %%eax\n"
16131 + "16: movl 28(%4), %%edx\n"
16132 + "17: movl %%eax, %%es:24(%3)\n"
16133 + "18: movl %%edx, %%es:28(%3)\n"
16134 + "19: movl 32(%4), %%eax\n"
16135 + "20: movl 36(%4), %%edx\n"
16136 + "21: movl %%eax, %%es:32(%3)\n"
16137 + "22: movl %%edx, %%es:36(%3)\n"
16138 + "23: movl 40(%4), %%eax\n"
16139 + "24: movl 44(%4), %%edx\n"
16140 + "25: movl %%eax, %%es:40(%3)\n"
16141 + "26: movl %%edx, %%es:44(%3)\n"
16142 + "27: movl 48(%4), %%eax\n"
16143 + "28: movl 52(%4), %%edx\n"
16144 + "29: movl %%eax, %%es:48(%3)\n"
16145 + "30: movl %%edx, %%es:52(%3)\n"
16146 + "31: movl 56(%4), %%eax\n"
16147 + "32: movl 60(%4), %%edx\n"
16148 + "33: movl %%eax, %%es:56(%3)\n"
16149 + "34: movl %%edx, %%es:60(%3)\n"
16150 + " addl $-64, %0\n"
16151 + " addl $64, %4\n"
16152 + " addl $64, %3\n"
16153 + " cmpl $63, %0\n"
16154 + " ja 1b\n"
16155 + "35: movl %0, %%eax\n"
16156 + " shrl $2, %0\n"
16157 + " andl $3, %%eax\n"
16158 + " cld\n"
16159 + "99: rep; movsl\n"
16160 + "36: movl %%eax, %0\n"
16161 + "37: rep; movsb\n"
16162 + "100:\n"
16163 + " pushl %%ss\n"
16164 + " popl %%es\n"
16165 + ".section .fixup,\"ax\"\n"
16166 + "101: lea 0(%%eax,%0,4),%0\n"
16167 + " jmp 100b\n"
16168 + ".previous\n"
16169 + ".section __ex_table,\"a\"\n"
16170 + " .align 4\n"
16171 + " .long 1b,100b\n"
16172 + " .long 2b,100b\n"
16173 + " .long 3b,100b\n"
16174 + " .long 4b,100b\n"
16175 + " .long 5b,100b\n"
16176 + " .long 6b,100b\n"
16177 + " .long 7b,100b\n"
16178 + " .long 8b,100b\n"
16179 + " .long 9b,100b\n"
16180 + " .long 10b,100b\n"
16181 + " .long 11b,100b\n"
16182 + " .long 12b,100b\n"
16183 + " .long 13b,100b\n"
16184 + " .long 14b,100b\n"
16185 + " .long 15b,100b\n"
16186 + " .long 16b,100b\n"
16187 + " .long 17b,100b\n"
16188 + " .long 18b,100b\n"
16189 + " .long 19b,100b\n"
16190 + " .long 20b,100b\n"
16191 + " .long 21b,100b\n"
16192 + " .long 22b,100b\n"
16193 + " .long 23b,100b\n"
16194 + " .long 24b,100b\n"
16195 + " .long 25b,100b\n"
16196 + " .long 26b,100b\n"
16197 + " .long 27b,100b\n"
16198 + " .long 28b,100b\n"
16199 + " .long 29b,100b\n"
16200 + " .long 30b,100b\n"
16201 + " .long 31b,100b\n"
16202 + " .long 32b,100b\n"
16203 + " .long 33b,100b\n"
16204 + " .long 34b,100b\n"
16205 + " .long 35b,100b\n"
16206 + " .long 36b,100b\n"
16207 + " .long 37b,100b\n"
16208 + " .long 99b,101b\n"
16209 + ".previous"
16210 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
16211 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16212 + : "eax", "edx", "memory");
16213 + return size;
16214 +}
16215 +
16216 +static unsigned long
16217 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
16218 {
16219 int d0, d1;
16220 __asm__ __volatile__(
16221 + " movw %w6, %%ds\n"
16222 " .align 2,0x90\n"
16223 "1: movl 32(%4), %%eax\n"
16224 " cmpl $67, %0\n"
16225 @@ -239,36 +363,36 @@ __copy_user_intel(void __user *to, const
16226 " .align 2,0x90\n"
16227 "3: movl 0(%4), %%eax\n"
16228 "4: movl 4(%4), %%edx\n"
16229 - "5: movl %%eax, 0(%3)\n"
16230 - "6: movl %%edx, 4(%3)\n"
16231 + "5: movl %%eax, %%es:0(%3)\n"
16232 + "6: movl %%edx, %%es:4(%3)\n"
16233 "7: movl 8(%4), %%eax\n"
16234 "8: movl 12(%4),%%edx\n"
16235 - "9: movl %%eax, 8(%3)\n"
16236 - "10: movl %%edx, 12(%3)\n"
16237 + "9: movl %%eax, %%es:8(%3)\n"
16238 + "10: movl %%edx, %%es:12(%3)\n"
16239 "11: movl 16(%4), %%eax\n"
16240 "12: movl 20(%4), %%edx\n"
16241 - "13: movl %%eax, 16(%3)\n"
16242 - "14: movl %%edx, 20(%3)\n"
16243 + "13: movl %%eax, %%es:16(%3)\n"
16244 + "14: movl %%edx, %%es:20(%3)\n"
16245 "15: movl 24(%4), %%eax\n"
16246 "16: movl 28(%4), %%edx\n"
16247 - "17: movl %%eax, 24(%3)\n"
16248 - "18: movl %%edx, 28(%3)\n"
16249 + "17: movl %%eax, %%es:24(%3)\n"
16250 + "18: movl %%edx, %%es:28(%3)\n"
16251 "19: movl 32(%4), %%eax\n"
16252 "20: movl 36(%4), %%edx\n"
16253 - "21: movl %%eax, 32(%3)\n"
16254 - "22: movl %%edx, 36(%3)\n"
16255 + "21: movl %%eax, %%es:32(%3)\n"
16256 + "22: movl %%edx, %%es:36(%3)\n"
16257 "23: movl 40(%4), %%eax\n"
16258 "24: movl 44(%4), %%edx\n"
16259 - "25: movl %%eax, 40(%3)\n"
16260 - "26: movl %%edx, 44(%3)\n"
16261 + "25: movl %%eax, %%es:40(%3)\n"
16262 + "26: movl %%edx, %%es:44(%3)\n"
16263 "27: movl 48(%4), %%eax\n"
16264 "28: movl 52(%4), %%edx\n"
16265 - "29: movl %%eax, 48(%3)\n"
16266 - "30: movl %%edx, 52(%3)\n"
16267 + "29: movl %%eax, %%es:48(%3)\n"
16268 + "30: movl %%edx, %%es:52(%3)\n"
16269 "31: movl 56(%4), %%eax\n"
16270 "32: movl 60(%4), %%edx\n"
16271 - "33: movl %%eax, 56(%3)\n"
16272 - "34: movl %%edx, 60(%3)\n"
16273 + "33: movl %%eax, %%es:56(%3)\n"
16274 + "34: movl %%edx, %%es:60(%3)\n"
16275 " addl $-64, %0\n"
16276 " addl $64, %4\n"
16277 " addl $64, %3\n"
16278 @@ -282,6 +406,8 @@ __copy_user_intel(void __user *to, const
16279 "36: movl %%eax, %0\n"
16280 "37: rep; movsb\n"
16281 "100:\n"
16282 + " pushl %%ss\n"
16283 + " popl %%ds\n"
16284 ".section .fixup,\"ax\"\n"
16285 "101: lea 0(%%eax,%0,4),%0\n"
16286 " jmp 100b\n"
16287 @@ -328,7 +454,7 @@ __copy_user_intel(void __user *to, const
16288 " .long 99b,101b\n"
16289 ".previous"
16290 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16291 - : "1"(to), "2"(from), "0"(size)
16292 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16293 : "eax", "edx", "memory");
16294 return size;
16295 }
16296 @@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, cons
16297 {
16298 int d0, d1;
16299 __asm__ __volatile__(
16300 + " movw %w6, %%ds\n"
16301 " .align 2,0x90\n"
16302 "0: movl 32(%4), %%eax\n"
16303 " cmpl $67, %0\n"
16304 @@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, cons
16305 " .align 2,0x90\n"
16306 "2: movl 0(%4), %%eax\n"
16307 "21: movl 4(%4), %%edx\n"
16308 - " movl %%eax, 0(%3)\n"
16309 - " movl %%edx, 4(%3)\n"
16310 + " movl %%eax, %%es:0(%3)\n"
16311 + " movl %%edx, %%es:4(%3)\n"
16312 "3: movl 8(%4), %%eax\n"
16313 "31: movl 12(%4),%%edx\n"
16314 - " movl %%eax, 8(%3)\n"
16315 - " movl %%edx, 12(%3)\n"
16316 + " movl %%eax, %%es:8(%3)\n"
16317 + " movl %%edx, %%es:12(%3)\n"
16318 "4: movl 16(%4), %%eax\n"
16319 "41: movl 20(%4), %%edx\n"
16320 - " movl %%eax, 16(%3)\n"
16321 - " movl %%edx, 20(%3)\n"
16322 + " movl %%eax, %%es:16(%3)\n"
16323 + " movl %%edx, %%es:20(%3)\n"
16324 "10: movl 24(%4), %%eax\n"
16325 "51: movl 28(%4), %%edx\n"
16326 - " movl %%eax, 24(%3)\n"
16327 - " movl %%edx, 28(%3)\n"
16328 + " movl %%eax, %%es:24(%3)\n"
16329 + " movl %%edx, %%es:28(%3)\n"
16330 "11: movl 32(%4), %%eax\n"
16331 "61: movl 36(%4), %%edx\n"
16332 - " movl %%eax, 32(%3)\n"
16333 - " movl %%edx, 36(%3)\n"
16334 + " movl %%eax, %%es:32(%3)\n"
16335 + " movl %%edx, %%es:36(%3)\n"
16336 "12: movl 40(%4), %%eax\n"
16337 "71: movl 44(%4), %%edx\n"
16338 - " movl %%eax, 40(%3)\n"
16339 - " movl %%edx, 44(%3)\n"
16340 + " movl %%eax, %%es:40(%3)\n"
16341 + " movl %%edx, %%es:44(%3)\n"
16342 "13: movl 48(%4), %%eax\n"
16343 "81: movl 52(%4), %%edx\n"
16344 - " movl %%eax, 48(%3)\n"
16345 - " movl %%edx, 52(%3)\n"
16346 + " movl %%eax, %%es:48(%3)\n"
16347 + " movl %%edx, %%es:52(%3)\n"
16348 "14: movl 56(%4), %%eax\n"
16349 "91: movl 60(%4), %%edx\n"
16350 - " movl %%eax, 56(%3)\n"
16351 - " movl %%edx, 60(%3)\n"
16352 + " movl %%eax, %%es:56(%3)\n"
16353 + " movl %%edx, %%es:60(%3)\n"
16354 " addl $-64, %0\n"
16355 " addl $64, %4\n"
16356 " addl $64, %3\n"
16357 @@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, cons
16358 " movl %%eax,%0\n"
16359 "7: rep; movsb\n"
16360 "8:\n"
16361 + " pushl %%ss\n"
16362 + " popl %%ds\n"
16363 ".section .fixup,\"ax\"\n"
16364 "9: lea 0(%%eax,%0,4),%0\n"
16365 "16: pushl %0\n"
16366 @@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, cons
16367 " .long 7b,16b\n"
16368 ".previous"
16369 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16370 - : "1"(to), "2"(from), "0"(size)
16371 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16372 : "eax", "edx", "memory");
16373 return size;
16374 }
16375 @@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing
16376 int d0, d1;
16377
16378 __asm__ __volatile__(
16379 + " movw %w6, %%ds\n"
16380 " .align 2,0x90\n"
16381 "0: movl 32(%4), %%eax\n"
16382 " cmpl $67, %0\n"
16383 @@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing
16384 " .align 2,0x90\n"
16385 "2: movl 0(%4), %%eax\n"
16386 "21: movl 4(%4), %%edx\n"
16387 - " movnti %%eax, 0(%3)\n"
16388 - " movnti %%edx, 4(%3)\n"
16389 + " movnti %%eax, %%es:0(%3)\n"
16390 + " movnti %%edx, %%es:4(%3)\n"
16391 "3: movl 8(%4), %%eax\n"
16392 "31: movl 12(%4),%%edx\n"
16393 - " movnti %%eax, 8(%3)\n"
16394 - " movnti %%edx, 12(%3)\n"
16395 + " movnti %%eax, %%es:8(%3)\n"
16396 + " movnti %%edx, %%es:12(%3)\n"
16397 "4: movl 16(%4), %%eax\n"
16398 "41: movl 20(%4), %%edx\n"
16399 - " movnti %%eax, 16(%3)\n"
16400 - " movnti %%edx, 20(%3)\n"
16401 + " movnti %%eax, %%es:16(%3)\n"
16402 + " movnti %%edx, %%es:20(%3)\n"
16403 "10: movl 24(%4), %%eax\n"
16404 "51: movl 28(%4), %%edx\n"
16405 - " movnti %%eax, 24(%3)\n"
16406 - " movnti %%edx, 28(%3)\n"
16407 + " movnti %%eax, %%es:24(%3)\n"
16408 + " movnti %%edx, %%es:28(%3)\n"
16409 "11: movl 32(%4), %%eax\n"
16410 "61: movl 36(%4), %%edx\n"
16411 - " movnti %%eax, 32(%3)\n"
16412 - " movnti %%edx, 36(%3)\n"
16413 + " movnti %%eax, %%es:32(%3)\n"
16414 + " movnti %%edx, %%es:36(%3)\n"
16415 "12: movl 40(%4), %%eax\n"
16416 "71: movl 44(%4), %%edx\n"
16417 - " movnti %%eax, 40(%3)\n"
16418 - " movnti %%edx, 44(%3)\n"
16419 + " movnti %%eax, %%es:40(%3)\n"
16420 + " movnti %%edx, %%es:44(%3)\n"
16421 "13: movl 48(%4), %%eax\n"
16422 "81: movl 52(%4), %%edx\n"
16423 - " movnti %%eax, 48(%3)\n"
16424 - " movnti %%edx, 52(%3)\n"
16425 + " movnti %%eax, %%es:48(%3)\n"
16426 + " movnti %%edx, %%es:52(%3)\n"
16427 "14: movl 56(%4), %%eax\n"
16428 "91: movl 60(%4), %%edx\n"
16429 - " movnti %%eax, 56(%3)\n"
16430 - " movnti %%edx, 60(%3)\n"
16431 + " movnti %%eax, %%es:56(%3)\n"
16432 + " movnti %%edx, %%es:60(%3)\n"
16433 " addl $-64, %0\n"
16434 " addl $64, %4\n"
16435 " addl $64, %3\n"
16436 @@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing
16437 " movl %%eax,%0\n"
16438 "7: rep; movsb\n"
16439 "8:\n"
16440 + " pushl %%ss\n"
16441 + " popl %%ds\n"
16442 ".section .fixup,\"ax\"\n"
16443 "9: lea 0(%%eax,%0,4),%0\n"
16444 "16: pushl %0\n"
16445 @@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing
16446 " .long 7b,16b\n"
16447 ".previous"
16448 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16449 - : "1"(to), "2"(from), "0"(size)
16450 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16451 : "eax", "edx", "memory");
16452 return size;
16453 }
16454 @@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_n
16455 int d0, d1;
16456
16457 __asm__ __volatile__(
16458 + " movw %w6, %%ds\n"
16459 " .align 2,0x90\n"
16460 "0: movl 32(%4), %%eax\n"
16461 " cmpl $67, %0\n"
16462 @@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_n
16463 " .align 2,0x90\n"
16464 "2: movl 0(%4), %%eax\n"
16465 "21: movl 4(%4), %%edx\n"
16466 - " movnti %%eax, 0(%3)\n"
16467 - " movnti %%edx, 4(%3)\n"
16468 + " movnti %%eax, %%es:0(%3)\n"
16469 + " movnti %%edx, %%es:4(%3)\n"
16470 "3: movl 8(%4), %%eax\n"
16471 "31: movl 12(%4),%%edx\n"
16472 - " movnti %%eax, 8(%3)\n"
16473 - " movnti %%edx, 12(%3)\n"
16474 + " movnti %%eax, %%es:8(%3)\n"
16475 + " movnti %%edx, %%es:12(%3)\n"
16476 "4: movl 16(%4), %%eax\n"
16477 "41: movl 20(%4), %%edx\n"
16478 - " movnti %%eax, 16(%3)\n"
16479 - " movnti %%edx, 20(%3)\n"
16480 + " movnti %%eax, %%es:16(%3)\n"
16481 + " movnti %%edx, %%es:20(%3)\n"
16482 "10: movl 24(%4), %%eax\n"
16483 "51: movl 28(%4), %%edx\n"
16484 - " movnti %%eax, 24(%3)\n"
16485 - " movnti %%edx, 28(%3)\n"
16486 + " movnti %%eax, %%es:24(%3)\n"
16487 + " movnti %%edx, %%es:28(%3)\n"
16488 "11: movl 32(%4), %%eax\n"
16489 "61: movl 36(%4), %%edx\n"
16490 - " movnti %%eax, 32(%3)\n"
16491 - " movnti %%edx, 36(%3)\n"
16492 + " movnti %%eax, %%es:32(%3)\n"
16493 + " movnti %%edx, %%es:36(%3)\n"
16494 "12: movl 40(%4), %%eax\n"
16495 "71: movl 44(%4), %%edx\n"
16496 - " movnti %%eax, 40(%3)\n"
16497 - " movnti %%edx, 44(%3)\n"
16498 + " movnti %%eax, %%es:40(%3)\n"
16499 + " movnti %%edx, %%es:44(%3)\n"
16500 "13: movl 48(%4), %%eax\n"
16501 "81: movl 52(%4), %%edx\n"
16502 - " movnti %%eax, 48(%3)\n"
16503 - " movnti %%edx, 52(%3)\n"
16504 + " movnti %%eax, %%es:48(%3)\n"
16505 + " movnti %%edx, %%es:52(%3)\n"
16506 "14: movl 56(%4), %%eax\n"
16507 "91: movl 60(%4), %%edx\n"
16508 - " movnti %%eax, 56(%3)\n"
16509 - " movnti %%edx, 60(%3)\n"
16510 + " movnti %%eax, %%es:56(%3)\n"
16511 + " movnti %%edx, %%es:60(%3)\n"
16512 " addl $-64, %0\n"
16513 " addl $64, %4\n"
16514 " addl $64, %3\n"
16515 @@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_n
16516 " movl %%eax,%0\n"
16517 "7: rep; movsb\n"
16518 "8:\n"
16519 + " pushl %%ss\n"
16520 + " popl %%ds\n"
16521 ".section .fixup,\"ax\"\n"
16522 "9: lea 0(%%eax,%0,4),%0\n"
16523 "16: jmp 8b\n"
16524 @@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_n
16525 " .long 7b,16b\n"
16526 ".previous"
16527 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16528 - : "1"(to), "2"(from), "0"(size)
16529 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16530 : "eax", "edx", "memory");
16531 return size;
16532 }
16533 @@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_n
16534 */
16535 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
16536 unsigned long size);
16537 -unsigned long __copy_user_intel(void __user *to, const void *from,
16538 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
16539 + unsigned long size);
16540 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
16541 unsigned long size);
16542 unsigned long __copy_user_zeroing_intel_nocache(void *to,
16543 const void __user *from, unsigned long size);
16544 #endif /* CONFIG_X86_INTEL_USERCOPY */
16545
16546 /* Generic arbitrary sized copy. */
16547 -#define __copy_user(to, from, size) \
16548 -do { \
16549 - int __d0, __d1, __d2; \
16550 - __asm__ __volatile__( \
16551 - " cmp $7,%0\n" \
16552 - " jbe 1f\n" \
16553 - " movl %1,%0\n" \
16554 - " negl %0\n" \
16555 - " andl $7,%0\n" \
16556 - " subl %0,%3\n" \
16557 - "4: rep; movsb\n" \
16558 - " movl %3,%0\n" \
16559 - " shrl $2,%0\n" \
16560 - " andl $3,%3\n" \
16561 - " .align 2,0x90\n" \
16562 - "0: rep; movsl\n" \
16563 - " movl %3,%0\n" \
16564 - "1: rep; movsb\n" \
16565 - "2:\n" \
16566 - ".section .fixup,\"ax\"\n" \
16567 - "5: addl %3,%0\n" \
16568 - " jmp 2b\n" \
16569 - "3: lea 0(%3,%0,4),%0\n" \
16570 - " jmp 2b\n" \
16571 - ".previous\n" \
16572 - ".section __ex_table,\"a\"\n" \
16573 - " .align 4\n" \
16574 - " .long 4b,5b\n" \
16575 - " .long 0b,3b\n" \
16576 - " .long 1b,2b\n" \
16577 - ".previous" \
16578 - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
16579 - : "3"(size), "0"(size), "1"(to), "2"(from) \
16580 - : "memory"); \
16581 -} while (0)
16582 -
16583 -#define __copy_user_zeroing(to, from, size) \
16584 -do { \
16585 - int __d0, __d1, __d2; \
16586 - __asm__ __volatile__( \
16587 - " cmp $7,%0\n" \
16588 - " jbe 1f\n" \
16589 - " movl %1,%0\n" \
16590 - " negl %0\n" \
16591 - " andl $7,%0\n" \
16592 - " subl %0,%3\n" \
16593 - "4: rep; movsb\n" \
16594 - " movl %3,%0\n" \
16595 - " shrl $2,%0\n" \
16596 - " andl $3,%3\n" \
16597 - " .align 2,0x90\n" \
16598 - "0: rep; movsl\n" \
16599 - " movl %3,%0\n" \
16600 - "1: rep; movsb\n" \
16601 - "2:\n" \
16602 - ".section .fixup,\"ax\"\n" \
16603 - "5: addl %3,%0\n" \
16604 - " jmp 6f\n" \
16605 - "3: lea 0(%3,%0,4),%0\n" \
16606 - "6: pushl %0\n" \
16607 - " pushl %%eax\n" \
16608 - " xorl %%eax,%%eax\n" \
16609 - " rep; stosb\n" \
16610 - " popl %%eax\n" \
16611 - " popl %0\n" \
16612 - " jmp 2b\n" \
16613 - ".previous\n" \
16614 - ".section __ex_table,\"a\"\n" \
16615 - " .align 4\n" \
16616 - " .long 4b,5b\n" \
16617 - " .long 0b,3b\n" \
16618 - " .long 1b,6b\n" \
16619 - ".previous" \
16620 - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
16621 - : "3"(size), "0"(size), "1"(to), "2"(from) \
16622 - : "memory"); \
16623 -} while (0)
16624 +static unsigned long
16625 +__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
16626 +{
16627 + int __d0, __d1, __d2;
16628 +
16629 + __asm__ __volatile__(
16630 + " movw %w8,%%es\n"
16631 + " cmp $7,%0\n"
16632 + " jbe 1f\n"
16633 + " movl %1,%0\n"
16634 + " negl %0\n"
16635 + " andl $7,%0\n"
16636 + " subl %0,%3\n"
16637 + "4: rep; movsb\n"
16638 + " movl %3,%0\n"
16639 + " shrl $2,%0\n"
16640 + " andl $3,%3\n"
16641 + " .align 2,0x90\n"
16642 + "0: rep; movsl\n"
16643 + " movl %3,%0\n"
16644 + "1: rep; movsb\n"
16645 + "2:\n"
16646 + " pushl %%ss\n"
16647 + " popl %%es\n"
16648 + ".section .fixup,\"ax\"\n"
16649 + "5: addl %3,%0\n"
16650 + " jmp 2b\n"
16651 + "3: lea 0(%3,%0,4),%0\n"
16652 + " jmp 2b\n"
16653 + ".previous\n"
16654 + ".section __ex_table,\"a\"\n"
16655 + " .align 4\n"
16656 + " .long 4b,5b\n"
16657 + " .long 0b,3b\n"
16658 + " .long 1b,2b\n"
16659 + ".previous"
16660 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
16661 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
16662 + : "memory");
16663 + return size;
16664 +}
16665 +
16666 +static unsigned long
16667 +__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
16668 +{
16669 + int __d0, __d1, __d2;
16670 +
16671 + __asm__ __volatile__(
16672 + " movw %w8,%%ds\n"
16673 + " cmp $7,%0\n"
16674 + " jbe 1f\n"
16675 + " movl %1,%0\n"
16676 + " negl %0\n"
16677 + " andl $7,%0\n"
16678 + " subl %0,%3\n"
16679 + "4: rep; movsb\n"
16680 + " movl %3,%0\n"
16681 + " shrl $2,%0\n"
16682 + " andl $3,%3\n"
16683 + " .align 2,0x90\n"
16684 + "0: rep; movsl\n"
16685 + " movl %3,%0\n"
16686 + "1: rep; movsb\n"
16687 + "2:\n"
16688 + " pushl %%ss\n"
16689 + " popl %%ds\n"
16690 + ".section .fixup,\"ax\"\n"
16691 + "5: addl %3,%0\n"
16692 + " jmp 2b\n"
16693 + "3: lea 0(%3,%0,4),%0\n"
16694 + " jmp 2b\n"
16695 + ".previous\n"
16696 + ".section __ex_table,\"a\"\n"
16697 + " .align 4\n"
16698 + " .long 4b,5b\n"
16699 + " .long 0b,3b\n"
16700 + " .long 1b,2b\n"
16701 + ".previous"
16702 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
16703 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
16704 + : "memory");
16705 + return size;
16706 +}
16707 +
16708 +static unsigned long
16709 +__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
16710 +{
16711 + int __d0, __d1, __d2;
16712 +
16713 + __asm__ __volatile__(
16714 + " movw %w8,%%ds\n"
16715 + " cmp $7,%0\n"
16716 + " jbe 1f\n"
16717 + " movl %1,%0\n"
16718 + " negl %0\n"
16719 + " andl $7,%0\n"
16720 + " subl %0,%3\n"
16721 + "4: rep; movsb\n"
16722 + " movl %3,%0\n"
16723 + " shrl $2,%0\n"
16724 + " andl $3,%3\n"
16725 + " .align 2,0x90\n"
16726 + "0: rep; movsl\n"
16727 + " movl %3,%0\n"
16728 + "1: rep; movsb\n"
16729 + "2:\n"
16730 + " pushl %%ss\n"
16731 + " popl %%ds\n"
16732 + ".section .fixup,\"ax\"\n"
16733 + "5: addl %3,%0\n"
16734 + " jmp 6f\n"
16735 + "3: lea 0(%3,%0,4),%0\n"
16736 + "6: pushl %0\n"
16737 + " pushl %%eax\n"
16738 + " xorl %%eax,%%eax\n"
16739 + " rep; stosb\n"
16740 + " popl %%eax\n"
16741 + " popl %0\n"
16742 + " jmp 2b\n"
16743 + ".previous\n"
16744 + ".section __ex_table,\"a\"\n"
16745 + " .align 4\n"
16746 + " .long 4b,5b\n"
16747 + " .long 0b,3b\n"
16748 + " .long 1b,6b\n"
16749 + ".previous"
16750 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
16751 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
16752 + : "memory");
16753 + return size;
16754 +}
16755
16756 unsigned long __copy_to_user_ll(void __user *to, const void *from,
16757 unsigned long n)
16758 @@ -775,9 +966,9 @@ survive:
16759 }
16760 #endif
16761 if (movsl_is_ok(to, from, n))
16762 - __copy_user(to, from, n);
16763 + n = __generic_copy_to_user(to, from, n);
16764 else
16765 - n = __copy_user_intel(to, from, n);
16766 + n = __generic_copy_to_user_intel(to, from, n);
16767 return n;
16768 }
16769 EXPORT_SYMBOL(__copy_to_user_ll);
16770 @@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *
16771 unsigned long n)
16772 {
16773 if (movsl_is_ok(to, from, n))
16774 - __copy_user_zeroing(to, from, n);
16775 + n = __copy_user_zeroing(to, from, n);
16776 else
16777 n = __copy_user_zeroing_intel(to, from, n);
16778 return n;
16779 @@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero
16780 unsigned long n)
16781 {
16782 if (movsl_is_ok(to, from, n))
16783 - __copy_user(to, from, n);
16784 + n = __generic_copy_from_user(to, from, n);
16785 else
16786 - n = __copy_user_intel((void __user *)to,
16787 - (const void *)from, n);
16788 + n = __generic_copy_from_user_intel(to, from, n);
16789 return n;
16790 }
16791 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
16792 @@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocach
16793 if (n > 64 && cpu_has_xmm2)
16794 n = __copy_user_zeroing_intel_nocache(to, from, n);
16795 else
16796 - __copy_user_zeroing(to, from, n);
16797 + n = __copy_user_zeroing(to, from, n);
16798 #else
16799 - __copy_user_zeroing(to, from, n);
16800 + n = __copy_user_zeroing(to, from, n);
16801 #endif
16802 return n;
16803 }
16804 @@ -827,65 +1017,53 @@ unsigned long __copy_from_user_ll_nocach
16805 if (n > 64 && cpu_has_xmm2)
16806 n = __copy_user_intel_nocache(to, from, n);
16807 else
16808 - __copy_user(to, from, n);
16809 + n = __generic_copy_from_user(to, from, n);
16810 #else
16811 - __copy_user(to, from, n);
16812 + n = __generic_copy_from_user(to, from, n);
16813 #endif
16814 return n;
16815 }
16816 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
16817
16818 -/**
16819 - * copy_to_user: - Copy a block of data into user space.
16820 - * @to: Destination address, in user space.
16821 - * @from: Source address, in kernel space.
16822 - * @n: Number of bytes to copy.
16823 - *
16824 - * Context: User context only. This function may sleep.
16825 - *
16826 - * Copy data from kernel space to user space.
16827 - *
16828 - * Returns number of bytes that could not be copied.
16829 - * On success, this will be zero.
16830 - */
16831 -unsigned long
16832 -copy_to_user(void __user *to, const void *from, unsigned long n)
16833 +void copy_from_user_overflow(void)
16834 {
16835 - if (access_ok(VERIFY_WRITE, to, n))
16836 - n = __copy_to_user(to, from, n);
16837 - return n;
16838 + WARN(1, "Buffer overflow detected!\n");
16839 }
16840 -EXPORT_SYMBOL(copy_to_user);
16841 +EXPORT_SYMBOL(copy_from_user_overflow);
16842
16843 -/**
16844 - * copy_from_user: - Copy a block of data from user space.
16845 - * @to: Destination address, in kernel space.
16846 - * @from: Source address, in user space.
16847 - * @n: Number of bytes to copy.
16848 - *
16849 - * Context: User context only. This function may sleep.
16850 - *
16851 - * Copy data from user space to kernel space.
16852 - *
16853 - * Returns number of bytes that could not be copied.
16854 - * On success, this will be zero.
16855 - *
16856 - * If some data could not be copied, this function will pad the copied
16857 - * data to the requested size using zero bytes.
16858 - */
16859 -unsigned long
16860 -_copy_from_user(void *to, const void __user *from, unsigned long n)
16861 +void copy_to_user_overflow(void)
16862 {
16863 - if (access_ok(VERIFY_READ, from, n))
16864 - n = __copy_from_user(to, from, n);
16865 - else
16866 - memset(to, 0, n);
16867 - return n;
16868 + WARN(1, "Buffer overflow detected!\n");
16869 }
16870 -EXPORT_SYMBOL(_copy_from_user);
16871 +EXPORT_SYMBOL(copy_to_user_overflow);
16872
16873 -void copy_from_user_overflow(void)
16874 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16875 +void __set_fs(mm_segment_t x, int cpu)
16876 {
16877 - WARN(1, "Buffer overflow detected!\n");
16878 + unsigned long limit = x.seg;
16879 + struct desc_struct d;
16880 +
16881 + current_thread_info()->addr_limit = x;
16882 + if (unlikely(paravirt_enabled()))
16883 + return;
16884 +
16885 + if (likely(limit))
16886 + limit = (limit - 1UL) >> PAGE_SHIFT;
16887 + pack_descriptor(&d, 0UL, limit, 0xF3, 0xC);
16888 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S);
16889 }
16890 -EXPORT_SYMBOL(copy_from_user_overflow);
16891 +
16892 +void set_fs(mm_segment_t x)
16893 +{
16894 + __set_fs(x, get_cpu());
16895 + put_cpu();
16896 +}
16897 +EXPORT_SYMBOL(copy_from_user);
16898 +#else
16899 +void set_fs(mm_segment_t x)
16900 +{
16901 + current_thread_info()->addr_limit = x;
16902 +}
16903 +#endif
16904 +
16905 +EXPORT_SYMBOL(set_fs);
16906 diff -urNp linux-2.6.34.1/arch/x86/lib/usercopy_64.c linux-2.6.34.1/arch/x86/lib/usercopy_64.c
16907 --- linux-2.6.34.1/arch/x86/lib/usercopy_64.c 2010-07-05 14:24:10.000000000 -0400
16908 +++ linux-2.6.34.1/arch/x86/lib/usercopy_64.c 2010-07-07 09:04:49.000000000 -0400
16909 @@ -42,6 +42,8 @@ long
16910 __strncpy_from_user(char *dst, const char __user *src, long count)
16911 {
16912 long res;
16913 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
16914 + src += PAX_USER_SHADOW_BASE;
16915 __do_strncpy_from_user(dst, src, count, res);
16916 return res;
16917 }
16918 @@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *
16919 {
16920 long __d0;
16921 might_fault();
16922 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
16923 + addr += PAX_USER_SHADOW_BASE;
16924 /* no memory constraint because it doesn't change any memory gcc knows
16925 about */
16926 asm volatile(
16927 @@ -151,10 +155,14 @@ EXPORT_SYMBOL(strlen_user);
16928
16929 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
16930 {
16931 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
16932 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
16933 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
16934 + to += PAX_USER_SHADOW_BASE;
16935 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
16936 + from += PAX_USER_SHADOW_BASE;
16937 return copy_user_generic((__force void *)to, (__force void *)from, len);
16938 - }
16939 - return len;
16940 + }
16941 + return len;
16942 }
16943 EXPORT_SYMBOL(copy_in_user);
16944
16945 diff -urNp linux-2.6.34.1/arch/x86/mm/extable.c linux-2.6.34.1/arch/x86/mm/extable.c
16946 --- linux-2.6.34.1/arch/x86/mm/extable.c 2010-07-05 14:24:10.000000000 -0400
16947 +++ linux-2.6.34.1/arch/x86/mm/extable.c 2010-07-07 09:04:49.000000000 -0400
16948 @@ -1,14 +1,71 @@
16949 #include <linux/module.h>
16950 #include <linux/spinlock.h>
16951 +#include <linux/sort.h>
16952 #include <asm/uaccess.h>
16953 +#include <asm/pgtable.h>
16954
16955 +/*
16956 + * The exception table needs to be sorted so that the binary
16957 + * search that we use to find entries in it works properly.
16958 + * This is used both for the kernel exception table and for
16959 + * the exception tables of modules that get loaded.
16960 + */
16961 +static int cmp_ex(const void *a, const void *b)
16962 +{
16963 + const struct exception_table_entry *x = a, *y = b;
16964 +
16965 + /* avoid overflow */
16966 + if (x->insn > y->insn)
16967 + return 1;
16968 + if (x->insn < y->insn)
16969 + return -1;
16970 + return 0;
16971 +}
16972 +
16973 +static void swap_ex(void *a, void *b, int size)
16974 +{
16975 + struct exception_table_entry t, *x = a, *y = b;
16976 +
16977 + t = *x;
16978 +
16979 + pax_open_kernel();
16980 + *x = *y;
16981 + *y = t;
16982 + pax_close_kernel();
16983 +}
16984 +
16985 +void sort_extable(struct exception_table_entry *start,
16986 + struct exception_table_entry *finish)
16987 +{
16988 + sort(start, finish - start, sizeof(struct exception_table_entry),
16989 + cmp_ex, swap_ex);
16990 +}
16991 +
16992 +#ifdef CONFIG_MODULES
16993 +/*
16994 + * If the exception table is sorted, any referring to the module init
16995 + * will be at the beginning or the end.
16996 + */
16997 +void trim_init_extable(struct module *m)
16998 +{
16999 + /*trim the beginning*/
17000 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
17001 + m->extable++;
17002 + m->num_exentries--;
17003 + }
17004 + /*trim the end*/
17005 + while (m->num_exentries &&
17006 + within_module_init(m->extable[m->num_exentries-1].insn, m))
17007 + m->num_exentries--;
17008 +}
17009 +#endif /* CONFIG_MODULES */
17010
17011 int fixup_exception(struct pt_regs *regs)
17012 {
17013 const struct exception_table_entry *fixup;
17014
17015 #ifdef CONFIG_PNPBIOS
17016 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
17017 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
17018 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
17019 extern u32 pnp_bios_is_utter_crap;
17020 pnp_bios_is_utter_crap = 1;
17021 diff -urNp linux-2.6.34.1/arch/x86/mm/fault.c linux-2.6.34.1/arch/x86/mm/fault.c
17022 --- linux-2.6.34.1/arch/x86/mm/fault.c 2010-07-05 14:24:10.000000000 -0400
17023 +++ linux-2.6.34.1/arch/x86/mm/fault.c 2010-07-07 09:04:49.000000000 -0400
17024 @@ -11,10 +11,19 @@
17025 #include <linux/kprobes.h> /* __kprobes, ... */
17026 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
17027 #include <linux/perf_event.h> /* perf_sw_event */
17028 +#include <linux/unistd.h>
17029 +#include <linux/compiler.h>
17030
17031 #include <asm/traps.h> /* dotraplinkage, ... */
17032 #include <asm/pgalloc.h> /* pgd_*(), ... */
17033 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
17034 +#include <asm/vsyscall.h>
17035 +#include <asm/tlbflush.h>
17036 +
17037 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17038 +#include <asm/stacktrace.h>
17039 +#include "../kernel/dumpstack.h"
17040 +#endif
17041
17042 /*
17043 * Page fault error code bits:
17044 @@ -52,7 +61,7 @@ static inline int __kprobes notify_page_
17045 int ret = 0;
17046
17047 /* kprobe_running() needs smp_processor_id() */
17048 - if (kprobes_built_in() && !user_mode_vm(regs)) {
17049 + if (kprobes_built_in() && !user_mode(regs)) {
17050 preempt_disable();
17051 if (kprobe_running() && kprobe_fault_handler(regs, 14))
17052 ret = 1;
17053 @@ -173,6 +182,30 @@ force_sig_info_fault(int si_signo, int s
17054 force_sig_info(si_signo, &info, tsk);
17055 }
17056
17057 +#ifdef CONFIG_PAX_EMUTRAMP
17058 +static int pax_handle_fetch_fault(struct pt_regs *regs);
17059 +#endif
17060 +
17061 +#ifdef CONFIG_PAX_PAGEEXEC
17062 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
17063 +{
17064 + pgd_t *pgd;
17065 + pud_t *pud;
17066 + pmd_t *pmd;
17067 +
17068 + pgd = pgd_offset(mm, address);
17069 + if (!pgd_present(*pgd))
17070 + return NULL;
17071 + pud = pud_offset(pgd, address);
17072 + if (!pud_present(*pud))
17073 + return NULL;
17074 + pmd = pmd_offset(pud, address);
17075 + if (!pmd_present(*pmd))
17076 + return NULL;
17077 + return pmd;
17078 +}
17079 +#endif
17080 +
17081 DEFINE_SPINLOCK(pgd_lock);
17082 LIST_HEAD(pgd_list);
17083
17084 @@ -225,11 +258,24 @@ void vmalloc_sync_all(void)
17085 address += PMD_SIZE) {
17086
17087 unsigned long flags;
17088 +
17089 +#ifdef CONFIG_PAX_PER_CPU_PGD
17090 + unsigned long cpu;
17091 +#else
17092 struct page *page;
17093 +#endif
17094
17095 spin_lock_irqsave(&pgd_lock, flags);
17096 +
17097 +#ifdef CONFIG_PAX_PER_CPU_PGD
17098 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
17099 + pgd_t *pgd = get_cpu_pgd(cpu);
17100 +#else
17101 list_for_each_entry(page, &pgd_list, lru) {
17102 - if (!vmalloc_sync_one(page_address(page), address))
17103 + pgd_t *pgd = page_address(page);
17104 +#endif
17105 +
17106 + if (!vmalloc_sync_one(pgd, address))
17107 break;
17108 }
17109 spin_unlock_irqrestore(&pgd_lock, flags);
17110 @@ -259,6 +305,11 @@ static noinline __kprobes int vmalloc_fa
17111 * an interrupt in the middle of a task switch..
17112 */
17113 pgd_paddr = read_cr3();
17114 +
17115 +#ifdef CONFIG_PAX_PER_CPU_PGD
17116 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
17117 +#endif
17118 +
17119 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
17120 if (!pmd_k)
17121 return -1;
17122 @@ -333,15 +384,27 @@ void vmalloc_sync_all(void)
17123
17124 const pgd_t *pgd_ref = pgd_offset_k(address);
17125 unsigned long flags;
17126 +
17127 +#ifdef CONFIG_PAX_PER_CPU_PGD
17128 + unsigned long cpu;
17129 +#else
17130 struct page *page;
17131 +#endif
17132
17133 if (pgd_none(*pgd_ref))
17134 continue;
17135
17136 spin_lock_irqsave(&pgd_lock, flags);
17137 +
17138 +#ifdef CONFIG_PAX_PER_CPU_PGD
17139 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
17140 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
17141 +#else
17142 list_for_each_entry(page, &pgd_list, lru) {
17143 pgd_t *pgd;
17144 pgd = (pgd_t *)page_address(page) + pgd_index(address);
17145 +#endif
17146 +
17147 if (pgd_none(*pgd))
17148 set_pgd(pgd, *pgd_ref);
17149 else
17150 @@ -374,7 +437,14 @@ static noinline __kprobes int vmalloc_fa
17151 * happen within a race in page table update. In the later
17152 * case just flush:
17153 */
17154 +
17155 +#ifdef CONFIG_PAX_PER_CPU_PGD
17156 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
17157 + pgd = pgd_offset_cpu(smp_processor_id(), address);
17158 +#else
17159 pgd = pgd_offset(current->active_mm, address);
17160 +#endif
17161 +
17162 pgd_ref = pgd_offset_k(address);
17163 if (pgd_none(*pgd_ref))
17164 return -1;
17165 @@ -536,7 +606,7 @@ static int is_errata93(struct pt_regs *r
17166 static int is_errata100(struct pt_regs *regs, unsigned long address)
17167 {
17168 #ifdef CONFIG_X86_64
17169 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
17170 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
17171 return 1;
17172 #endif
17173 return 0;
17174 @@ -563,7 +633,7 @@ static int is_f00f_bug(struct pt_regs *r
17175 }
17176
17177 static const char nx_warning[] = KERN_CRIT
17178 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
17179 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
17180
17181 static void
17182 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
17183 @@ -572,15 +642,26 @@ show_fault_oops(struct pt_regs *regs, un
17184 if (!oops_may_print())
17185 return;
17186
17187 - if (error_code & PF_INSTR) {
17188 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
17189 unsigned int level;
17190
17191 pte_t *pte = lookup_address(address, &level);
17192
17193 if (pte && pte_present(*pte) && !pte_exec(*pte))
17194 - printk(nx_warning, current_uid());
17195 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
17196 }
17197
17198 +#ifdef CONFIG_PAX_KERNEXEC
17199 + if (init_mm.start_code <= address && address < init_mm.end_code) {
17200 + if (current->signal->curr_ip)
17201 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
17202 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
17203 + else
17204 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
17205 + current->comm, task_pid_nr(current), current_uid(), current_euid());
17206 + }
17207 +#endif
17208 +
17209 printk(KERN_ALERT "BUG: unable to handle kernel ");
17210 if (address < PAGE_SIZE)
17211 printk(KERN_CONT "NULL pointer dereference");
17212 @@ -705,6 +786,68 @@ __bad_area_nosemaphore(struct pt_regs *r
17213 unsigned long address, int si_code)
17214 {
17215 struct task_struct *tsk = current;
17216 + struct mm_struct *mm = tsk->mm;
17217 +
17218 +#ifdef CONFIG_X86_64
17219 + if (mm && (error_code & PF_INSTR)) {
17220 + if (regs->ip == (unsigned long)vgettimeofday) {
17221 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
17222 + return;
17223 + } else if (regs->ip == (unsigned long)vtime) {
17224 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
17225 + return;
17226 + } else if (regs->ip == (unsigned long)vgetcpu) {
17227 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
17228 + return;
17229 + }
17230 + }
17231 +#endif
17232 +
17233 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17234 + if (mm && (error_code & PF_USER)) {
17235 + unsigned long ip = regs->ip;
17236 +
17237 + if (v8086_mode(regs))
17238 + ip = ((regs->cs & 0xffff) << 4) + (regs->ip & 0xffff);
17239 +
17240 + /*
17241 + * It's possible to have interrupts off here:
17242 + */
17243 + local_irq_enable();
17244 +
17245 +#ifdef CONFIG_PAX_PAGEEXEC
17246 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
17247 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && regs->ip == address))) {
17248 +
17249 +#ifdef CONFIG_PAX_EMUTRAMP
17250 + switch (pax_handle_fetch_fault(regs)) {
17251 + case 2:
17252 + return;
17253 + }
17254 +#endif
17255 +
17256 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17257 + do_group_exit(SIGKILL);
17258 + }
17259 +#endif
17260 +
17261 +#ifdef CONFIG_PAX_SEGMEXEC
17262 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (regs->ip + SEGMEXEC_TASK_SIZE == address)) {
17263 +
17264 +#ifdef CONFIG_PAX_EMUTRAMP
17265 + switch (pax_handle_fetch_fault(regs)) {
17266 + case 2:
17267 + return;
17268 + }
17269 +#endif
17270 +
17271 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17272 + do_group_exit(SIGKILL);
17273 + }
17274 +#endif
17275 +
17276 + }
17277 +#endif
17278
17279 /* User mode accesses just cause a SIGSEGV */
17280 if (error_code & PF_USER) {
17281 @@ -849,6 +992,106 @@ static int spurious_fault_check(unsigned
17282 return 1;
17283 }
17284
17285 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17286 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
17287 +{
17288 + pte_t *pte;
17289 + pmd_t *pmd;
17290 + spinlock_t *ptl;
17291 + unsigned char pte_mask;
17292 +
17293 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
17294 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
17295 + return 0;
17296 +
17297 + /* PaX: it's our fault, let's handle it if we can */
17298 +
17299 + /* PaX: take a look at read faults before acquiring any locks */
17300 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
17301 + /* instruction fetch attempt from a protected page in user mode */
17302 + up_read(&mm->mmap_sem);
17303 +
17304 +#ifdef CONFIG_PAX_EMUTRAMP
17305 + switch (pax_handle_fetch_fault(regs)) {
17306 + case 2:
17307 + return 1;
17308 + }
17309 +#endif
17310 +
17311 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17312 + do_group_exit(SIGKILL);
17313 + }
17314 +
17315 + pmd = pax_get_pmd(mm, address);
17316 + if (unlikely(!pmd))
17317 + return 0;
17318 +
17319 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
17320 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
17321 + pte_unmap_unlock(pte, ptl);
17322 + return 0;
17323 + }
17324 +
17325 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
17326 + /* write attempt to a protected page in user mode */
17327 + pte_unmap_unlock(pte, ptl);
17328 + return 0;
17329 + }
17330 +
17331 +#ifdef CONFIG_SMP
17332 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
17333 +#else
17334 + if (likely(address > get_limit(regs->cs)))
17335 +#endif
17336 + {
17337 + set_pte(pte, pte_mkread(*pte));
17338 + __flush_tlb_one(address);
17339 + pte_unmap_unlock(pte, ptl);
17340 + up_read(&mm->mmap_sem);
17341 + return 1;
17342 + }
17343 +
17344 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
17345 +
17346 + /*
17347 + * PaX: fill DTLB with user rights and retry
17348 + */
17349 + __asm__ __volatile__ (
17350 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17351 + "movw %w4,%%es\n"
17352 +#endif
17353 + "orb %2,(%1)\n"
17354 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
17355 +/*
17356 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
17357 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
17358 + * page fault when examined during a TLB load attempt. this is true not only
17359 + * for PTEs holding a non-present entry but also present entries that will
17360 + * raise a page fault (such as those set up by PaX, or the copy-on-write
17361 + * mechanism). in effect it means that we do *not* need to flush the TLBs
17362 + * for our target pages since their PTEs are simply not in the TLBs at all.
17363 +
17364 + * the best thing in omitting it is that we gain around 15-20% speed in the
17365 + * fast path of the page fault handler and can get rid of tracing since we
17366 + * can no longer flush unintended entries.
17367 + */
17368 + "invlpg (%0)\n"
17369 +#endif
17370 + "testb $0,%%es:(%0)\n"
17371 + "xorb %3,(%1)\n"
17372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17373 + "pushl %%ss\n"
17374 + "popl %%es\n"
17375 +#endif
17376 + :
17377 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
17378 + : "memory", "cc");
17379 + pte_unmap_unlock(pte, ptl);
17380 + up_read(&mm->mmap_sem);
17381 + return 1;
17382 +}
17383 +#endif
17384 +
17385 /*
17386 * Handle a spurious fault caused by a stale TLB entry.
17387 *
17388 @@ -915,6 +1158,9 @@ int show_unhandled_signals = 1;
17389 static inline int
17390 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
17391 {
17392 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
17393 + return 1;
17394 +
17395 if (write) {
17396 /* write, present and write, not present: */
17397 if (unlikely(!(vma->vm_flags & VM_WRITE)))
17398 @@ -948,17 +1194,31 @@ do_page_fault(struct pt_regs *regs, unsi
17399 {
17400 struct vm_area_struct *vma;
17401 struct task_struct *tsk;
17402 - unsigned long address;
17403 struct mm_struct *mm;
17404 int write;
17405 int fault;
17406
17407 + /* Get the faulting address: */
17408 + unsigned long address = read_cr2();
17409 +
17410 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17411 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
17412 + if (!search_exception_tables(regs->ip)) {
17413 + bad_area_nosemaphore(regs, error_code, address);
17414 + return;
17415 + }
17416 + if (address < PAX_USER_SHADOW_BASE) {
17417 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
17418 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
17419 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
17420 + } else
17421 + address -= PAX_USER_SHADOW_BASE;
17422 + }
17423 +#endif
17424 +
17425 tsk = current;
17426 mm = tsk->mm;
17427
17428 - /* Get the faulting address: */
17429 - address = read_cr2();
17430 -
17431 /*
17432 * Detect and handle instructions that would cause a page fault for
17433 * both a tracked kernel page and a userspace page.
17434 @@ -1018,7 +1278,7 @@ do_page_fault(struct pt_regs *regs, unsi
17435 * User-mode registers count as a user access even for any
17436 * potential system fault or CPU buglet:
17437 */
17438 - if (user_mode_vm(regs)) {
17439 + if (user_mode(regs)) {
17440 local_irq_enable();
17441 error_code |= PF_USER;
17442 } else {
17443 @@ -1072,6 +1332,11 @@ do_page_fault(struct pt_regs *regs, unsi
17444 might_sleep();
17445 }
17446
17447 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17448 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
17449 + return;
17450 +#endif
17451 +
17452 vma = find_vma(mm, address);
17453 if (unlikely(!vma)) {
17454 bad_area(regs, error_code, address);
17455 @@ -1083,18 +1348,24 @@ do_page_fault(struct pt_regs *regs, unsi
17456 bad_area(regs, error_code, address);
17457 return;
17458 }
17459 - if (error_code & PF_USER) {
17460 - /*
17461 - * Accessing the stack below %sp is always a bug.
17462 - * The large cushion allows instructions like enter
17463 - * and pusha to work. ("enter $65535, $31" pushes
17464 - * 32 pointers and then decrements %sp by 65535.)
17465 - */
17466 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
17467 - bad_area(regs, error_code, address);
17468 - return;
17469 - }
17470 + /*
17471 + * Accessing the stack below %sp is always a bug.
17472 + * The large cushion allows instructions like enter
17473 + * and pusha to work. ("enter $65535, $31" pushes
17474 + * 32 pointers and then decrements %sp by 65535.)
17475 + */
17476 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
17477 + bad_area(regs, error_code, address);
17478 + return;
17479 }
17480 +
17481 +#ifdef CONFIG_PAX_SEGMEXEC
17482 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
17483 + bad_area(regs, error_code, address);
17484 + return;
17485 + }
17486 +#endif
17487 +
17488 if (unlikely(expand_stack(vma, address))) {
17489 bad_area(regs, error_code, address);
17490 return;
17491 @@ -1138,3 +1409,199 @@ good_area:
17492
17493 up_read(&mm->mmap_sem);
17494 }
17495 +
17496 +#ifdef CONFIG_PAX_EMUTRAMP
17497 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
17498 +{
17499 + int err;
17500 +
17501 + do { /* PaX: gcc trampoline emulation #1 */
17502 + unsigned char mov1, mov2;
17503 + unsigned short jmp;
17504 + unsigned int addr1, addr2;
17505 +
17506 +#ifdef CONFIG_X86_64
17507 + if ((regs->ip + 11) >> 32)
17508 + break;
17509 +#endif
17510 +
17511 + err = get_user(mov1, (unsigned char __user *)regs->ip);
17512 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
17513 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
17514 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
17515 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
17516 +
17517 + if (err)
17518 + break;
17519 +
17520 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
17521 + regs->cx = addr1;
17522 + regs->ax = addr2;
17523 + regs->ip = addr2;
17524 + return 2;
17525 + }
17526 + } while (0);
17527 +
17528 + do { /* PaX: gcc trampoline emulation #2 */
17529 + unsigned char mov, jmp;
17530 + unsigned int addr1, addr2;
17531 +
17532 +#ifdef CONFIG_X86_64
17533 + if ((regs->ip + 9) >> 32)
17534 + break;
17535 +#endif
17536 +
17537 + err = get_user(mov, (unsigned char __user *)regs->ip);
17538 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
17539 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
17540 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
17541 +
17542 + if (err)
17543 + break;
17544 +
17545 + if (mov == 0xB9 && jmp == 0xE9) {
17546 + regs->cx = addr1;
17547 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
17548 + return 2;
17549 + }
17550 + } while (0);
17551 +
17552 + return 1; /* PaX in action */
17553 +}
17554 +
17555 +#ifdef CONFIG_X86_64
17556 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
17557 +{
17558 + int err;
17559 +
17560 + do { /* PaX: gcc trampoline emulation #1 */
17561 + unsigned short mov1, mov2, jmp1;
17562 + unsigned char jmp2;
17563 + unsigned int addr1;
17564 + unsigned long addr2;
17565 +
17566 + err = get_user(mov1, (unsigned short __user *)regs->ip);
17567 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
17568 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
17569 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
17570 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
17571 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
17572 +
17573 + if (err)
17574 + break;
17575 +
17576 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
17577 + regs->r11 = addr1;
17578 + regs->r10 = addr2;
17579 + regs->ip = addr1;
17580 + return 2;
17581 + }
17582 + } while (0);
17583 +
17584 + do { /* PaX: gcc trampoline emulation #2 */
17585 + unsigned short mov1, mov2, jmp1;
17586 + unsigned char jmp2;
17587 + unsigned long addr1, addr2;
17588 +
17589 + err = get_user(mov1, (unsigned short __user *)regs->ip);
17590 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
17591 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
17592 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
17593 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
17594 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
17595 +
17596 + if (err)
17597 + break;
17598 +
17599 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
17600 + regs->r11 = addr1;
17601 + regs->r10 = addr2;
17602 + regs->ip = addr1;
17603 + return 2;
17604 + }
17605 + } while (0);
17606 +
17607 + return 1; /* PaX in action */
17608 +}
17609 +#endif
17610 +
17611 +/*
17612 + * PaX: decide what to do with offenders (regs->ip = fault address)
17613 + *
17614 + * returns 1 when task should be killed
17615 + * 2 when gcc trampoline was detected
17616 + */
17617 +static int pax_handle_fetch_fault(struct pt_regs *regs)
17618 +{
17619 + if (v8086_mode(regs))
17620 + return 1;
17621 +
17622 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
17623 + return 1;
17624 +
17625 +#ifdef CONFIG_X86_32
17626 + return pax_handle_fetch_fault_32(regs);
17627 +#else
17628 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
17629 + return pax_handle_fetch_fault_32(regs);
17630 + else
17631 + return pax_handle_fetch_fault_64(regs);
17632 +#endif
17633 +}
17634 +#endif
17635 +
17636 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17637 +void pax_report_insns(void *pc, void *sp)
17638 +{
17639 + long i;
17640 +
17641 + printk(KERN_ERR "PAX: bytes at PC: ");
17642 + for (i = 0; i < 20; i++) {
17643 + unsigned char c;
17644 + if (get_user(c, (__force unsigned char __user *)pc+i))
17645 + printk(KERN_CONT "?? ");
17646 + else
17647 + printk(KERN_CONT "%02x ", c);
17648 + }
17649 + printk("\n");
17650 +
17651 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
17652 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
17653 + unsigned long c;
17654 + if (get_user(c, (__force unsigned long __user *)sp+i))
17655 +#ifdef CONFIG_X86_32
17656 + printk(KERN_CONT "???????? ");
17657 +#else
17658 + printk(KERN_CONT "???????????????? ");
17659 +#endif
17660 + else
17661 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
17662 + }
17663 + printk("\n");
17664 +}
17665 +#endif
17666 +
17667 +/**
17668 + * probe_kernel_write(): safely attempt to write to a location
17669 + * @dst: address to write to
17670 + * @src: pointer to the data that shall be written
17671 + * @size: size of the data chunk
17672 + *
17673 + * Safely write to address @dst from the buffer at @src. If a kernel fault
17674 + * happens, handle that and return -EFAULT.
17675 + */
17676 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
17677 +{
17678 + long ret;
17679 + mm_segment_t old_fs = get_fs();
17680 +
17681 + set_fs(KERNEL_DS);
17682 + pagefault_disable();
17683 + pax_open_kernel();
17684 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
17685 + pax_close_kernel();
17686 + pagefault_enable();
17687 + set_fs(old_fs);
17688 +
17689 + return ret ? -EFAULT : 0;
17690 +}
17691 diff -urNp linux-2.6.34.1/arch/x86/mm/gup.c linux-2.6.34.1/arch/x86/mm/gup.c
17692 --- linux-2.6.34.1/arch/x86/mm/gup.c 2010-07-05 14:24:10.000000000 -0400
17693 +++ linux-2.6.34.1/arch/x86/mm/gup.c 2010-07-07 09:04:49.000000000 -0400
17694 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
17695 addr = start;
17696 len = (unsigned long) nr_pages << PAGE_SHIFT;
17697 end = start + len;
17698 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
17699 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
17700 (void __user *)start, len)))
17701 return 0;
17702
17703 diff -urNp linux-2.6.34.1/arch/x86/mm/highmem_32.c linux-2.6.34.1/arch/x86/mm/highmem_32.c
17704 --- linux-2.6.34.1/arch/x86/mm/highmem_32.c 2010-07-05 14:24:10.000000000 -0400
17705 +++ linux-2.6.34.1/arch/x86/mm/highmem_32.c 2010-07-07 09:04:49.000000000 -0400
17706 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
17707 idx = type + KM_TYPE_NR*smp_processor_id();
17708 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
17709 BUG_ON(!pte_none(*(kmap_pte-idx)));
17710 +
17711 + pax_open_kernel();
17712 set_pte(kmap_pte-idx, mk_pte(page, prot));
17713 + pax_close_kernel();
17714
17715 return (void *)vaddr;
17716 }
17717 diff -urNp linux-2.6.34.1/arch/x86/mm/hugetlbpage.c linux-2.6.34.1/arch/x86/mm/hugetlbpage.c
17718 --- linux-2.6.34.1/arch/x86/mm/hugetlbpage.c 2010-07-05 14:24:10.000000000 -0400
17719 +++ linux-2.6.34.1/arch/x86/mm/hugetlbpage.c 2010-07-07 09:04:49.000000000 -0400
17720 @@ -266,13 +266,18 @@ static unsigned long hugetlb_get_unmappe
17721 struct hstate *h = hstate_file(file);
17722 struct mm_struct *mm = current->mm;
17723 struct vm_area_struct *vma;
17724 - unsigned long start_addr;
17725 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17726 +
17727 +#ifdef CONFIG_PAX_SEGMEXEC
17728 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17729 + pax_task_size = SEGMEXEC_TASK_SIZE;
17730 +#endif
17731
17732 if (len > mm->cached_hole_size) {
17733 - start_addr = mm->free_area_cache;
17734 + start_addr = mm->free_area_cache;
17735 } else {
17736 - start_addr = TASK_UNMAPPED_BASE;
17737 - mm->cached_hole_size = 0;
17738 + start_addr = mm->mmap_base;
17739 + mm->cached_hole_size = 0;
17740 }
17741
17742 full_search:
17743 @@ -280,13 +285,13 @@ full_search:
17744
17745 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17746 /* At this point: (!vma || addr < vma->vm_end). */
17747 - if (TASK_SIZE - len < addr) {
17748 + if (pax_task_size - len < addr) {
17749 /*
17750 * Start a new search - just in case we missed
17751 * some holes.
17752 */
17753 - if (start_addr != TASK_UNMAPPED_BASE) {
17754 - start_addr = TASK_UNMAPPED_BASE;
17755 + if (start_addr != mm->mmap_base) {
17756 + start_addr = mm->mmap_base;
17757 mm->cached_hole_size = 0;
17758 goto full_search;
17759 }
17760 @@ -309,9 +314,8 @@ static unsigned long hugetlb_get_unmappe
17761 struct hstate *h = hstate_file(file);
17762 struct mm_struct *mm = current->mm;
17763 struct vm_area_struct *vma, *prev_vma;
17764 - unsigned long base = mm->mmap_base, addr = addr0;
17765 + unsigned long base = mm->mmap_base, addr;
17766 unsigned long largest_hole = mm->cached_hole_size;
17767 - int first_time = 1;
17768
17769 /* don't allow allocations above current base */
17770 if (mm->free_area_cache > base)
17771 @@ -321,7 +325,7 @@ static unsigned long hugetlb_get_unmappe
17772 largest_hole = 0;
17773 mm->free_area_cache = base;
17774 }
17775 -try_again:
17776 +
17777 /* make sure it can fit in the remaining address space */
17778 if (mm->free_area_cache < len)
17779 goto fail;
17780 @@ -363,22 +367,26 @@ try_again:
17781
17782 fail:
17783 /*
17784 - * if hint left us with no space for the requested
17785 - * mapping then try again:
17786 - */
17787 - if (first_time) {
17788 - mm->free_area_cache = base;
17789 - largest_hole = 0;
17790 - first_time = 0;
17791 - goto try_again;
17792 - }
17793 - /*
17794 * A failed mmap() very likely causes application failure,
17795 * so fall back to the bottom-up function here. This scenario
17796 * can happen with large stack limits and large mmap()
17797 * allocations.
17798 */
17799 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17800 +
17801 +#ifdef CONFIG_PAX_SEGMEXEC
17802 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17803 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17804 + else
17805 +#endif
17806 +
17807 + mm->mmap_base = TASK_UNMAPPED_BASE;
17808 +
17809 +#ifdef CONFIG_PAX_RANDMMAP
17810 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17811 + mm->mmap_base += mm->delta_mmap;
17812 +#endif
17813 +
17814 + mm->free_area_cache = mm->mmap_base;
17815 mm->cached_hole_size = ~0UL;
17816 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
17817 len, pgoff, flags);
17818 @@ -386,6 +394,7 @@ fail:
17819 /*
17820 * Restore the topdown base:
17821 */
17822 + mm->mmap_base = base;
17823 mm->free_area_cache = base;
17824 mm->cached_hole_size = ~0UL;
17825
17826 @@ -399,10 +408,17 @@ hugetlb_get_unmapped_area(struct file *f
17827 struct hstate *h = hstate_file(file);
17828 struct mm_struct *mm = current->mm;
17829 struct vm_area_struct *vma;
17830 + unsigned long pax_task_size = TASK_SIZE;
17831
17832 if (len & ~huge_page_mask(h))
17833 return -EINVAL;
17834 - if (len > TASK_SIZE)
17835 +
17836 +#ifdef CONFIG_PAX_SEGMEXEC
17837 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17838 + pax_task_size = SEGMEXEC_TASK_SIZE;
17839 +#endif
17840 +
17841 + if (len > pax_task_size)
17842 return -ENOMEM;
17843
17844 if (flags & MAP_FIXED) {
17845 @@ -414,7 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
17846 if (addr) {
17847 addr = ALIGN(addr, huge_page_size(h));
17848 vma = find_vma(mm, addr);
17849 - if (TASK_SIZE - len >= addr &&
17850 + if (pax_task_size - len >= addr &&
17851 (!vma || addr + len <= vma->vm_start))
17852 return addr;
17853 }
17854 diff -urNp linux-2.6.34.1/arch/x86/mm/init.c linux-2.6.34.1/arch/x86/mm/init.c
17855 --- linux-2.6.34.1/arch/x86/mm/init.c 2010-07-05 14:24:10.000000000 -0400
17856 +++ linux-2.6.34.1/arch/x86/mm/init.c 2010-07-07 09:04:49.000000000 -0400
17857 @@ -70,11 +70,7 @@ static void __init find_early_table_spac
17858 * cause a hotspot and fill up ZONE_DMA. The page tables
17859 * need roughly 0.5KB per GB.
17860 */
17861 -#ifdef CONFIG_X86_32
17862 - start = 0x7000;
17863 -#else
17864 - start = 0x8000;
17865 -#endif
17866 + start = 0x100000;
17867 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
17868 tables, PAGE_SIZE);
17869 if (e820_table_start == -1UL)
17870 @@ -321,7 +317,13 @@ unsigned long __init_refok init_memory_m
17871 */
17872 int devmem_is_allowed(unsigned long pagenr)
17873 {
17874 - if (pagenr <= 256)
17875 + if (!pagenr)
17876 + return 1;
17877 +#ifdef CONFIG_VM86
17878 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
17879 + return 1;
17880 +#endif
17881 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
17882 return 1;
17883 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
17884 return 0;
17885 @@ -380,6 +382,88 @@ void free_init_pages(char *what, unsigne
17886
17887 void free_initmem(void)
17888 {
17889 +
17890 +#ifdef CONFIG_PAX_KERNEXEC
17891 +#ifdef CONFIG_X86_32
17892 + /* PaX: limit KERNEL_CS to actual size */
17893 + unsigned long addr, limit;
17894 + struct desc_struct d;
17895 + int cpu;
17896 +
17897 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
17898 + limit = (limit - 1UL) >> PAGE_SHIFT;
17899 +
17900 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
17901 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
17902 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
17903 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
17904 + }
17905 +
17906 + /* PaX: make KERNEL_CS read-only */
17907 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
17908 + if (!paravirt_enabled())
17909 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
17910 +/*
17911 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
17912 + pgd = pgd_offset_k(addr);
17913 + pud = pud_offset(pgd, addr);
17914 + pmd = pmd_offset(pud, addr);
17915 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
17916 + }
17917 +*/
17918 +#ifdef CONFIG_X86_PAE
17919 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
17920 +/*
17921 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
17922 + pgd = pgd_offset_k(addr);
17923 + pud = pud_offset(pgd, addr);
17924 + pmd = pmd_offset(pud, addr);
17925 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
17926 + }
17927 +*/
17928 +#endif
17929 +
17930 +#ifdef CONFIG_MODULES
17931 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
17932 +#endif
17933 +
17934 +#else
17935 + pgd_t *pgd;
17936 + pud_t *pud;
17937 + pmd_t *pmd;
17938 + unsigned long addr, end;
17939 +
17940 + /* PaX: make kernel code/rodata read-only, rest non-executable */
17941 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
17942 + pgd = pgd_offset_k(addr);
17943 + pud = pud_offset(pgd, addr);
17944 + pmd = pmd_offset(pud, addr);
17945 + if (!pmd_present(*pmd))
17946 + continue;
17947 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
17948 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
17949 + else
17950 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
17951 + }
17952 +
17953 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
17954 + end = addr + KERNEL_IMAGE_SIZE;
17955 + for (; addr < end; addr += PMD_SIZE) {
17956 + pgd = pgd_offset_k(addr);
17957 + pud = pud_offset(pgd, addr);
17958 + pmd = pmd_offset(pud, addr);
17959 + if (!pmd_present(*pmd))
17960 + continue;
17961 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
17962 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
17963 + else
17964 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
17965 + }
17966 +#endif
17967 +
17968 + flush_tlb_all();
17969 +#endif
17970 +
17971 free_init_pages("unused kernel memory",
17972 (unsigned long)(&__init_begin),
17973 (unsigned long)(&__init_end));
17974 diff -urNp linux-2.6.34.1/arch/x86/mm/init_32.c linux-2.6.34.1/arch/x86/mm/init_32.c
17975 --- linux-2.6.34.1/arch/x86/mm/init_32.c 2010-07-05 14:24:10.000000000 -0400
17976 +++ linux-2.6.34.1/arch/x86/mm/init_32.c 2010-07-07 09:04:49.000000000 -0400
17977 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
17978 }
17979
17980 /*
17981 - * Creates a middle page table and puts a pointer to it in the
17982 - * given global directory entry. This only returns the gd entry
17983 - * in non-PAE compilation mode, since the middle layer is folded.
17984 - */
17985 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
17986 -{
17987 - pud_t *pud;
17988 - pmd_t *pmd_table;
17989 -
17990 -#ifdef CONFIG_X86_PAE
17991 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
17992 - if (after_bootmem)
17993 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
17994 - else
17995 - pmd_table = (pmd_t *)alloc_low_page();
17996 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
17997 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
17998 - pud = pud_offset(pgd, 0);
17999 - BUG_ON(pmd_table != pmd_offset(pud, 0));
18000 -
18001 - return pmd_table;
18002 - }
18003 -#endif
18004 - pud = pud_offset(pgd, 0);
18005 - pmd_table = pmd_offset(pud, 0);
18006 -
18007 - return pmd_table;
18008 -}
18009 -
18010 -/*
18011 * Create a page table and place a pointer to it in a middle page
18012 * directory entry:
18013 */
18014 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
18015 page_table = (pte_t *)alloc_low_page();
18016
18017 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
18018 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18019 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
18020 +#else
18021 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
18022 +#endif
18023 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
18024 }
18025
18026 return pte_offset_kernel(pmd, 0);
18027 }
18028
18029 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
18030 +{
18031 + pud_t *pud;
18032 + pmd_t *pmd_table;
18033 +
18034 + pud = pud_offset(pgd, 0);
18035 + pmd_table = pmd_offset(pud, 0);
18036 +
18037 + return pmd_table;
18038 +}
18039 +
18040 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
18041 {
18042 int pgd_idx = pgd_index(vaddr);
18043 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
18044 int pgd_idx, pmd_idx;
18045 unsigned long vaddr;
18046 pgd_t *pgd;
18047 + pud_t *pud;
18048 pmd_t *pmd;
18049 pte_t *pte = NULL;
18050
18051 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
18052 pgd = pgd_base + pgd_idx;
18053
18054 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
18055 - pmd = one_md_table_init(pgd);
18056 - pmd = pmd + pmd_index(vaddr);
18057 + pud = pud_offset(pgd, vaddr);
18058 + pmd = pmd_offset(pud, vaddr);
18059 +
18060 +#ifdef CONFIG_X86_PAE
18061 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
18062 +#endif
18063 +
18064 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
18065 pmd++, pmd_idx++) {
18066 pte = page_table_kmap_check(one_page_table_init(pmd),
18067 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
18068 }
18069 }
18070
18071 -static inline int is_kernel_text(unsigned long addr)
18072 +static inline int is_kernel_text(unsigned long start, unsigned long end)
18073 {
18074 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
18075 - return 1;
18076 - return 0;
18077 + if ((start > ktla_ktva((unsigned long)_etext) ||
18078 + end <= ktla_ktva((unsigned long)_stext)) &&
18079 + (start > ktla_ktva((unsigned long)_einittext) ||
18080 + end <= ktla_ktva((unsigned long)_sinittext)) &&
18081 +
18082 +#ifdef CONFIG_ACPI_SLEEP
18083 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
18084 +#endif
18085 +
18086 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
18087 + return 0;
18088 + return 1;
18089 }
18090
18091 /*
18092 @@ -244,9 +244,10 @@ kernel_physical_mapping_init(unsigned lo
18093 unsigned long last_map_addr = end;
18094 unsigned long start_pfn, end_pfn;
18095 pgd_t *pgd_base = swapper_pg_dir;
18096 - int pgd_idx, pmd_idx, pte_ofs;
18097 + unsigned int pgd_idx, pmd_idx, pte_ofs;
18098 unsigned long pfn;
18099 pgd_t *pgd;
18100 + pud_t *pud;
18101 pmd_t *pmd;
18102 pte_t *pte;
18103 unsigned pages_2m, pages_4k;
18104 @@ -279,8 +280,13 @@ repeat:
18105 pfn = start_pfn;
18106 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
18107 pgd = pgd_base + pgd_idx;
18108 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
18109 - pmd = one_md_table_init(pgd);
18110 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
18111 + pud = pud_offset(pgd, 0);
18112 + pmd = pmd_offset(pud, 0);
18113 +
18114 +#ifdef CONFIG_X86_PAE
18115 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
18116 +#endif
18117
18118 if (pfn >= end_pfn)
18119 continue;
18120 @@ -292,14 +298,13 @@ repeat:
18121 #endif
18122 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
18123 pmd++, pmd_idx++) {
18124 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
18125 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
18126
18127 /*
18128 * Map with big pages if possible, otherwise
18129 * create normal page tables:
18130 */
18131 if (use_pse) {
18132 - unsigned int addr2;
18133 pgprot_t prot = PAGE_KERNEL_LARGE;
18134 /*
18135 * first pass will use the same initial
18136 @@ -309,11 +314,7 @@ repeat:
18137 __pgprot(PTE_IDENT_ATTR |
18138 _PAGE_PSE);
18139
18140 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
18141 - PAGE_OFFSET + PAGE_SIZE-1;
18142 -
18143 - if (is_kernel_text(addr) ||
18144 - is_kernel_text(addr2))
18145 + if (is_kernel_text(address, address + PMD_SIZE))
18146 prot = PAGE_KERNEL_LARGE_EXEC;
18147
18148 pages_2m++;
18149 @@ -330,7 +331,7 @@ repeat:
18150 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
18151 pte += pte_ofs;
18152 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
18153 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
18154 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
18155 pgprot_t prot = PAGE_KERNEL;
18156 /*
18157 * first pass will use the same initial
18158 @@ -338,7 +339,7 @@ repeat:
18159 */
18160 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
18161
18162 - if (is_kernel_text(addr))
18163 + if (is_kernel_text(address, address + PAGE_SIZE))
18164 prot = PAGE_KERNEL_EXEC;
18165
18166 pages_4k++;
18167 @@ -491,7 +492,7 @@ void __init native_pagetable_setup_start
18168
18169 pud = pud_offset(pgd, va);
18170 pmd = pmd_offset(pud, va);
18171 - if (!pmd_present(*pmd))
18172 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
18173 break;
18174
18175 pte = pte_offset_kernel(pmd, va);
18176 @@ -543,9 +544,7 @@ void __init early_ioremap_page_table_ran
18177
18178 static void __init pagetable_init(void)
18179 {
18180 - pgd_t *pgd_base = swapper_pg_dir;
18181 -
18182 - permanent_kmaps_init(pgd_base);
18183 + permanent_kmaps_init(swapper_pg_dir);
18184 }
18185
18186 #ifdef CONFIG_ACPI_SLEEP
18187 @@ -553,12 +552,12 @@ static void __init pagetable_init(void)
18188 * ACPI suspend needs this for resume, because things like the intel-agp
18189 * driver might have split up a kernel 4MB mapping.
18190 */
18191 -char swsusp_pg_dir[PAGE_SIZE]
18192 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
18193 __attribute__ ((aligned(PAGE_SIZE)));
18194
18195 static inline void save_pg_dir(void)
18196 {
18197 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
18198 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
18199 }
18200 #else /* !CONFIG_ACPI_SLEEP */
18201 static inline void save_pg_dir(void)
18202 @@ -590,7 +589,7 @@ void zap_low_mappings(bool early)
18203 flush_tlb_all();
18204 }
18205
18206 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
18207 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
18208 EXPORT_SYMBOL_GPL(__supported_pte_mask);
18209
18210 /* user-defined highmem size */
18211 @@ -781,7 +780,7 @@ void __init setup_bootmem_allocator(void
18212 * Initialize the boot-time allocator (with low memory only):
18213 */
18214 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
18215 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
18216 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
18217 PAGE_SIZE);
18218 if (bootmap == -1L)
18219 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
18220 @@ -871,6 +870,12 @@ void __init mem_init(void)
18221
18222 pci_iommu_alloc();
18223
18224 +#ifdef CONFIG_PAX_PER_CPU_PGD
18225 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
18226 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18227 + KERNEL_PGD_PTRS);
18228 +#endif
18229 +
18230 #ifdef CONFIG_FLATMEM
18231 BUG_ON(!mem_map);
18232 #endif
18233 @@ -888,7 +893,7 @@ void __init mem_init(void)
18234 set_highmem_pages_init();
18235
18236 codesize = (unsigned long) &_etext - (unsigned long) &_text;
18237 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
18238 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
18239 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
18240
18241 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
18242 @@ -929,10 +934,10 @@ void __init mem_init(void)
18243 ((unsigned long)&__init_end -
18244 (unsigned long)&__init_begin) >> 10,
18245
18246 - (unsigned long)&_etext, (unsigned long)&_edata,
18247 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
18248 + (unsigned long)&_sdata, (unsigned long)&_edata,
18249 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
18250
18251 - (unsigned long)&_text, (unsigned long)&_etext,
18252 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
18253 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
18254
18255 /*
18256 @@ -1013,6 +1018,7 @@ void set_kernel_text_rw(void)
18257 if (!kernel_set_to_readonly)
18258 return;
18259
18260 + start = ktla_ktva(start);
18261 pr_debug("Set kernel text: %lx - %lx for read write\n",
18262 start, start+size);
18263
18264 @@ -1027,6 +1033,7 @@ void set_kernel_text_ro(void)
18265 if (!kernel_set_to_readonly)
18266 return;
18267
18268 + start = ktla_ktva(start);
18269 pr_debug("Set kernel text: %lx - %lx for read only\n",
18270 start, start+size);
18271
18272 @@ -1038,6 +1045,7 @@ void mark_rodata_ro(void)
18273 unsigned long start = PFN_ALIGN(_text);
18274 unsigned long size = PFN_ALIGN(_etext) - start;
18275
18276 + start = ktla_ktva(start);
18277 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
18278 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
18279 size >> 10);
18280 diff -urNp linux-2.6.34.1/arch/x86/mm/init_64.c linux-2.6.34.1/arch/x86/mm/init_64.c
18281 --- linux-2.6.34.1/arch/x86/mm/init_64.c 2010-07-05 14:24:10.000000000 -0400
18282 +++ linux-2.6.34.1/arch/x86/mm/init_64.c 2010-07-07 09:04:49.000000000 -0400
18283 @@ -50,7 +50,6 @@
18284 #include <asm/numa.h>
18285 #include <asm/cacheflush.h>
18286 #include <asm/init.h>
18287 -#include <linux/bootmem.h>
18288
18289 static unsigned long dma_reserve __initdata;
18290
18291 @@ -74,7 +73,7 @@ early_param("gbpages", parse_direct_gbpa
18292 * around without checking the pgd every time.
18293 */
18294
18295 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
18296 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
18297 EXPORT_SYMBOL_GPL(__supported_pte_mask);
18298
18299 int force_personality32;
18300 @@ -165,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
18301 pmd = fill_pmd(pud, vaddr);
18302 pte = fill_pte(pmd, vaddr);
18303
18304 + pax_open_kernel();
18305 set_pte(pte, new_pte);
18306 + pax_close_kernel();
18307
18308 /*
18309 * It's enough to flush this one mapping.
18310 @@ -224,14 +225,12 @@ static void __init __init_extra_mapping(
18311 pgd = pgd_offset_k((unsigned long)__va(phys));
18312 if (pgd_none(*pgd)) {
18313 pud = (pud_t *) spp_getpage();
18314 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
18315 - _PAGE_USER));
18316 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
18317 }
18318 pud = pud_offset(pgd, (unsigned long)__va(phys));
18319 if (pud_none(*pud)) {
18320 pmd = (pmd_t *) spp_getpage();
18321 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
18322 - _PAGE_USER));
18323 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
18324 }
18325 pmd = pmd_offset(pud, phys);
18326 BUG_ON(!pmd_none(*pmd));
18327 @@ -680,6 +679,12 @@ void __init mem_init(void)
18328
18329 pci_iommu_alloc();
18330
18331 +#ifdef CONFIG_PAX_PER_CPU_PGD
18332 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
18333 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18334 + KERNEL_PGD_PTRS);
18335 +#endif
18336 +
18337 /* clear_bss() already clear the empty_zero_page */
18338
18339 reservedpages = 0;
18340 @@ -886,8 +891,8 @@ int kern_addr_valid(unsigned long addr)
18341 static struct vm_area_struct gate_vma = {
18342 .vm_start = VSYSCALL_START,
18343 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
18344 - .vm_page_prot = PAGE_READONLY_EXEC,
18345 - .vm_flags = VM_READ | VM_EXEC
18346 + .vm_page_prot = PAGE_READONLY,
18347 + .vm_flags = VM_READ
18348 };
18349
18350 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
18351 @@ -921,7 +926,7 @@ int in_gate_area_no_task(unsigned long a
18352
18353 const char *arch_vma_name(struct vm_area_struct *vma)
18354 {
18355 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
18356 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
18357 return "[vdso]";
18358 if (vma == &gate_vma)
18359 return "[vsyscall]";
18360 diff -urNp linux-2.6.34.1/arch/x86/mm/iomap_32.c linux-2.6.34.1/arch/x86/mm/iomap_32.c
18361 --- linux-2.6.34.1/arch/x86/mm/iomap_32.c 2010-07-05 14:24:10.000000000 -0400
18362 +++ linux-2.6.34.1/arch/x86/mm/iomap_32.c 2010-07-07 09:04:49.000000000 -0400
18363 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
18364 debug_kmap_atomic(type);
18365 idx = type + KM_TYPE_NR * smp_processor_id();
18366 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
18367 +
18368 + pax_open_kernel();
18369 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
18370 + pax_close_kernel();
18371 +
18372 arch_flush_lazy_mmu_mode();
18373
18374 return (void *)vaddr;
18375 diff -urNp linux-2.6.34.1/arch/x86/mm/ioremap.c linux-2.6.34.1/arch/x86/mm/ioremap.c
18376 --- linux-2.6.34.1/arch/x86/mm/ioremap.c 2010-07-05 14:24:10.000000000 -0400
18377 +++ linux-2.6.34.1/arch/x86/mm/ioremap.c 2010-07-07 09:04:49.000000000 -0400
18378 @@ -100,13 +100,10 @@ static void __iomem *__ioremap_caller(re
18379 /*
18380 * Don't allow anybody to remap normal RAM that we're using..
18381 */
18382 - for (pfn = phys_addr >> PAGE_SHIFT;
18383 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
18384 - pfn++) {
18385 -
18386 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
18387 int is_ram = page_is_ram(pfn);
18388
18389 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
18390 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
18391 return NULL;
18392 WARN_ON_ONCE(is_ram);
18393 }
18394 @@ -346,7 +343,7 @@ static int __init early_ioremap_debug_se
18395 early_param("early_ioremap_debug", early_ioremap_debug_setup);
18396
18397 static __initdata int after_paging_init;
18398 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
18399 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
18400
18401 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
18402 {
18403 @@ -378,8 +375,7 @@ void __init early_ioremap_init(void)
18404 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
18405
18406 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
18407 - memset(bm_pte, 0, sizeof(bm_pte));
18408 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
18409 + pmd_populate_user(&init_mm, pmd, bm_pte);
18410
18411 /*
18412 * The boot-ioremap range spans multiple pmds, for which
18413 diff -urNp linux-2.6.34.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.34.1/arch/x86/mm/kmemcheck/kmemcheck.c
18414 --- linux-2.6.34.1/arch/x86/mm/kmemcheck/kmemcheck.c 2010-07-05 14:24:10.000000000 -0400
18415 +++ linux-2.6.34.1/arch/x86/mm/kmemcheck/kmemcheck.c 2010-07-07 09:04:49.000000000 -0400
18416 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
18417 * memory (e.g. tracked pages)? For now, we need this to avoid
18418 * invoking kmemcheck for PnP BIOS calls.
18419 */
18420 - if (regs->flags & X86_VM_MASK)
18421 + if (v8086_mode(regs))
18422 return false;
18423 - if (regs->cs != __KERNEL_CS)
18424 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
18425 return false;
18426
18427 pte = kmemcheck_pte_lookup(address);
18428 diff -urNp linux-2.6.34.1/arch/x86/mm/mmap.c linux-2.6.34.1/arch/x86/mm/mmap.c
18429 --- linux-2.6.34.1/arch/x86/mm/mmap.c 2010-07-05 14:24:10.000000000 -0400
18430 +++ linux-2.6.34.1/arch/x86/mm/mmap.c 2010-07-07 09:04:49.000000000 -0400
18431 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
18432 * Leave an at least ~128 MB hole with possible stack randomization.
18433 */
18434 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
18435 -#define MAX_GAP (TASK_SIZE/6*5)
18436 +#define MAX_GAP (pax_task_size/6*5)
18437
18438 /*
18439 * True on X86_32 or when emulating IA32 on X86_64
18440 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
18441 return rnd << PAGE_SHIFT;
18442 }
18443
18444 -static unsigned long mmap_base(void)
18445 +static unsigned long mmap_base(struct mm_struct *mm)
18446 {
18447 unsigned long gap = rlimit(RLIMIT_STACK);
18448 + unsigned long pax_task_size = TASK_SIZE;
18449 +
18450 +#ifdef CONFIG_PAX_SEGMEXEC
18451 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18452 + pax_task_size = SEGMEXEC_TASK_SIZE;
18453 +#endif
18454
18455 if (gap < MIN_GAP)
18456 gap = MIN_GAP;
18457 else if (gap > MAX_GAP)
18458 gap = MAX_GAP;
18459
18460 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
18461 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
18462 }
18463
18464 /*
18465 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
18466 * does, but not when emulating X86_32
18467 */
18468 -static unsigned long mmap_legacy_base(void)
18469 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
18470 {
18471 - if (mmap_is_ia32())
18472 + if (mmap_is_ia32()) {
18473 +
18474 +#ifdef CONFIG_PAX_SEGMEXEC
18475 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18476 + return SEGMEXEC_TASK_UNMAPPED_BASE;
18477 + else
18478 +#endif
18479 +
18480 return TASK_UNMAPPED_BASE;
18481 - else
18482 + } else
18483 return TASK_UNMAPPED_BASE + mmap_rnd();
18484 }
18485
18486 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
18487 void arch_pick_mmap_layout(struct mm_struct *mm)
18488 {
18489 if (mmap_is_legacy()) {
18490 - mm->mmap_base = mmap_legacy_base();
18491 + mm->mmap_base = mmap_legacy_base(mm);
18492 +
18493 +#ifdef CONFIG_PAX_RANDMMAP
18494 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18495 + mm->mmap_base += mm->delta_mmap;
18496 +#endif
18497 +
18498 mm->get_unmapped_area = arch_get_unmapped_area;
18499 mm->unmap_area = arch_unmap_area;
18500 } else {
18501 - mm->mmap_base = mmap_base();
18502 + mm->mmap_base = mmap_base(mm);
18503 +
18504 +#ifdef CONFIG_PAX_RANDMMAP
18505 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18506 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
18507 +#endif
18508 +
18509 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
18510 mm->unmap_area = arch_unmap_area_topdown;
18511 }
18512 diff -urNp linux-2.6.34.1/arch/x86/mm/numa_32.c linux-2.6.34.1/arch/x86/mm/numa_32.c
18513 --- linux-2.6.34.1/arch/x86/mm/numa_32.c 2010-07-05 14:24:10.000000000 -0400
18514 +++ linux-2.6.34.1/arch/x86/mm/numa_32.c 2010-07-07 09:04:49.000000000 -0400
18515 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
18516 }
18517 #endif
18518
18519 -extern unsigned long find_max_low_pfn(void);
18520 extern unsigned long highend_pfn, highstart_pfn;
18521
18522 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
18523 diff -urNp linux-2.6.34.1/arch/x86/mm/pageattr-test.c linux-2.6.34.1/arch/x86/mm/pageattr-test.c
18524 --- linux-2.6.34.1/arch/x86/mm/pageattr-test.c 2010-07-05 14:24:10.000000000 -0400
18525 +++ linux-2.6.34.1/arch/x86/mm/pageattr-test.c 2010-07-07 09:04:49.000000000 -0400
18526 @@ -36,7 +36,7 @@ enum {
18527
18528 static int pte_testbit(pte_t pte)
18529 {
18530 - return pte_flags(pte) & _PAGE_UNUSED1;
18531 + return pte_flags(pte) & _PAGE_CPA_TEST;
18532 }
18533
18534 struct split_state {
18535 diff -urNp linux-2.6.34.1/arch/x86/mm/pageattr.c linux-2.6.34.1/arch/x86/mm/pageattr.c
18536 --- linux-2.6.34.1/arch/x86/mm/pageattr.c 2010-07-05 14:24:10.000000000 -0400
18537 +++ linux-2.6.34.1/arch/x86/mm/pageattr.c 2010-07-07 09:04:49.000000000 -0400
18538 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
18539 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
18540 */
18541 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
18542 - pgprot_val(forbidden) |= _PAGE_NX;
18543 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
18544
18545 /*
18546 * The kernel text needs to be executable for obvious reasons
18547 * Does not cover __inittext since that is gone later on. On
18548 * 64bit we do not enforce !NX on the low mapping
18549 */
18550 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
18551 - pgprot_val(forbidden) |= _PAGE_NX;
18552 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
18553 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
18554
18555 +#ifdef CONFIG_DEBUG_RODATA
18556 /*
18557 * The .rodata section needs to be read-only. Using the pfn
18558 * catches all aliases.
18559 @@ -278,6 +279,7 @@ static inline pgprot_t static_protection
18560 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
18561 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
18562 pgprot_val(forbidden) |= _PAGE_RW;
18563 +#endif
18564
18565 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
18566 /*
18567 @@ -316,6 +318,13 @@ static inline pgprot_t static_protection
18568 }
18569 #endif
18570
18571 +#ifdef CONFIG_PAX_KERNEXEC
18572 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
18573 + pgprot_val(forbidden) |= _PAGE_RW;
18574 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
18575 + }
18576 +#endif
18577 +
18578 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
18579
18580 return prot;
18581 @@ -368,23 +377,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
18582 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
18583 {
18584 /* change init_mm */
18585 + pax_open_kernel();
18586 set_pte_atomic(kpte, pte);
18587 +
18588 #ifdef CONFIG_X86_32
18589 if (!SHARED_KERNEL_PMD) {
18590 +
18591 +#ifdef CONFIG_PAX_PER_CPU_PGD
18592 + unsigned long cpu;
18593 +#else
18594 struct page *page;
18595 +#endif
18596
18597 +#ifdef CONFIG_PAX_PER_CPU_PGD
18598 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18599 + pgd_t *pgd = get_cpu_pgd(cpu);
18600 +#else
18601 list_for_each_entry(page, &pgd_list, lru) {
18602 - pgd_t *pgd;
18603 + pgd_t *pgd = (pgd_t *)page_address(page);
18604 +#endif
18605 +
18606 pud_t *pud;
18607 pmd_t *pmd;
18608
18609 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
18610 + pgd += pgd_index(address);
18611 pud = pud_offset(pgd, address);
18612 pmd = pmd_offset(pud, address);
18613 set_pte_atomic((pte_t *)pmd, pte);
18614 }
18615 }
18616 #endif
18617 + pax_close_kernel();
18618 }
18619
18620 static int
18621 diff -urNp linux-2.6.34.1/arch/x86/mm/pat.c linux-2.6.34.1/arch/x86/mm/pat.c
18622 --- linux-2.6.34.1/arch/x86/mm/pat.c 2010-07-05 14:24:10.000000000 -0400
18623 +++ linux-2.6.34.1/arch/x86/mm/pat.c 2010-07-07 09:04:49.000000000 -0400
18624 @@ -259,7 +259,7 @@ chk_conflict(struct memtype *new, struct
18625
18626 conflict:
18627 printk(KERN_INFO "%s:%d conflicting memory types "
18628 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
18629 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
18630 new->end, cattr_name(new->type), cattr_name(entry->type));
18631 return -EBUSY;
18632 }
18633 @@ -555,7 +555,7 @@ unlock_ret:
18634
18635 if (err) {
18636 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
18637 - current->comm, current->pid, start, end);
18638 + current->comm, task_pid_nr(current), start, end);
18639 }
18640
18641 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
18642 @@ -750,7 +750,7 @@ int kernel_map_sync_memtype(u64 base, un
18643 printk(KERN_INFO
18644 "%s:%d ioremap_change_attr failed %s "
18645 "for %Lx-%Lx\n",
18646 - current->comm, current->pid,
18647 + current->comm, task_pid_nr(current),
18648 cattr_name(flags),
18649 base, (unsigned long long)(base + size));
18650 return -EINVAL;
18651 @@ -808,7 +808,7 @@ static int reserve_pfn_range(u64 paddr,
18652 free_memtype(paddr, paddr + size);
18653 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
18654 " for %Lx-%Lx, got %s\n",
18655 - current->comm, current->pid,
18656 + current->comm, task_pid_nr(current),
18657 cattr_name(want_flags),
18658 (unsigned long long)paddr,
18659 (unsigned long long)(paddr + size),
18660 diff -urNp linux-2.6.34.1/arch/x86/mm/pgtable.c linux-2.6.34.1/arch/x86/mm/pgtable.c
18661 --- linux-2.6.34.1/arch/x86/mm/pgtable.c 2010-07-05 14:24:10.000000000 -0400
18662 +++ linux-2.6.34.1/arch/x86/mm/pgtable.c 2010-07-07 09:04:49.000000000 -0400
18663 @@ -84,8 +84,59 @@ static inline void pgd_list_del(pgd_t *p
18664 list_del(&page->lru);
18665 }
18666
18667 -#define UNSHARED_PTRS_PER_PGD \
18668 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
18669 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18670 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
18671 +
18672 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
18673 +{
18674 + while (count--)
18675 + *dst++ = __pgd((pgd_val(*src++) | _PAGE_NX) & ~_PAGE_USER);
18676 +
18677 +}
18678 +#endif
18679 +
18680 +#ifdef CONFIG_PAX_PER_CPU_PGD
18681 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
18682 +{
18683 + while (count--)
18684 +
18685 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18686 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
18687 +#else
18688 + *dst++ = *src++;
18689 +#endif
18690 +
18691 +}
18692 +#endif
18693 +
18694 +#ifdef CONFIG_PAX_PER_CPU_PGD
18695 +static inline void pgd_ctor(pgd_t *pgd) {}
18696 +static inline void pgd_dtor(pgd_t *pgd) {}
18697 +#ifdef CONFIG_X86_64
18698 +#define pxd_t pud_t
18699 +#define pyd_t pgd_t
18700 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
18701 +#define pxd_free(mm, pud) pud_free((mm), (pud))
18702 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
18703 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
18704 +#define PYD_SIZE PGDIR_SIZE
18705 +#else
18706 +#define pxd_t pmd_t
18707 +#define pyd_t pud_t
18708 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
18709 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
18710 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
18711 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
18712 +#define PYD_SIZE PUD_SIZE
18713 +#endif
18714 +#else
18715 +#define pxd_t pmd_t
18716 +#define pyd_t pud_t
18717 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
18718 +#define pxd_free(mm, pmd) pmd_free((mm), (pmd))
18719 +#define pyd_populate(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
18720 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
18721 +#define PYD_SIZE PUD_SIZE
18722
18723 static void pgd_ctor(pgd_t *pgd)
18724 {
18725 @@ -120,6 +171,7 @@ static void pgd_dtor(pgd_t *pgd)
18726 pgd_list_del(pgd);
18727 spin_unlock_irqrestore(&pgd_lock, flags);
18728 }
18729 +#endif
18730
18731 /*
18732 * List of all pgd's needed for non-PAE so it can invalidate entries
18733 @@ -132,7 +184,7 @@ static void pgd_dtor(pgd_t *pgd)
18734 * -- wli
18735 */
18736
18737 -#ifdef CONFIG_X86_PAE
18738 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18739 /*
18740 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
18741 * updating the top-level pagetable entries to guarantee the
18742 @@ -144,7 +196,7 @@ static void pgd_dtor(pgd_t *pgd)
18743 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
18744 * and initialize the kernel pmds here.
18745 */
18746 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
18747 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
18748
18749 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18750 {
18751 @@ -163,36 +215,38 @@ void pud_populate(struct mm_struct *mm,
18752 if (mm == current->active_mm)
18753 write_cr3(read_cr3());
18754 }
18755 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
18756 +#define PREALLOCATED_PXDS USER_PGD_PTRS
18757 #else /* !CONFIG_X86_PAE */
18758
18759 /* No need to prepopulate any pagetable entries in non-PAE modes. */
18760 -#define PREALLOCATED_PMDS 0
18761 +#define PREALLOCATED_PXDS 0
18762
18763 #endif /* CONFIG_X86_PAE */
18764
18765 -static void free_pmds(pmd_t *pmds[])
18766 +static void free_pxds(pxd_t *pxds[])
18767 {
18768 int i;
18769
18770 - for(i = 0; i < PREALLOCATED_PMDS; i++)
18771 - if (pmds[i])
18772 - free_page((unsigned long)pmds[i]);
18773 + for(i = 0; i < PREALLOCATED_PXDS; i++)
18774 + if (pxds[i])
18775 + free_page((unsigned long)pxds[i]);
18776 }
18777
18778 -static int preallocate_pmds(pmd_t *pmds[])
18779 +static int preallocate_pxds(pxd_t *pxds[])
18780 {
18781 int i;
18782 bool failed = false;
18783
18784 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
18785 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
18786 - if (pmd == NULL)
18787 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
18788 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
18789 + if (pxd == NULL)
18790 failed = true;
18791 - pmds[i] = pmd;
18792 + pxds[i] = pxd;
18793 }
18794
18795 if (failed) {
18796 - free_pmds(pmds);
18797 + free_pxds(pxds);
18798 return -ENOMEM;
18799 }
18800
18801 @@ -205,51 +259,56 @@ static int preallocate_pmds(pmd_t *pmds[
18802 * preallocate which never got a corresponding vma will need to be
18803 * freed manually.
18804 */
18805 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
18806 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
18807 {
18808 int i;
18809
18810 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
18811 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
18812 pgd_t pgd = pgdp[i];
18813
18814 if (pgd_val(pgd) != 0) {
18815 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
18816 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
18817
18818 - pgdp[i] = native_make_pgd(0);
18819 + set_pgd(pgdp + i, native_make_pgd(0));
18820
18821 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
18822 - pmd_free(mm, pmd);
18823 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
18824 + pxd_free(mm, pxd);
18825 }
18826 }
18827 }
18828
18829 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
18830 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
18831 {
18832 - pud_t *pud;
18833 + pyd_t *pyd;
18834 unsigned long addr;
18835 int i;
18836
18837 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
18838 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
18839 return;
18840
18841 - pud = pud_offset(pgd, 0);
18842 +#ifdef CONFIG_X86_64
18843 + pyd = pyd_offset(mm, 0L);
18844 +#else
18845 + pyd = pyd_offset(pgd, 0L);
18846 +#endif
18847
18848 - for (addr = i = 0; i < PREALLOCATED_PMDS;
18849 - i++, pud++, addr += PUD_SIZE) {
18850 - pmd_t *pmd = pmds[i];
18851 + for (addr = i = 0; i < PREALLOCATED_PXDS;
18852 + i++, pyd++, addr += PYD_SIZE) {
18853 + pxd_t *pxd = pxds[i];
18854
18855 if (i >= KERNEL_PGD_BOUNDARY)
18856 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
18857 - sizeof(pmd_t) * PTRS_PER_PMD);
18858 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
18859 + sizeof(pxd_t) * PTRS_PER_PMD);
18860
18861 - pud_populate(mm, pud, pmd);
18862 + pyd_populate(mm, pyd, pxd);
18863 }
18864 }
18865
18866 pgd_t *pgd_alloc(struct mm_struct *mm)
18867 {
18868 pgd_t *pgd;
18869 - pmd_t *pmds[PREALLOCATED_PMDS];
18870 + pxd_t *pxds[PREALLOCATED_PXDS];
18871 +
18872 unsigned long flags;
18873
18874 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
18875 @@ -259,11 +318,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
18876
18877 mm->pgd = pgd;
18878
18879 - if (preallocate_pmds(pmds) != 0)
18880 + if (preallocate_pxds(pxds) != 0)
18881 goto out_free_pgd;
18882
18883 if (paravirt_pgd_alloc(mm) != 0)
18884 - goto out_free_pmds;
18885 + goto out_free_pxds;
18886
18887 /*
18888 * Make sure that pre-populating the pmds is atomic with
18889 @@ -273,14 +332,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
18890 spin_lock_irqsave(&pgd_lock, flags);
18891
18892 pgd_ctor(pgd);
18893 - pgd_prepopulate_pmd(mm, pgd, pmds);
18894 + pgd_prepopulate_pxd(mm, pgd, pxds);
18895
18896 spin_unlock_irqrestore(&pgd_lock, flags);
18897
18898 return pgd;
18899
18900 -out_free_pmds:
18901 - free_pmds(pmds);
18902 +out_free_pxds:
18903 + free_pxds(pxds);
18904 out_free_pgd:
18905 free_page((unsigned long)pgd);
18906 out:
18907 @@ -289,7 +348,7 @@ out:
18908
18909 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
18910 {
18911 - pgd_mop_up_pmds(mm, pgd);
18912 + pgd_mop_up_pxds(mm, pgd);
18913 pgd_dtor(pgd);
18914 paravirt_pgd_free(mm, pgd);
18915 free_page((unsigned long)pgd);
18916 diff -urNp linux-2.6.34.1/arch/x86/mm/pgtable_32.c linux-2.6.34.1/arch/x86/mm/pgtable_32.c
18917 --- linux-2.6.34.1/arch/x86/mm/pgtable_32.c 2010-07-05 14:24:10.000000000 -0400
18918 +++ linux-2.6.34.1/arch/x86/mm/pgtable_32.c 2010-07-07 09:04:49.000000000 -0400
18919 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
18920 return;
18921 }
18922 pte = pte_offset_kernel(pmd, vaddr);
18923 +
18924 + pax_open_kernel();
18925 if (pte_val(pteval))
18926 set_pte_at(&init_mm, vaddr, pte, pteval);
18927 else
18928 pte_clear(&init_mm, vaddr, pte);
18929 + pax_close_kernel();
18930
18931 /*
18932 * It's enough to flush this one mapping.
18933 diff -urNp linux-2.6.34.1/arch/x86/mm/setup_nx.c linux-2.6.34.1/arch/x86/mm/setup_nx.c
18934 --- linux-2.6.34.1/arch/x86/mm/setup_nx.c 2010-07-05 14:24:10.000000000 -0400
18935 +++ linux-2.6.34.1/arch/x86/mm/setup_nx.c 2010-07-07 09:05:57.000000000 -0400
18936 @@ -5,8 +5,10 @@
18937 #include <asm/pgtable.h>
18938 #include <asm/proto.h>
18939
18940 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18941 static int disable_nx __cpuinitdata;
18942
18943 +#ifndef CONFIG_PAX_PAGEEXEC
18944 /*
18945 * noexec = on|off
18946 *
18947 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
18948 return 0;
18949 }
18950 early_param("noexec", noexec_setup);
18951 +#endif
18952 +
18953 +#endif
18954
18955 void __cpuinit x86_configure_nx(void)
18956 {
18957 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18958 if (cpu_has_nx && !disable_nx)
18959 __supported_pte_mask |= _PAGE_NX;
18960 else
18961 +#endif
18962 __supported_pte_mask &= ~_PAGE_NX;
18963 }
18964
18965 diff -urNp linux-2.6.34.1/arch/x86/mm/tlb.c linux-2.6.34.1/arch/x86/mm/tlb.c
18966 --- linux-2.6.34.1/arch/x86/mm/tlb.c 2010-07-05 14:24:10.000000000 -0400
18967 +++ linux-2.6.34.1/arch/x86/mm/tlb.c 2010-07-07 09:04:49.000000000 -0400
18968 @@ -13,7 +13,7 @@
18969 #include <asm/uv/uv.h>
18970
18971 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
18972 - = { &init_mm, 0, };
18973 + = { &init_mm, 0 };
18974
18975 /*
18976 * Smarter SMP flushing macros.
18977 @@ -62,7 +62,11 @@ void leave_mm(int cpu)
18978 BUG();
18979 cpumask_clear_cpu(cpu,
18980 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
18981 +
18982 +#ifndef CONFIG_PAX_PER_CPU_PGD
18983 load_cr3(swapper_pg_dir);
18984 +#endif
18985 +
18986 }
18987 EXPORT_SYMBOL_GPL(leave_mm);
18988
18989 diff -urNp linux-2.6.34.1/arch/x86/oprofile/backtrace.c linux-2.6.34.1/arch/x86/oprofile/backtrace.c
18990 --- linux-2.6.34.1/arch/x86/oprofile/backtrace.c 2010-07-05 14:24:10.000000000 -0400
18991 +++ linux-2.6.34.1/arch/x86/oprofile/backtrace.c 2010-07-07 09:04:49.000000000 -0400
18992 @@ -58,7 +58,7 @@ static struct frame_head *dump_user_back
18993 struct frame_head bufhead[2];
18994
18995 /* Also check accessibility of one struct frame_head beyond */
18996 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
18997 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
18998 return NULL;
18999 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
19000 return NULL;
19001 @@ -78,7 +78,7 @@ x86_backtrace(struct pt_regs * const reg
19002 {
19003 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
19004
19005 - if (!user_mode_vm(regs)) {
19006 + if (!user_mode(regs)) {
19007 unsigned long stack = kernel_stack_pointer(regs);
19008 if (depth)
19009 dump_trace(NULL, regs, (unsigned long *)stack, 0,
19010 diff -urNp linux-2.6.34.1/arch/x86/oprofile/op_model_p4.c linux-2.6.34.1/arch/x86/oprofile/op_model_p4.c
19011 --- linux-2.6.34.1/arch/x86/oprofile/op_model_p4.c 2010-07-05 14:24:10.000000000 -0400
19012 +++ linux-2.6.34.1/arch/x86/oprofile/op_model_p4.c 2010-07-07 09:04:49.000000000 -0400
19013 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
19014 #endif
19015 }
19016
19017 -static int inline addr_increment(void)
19018 +static inline int addr_increment(void)
19019 {
19020 #ifdef CONFIG_SMP
19021 return smp_num_siblings == 2 ? 2 : 1;
19022 diff -urNp linux-2.6.34.1/arch/x86/pci/common.c linux-2.6.34.1/arch/x86/pci/common.c
19023 --- linux-2.6.34.1/arch/x86/pci/common.c 2010-07-05 14:24:10.000000000 -0400
19024 +++ linux-2.6.34.1/arch/x86/pci/common.c 2010-07-07 09:04:49.000000000 -0400
19025 @@ -32,8 +32,8 @@ int noioapicreroute = 1;
19026 int pcibios_last_bus = -1;
19027 unsigned long pirq_table_addr;
19028 struct pci_bus *pci_root_bus;
19029 -struct pci_raw_ops *raw_pci_ops;
19030 -struct pci_raw_ops *raw_pci_ext_ops;
19031 +const struct pci_raw_ops *raw_pci_ops;
19032 +const struct pci_raw_ops *raw_pci_ext_ops;
19033
19034 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
19035 int reg, int len, u32 *val)
19036 @@ -365,7 +365,7 @@ static const struct dmi_system_id __devi
19037 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
19038 },
19039 },
19040 - {}
19041 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
19042 };
19043
19044 void __init dmi_check_pciprobe(void)
19045 diff -urNp linux-2.6.34.1/arch/x86/pci/direct.c linux-2.6.34.1/arch/x86/pci/direct.c
19046 --- linux-2.6.34.1/arch/x86/pci/direct.c 2010-07-05 14:24:10.000000000 -0400
19047 +++ linux-2.6.34.1/arch/x86/pci/direct.c 2010-07-07 09:04:49.000000000 -0400
19048 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
19049
19050 #undef PCI_CONF1_ADDRESS
19051
19052 -struct pci_raw_ops pci_direct_conf1 = {
19053 +const struct pci_raw_ops pci_direct_conf1 = {
19054 .read = pci_conf1_read,
19055 .write = pci_conf1_write,
19056 };
19057 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
19058
19059 #undef PCI_CONF2_ADDRESS
19060
19061 -struct pci_raw_ops pci_direct_conf2 = {
19062 +const struct pci_raw_ops pci_direct_conf2 = {
19063 .read = pci_conf2_read,
19064 .write = pci_conf2_write,
19065 };
19066 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
19067 * This should be close to trivial, but it isn't, because there are buggy
19068 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
19069 */
19070 -static int __init pci_sanity_check(struct pci_raw_ops *o)
19071 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
19072 {
19073 u32 x = 0;
19074 int year, devfn;
19075 diff -urNp linux-2.6.34.1/arch/x86/pci/fixup.c linux-2.6.34.1/arch/x86/pci/fixup.c
19076 --- linux-2.6.34.1/arch/x86/pci/fixup.c 2010-07-05 14:24:10.000000000 -0400
19077 +++ linux-2.6.34.1/arch/x86/pci/fixup.c 2010-07-07 09:04:49.000000000 -0400
19078 @@ -364,7 +364,7 @@ static const struct dmi_system_id __devi
19079 DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
19080 },
19081 },
19082 - {}
19083 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19084 };
19085
19086 /*
19087 @@ -435,7 +435,7 @@ static const struct dmi_system_id __devi
19088 DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
19089 },
19090 },
19091 - { }
19092 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19093 };
19094
19095 static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
19096 diff -urNp linux-2.6.34.1/arch/x86/pci/irq.c linux-2.6.34.1/arch/x86/pci/irq.c
19097 --- linux-2.6.34.1/arch/x86/pci/irq.c 2010-07-05 14:24:10.000000000 -0400
19098 +++ linux-2.6.34.1/arch/x86/pci/irq.c 2010-07-07 09:04:49.000000000 -0400
19099 @@ -542,7 +542,7 @@ static __init int intel_router_probe(str
19100 static struct pci_device_id __initdata pirq_440gx[] = {
19101 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
19102 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
19103 - { },
19104 + { PCI_DEVICE(0, 0) }
19105 };
19106
19107 /* 440GX has a proprietary PIRQ router -- don't use it */
19108 @@ -1108,7 +1108,7 @@ static struct dmi_system_id __initdata p
19109 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
19110 },
19111 },
19112 - { }
19113 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19114 };
19115
19116 void __init pcibios_irq_init(void)
19117 diff -urNp linux-2.6.34.1/arch/x86/pci/mmconfig_32.c linux-2.6.34.1/arch/x86/pci/mmconfig_32.c
19118 --- linux-2.6.34.1/arch/x86/pci/mmconfig_32.c 2010-07-05 14:24:10.000000000 -0400
19119 +++ linux-2.6.34.1/arch/x86/pci/mmconfig_32.c 2010-07-07 09:04:49.000000000 -0400
19120 @@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int
19121 return 0;
19122 }
19123
19124 -static struct pci_raw_ops pci_mmcfg = {
19125 +static const struct pci_raw_ops pci_mmcfg = {
19126 .read = pci_mmcfg_read,
19127 .write = pci_mmcfg_write,
19128 };
19129 diff -urNp linux-2.6.34.1/arch/x86/pci/mmconfig_64.c linux-2.6.34.1/arch/x86/pci/mmconfig_64.c
19130 --- linux-2.6.34.1/arch/x86/pci/mmconfig_64.c 2010-07-05 14:24:10.000000000 -0400
19131 +++ linux-2.6.34.1/arch/x86/pci/mmconfig_64.c 2010-07-07 09:04:49.000000000 -0400
19132 @@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int
19133 return 0;
19134 }
19135
19136 -static struct pci_raw_ops pci_mmcfg = {
19137 +static const struct pci_raw_ops pci_mmcfg = {
19138 .read = pci_mmcfg_read,
19139 .write = pci_mmcfg_write,
19140 };
19141 diff -urNp linux-2.6.34.1/arch/x86/pci/numaq_32.c linux-2.6.34.1/arch/x86/pci/numaq_32.c
19142 --- linux-2.6.34.1/arch/x86/pci/numaq_32.c 2010-07-05 14:24:10.000000000 -0400
19143 +++ linux-2.6.34.1/arch/x86/pci/numaq_32.c 2010-07-07 09:04:49.000000000 -0400
19144 @@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned i
19145
19146 #undef PCI_CONF1_MQ_ADDRESS
19147
19148 -static struct pci_raw_ops pci_direct_conf1_mq = {
19149 +static const struct pci_raw_ops pci_direct_conf1_mq = {
19150 .read = pci_conf1_mq_read,
19151 .write = pci_conf1_mq_write
19152 };
19153 diff -urNp linux-2.6.34.1/arch/x86/pci/olpc.c linux-2.6.34.1/arch/x86/pci/olpc.c
19154 --- linux-2.6.34.1/arch/x86/pci/olpc.c 2010-07-05 14:24:10.000000000 -0400
19155 +++ linux-2.6.34.1/arch/x86/pci/olpc.c 2010-07-07 09:04:49.000000000 -0400
19156 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
19157 return 0;
19158 }
19159
19160 -static struct pci_raw_ops pci_olpc_conf = {
19161 +static const struct pci_raw_ops pci_olpc_conf = {
19162 .read = pci_olpc_read,
19163 .write = pci_olpc_write,
19164 };
19165 diff -urNp linux-2.6.34.1/arch/x86/pci/pcbios.c linux-2.6.34.1/arch/x86/pci/pcbios.c
19166 --- linux-2.6.34.1/arch/x86/pci/pcbios.c 2010-07-05 14:24:10.000000000 -0400
19167 +++ linux-2.6.34.1/arch/x86/pci/pcbios.c 2010-07-07 09:04:49.000000000 -0400
19168 @@ -57,50 +57,93 @@ union bios32 {
19169 static struct {
19170 unsigned long address;
19171 unsigned short segment;
19172 -} bios32_indirect = { 0, __KERNEL_CS };
19173 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
19174
19175 /*
19176 * Returns the entry point for the given service, NULL on error
19177 */
19178
19179 -static unsigned long bios32_service(unsigned long service)
19180 +static unsigned long __devinit bios32_service(unsigned long service)
19181 {
19182 unsigned char return_code; /* %al */
19183 unsigned long address; /* %ebx */
19184 unsigned long length; /* %ecx */
19185 unsigned long entry; /* %edx */
19186 unsigned long flags;
19187 + struct desc_struct d, *gdt;
19188
19189 local_irq_save(flags);
19190 - __asm__("lcall *(%%edi); cld"
19191 +
19192 + gdt = get_cpu_gdt_table(smp_processor_id());
19193 +
19194 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
19195 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
19196 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
19197 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
19198 +
19199 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
19200 : "=a" (return_code),
19201 "=b" (address),
19202 "=c" (length),
19203 "=d" (entry)
19204 : "0" (service),
19205 "1" (0),
19206 - "D" (&bios32_indirect));
19207 + "D" (&bios32_indirect),
19208 + "r"(__PCIBIOS_DS)
19209 + : "memory");
19210 +
19211 + pax_open_kernel();
19212 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
19213 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
19214 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
19215 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
19216 + pax_close_kernel();
19217 +
19218 local_irq_restore(flags);
19219
19220 switch (return_code) {
19221 - case 0:
19222 - return address + entry;
19223 - case 0x80: /* Not present */
19224 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
19225 - return 0;
19226 - default: /* Shouldn't happen */
19227 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
19228 - service, return_code);
19229 + case 0: {
19230 + int cpu;
19231 + unsigned char flags;
19232 +
19233 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
19234 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
19235 + printk(KERN_WARNING "bios32_service: not valid\n");
19236 return 0;
19237 + }
19238 + address = address + PAGE_OFFSET;
19239 + length += 16UL; /* some BIOSs underreport this... */
19240 + flags = 4;
19241 + if (length >= 64*1024*1024) {
19242 + length >>= PAGE_SHIFT;
19243 + flags |= 8;
19244 + }
19245 +
19246 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19247 + gdt = get_cpu_gdt_table(cpu);
19248 + pack_descriptor(&d, address, length, 0x9b, flags);
19249 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
19250 + pack_descriptor(&d, address, length, 0x93, flags);
19251 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
19252 + }
19253 + return entry;
19254 + }
19255 + case 0x80: /* Not present */
19256 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
19257 + return 0;
19258 + default: /* Shouldn't happen */
19259 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
19260 + service, return_code);
19261 + return 0;
19262 }
19263 }
19264
19265 static struct {
19266 unsigned long address;
19267 unsigned short segment;
19268 -} pci_indirect = { 0, __KERNEL_CS };
19269 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
19270
19271 -static int pci_bios_present;
19272 +static int pci_bios_present __read_only;
19273
19274 static int __devinit check_pcibios(void)
19275 {
19276 @@ -109,11 +152,13 @@ static int __devinit check_pcibios(void)
19277 unsigned long flags, pcibios_entry;
19278
19279 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
19280 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
19281 + pci_indirect.address = pcibios_entry;
19282
19283 local_irq_save(flags);
19284 - __asm__(
19285 - "lcall *(%%edi); cld\n\t"
19286 + __asm__("movw %w6, %%ds\n\t"
19287 + "lcall *%%ss:(%%edi); cld\n\t"
19288 + "push %%ss\n\t"
19289 + "pop %%ds\n\t"
19290 "jc 1f\n\t"
19291 "xor %%ah, %%ah\n"
19292 "1:"
19293 @@ -122,7 +167,8 @@ static int __devinit check_pcibios(void)
19294 "=b" (ebx),
19295 "=c" (ecx)
19296 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
19297 - "D" (&pci_indirect)
19298 + "D" (&pci_indirect),
19299 + "r" (__PCIBIOS_DS)
19300 : "memory");
19301 local_irq_restore(flags);
19302
19303 @@ -166,7 +212,10 @@ static int pci_bios_read(unsigned int se
19304
19305 switch (len) {
19306 case 1:
19307 - __asm__("lcall *(%%esi); cld\n\t"
19308 + __asm__("movw %w6, %%ds\n\t"
19309 + "lcall *%%ss:(%%esi); cld\n\t"
19310 + "push %%ss\n\t"
19311 + "pop %%ds\n\t"
19312 "jc 1f\n\t"
19313 "xor %%ah, %%ah\n"
19314 "1:"
19315 @@ -175,7 +224,8 @@ static int pci_bios_read(unsigned int se
19316 : "1" (PCIBIOS_READ_CONFIG_BYTE),
19317 "b" (bx),
19318 "D" ((long)reg),
19319 - "S" (&pci_indirect));
19320 + "S" (&pci_indirect),
19321 + "r" (__PCIBIOS_DS));
19322 /*
19323 * Zero-extend the result beyond 8 bits, do not trust the
19324 * BIOS having done it:
19325 @@ -183,7 +233,10 @@ static int pci_bios_read(unsigned int se
19326 *value &= 0xff;
19327 break;
19328 case 2:
19329 - __asm__("lcall *(%%esi); cld\n\t"
19330 + __asm__("movw %w6, %%ds\n\t"
19331 + "lcall *%%ss:(%%esi); cld\n\t"
19332 + "push %%ss\n\t"
19333 + "pop %%ds\n\t"
19334 "jc 1f\n\t"
19335 "xor %%ah, %%ah\n"
19336 "1:"
19337 @@ -192,7 +245,8 @@ static int pci_bios_read(unsigned int se
19338 : "1" (PCIBIOS_READ_CONFIG_WORD),
19339 "b" (bx),
19340 "D" ((long)reg),
19341 - "S" (&pci_indirect));
19342 + "S" (&pci_indirect),
19343 + "r" (__PCIBIOS_DS));
19344 /*
19345 * Zero-extend the result beyond 16 bits, do not trust the
19346 * BIOS having done it:
19347 @@ -200,7 +254,10 @@ static int pci_bios_read(unsigned int se
19348 *value &= 0xffff;
19349 break;
19350 case 4:
19351 - __asm__("lcall *(%%esi); cld\n\t"
19352 + __asm__("movw %w6, %%ds\n\t"
19353 + "lcall *%%ss:(%%esi); cld\n\t"
19354 + "push %%ss\n\t"
19355 + "pop %%ds\n\t"
19356 "jc 1f\n\t"
19357 "xor %%ah, %%ah\n"
19358 "1:"
19359 @@ -209,7 +266,8 @@ static int pci_bios_read(unsigned int se
19360 : "1" (PCIBIOS_READ_CONFIG_DWORD),
19361 "b" (bx),
19362 "D" ((long)reg),
19363 - "S" (&pci_indirect));
19364 + "S" (&pci_indirect),
19365 + "r" (__PCIBIOS_DS));
19366 break;
19367 }
19368
19369 @@ -232,7 +290,10 @@ static int pci_bios_write(unsigned int s
19370
19371 switch (len) {
19372 case 1:
19373 - __asm__("lcall *(%%esi); cld\n\t"
19374 + __asm__("movw %w6, %%ds\n\t"
19375 + "lcall *%%ss:(%%esi); cld\n\t"
19376 + "push %%ss\n\t"
19377 + "pop %%ds\n\t"
19378 "jc 1f\n\t"
19379 "xor %%ah, %%ah\n"
19380 "1:"
19381 @@ -241,10 +302,14 @@ static int pci_bios_write(unsigned int s
19382 "c" (value),
19383 "b" (bx),
19384 "D" ((long)reg),
19385 - "S" (&pci_indirect));
19386 + "S" (&pci_indirect),
19387 + "r" (__PCIBIOS_DS));
19388 break;
19389 case 2:
19390 - __asm__("lcall *(%%esi); cld\n\t"
19391 + __asm__("movw %w6, %%ds\n\t"
19392 + "lcall *%%ss:(%%esi); cld\n\t"
19393 + "push %%ss\n\t"
19394 + "pop %%ds\n\t"
19395 "jc 1f\n\t"
19396 "xor %%ah, %%ah\n"
19397 "1:"
19398 @@ -253,10 +318,14 @@ static int pci_bios_write(unsigned int s
19399 "c" (value),
19400 "b" (bx),
19401 "D" ((long)reg),
19402 - "S" (&pci_indirect));
19403 + "S" (&pci_indirect),
19404 + "r" (__PCIBIOS_DS));
19405 break;
19406 case 4:
19407 - __asm__("lcall *(%%esi); cld\n\t"
19408 + __asm__("movw %w6, %%ds\n\t"
19409 + "lcall *%%ss:(%%esi); cld\n\t"
19410 + "push %%ss\n\t"
19411 + "pop %%ds\n\t"
19412 "jc 1f\n\t"
19413 "xor %%ah, %%ah\n"
19414 "1:"
19415 @@ -265,7 +334,8 @@ static int pci_bios_write(unsigned int s
19416 "c" (value),
19417 "b" (bx),
19418 "D" ((long)reg),
19419 - "S" (&pci_indirect));
19420 + "S" (&pci_indirect),
19421 + "r" (__PCIBIOS_DS));
19422 break;
19423 }
19424
19425 @@ -279,7 +349,7 @@ static int pci_bios_write(unsigned int s
19426 * Function table for BIOS32 access
19427 */
19428
19429 -static struct pci_raw_ops pci_bios_access = {
19430 +static const struct pci_raw_ops pci_bios_access = {
19431 .read = pci_bios_read,
19432 .write = pci_bios_write
19433 };
19434 @@ -288,7 +358,7 @@ static struct pci_raw_ops pci_bios_acces
19435 * Try to find PCI BIOS.
19436 */
19437
19438 -static struct pci_raw_ops * __devinit pci_find_bios(void)
19439 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
19440 {
19441 union bios32 *check;
19442 unsigned char sum;
19443 @@ -369,10 +439,13 @@ struct irq_routing_table * pcibios_get_i
19444
19445 DBG("PCI: Fetching IRQ routing table... ");
19446 __asm__("push %%es\n\t"
19447 + "movw %w8, %%ds\n\t"
19448 "push %%ds\n\t"
19449 "pop %%es\n\t"
19450 - "lcall *(%%esi); cld\n\t"
19451 + "lcall *%%ss:(%%esi); cld\n\t"
19452 "pop %%es\n\t"
19453 + "push %%ss\n\t"
19454 + "pop %%ds\n"
19455 "jc 1f\n\t"
19456 "xor %%ah, %%ah\n"
19457 "1:"
19458 @@ -383,7 +456,8 @@ struct irq_routing_table * pcibios_get_i
19459 "1" (0),
19460 "D" ((long) &opt),
19461 "S" (&pci_indirect),
19462 - "m" (opt)
19463 + "m" (opt),
19464 + "r" (__PCIBIOS_DS)
19465 : "memory");
19466 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
19467 if (ret & 0xff00)
19468 @@ -407,7 +481,10 @@ int pcibios_set_irq_routing(struct pci_d
19469 {
19470 int ret;
19471
19472 - __asm__("lcall *(%%esi); cld\n\t"
19473 + __asm__("movw %w5, %%ds\n\t"
19474 + "lcall *%%ss:(%%esi); cld\n\t"
19475 + "push %%ss\n\t"
19476 + "pop %%ds\n"
19477 "jc 1f\n\t"
19478 "xor %%ah, %%ah\n"
19479 "1:"
19480 @@ -415,7 +492,8 @@ int pcibios_set_irq_routing(struct pci_d
19481 : "0" (PCIBIOS_SET_PCI_HW_INT),
19482 "b" ((dev->bus->number << 8) | dev->devfn),
19483 "c" ((irq << 8) | (pin + 10)),
19484 - "S" (&pci_indirect));
19485 + "S" (&pci_indirect),
19486 + "r" (__PCIBIOS_DS));
19487 return !(ret & 0xff00);
19488 }
19489 EXPORT_SYMBOL(pcibios_set_irq_routing);
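
The pcbios.c hunks above stop running BIOS32/PCI BIOS far calls on the flat __KERNEL_CS and kernel %ds: they install dedicated __PCIBIOS_CS/__PCIBIOS_DS GDT descriptors whose base and limit are clamped to the BIOS image, and swap %ds around each lcall. The following is a minimal userspace sketch, purely illustrative, of how a base/limit/type/flags tuple is packed into the two descriptor dwords that the pack_descriptor()/write_gdt_entry() calls install; struct desc_sketch and pack_descriptor_sketch are names invented for this example, and the bit layout is the standard x86 one that the kernel helper used by the hunk also follows.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/*
 * Userspace sketch (not kernel code) of packing an x86 segment
 * descriptor. Struct and helper names are made up for the example;
 * the bit layout follows the x86 architecture manuals.
 */
struct desc_sketch {
	uint32_t a;	/* limit 15:0, base 15:0 */
	uint32_t b;	/* base 23:16, access byte, limit 19:16, flags, base 31:24 */
};

static void pack_descriptor_sketch(struct desc_sketch *d, uint32_t base,
				   uint32_t limit, uint8_t type, uint8_t flags)
{
	d->a = ((base & 0xffffu) << 16) | (limit & 0xffffu);
	d->b = (base & 0xff000000u) | ((base & 0xff0000u) >> 16) |
	       ((uint32_t)type << 8) | (limit & 0xf0000u) |
	       ((uint32_t)(flags & 0xf) << 20);
}

int main(void)
{
	struct desc_sketch d;

	/*
	 * Present, DPL0, 32-bit code segment (type 0x9B) covering the low
	 * 1 MiB with byte granularity (flags 0x4) -- the same descriptor
	 * type the hunk builds, with the real BIOS base and length, for
	 * images under 64 MiB.
	 */
	pack_descriptor_sketch(&d, 0x0, 0xFFFFF, 0x9B, 0x4);
	printf("descriptor dwords: %08" PRIx32 " %08" PRIx32 "\n", d.b, d.a);
	return 0;
}

Compiled with any C compiler this prints 004f9b00 0000ffff, i.e. a tightly bounded code descriptor rather than the flat 4 GiB __KERNEL_CS the unpatched code lets the BIOS run under.
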
19490 diff -urNp linux-2.6.34.1/arch/x86/power/cpu.c linux-2.6.34.1/arch/x86/power/cpu.c
19491 --- linux-2.6.34.1/arch/x86/power/cpu.c 2010-07-05 14:24:10.000000000 -0400
19492 +++ linux-2.6.34.1/arch/x86/power/cpu.c 2010-07-07 09:04:49.000000000 -0400
19493 @@ -127,7 +127,7 @@ static void do_fpu_end(void)
19494 static void fix_processor_context(void)
19495 {
19496 int cpu = smp_processor_id();
19497 - struct tss_struct *t = &per_cpu(init_tss, cpu);
19498 + struct tss_struct *t = init_tss + cpu;
19499
19500 set_tss_desc(cpu, t); /*
19501 * This just modifies memory; should not be
19502 @@ -137,7 +137,9 @@ static void fix_processor_context(void)
19503 */
19504
19505 #ifdef CONFIG_X86_64
19506 + pax_open_kernel();
19507 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
19508 + pax_close_kernel();
19509
19510 syscall_init(); /* This sets MSR_*STAR and related */
19511 #endif
19512 diff -urNp linux-2.6.34.1/arch/x86/vdso/Makefile linux-2.6.34.1/arch/x86/vdso/Makefile
19513 --- linux-2.6.34.1/arch/x86/vdso/Makefile 2010-07-05 14:24:10.000000000 -0400
19514 +++ linux-2.6.34.1/arch/x86/vdso/Makefile 2010-07-07 09:04:49.000000000 -0400
19515 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
19516 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
19517 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
19518
19519 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
19520 +VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
19521 GCOV_PROFILE := n
19522
19523 #
19524 diff -urNp linux-2.6.34.1/arch/x86/vdso/vclock_gettime.c linux-2.6.34.1/arch/x86/vdso/vclock_gettime.c
19525 --- linux-2.6.34.1/arch/x86/vdso/vclock_gettime.c 2010-07-05 14:24:10.000000000 -0400
19526 +++ linux-2.6.34.1/arch/x86/vdso/vclock_gettime.c 2010-07-07 09:04:49.000000000 -0400
19527 @@ -22,24 +22,48 @@
19528 #include <asm/hpet.h>
19529 #include <asm/unistd.h>
19530 #include <asm/io.h>
19531 +#include <asm/fixmap.h>
19532 #include "vextern.h"
19533
19534 #define gtod vdso_vsyscall_gtod_data
19535
19536 +notrace noinline long __vdso_fallback_time(long *t)
19537 +{
19538 + long secs;
19539 + asm volatile("syscall"
19540 + : "=a" (secs)
19541 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
19542 + return secs;
19543 +}
19544 +
19545 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
19546 {
19547 long ret;
19548 asm("syscall" : "=a" (ret) :
19549 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
19550 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
19551 return ret;
19552 }
19553
19554 +notrace static inline cycle_t __vdso_vread_hpet(void)
19555 +{
19556 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
19557 +}
19558 +
19559 +notrace static inline cycle_t __vdso_vread_tsc(void)
19560 +{
19561 + cycle_t ret = (cycle_t)vget_cycles();
19562 +
19563 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
19564 +}
19565 +
19566 notrace static inline long vgetns(void)
19567 {
19568 long v;
19569 - cycles_t (*vread)(void);
19570 - vread = gtod->clock.vread;
19571 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
19572 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
19573 + v = __vdso_vread_tsc();
19574 + else
19575 + v = __vdso_vread_hpet();
19576 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
19577 return (v * gtod->clock.mult) >> gtod->clock.shift;
19578 }
19579
19580 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
19581
19582 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
19583 {
19584 - if (likely(gtod->sysctl_enabled))
19585 + if (likely(gtod->sysctl_enabled &&
19586 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
19587 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
19588 switch (clock) {
19589 case CLOCK_REALTIME:
19590 if (likely(gtod->clock.vread))
19591 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
19592 int clock_gettime(clockid_t, struct timespec *)
19593 __attribute__((weak, alias("__vdso_clock_gettime")));
19594
19595 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
19596 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
19597 {
19598 long ret;
19599 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
19600 + asm("syscall" : "=a" (ret) :
19601 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
19602 + return ret;
19603 +}
19604 +
19605 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
19606 +{
19607 + if (likely(gtod->sysctl_enabled &&
19608 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
19609 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
19610 + {
19611 if (likely(tv != NULL)) {
19612 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
19613 offsetof(struct timespec, tv_nsec) ||
19614 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
19615 }
19616 return 0;
19617 }
19618 - asm("syscall" : "=a" (ret) :
19619 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
19620 - return ret;
19621 + return __vdso_fallback_gettimeofday(tv, tz);
19622 }
19623 int gettimeofday(struct timeval *, struct timezone *)
19624 __attribute__((weak, alias("__vdso_gettimeofday")));
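
Two details in the vclock_gettime.c hunks above are easy to miss in the noise: the syscall fallbacks now declare "r11" and "cx" as clobbers (the syscall instruction overwrites RCX and R11), and the fast paths no longer call through the kernel-supplied gtod->clock.vread pointer but read the TSC or HPET inline, selected by comparing the clocksource name byte by byte. The comparisons are open-coded because the vDSO cannot call ordinary kernel or libc helpers such as strcmp(); the hypothetical name_is() helper below is only meant to show what those chains of character tests amount to.

#include <stdio.h>

/*
 * Illustrative sketch: an always-inlined, libc-free string compare,
 * equivalent to the byte-by-byte clocksource-name checks in the hunk.
 * name_is() is a made-up name for this example.
 */
static inline __attribute__((always_inline))
int name_is(const char *name, const char *want)
{
	while (*want)
		if (*name++ != *want++)
			return 0;
	return *name == '\0';
}

int main(void)
{
	const char *clock = "tsc";

	if (name_is(clock, "tsc") || name_is(clock, "hpet"))
		printf("fast path: read the counter directly\n");
	else
		printf("fall back to the clock_gettime syscall\n");
	return 0;
}
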
19625 diff -urNp linux-2.6.34.1/arch/x86/vdso/vdso.lds.S linux-2.6.34.1/arch/x86/vdso/vdso.lds.S
19626 --- linux-2.6.34.1/arch/x86/vdso/vdso.lds.S 2010-07-05 14:24:10.000000000 -0400
19627 +++ linux-2.6.34.1/arch/x86/vdso/vdso.lds.S 2010-07-07 09:04:49.000000000 -0400
19628 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
19629 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
19630 #include "vextern.h"
19631 #undef VEXTERN
19632 +
19633 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
19634 +VEXTERN(fallback_gettimeofday)
19635 +VEXTERN(fallback_time)
19636 +VEXTERN(getcpu)
19637 +#undef VEXTERN
19638 diff -urNp linux-2.6.34.1/arch/x86/vdso/vdso32-setup.c linux-2.6.34.1/arch/x86/vdso/vdso32-setup.c
19639 --- linux-2.6.34.1/arch/x86/vdso/vdso32-setup.c 2010-07-05 14:24:10.000000000 -0400
19640 +++ linux-2.6.34.1/arch/x86/vdso/vdso32-setup.c 2010-07-07 09:04:49.000000000 -0400
19641 @@ -25,6 +25,7 @@
19642 #include <asm/tlbflush.h>
19643 #include <asm/vdso.h>
19644 #include <asm/proto.h>
19645 +#include <asm/mman.h>
19646
19647 enum {
19648 VDSO_DISABLED = 0,
19649 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
19650 void enable_sep_cpu(void)
19651 {
19652 int cpu = get_cpu();
19653 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19654 + struct tss_struct *tss = init_tss + cpu;
19655
19656 if (!boot_cpu_has(X86_FEATURE_SEP)) {
19657 put_cpu();
19658 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
19659 gate_vma.vm_start = FIXADDR_USER_START;
19660 gate_vma.vm_end = FIXADDR_USER_END;
19661 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
19662 - gate_vma.vm_page_prot = __P101;
19663 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
19664 /*
19665 * Make sure the vDSO gets into every core dump.
19666 * Dumping its contents makes post-mortem fully interpretable later
19667 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
19668 if (compat)
19669 addr = VDSO_HIGH_BASE;
19670 else {
19671 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
19672 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
19673 if (IS_ERR_VALUE(addr)) {
19674 ret = addr;
19675 goto up_fail;
19676 }
19677 }
19678
19679 - current->mm->context.vdso = (void *)addr;
19680 + current->mm->context.vdso = addr;
19681
19682 if (compat_uses_vma || !compat) {
19683 /*
19684 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
19685 }
19686
19687 current_thread_info()->sysenter_return =
19688 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
19689 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
19690
19691 up_fail:
19692 if (ret)
19693 - current->mm->context.vdso = NULL;
19694 + current->mm->context.vdso = 0;
19695
19696 up_write(&mm->mmap_sem);
19697
19698 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
19699
19700 const char *arch_vma_name(struct vm_area_struct *vma)
19701 {
19702 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19703 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19704 return "[vdso]";
19705 +
19706 +#ifdef CONFIG_PAX_SEGMEXEC
19707 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
19708 + return "[vdso]";
19709 +#endif
19710 +
19711 return NULL;
19712 }
19713
19714 @@ -422,7 +429,7 @@ struct vm_area_struct *get_gate_vma(stru
19715 struct mm_struct *mm = tsk->mm;
19716
19717 /* Check to see if this task was created in compat vdso mode */
19718 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
19719 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
19720 return &gate_vma;
19721 return NULL;
19722 }
19723 diff -urNp linux-2.6.34.1/arch/x86/vdso/vextern.h linux-2.6.34.1/arch/x86/vdso/vextern.h
19724 --- linux-2.6.34.1/arch/x86/vdso/vextern.h 2010-07-05 14:24:10.000000000 -0400
19725 +++ linux-2.6.34.1/arch/x86/vdso/vextern.h 2010-07-07 09:04:49.000000000 -0400
19726 @@ -11,6 +11,5 @@
19727 put into vextern.h and be referenced as a pointer with vdso prefix.
19728 The main kernel later fills in the values. */
19729
19730 -VEXTERN(jiffies)
19731 VEXTERN(vgetcpu_mode)
19732 VEXTERN(vsyscall_gtod_data)
19733 diff -urNp linux-2.6.34.1/arch/x86/vdso/vma.c linux-2.6.34.1/arch/x86/vdso/vma.c
19734 --- linux-2.6.34.1/arch/x86/vdso/vma.c 2010-07-05 14:24:10.000000000 -0400
19735 +++ linux-2.6.34.1/arch/x86/vdso/vma.c 2010-07-07 09:04:49.000000000 -0400
19736 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
19737 if (!vbase)
19738 goto oom;
19739
19740 - if (memcmp(vbase, "\177ELF", 4)) {
19741 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
19742 printk("VDSO: I'm broken; not ELF\n");
19743 vdso_enabled = 0;
19744 }
19745 @@ -67,6 +67,7 @@ static int __init init_vdso_vars(void)
19746 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
19747 #include "vextern.h"
19748 #undef VEXTERN
19749 + vunmap(vbase);
19750 return 0;
19751
19752 oom:
19753 @@ -117,7 +118,7 @@ int arch_setup_additional_pages(struct l
19754 goto up_fail;
19755 }
19756
19757 - current->mm->context.vdso = (void *)addr;
19758 + current->mm->context.vdso = addr;
19759
19760 ret = install_special_mapping(mm, addr, vdso_size,
19761 VM_READ|VM_EXEC|
19762 @@ -125,7 +126,7 @@ int arch_setup_additional_pages(struct l
19763 VM_ALWAYSDUMP,
19764 vdso_pages);
19765 if (ret) {
19766 - current->mm->context.vdso = NULL;
19767 + current->mm->context.vdso = 0;
19768 goto up_fail;
19769 }
19770
19771 @@ -133,10 +134,3 @@ up_fail:
19772 up_write(&mm->mmap_sem);
19773 return ret;
19774 }
19775 -
19776 -static __init int vdso_setup(char *s)
19777 -{
19778 - vdso_enabled = simple_strtoul(s, NULL, 0);
19779 - return 0;
19780 -}
19781 -__setup("vdso=", vdso_setup);
19782 diff -urNp linux-2.6.34.1/arch/x86/xen/enlighten.c linux-2.6.34.1/arch/x86/xen/enlighten.c
19783 --- linux-2.6.34.1/arch/x86/xen/enlighten.c 2010-07-05 14:24:10.000000000 -0400
19784 +++ linux-2.6.34.1/arch/x86/xen/enlighten.c 2010-07-07 09:04:49.000000000 -0400
19785 @@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
19786
19787 struct shared_info xen_dummy_shared_info;
19788
19789 -void *xen_initial_gdt;
19790 -
19791 /*
19792 * Point at some empty memory to start with. We map the real shared_info
19793 * page as soon as fixmap is up and running.
19794 @@ -551,7 +549,7 @@ static void xen_write_idt_entry(gate_des
19795
19796 preempt_disable();
19797
19798 - start = __get_cpu_var(idt_desc).address;
19799 + start = (unsigned long)__get_cpu_var(idt_desc).address;
19800 end = start + __get_cpu_var(idt_desc).size + 1;
19801
19802 xen_mc_flush();
19803 @@ -1103,7 +1101,17 @@ asmlinkage void __init xen_start_kernel(
19804 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
19805
19806 /* Work out if we support NX */
19807 - x86_configure_nx();
19808 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19809 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
19810 + (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) {
19811 + unsigned l, h;
19812 +
19813 + __supported_pte_mask |= _PAGE_NX;
19814 + rdmsr(MSR_EFER, l, h);
19815 + l |= EFER_NX;
19816 + wrmsr(MSR_EFER, l, h);
19817 + }
19818 +#endif
19819
19820 xen_setup_features();
19821
19822 @@ -1134,13 +1142,6 @@ asmlinkage void __init xen_start_kernel(
19823
19824 machine_ops = xen_machine_ops;
19825
19826 - /*
19827 - * The only reliable way to retain the initial address of the
19828 - * percpu gdt_page is to remember it here, so we can go and
19829 - * mark it RW later, when the initial percpu area is freed.
19830 - */
19831 - xen_initial_gdt = &per_cpu(gdt_page, 0);
19832 -
19833 xen_smp_init();
19834
19835 pgd = (pgd_t *)xen_start_info->pt_base;
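
In xen_start_kernel the patch replaces the x86_configure_nx() call with an open-coded check of the extended CPUID leaf for the NX bit, followed by setting _PAGE_NX in __supported_pte_mask and EFER.NX via rdmsr/wrmsr. Below is a small userspace sketch of just the detection half (x86-only, using GCC/clang's <cpuid.h>); the EFER and page-table side obviously cannot be reproduced outside the kernel.

#include <stdio.h>
#include <cpuid.h>

/*
 * NX (execute disable) support is reported in CPUID leaf 0x80000001,
 * EDX bit 20. __get_cpuid() also verifies that the extended leaf range
 * exists, matching the 0xffff0000 sanity check in the hunk.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) &&
	    (edx & (1u << 20)))
		printf("CPU reports NX support\n");
	else
		printf("no NX support reported\n");
	return 0;
}
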
19836 diff -urNp linux-2.6.34.1/arch/x86/xen/mmu.c linux-2.6.34.1/arch/x86/xen/mmu.c
19837 --- linux-2.6.34.1/arch/x86/xen/mmu.c 2010-07-05 14:24:10.000000000 -0400
19838 +++ linux-2.6.34.1/arch/x86/xen/mmu.c 2010-07-07 09:04:49.000000000 -0400
19839 @@ -1694,6 +1694,8 @@ __init pgd_t *xen_setup_kernel_pagetable
19840 convert_pfn_mfn(init_level4_pgt);
19841 convert_pfn_mfn(level3_ident_pgt);
19842 convert_pfn_mfn(level3_kernel_pgt);
19843 + convert_pfn_mfn(level3_vmalloc_pgt);
19844 + convert_pfn_mfn(level3_vmemmap_pgt);
19845
19846 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
19847 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
19848 @@ -1712,7 +1714,10 @@ __init pgd_t *xen_setup_kernel_pagetable
19849 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
19850 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
19851 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
19852 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
19853 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
19854 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
19855 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
19856 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
19857 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
19858
19859 diff -urNp linux-2.6.34.1/arch/x86/xen/smp.c linux-2.6.34.1/arch/x86/xen/smp.c
19860 --- linux-2.6.34.1/arch/x86/xen/smp.c 2010-07-05 14:24:10.000000000 -0400
19861 +++ linux-2.6.34.1/arch/x86/xen/smp.c 2010-07-07 09:04:49.000000000 -0400
19862 @@ -169,11 +169,6 @@ static void __init xen_smp_prepare_boot_
19863 {
19864 BUG_ON(smp_processor_id() != 0);
19865 native_smp_prepare_boot_cpu();
19866 -
19867 - /* We've switched to the "real" per-cpu gdt, so make sure the
19868 - old memory can be recycled */
19869 - make_lowmem_page_readwrite(xen_initial_gdt);
19870 -
19871 xen_setup_vcpu_info_placement();
19872 }
19873
19874 @@ -233,8 +228,8 @@ cpu_initialize_context(unsigned int cpu,
19875 gdt = get_cpu_gdt_table(cpu);
19876
19877 ctxt->flags = VGCF_IN_KERNEL;
19878 - ctxt->user_regs.ds = __USER_DS;
19879 - ctxt->user_regs.es = __USER_DS;
19880 + ctxt->user_regs.ds = __KERNEL_DS;
19881 + ctxt->user_regs.es = __KERNEL_DS;
19882 ctxt->user_regs.ss = __KERNEL_DS;
19883 #ifdef CONFIG_X86_32
19884 ctxt->user_regs.fs = __KERNEL_PERCPU;
19885 diff -urNp linux-2.6.34.1/arch/x86/xen/xen-head.S linux-2.6.34.1/arch/x86/xen/xen-head.S
19886 --- linux-2.6.34.1/arch/x86/xen/xen-head.S 2010-07-05 14:24:10.000000000 -0400
19887 +++ linux-2.6.34.1/arch/x86/xen/xen-head.S 2010-07-07 09:04:49.000000000 -0400
19888 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
19889 #ifdef CONFIG_X86_32
19890 mov %esi,xen_start_info
19891 mov $init_thread_union+THREAD_SIZE,%esp
19892 +#ifdef CONFIG_SMP
19893 + movl $cpu_gdt_table,%edi
19894 + movl $__per_cpu_load,%eax
19895 + movw %ax,__KERNEL_PERCPU + 2(%edi)
19896 + rorl $16,%eax
19897 + movb %al,__KERNEL_PERCPU + 4(%edi)
19898 + movb %ah,__KERNEL_PERCPU + 7(%edi)
19899 + movl $__per_cpu_end - 1,%eax
19900 + subl $__per_cpu_start,%eax
19901 + movw %ax,__KERNEL_PERCPU + 0(%edi)
19902 +#endif
19903 #else
19904 mov %rsi,xen_start_info
19905 mov $init_thread_union+THREAD_SIZE,%rsp
19906 diff -urNp linux-2.6.34.1/arch/x86/xen/xen-ops.h linux-2.6.34.1/arch/x86/xen/xen-ops.h
19907 --- linux-2.6.34.1/arch/x86/xen/xen-ops.h 2010-07-05 14:24:10.000000000 -0400
19908 +++ linux-2.6.34.1/arch/x86/xen/xen-ops.h 2010-07-07 09:04:49.000000000 -0400
19909 @@ -10,8 +10,6 @@
19910 extern const char xen_hypervisor_callback[];
19911 extern const char xen_failsafe_callback[];
19912
19913 -extern void *xen_initial_gdt;
19914 -
19915 struct trap_info;
19916 void xen_copy_trap_info(struct trap_info *traps);
19917
19918 diff -urNp linux-2.6.34.1/block/blk-iopoll.c linux-2.6.34.1/block/blk-iopoll.c
19919 --- linux-2.6.34.1/block/blk-iopoll.c 2010-07-05 14:24:10.000000000 -0400
19920 +++ linux-2.6.34.1/block/blk-iopoll.c 2010-07-07 09:04:49.000000000 -0400
19921 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
19922 }
19923 EXPORT_SYMBOL(blk_iopoll_complete);
19924
19925 -static void blk_iopoll_softirq(struct softirq_action *h)
19926 +static void blk_iopoll_softirq(void)
19927 {
19928 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
19929 int rearm = 0, budget = blk_iopoll_budget;
19930 diff -urNp linux-2.6.34.1/block/blk-map.c linux-2.6.34.1/block/blk-map.c
19931 --- linux-2.6.34.1/block/blk-map.c 2010-07-05 14:24:10.000000000 -0400
19932 +++ linux-2.6.34.1/block/blk-map.c 2010-07-07 09:04:49.000000000 -0400
19933 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
19934 * direct dma. else, set up kernel bounce buffers
19935 */
19936 uaddr = (unsigned long) ubuf;
19937 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
19938 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
19939 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
19940 else
19941 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
19942 @@ -297,7 +297,7 @@ int blk_rq_map_kern(struct request_queue
19943 if (!len || !kbuf)
19944 return -EINVAL;
19945
19946 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
19947 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
19948 if (do_copy)
19949 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
19950 else
19951 diff -urNp linux-2.6.34.1/block/blk-softirq.c linux-2.6.34.1/block/blk-softirq.c
19952 --- linux-2.6.34.1/block/blk-softirq.c 2010-07-05 14:24:10.000000000 -0400
19953 +++ linux-2.6.34.1/block/blk-softirq.c 2010-07-07 09:04:49.000000000 -0400
19954 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
19955 * Softirq action handler - move entries to local list and loop over them
19956 * while passing them to the queue registered handler.
19957 */
19958 -static void blk_done_softirq(struct softirq_action *h)
19959 +static void blk_done_softirq(void)
19960 {
19961 struct list_head *cpu_list, local_list;
19962
19963 diff -urNp linux-2.6.34.1/crypto/lrw.c linux-2.6.34.1/crypto/lrw.c
19964 --- linux-2.6.34.1/crypto/lrw.c 2010-07-05 14:24:10.000000000 -0400
19965 +++ linux-2.6.34.1/crypto/lrw.c 2010-07-07 09:04:49.000000000 -0400
19966 @@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *par
19967 struct priv *ctx = crypto_tfm_ctx(parent);
19968 struct crypto_cipher *child = ctx->child;
19969 int err, i;
19970 - be128 tmp = { 0 };
19971 + be128 tmp = { 0, 0 };
19972 int bsize = crypto_cipher_blocksize(child);
19973
19974 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
19975 diff -urNp linux-2.6.34.1/drivers/acpi/battery.c linux-2.6.34.1/drivers/acpi/battery.c
19976 --- linux-2.6.34.1/drivers/acpi/battery.c 2010-07-05 14:24:10.000000000 -0400
19977 +++ linux-2.6.34.1/drivers/acpi/battery.c 2010-07-07 09:04:50.000000000 -0400
19978 @@ -810,7 +810,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
19979 }
19980
19981 static struct battery_file {
19982 - struct file_operations ops;
19983 + const struct file_operations ops;
19984 mode_t mode;
19985 const char *name;
19986 } acpi_battery_file[] = {
19987 diff -urNp linux-2.6.34.1/drivers/acpi/blacklist.c linux-2.6.34.1/drivers/acpi/blacklist.c
19988 --- linux-2.6.34.1/drivers/acpi/blacklist.c 2010-07-05 14:24:10.000000000 -0400
19989 +++ linux-2.6.34.1/drivers/acpi/blacklist.c 2010-07-07 09:04:50.000000000 -0400
19990 @@ -73,7 +73,7 @@ static struct acpi_blacklist_item acpi_b
19991 {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
19992 "Incorrect _ADR", 1},
19993
19994 - {""}
19995 + {"", "", 0, NULL, all_versions, NULL, 0}
19996 };
19997
19998 #if CONFIG_ACPI_BLACKLIST_YEAR
19999 diff -urNp linux-2.6.34.1/drivers/acpi/dock.c linux-2.6.34.1/drivers/acpi/dock.c
20000 --- linux-2.6.34.1/drivers/acpi/dock.c 2010-07-05 14:24:10.000000000 -0400
20001 +++ linux-2.6.34.1/drivers/acpi/dock.c 2010-07-07 09:04:50.000000000 -0400
20002 @@ -77,7 +77,7 @@ struct dock_dependent_device {
20003 struct list_head list;
20004 struct list_head hotplug_list;
20005 acpi_handle handle;
20006 - struct acpi_dock_ops *ops;
20007 + const struct acpi_dock_ops *ops;
20008 void *context;
20009 };
20010
20011 @@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
20012 * the dock driver after _DCK is executed.
20013 */
20014 int
20015 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
20016 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
20017 void *context)
20018 {
20019 struct dock_dependent_device *dd;
20020 diff -urNp linux-2.6.34.1/drivers/acpi/osl.c linux-2.6.34.1/drivers/acpi/osl.c
20021 --- linux-2.6.34.1/drivers/acpi/osl.c 2010-07-05 14:24:10.000000000 -0400
20022 +++ linux-2.6.34.1/drivers/acpi/osl.c 2010-07-07 09:04:50.000000000 -0400
20023 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
20024 void __iomem *virt_addr;
20025
20026 virt_addr = ioremap(phys_addr, width);
20027 + if (!virt_addr)
20028 + return AE_NO_MEMORY;
20029 if (!value)
20030 value = &dummy;
20031
20032 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
20033 void __iomem *virt_addr;
20034
20035 virt_addr = ioremap(phys_addr, width);
20036 + if (!virt_addr)
20037 + return AE_NO_MEMORY;
20038
20039 switch (width) {
20040 case 8:
20041 diff -urNp linux-2.6.34.1/drivers/acpi/power_meter.c linux-2.6.34.1/drivers/acpi/power_meter.c
20042 --- linux-2.6.34.1/drivers/acpi/power_meter.c 2010-07-05 14:24:10.000000000 -0400
20043 +++ linux-2.6.34.1/drivers/acpi/power_meter.c 2010-07-07 09:04:50.000000000 -0400
20044 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
20045 return res;
20046
20047 temp /= 1000;
20048 - if (temp < 0)
20049 - return -EINVAL;
20050
20051 mutex_lock(&resource->lock);
20052 resource->trip[attr->index - 7] = temp;
20053 diff -urNp linux-2.6.34.1/drivers/acpi/proc.c linux-2.6.34.1/drivers/acpi/proc.c
20054 --- linux-2.6.34.1/drivers/acpi/proc.c 2010-07-05 14:24:10.000000000 -0400
20055 +++ linux-2.6.34.1/drivers/acpi/proc.c 2010-07-07 09:04:50.000000000 -0400
20056 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
20057 size_t count, loff_t * ppos)
20058 {
20059 struct list_head *node, *next;
20060 - char strbuf[5];
20061 - char str[5] = "";
20062 - unsigned int len = count;
20063 + char strbuf[5] = {0};
20064 struct acpi_device *found_dev = NULL;
20065
20066 - if (len > 4)
20067 - len = 4;
20068 - if (len < 0)
20069 - return -EFAULT;
20070 + if (count > 4)
20071 + count = 4;
20072
20073 - if (copy_from_user(strbuf, buffer, len))
20074 + if (copy_from_user(strbuf, buffer, count))
20075 return -EFAULT;
20076 - strbuf[len] = '\0';
20077 - sscanf(strbuf, "%s", str);
20078 + strbuf[count] = '\0';
20079
20080 mutex_lock(&acpi_device_lock);
20081 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
20082 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
20083 if (!dev->wakeup.flags.valid)
20084 continue;
20085
20086 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
20087 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
20088 dev->wakeup.state.enabled =
20089 dev->wakeup.state.enabled ? 0 : 1;
20090 found_dev = dev;
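
The acpi/proc.c hunk above tightens a /proc write handler: count is clamped to four bytes, copied into a zero-initialised five-byte buffer, and terminated explicitly, while the dead `len < 0` test on an unsigned variable and the sscanf() round trip through a second buffer are dropped. A hedged userspace sketch of the resulting bounded-copy shape follows; write_wakeup_sketch is invented for the example and memcpy() stands in for copy_from_user().

#include <stdio.h>
#include <string.h>

/* Clamp, copy into a zeroed buffer, terminate -- nothing more. */
static int write_wakeup_sketch(const char *user_buf, size_t count)
{
	char strbuf[5] = {0};

	if (count > 4)
		count = 4;
	memcpy(strbuf, user_buf, count);	/* copy_from_user() in the kernel */
	strbuf[count] = '\0';

	printf("device id: \"%s\"\n", strbuf);
	return 0;
}

int main(void)
{
	return write_wakeup_sketch("PBTN\n", 5);
}
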
20091 diff -urNp linux-2.6.34.1/drivers/acpi/processor_driver.c linux-2.6.34.1/drivers/acpi/processor_driver.c
20092 --- linux-2.6.34.1/drivers/acpi/processor_driver.c 2010-07-05 14:24:10.000000000 -0400
20093 +++ linux-2.6.34.1/drivers/acpi/processor_driver.c 2010-07-07 09:04:50.000000000 -0400
20094 @@ -581,7 +581,7 @@ static int __cpuinit acpi_processor_add(
20095 return 0;
20096 }
20097
20098 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
20099 + BUG_ON(pr->id >= nr_cpu_ids);
20100
20101 /*
20102 * Buggy BIOS check
20103 diff -urNp linux-2.6.34.1/drivers/acpi/processor_idle.c linux-2.6.34.1/drivers/acpi/processor_idle.c
20104 --- linux-2.6.34.1/drivers/acpi/processor_idle.c 2010-07-05 14:24:10.000000000 -0400
20105 +++ linux-2.6.34.1/drivers/acpi/processor_idle.c 2010-07-07 09:04:50.000000000 -0400
20106 @@ -119,7 +119,7 @@ static struct dmi_system_id __cpuinitdat
20107 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
20108 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
20109 (void *)1},
20110 - {},
20111 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL},
20112 };
20113
20114
20115 diff -urNp linux-2.6.34.1/drivers/acpi/sleep.c linux-2.6.34.1/drivers/acpi/sleep.c
20116 --- linux-2.6.34.1/drivers/acpi/sleep.c 2010-07-05 14:24:10.000000000 -0400
20117 +++ linux-2.6.34.1/drivers/acpi/sleep.c 2010-07-07 09:04:50.000000000 -0400
20118 @@ -302,7 +302,7 @@ static int acpi_suspend_state_valid(susp
20119 }
20120 }
20121
20122 -static struct platform_suspend_ops acpi_suspend_ops = {
20123 +static const struct platform_suspend_ops acpi_suspend_ops = {
20124 .valid = acpi_suspend_state_valid,
20125 .begin = acpi_suspend_begin,
20126 .prepare_late = acpi_pm_prepare,
20127 @@ -330,7 +330,7 @@ static int acpi_suspend_begin_old(suspen
20128 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
20129 * been requested.
20130 */
20131 -static struct platform_suspend_ops acpi_suspend_ops_old = {
20132 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
20133 .valid = acpi_suspend_state_valid,
20134 .begin = acpi_suspend_begin_old,
20135 .prepare_late = acpi_pm_disable_gpes,
20136 @@ -622,7 +622,7 @@ static void acpi_pm_restore_cleanup(void
20137 acpi_enable_all_runtime_gpes();
20138 }
20139
20140 -static struct platform_hibernation_ops acpi_hibernation_ops = {
20141 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
20142 .begin = acpi_hibernation_begin,
20143 .end = acpi_pm_end,
20144 .pre_snapshot = acpi_hibernation_pre_snapshot,
20145 @@ -675,7 +675,7 @@ static int acpi_hibernation_pre_snapshot
20146 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
20147 * been requested.
20148 */
20149 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
20150 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
20151 .begin = acpi_hibernation_begin_old,
20152 .end = acpi_pm_end,
20153 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
20154 diff -urNp linux-2.6.34.1/drivers/acpi/video.c linux-2.6.34.1/drivers/acpi/video.c
20155 --- linux-2.6.34.1/drivers/acpi/video.c 2010-07-05 14:24:10.000000000 -0400
20156 +++ linux-2.6.34.1/drivers/acpi/video.c 2010-07-07 09:04:50.000000000 -0400
20157 @@ -367,7 +367,7 @@ static int acpi_video_set_brightness(str
20158 vd->brightness->levels[request_level]);
20159 }
20160
20161 -static struct backlight_ops acpi_backlight_ops = {
20162 +static const struct backlight_ops acpi_backlight_ops = {
20163 .get_brightness = acpi_video_get_brightness,
20164 .update_status = acpi_video_set_brightness,
20165 };
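
A pattern that repeats through the ACPI hunks above and the libata hunks below is constifying operations tables: struct acpi_dock_ops, platform_suspend_ops, platform_hibernation_ops, backlight_ops, ata_port_operations and friends become const, so the function-pointer tables land in read-only data and cannot be silently retargeted at run time (the few writes that must still happen are bracketed by pax_open_kernel()/pax_close_kernel(), as in libata's ata_finalize_port_ops() further down). A tiny illustrative sketch of the idea, with made-up names:

#include <stdio.h>

/*
 * Illustrative userspace sketch of the "constify ops structures"
 * pattern: a const table of function pointers is emitted into .rodata,
 * and any attempt to overwrite a member is rejected at compile time.
 * demo_ops, demo_read and demo_write are invented for this example.
 */
struct demo_ops {
	int (*read)(int reg);
	int (*write)(int reg, int val);
};

static int demo_read(int reg)           { return reg * 2; }
static int demo_write(int reg, int val) { return reg + val; }

/* const placement: the compiler puts this table in read-only data. */
static const struct demo_ops demo = {
	.read  = demo_read,
	.write = demo_write,
};

int main(void)
{
	/* demo.read = NULL;   <-- would now be a compile-time error */
	printf("%d %d\n", demo.read(21), demo.write(40, 2));
	return 0;
}
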
20166 diff -urNp linux-2.6.34.1/drivers/ata/ahci.c linux-2.6.34.1/drivers/ata/ahci.c
20167 --- linux-2.6.34.1/drivers/ata/ahci.c 2010-07-05 14:24:10.000000000 -0400
20168 +++ linux-2.6.34.1/drivers/ata/ahci.c 2010-07-07 09:04:50.000000000 -0400
20169 @@ -407,7 +407,7 @@ static struct scsi_host_template ahci_sh
20170 .sdev_attrs = ahci_sdev_attrs,
20171 };
20172
20173 -static struct ata_port_operations ahci_ops = {
20174 +static const struct ata_port_operations ahci_ops = {
20175 .inherits = &sata_pmp_port_ops,
20176
20177 .qc_defer = ahci_pmp_qc_defer,
20178 @@ -444,17 +444,17 @@ static struct ata_port_operations ahci_o
20179 .port_stop = ahci_port_stop,
20180 };
20181
20182 -static struct ata_port_operations ahci_vt8251_ops = {
20183 +static const struct ata_port_operations ahci_vt8251_ops = {
20184 .inherits = &ahci_ops,
20185 .hardreset = ahci_vt8251_hardreset,
20186 };
20187
20188 -static struct ata_port_operations ahci_p5wdh_ops = {
20189 +static const struct ata_port_operations ahci_p5wdh_ops = {
20190 .inherits = &ahci_ops,
20191 .hardreset = ahci_p5wdh_hardreset,
20192 };
20193
20194 -static struct ata_port_operations ahci_sb600_ops = {
20195 +static const struct ata_port_operations ahci_sb600_ops = {
20196 .inherits = &ahci_ops,
20197 .softreset = ahci_sb600_softreset,
20198 .pmp_softreset = ahci_sb600_softreset,
20199 @@ -722,7 +722,7 @@ static const struct pci_device_id ahci_p
20200 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
20201 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
20202
20203 - { } /* terminate list */
20204 + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
20205 };
20206
20207
20208 diff -urNp linux-2.6.34.1/drivers/ata/ata_generic.c linux-2.6.34.1/drivers/ata/ata_generic.c
20209 --- linux-2.6.34.1/drivers/ata/ata_generic.c 2010-07-05 14:24:10.000000000 -0400
20210 +++ linux-2.6.34.1/drivers/ata/ata_generic.c 2010-07-07 09:04:50.000000000 -0400
20211 @@ -95,7 +95,7 @@ static struct scsi_host_template generic
20212 ATA_BMDMA_SHT(DRV_NAME),
20213 };
20214
20215 -static struct ata_port_operations generic_port_ops = {
20216 +static const struct ata_port_operations generic_port_ops = {
20217 .inherits = &ata_bmdma_port_ops,
20218 .cable_detect = ata_cable_unknown,
20219 .set_mode = generic_set_mode,
20220 diff -urNp linux-2.6.34.1/drivers/ata/ata_piix.c linux-2.6.34.1/drivers/ata/ata_piix.c
20221 --- linux-2.6.34.1/drivers/ata/ata_piix.c 2010-07-05 14:24:10.000000000 -0400
20222 +++ linux-2.6.34.1/drivers/ata/ata_piix.c 2010-07-07 09:04:50.000000000 -0400
20223 @@ -301,7 +301,7 @@ static const struct pci_device_id piix_p
20224 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
20225 /* SATA Controller IDE (CPT) */
20226 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
20227 - { } /* terminate list */
20228 + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
20229 };
20230
20231 static struct pci_driver piix_pci_driver = {
20232 @@ -319,12 +319,12 @@ static struct scsi_host_template piix_sh
20233 ATA_BMDMA_SHT(DRV_NAME),
20234 };
20235
20236 -static struct ata_port_operations piix_sata_ops = {
20237 +static const struct ata_port_operations piix_sata_ops = {
20238 .inherits = &ata_bmdma32_port_ops,
20239 .sff_irq_check = piix_irq_check,
20240 };
20241
20242 -static struct ata_port_operations piix_pata_ops = {
20243 +static const struct ata_port_operations piix_pata_ops = {
20244 .inherits = &piix_sata_ops,
20245 .cable_detect = ata_cable_40wire,
20246 .set_piomode = piix_set_piomode,
20247 @@ -332,18 +332,18 @@ static struct ata_port_operations piix_p
20248 .prereset = piix_pata_prereset,
20249 };
20250
20251 -static struct ata_port_operations piix_vmw_ops = {
20252 +static const struct ata_port_operations piix_vmw_ops = {
20253 .inherits = &piix_pata_ops,
20254 .bmdma_status = piix_vmw_bmdma_status,
20255 };
20256
20257 -static struct ata_port_operations ich_pata_ops = {
20258 +static const struct ata_port_operations ich_pata_ops = {
20259 .inherits = &piix_pata_ops,
20260 .cable_detect = ich_pata_cable_detect,
20261 .set_dmamode = ich_set_dmamode,
20262 };
20263
20264 -static struct ata_port_operations piix_sidpr_sata_ops = {
20265 +static const struct ata_port_operations piix_sidpr_sata_ops = {
20266 .inherits = &piix_sata_ops,
20267 .hardreset = sata_std_hardreset,
20268 .scr_read = piix_sidpr_scr_read,
20269 @@ -619,7 +619,7 @@ static const struct ich_laptop ich_lapto
20270 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
20271 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
20272 /* end marker */
20273 - { 0, }
20274 + { 0, 0, 0 }
20275 };
20276
20277 /**
20278 @@ -1105,7 +1105,7 @@ static int piix_broken_suspend(void)
20279 },
20280 },
20281
20282 - { } /* terminate list */
20283 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */
20284 };
20285 static const char *oemstrs[] = {
20286 "Tecra M3,",
20287 diff -urNp linux-2.6.34.1/drivers/ata/libata-acpi.c linux-2.6.34.1/drivers/ata/libata-acpi.c
20288 --- linux-2.6.34.1/drivers/ata/libata-acpi.c 2010-07-05 14:24:10.000000000 -0400
20289 +++ linux-2.6.34.1/drivers/ata/libata-acpi.c 2010-07-07 09:04:50.000000000 -0400
20290 @@ -224,12 +224,12 @@ static void ata_acpi_dev_uevent(acpi_han
20291 ata_acpi_uevent(dev->link->ap, dev, event);
20292 }
20293
20294 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
20295 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
20296 .handler = ata_acpi_dev_notify_dock,
20297 .uevent = ata_acpi_dev_uevent,
20298 };
20299
20300 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
20301 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
20302 .handler = ata_acpi_ap_notify_dock,
20303 .uevent = ata_acpi_ap_uevent,
20304 };
20305 diff -urNp linux-2.6.34.1/drivers/ata/libata-core.c linux-2.6.34.1/drivers/ata/libata-core.c
20306 --- linux-2.6.34.1/drivers/ata/libata-core.c 2010-07-05 14:24:10.000000000 -0400
20307 +++ linux-2.6.34.1/drivers/ata/libata-core.c 2010-07-07 09:04:50.000000000 -0400
20308 @@ -901,7 +901,7 @@ static const struct ata_xfer_ent {
20309 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
20310 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
20311 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
20312 - { -1, },
20313 + { -1, 0, 0 }
20314 };
20315
20316 /**
20317 @@ -3170,7 +3170,7 @@ static const struct ata_timing ata_timin
20318 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
20319 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
20320
20321 - { 0xFF }
20322 + { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 }
20323 };
20324
20325 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
20326 @@ -4415,7 +4415,7 @@ static const struct ata_blacklist_entry
20327 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
20328
20329 /* End Marker */
20330 - { }
20331 + { NULL, NULL, 0 }
20332 };
20333
20334 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
20335 @@ -5991,7 +5991,7 @@ static void ata_host_stop(struct device
20336 * LOCKING:
20337 * None.
20338 */
20339 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
20340 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
20341 {
20342 static DEFINE_SPINLOCK(lock);
20343 const struct ata_port_operations *cur;
20344 @@ -6003,6 +6003,7 @@ static void ata_finalize_port_ops(struct
20345 return;
20346
20347 spin_lock(&lock);
20348 + pax_open_kernel();
20349
20350 for (cur = ops->inherits; cur; cur = cur->inherits) {
20351 void **inherit = (void **)cur;
20352 @@ -6016,8 +6017,9 @@ static void ata_finalize_port_ops(struct
20353 if (IS_ERR(*pp))
20354 *pp = NULL;
20355
20356 - ops->inherits = NULL;
20357 + ((struct ata_port_operations *)ops)->inherits = NULL;
20358
20359 + pax_close_kernel();
20360 spin_unlock(&lock);
20361 }
20362
20363 @@ -6114,7 +6116,7 @@ int ata_host_start(struct ata_host *host
20364 */
20365 /* KILLME - the only user left is ipr */
20366 void ata_host_init(struct ata_host *host, struct device *dev,
20367 - unsigned long flags, struct ata_port_operations *ops)
20368 + unsigned long flags, const struct ata_port_operations *ops)
20369 {
20370 spin_lock_init(&host->lock);
20371 host->dev = dev;
20372 @@ -6784,7 +6786,7 @@ static void ata_dummy_error_handler(stru
20373 /* truly dummy */
20374 }
20375
20376 -struct ata_port_operations ata_dummy_port_ops = {
20377 +const struct ata_port_operations ata_dummy_port_ops = {
20378 .qc_prep = ata_noop_qc_prep,
20379 .qc_issue = ata_dummy_qc_issue,
20380 .error_handler = ata_dummy_error_handler,
20381 diff -urNp linux-2.6.34.1/drivers/ata/libata-eh.c linux-2.6.34.1/drivers/ata/libata-eh.c
20382 --- linux-2.6.34.1/drivers/ata/libata-eh.c 2010-07-05 14:24:10.000000000 -0400
20383 +++ linux-2.6.34.1/drivers/ata/libata-eh.c 2010-07-07 09:04:50.000000000 -0400
20384 @@ -3680,7 +3680,7 @@ void ata_do_eh(struct ata_port *ap, ata_
20385 */
20386 void ata_std_error_handler(struct ata_port *ap)
20387 {
20388 - struct ata_port_operations *ops = ap->ops;
20389 + const struct ata_port_operations *ops = ap->ops;
20390 ata_reset_fn_t hardreset = ops->hardreset;
20391
20392 /* ignore built-in hardreset if SCR access is not available */
20393 diff -urNp linux-2.6.34.1/drivers/ata/libata-pmp.c linux-2.6.34.1/drivers/ata/libata-pmp.c
20394 --- linux-2.6.34.1/drivers/ata/libata-pmp.c 2010-07-05 14:24:10.000000000 -0400
20395 +++ linux-2.6.34.1/drivers/ata/libata-pmp.c 2010-07-07 09:04:50.000000000 -0400
20396 @@ -842,7 +842,7 @@ static int sata_pmp_handle_link_fail(str
20397 */
20398 static int sata_pmp_eh_recover(struct ata_port *ap)
20399 {
20400 - struct ata_port_operations *ops = ap->ops;
20401 + const struct ata_port_operations *ops = ap->ops;
20402 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
20403 struct ata_link *pmp_link = &ap->link;
20404 struct ata_device *pmp_dev = pmp_link->device;
20405 diff -urNp linux-2.6.34.1/drivers/ata/pata_acpi.c linux-2.6.34.1/drivers/ata/pata_acpi.c
20406 --- linux-2.6.34.1/drivers/ata/pata_acpi.c 2010-07-05 14:24:10.000000000 -0400
20407 +++ linux-2.6.34.1/drivers/ata/pata_acpi.c 2010-07-07 09:04:50.000000000 -0400
20408 @@ -216,7 +216,7 @@ static struct scsi_host_template pacpi_s
20409 ATA_BMDMA_SHT(DRV_NAME),
20410 };
20411
20412 -static struct ata_port_operations pacpi_ops = {
20413 +static const struct ata_port_operations pacpi_ops = {
20414 .inherits = &ata_bmdma_port_ops,
20415 .qc_issue = pacpi_qc_issue,
20416 .cable_detect = pacpi_cable_detect,
20417 diff -urNp linux-2.6.34.1/drivers/ata/pata_ali.c linux-2.6.34.1/drivers/ata/pata_ali.c
20418 --- linux-2.6.34.1/drivers/ata/pata_ali.c 2010-07-05 14:24:10.000000000 -0400
20419 +++ linux-2.6.34.1/drivers/ata/pata_ali.c 2010-07-07 09:04:50.000000000 -0400
20420 @@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht
20421 * Port operations for PIO only ALi
20422 */
20423
20424 -static struct ata_port_operations ali_early_port_ops = {
20425 +static const struct ata_port_operations ali_early_port_ops = {
20426 .inherits = &ata_sff_port_ops,
20427 .cable_detect = ata_cable_40wire,
20428 .set_piomode = ali_set_piomode,
20429 @@ -380,7 +380,7 @@ static const struct ata_port_operations
20430 * Port operations for DMA capable ALi without cable
20431 * detect
20432 */
20433 -static struct ata_port_operations ali_20_port_ops = {
20434 +static const struct ata_port_operations ali_20_port_ops = {
20435 .inherits = &ali_dma_base_ops,
20436 .cable_detect = ata_cable_40wire,
20437 .mode_filter = ali_20_filter,
20438 @@ -391,7 +391,7 @@ static struct ata_port_operations ali_20
20439 /*
20440 * Port operations for DMA capable ALi with cable detect
20441 */
20442 -static struct ata_port_operations ali_c2_port_ops = {
20443 +static const struct ata_port_operations ali_c2_port_ops = {
20444 .inherits = &ali_dma_base_ops,
20445 .check_atapi_dma = ali_check_atapi_dma,
20446 .cable_detect = ali_c2_cable_detect,
20447 @@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2
20448 /*
20449 * Port operations for DMA capable ALi with cable detect
20450 */
20451 -static struct ata_port_operations ali_c4_port_ops = {
20452 +static const struct ata_port_operations ali_c4_port_ops = {
20453 .inherits = &ali_dma_base_ops,
20454 .check_atapi_dma = ali_check_atapi_dma,
20455 .cable_detect = ali_c2_cable_detect,
20456 @@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4
20457 /*
20458 * Port operations for DMA capable ALi with cable detect and LBA48
20459 */
20460 -static struct ata_port_operations ali_c5_port_ops = {
20461 +static const struct ata_port_operations ali_c5_port_ops = {
20462 .inherits = &ali_dma_base_ops,
20463 .check_atapi_dma = ali_check_atapi_dma,
20464 .dev_config = ali_warn_atapi_dma,
20465 diff -urNp linux-2.6.34.1/drivers/ata/pata_amd.c linux-2.6.34.1/drivers/ata/pata_amd.c
20466 --- linux-2.6.34.1/drivers/ata/pata_amd.c 2010-07-05 14:24:10.000000000 -0400
20467 +++ linux-2.6.34.1/drivers/ata/pata_amd.c 2010-07-07 09:04:50.000000000 -0400
20468 @@ -397,28 +397,28 @@ static const struct ata_port_operations
20469 .prereset = amd_pre_reset,
20470 };
20471
20472 -static struct ata_port_operations amd33_port_ops = {
20473 +static const struct ata_port_operations amd33_port_ops = {
20474 .inherits = &amd_base_port_ops,
20475 .cable_detect = ata_cable_40wire,
20476 .set_piomode = amd33_set_piomode,
20477 .set_dmamode = amd33_set_dmamode,
20478 };
20479
20480 -static struct ata_port_operations amd66_port_ops = {
20481 +static const struct ata_port_operations amd66_port_ops = {
20482 .inherits = &amd_base_port_ops,
20483 .cable_detect = ata_cable_unknown,
20484 .set_piomode = amd66_set_piomode,
20485 .set_dmamode = amd66_set_dmamode,
20486 };
20487
20488 -static struct ata_port_operations amd100_port_ops = {
20489 +static const struct ata_port_operations amd100_port_ops = {
20490 .inherits = &amd_base_port_ops,
20491 .cable_detect = ata_cable_unknown,
20492 .set_piomode = amd100_set_piomode,
20493 .set_dmamode = amd100_set_dmamode,
20494 };
20495
20496 -static struct ata_port_operations amd133_port_ops = {
20497 +static const struct ata_port_operations amd133_port_ops = {
20498 .inherits = &amd_base_port_ops,
20499 .cable_detect = amd_cable_detect,
20500 .set_piomode = amd133_set_piomode,
20501 @@ -433,13 +433,13 @@ static const struct ata_port_operations
20502 .host_stop = nv_host_stop,
20503 };
20504
20505 -static struct ata_port_operations nv100_port_ops = {
20506 +static const struct ata_port_operations nv100_port_ops = {
20507 .inherits = &nv_base_port_ops,
20508 .set_piomode = nv100_set_piomode,
20509 .set_dmamode = nv100_set_dmamode,
20510 };
20511
20512 -static struct ata_port_operations nv133_port_ops = {
20513 +static const struct ata_port_operations nv133_port_ops = {
20514 .inherits = &nv_base_port_ops,
20515 .set_piomode = nv133_set_piomode,
20516 .set_dmamode = nv133_set_dmamode,
20517 diff -urNp linux-2.6.34.1/drivers/ata/pata_artop.c linux-2.6.34.1/drivers/ata/pata_artop.c
20518 --- linux-2.6.34.1/drivers/ata/pata_artop.c 2010-07-05 14:24:10.000000000 -0400
20519 +++ linux-2.6.34.1/drivers/ata/pata_artop.c 2010-07-07 09:04:50.000000000 -0400
20520 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
20521 ATA_BMDMA_SHT(DRV_NAME),
20522 };
20523
20524 -static struct ata_port_operations artop6210_ops = {
20525 +static const struct ata_port_operations artop6210_ops = {
20526 .inherits = &ata_bmdma_port_ops,
20527 .cable_detect = ata_cable_40wire,
20528 .set_piomode = artop6210_set_piomode,
20529 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
20530 .qc_defer = artop6210_qc_defer,
20531 };
20532
20533 -static struct ata_port_operations artop6260_ops = {
20534 +static const struct ata_port_operations artop6260_ops = {
20535 .inherits = &ata_bmdma_port_ops,
20536 .cable_detect = artop6260_cable_detect,
20537 .set_piomode = artop6260_set_piomode,
20538 diff -urNp linux-2.6.34.1/drivers/ata/pata_at32.c linux-2.6.34.1/drivers/ata/pata_at32.c
20539 --- linux-2.6.34.1/drivers/ata/pata_at32.c 2010-07-05 14:24:10.000000000 -0400
20540 +++ linux-2.6.34.1/drivers/ata/pata_at32.c 2010-07-07 09:04:50.000000000 -0400
20541 @@ -173,7 +173,7 @@ static struct scsi_host_template at32_sh
20542 ATA_PIO_SHT(DRV_NAME),
20543 };
20544
20545 -static struct ata_port_operations at32_port_ops = {
20546 +static const struct ata_port_operations at32_port_ops = {
20547 .inherits = &ata_sff_port_ops,
20548 .cable_detect = ata_cable_40wire,
20549 .set_piomode = pata_at32_set_piomode,
20550 diff -urNp linux-2.6.34.1/drivers/ata/pata_at91.c linux-2.6.34.1/drivers/ata/pata_at91.c
20551 --- linux-2.6.34.1/drivers/ata/pata_at91.c 2010-07-05 14:24:10.000000000 -0400
20552 +++ linux-2.6.34.1/drivers/ata/pata_at91.c 2010-07-07 09:04:50.000000000 -0400
20553 @@ -196,7 +196,7 @@ static struct scsi_host_template pata_at
20554 ATA_PIO_SHT(DRV_NAME),
20555 };
20556
20557 -static struct ata_port_operations pata_at91_port_ops = {
20558 +static const struct ata_port_operations pata_at91_port_ops = {
20559 .inherits = &ata_sff_port_ops,
20560
20561 .sff_data_xfer = pata_at91_data_xfer_noirq,
20562 diff -urNp linux-2.6.34.1/drivers/ata/pata_atiixp.c linux-2.6.34.1/drivers/ata/pata_atiixp.c
20563 --- linux-2.6.34.1/drivers/ata/pata_atiixp.c 2010-07-05 14:24:10.000000000 -0400
20564 +++ linux-2.6.34.1/drivers/ata/pata_atiixp.c 2010-07-07 09:04:50.000000000 -0400
20565 @@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_
20566 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
20567 };
20568
20569 -static struct ata_port_operations atiixp_port_ops = {
20570 +static const struct ata_port_operations atiixp_port_ops = {
20571 .inherits = &ata_bmdma_port_ops,
20572
20573 .qc_prep = ata_sff_dumb_qc_prep,
20574 diff -urNp linux-2.6.34.1/drivers/ata/pata_atp867x.c linux-2.6.34.1/drivers/ata/pata_atp867x.c
20575 --- linux-2.6.34.1/drivers/ata/pata_atp867x.c 2010-07-05 14:24:10.000000000 -0400
20576 +++ linux-2.6.34.1/drivers/ata/pata_atp867x.c 2010-07-07 09:04:50.000000000 -0400
20577 @@ -275,7 +275,7 @@ static struct scsi_host_template atp867x
20578 ATA_BMDMA_SHT(DRV_NAME),
20579 };
20580
20581 -static struct ata_port_operations atp867x_ops = {
20582 +static const struct ata_port_operations atp867x_ops = {
20583 .inherits = &ata_bmdma_port_ops,
20584 .cable_detect = atp867x_cable_detect,
20585 .set_piomode = atp867x_set_piomode,
20586 diff -urNp linux-2.6.34.1/drivers/ata/pata_bf54x.c linux-2.6.34.1/drivers/ata/pata_bf54x.c
20587 --- linux-2.6.34.1/drivers/ata/pata_bf54x.c 2010-07-05 14:24:10.000000000 -0400
20588 +++ linux-2.6.34.1/drivers/ata/pata_bf54x.c 2010-07-07 09:04:50.000000000 -0400
20589 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
20590 .dma_boundary = ATA_DMA_BOUNDARY,
20591 };
20592
20593 -static struct ata_port_operations bfin_pata_ops = {
20594 +static const struct ata_port_operations bfin_pata_ops = {
20595 .inherits = &ata_sff_port_ops,
20596
20597 .set_piomode = bfin_set_piomode,
20598 diff -urNp linux-2.6.34.1/drivers/ata/pata_cmd640.c linux-2.6.34.1/drivers/ata/pata_cmd640.c
20599 --- linux-2.6.34.1/drivers/ata/pata_cmd640.c 2010-07-05 14:24:10.000000000 -0400
20600 +++ linux-2.6.34.1/drivers/ata/pata_cmd640.c 2010-07-07 09:04:50.000000000 -0400
20601 @@ -169,7 +169,7 @@ static struct scsi_host_template cmd640_
20602 ATA_BMDMA_SHT(DRV_NAME),
20603 };
20604
20605 -static struct ata_port_operations cmd640_port_ops = {
20606 +static const struct ata_port_operations cmd640_port_ops = {
20607 .inherits = &ata_bmdma_port_ops,
20608 /* In theory xfer_noirq is not needed once we kill the prefetcher */
20609 .sff_data_xfer = ata_sff_data_xfer_noirq,
20610 diff -urNp linux-2.6.34.1/drivers/ata/pata_cmd64x.c linux-2.6.34.1/drivers/ata/pata_cmd64x.c
20611 --- linux-2.6.34.1/drivers/ata/pata_cmd64x.c 2010-07-05 14:24:10.000000000 -0400
20612 +++ linux-2.6.34.1/drivers/ata/pata_cmd64x.c 2010-07-07 09:04:50.000000000 -0400
20613 @@ -274,18 +274,18 @@ static const struct ata_port_operations
20614 .set_dmamode = cmd64x_set_dmamode,
20615 };
20616
20617 -static struct ata_port_operations cmd64x_port_ops = {
20618 +static const struct ata_port_operations cmd64x_port_ops = {
20619 .inherits = &cmd64x_base_ops,
20620 .cable_detect = ata_cable_40wire,
20621 };
20622
20623 -static struct ata_port_operations cmd646r1_port_ops = {
20624 +static const struct ata_port_operations cmd646r1_port_ops = {
20625 .inherits = &cmd64x_base_ops,
20626 .bmdma_stop = cmd646r1_bmdma_stop,
20627 .cable_detect = ata_cable_40wire,
20628 };
20629
20630 -static struct ata_port_operations cmd648_port_ops = {
20631 +static const struct ata_port_operations cmd648_port_ops = {
20632 .inherits = &cmd64x_base_ops,
20633 .bmdma_stop = cmd648_bmdma_stop,
20634 .cable_detect = cmd648_cable_detect,
20635 diff -urNp linux-2.6.34.1/drivers/ata/pata_cs5520.c linux-2.6.34.1/drivers/ata/pata_cs5520.c
20636 --- linux-2.6.34.1/drivers/ata/pata_cs5520.c 2010-07-05 14:24:10.000000000 -0400
20637 +++ linux-2.6.34.1/drivers/ata/pata_cs5520.c 2010-07-07 09:04:50.000000000 -0400
20638 @@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_
20639 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
20640 };
20641
20642 -static struct ata_port_operations cs5520_port_ops = {
20643 +static const struct ata_port_operations cs5520_port_ops = {
20644 .inherits = &ata_bmdma_port_ops,
20645 .qc_prep = ata_sff_dumb_qc_prep,
20646 .cable_detect = ata_cable_40wire,
20647 diff -urNp linux-2.6.34.1/drivers/ata/pata_cs5530.c linux-2.6.34.1/drivers/ata/pata_cs5530.c
20648 --- linux-2.6.34.1/drivers/ata/pata_cs5530.c 2010-07-05 14:24:10.000000000 -0400
20649 +++ linux-2.6.34.1/drivers/ata/pata_cs5530.c 2010-07-07 09:04:50.000000000 -0400
20650 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
20651 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
20652 };
20653
20654 -static struct ata_port_operations cs5530_port_ops = {
20655 +static const struct ata_port_operations cs5530_port_ops = {
20656 .inherits = &ata_bmdma_port_ops,
20657
20658 .qc_prep = ata_sff_dumb_qc_prep,
20659 diff -urNp linux-2.6.34.1/drivers/ata/pata_cs5535.c linux-2.6.34.1/drivers/ata/pata_cs5535.c
20660 --- linux-2.6.34.1/drivers/ata/pata_cs5535.c 2010-07-05 14:24:10.000000000 -0400
20661 +++ linux-2.6.34.1/drivers/ata/pata_cs5535.c 2010-07-07 09:04:50.000000000 -0400
20662 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
20663 ATA_BMDMA_SHT(DRV_NAME),
20664 };
20665
20666 -static struct ata_port_operations cs5535_port_ops = {
20667 +static const struct ata_port_operations cs5535_port_ops = {
20668 .inherits = &ata_bmdma_port_ops,
20669 .cable_detect = cs5535_cable_detect,
20670 .set_piomode = cs5535_set_piomode,
20671 diff -urNp linux-2.6.34.1/drivers/ata/pata_cs5536.c linux-2.6.34.1/drivers/ata/pata_cs5536.c
20672 --- linux-2.6.34.1/drivers/ata/pata_cs5536.c 2010-07-05 14:24:10.000000000 -0400
20673 +++ linux-2.6.34.1/drivers/ata/pata_cs5536.c 2010-07-07 09:04:50.000000000 -0400
20674 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
20675 ATA_BMDMA_SHT(DRV_NAME),
20676 };
20677
20678 -static struct ata_port_operations cs5536_port_ops = {
20679 +static const struct ata_port_operations cs5536_port_ops = {
20680 .inherits = &ata_bmdma32_port_ops,
20681 .cable_detect = cs5536_cable_detect,
20682 .set_piomode = cs5536_set_piomode,
20683 diff -urNp linux-2.6.34.1/drivers/ata/pata_cypress.c linux-2.6.34.1/drivers/ata/pata_cypress.c
20684 --- linux-2.6.34.1/drivers/ata/pata_cypress.c 2010-07-05 14:24:10.000000000 -0400
20685 +++ linux-2.6.34.1/drivers/ata/pata_cypress.c 2010-07-07 09:04:50.000000000 -0400
20686 @@ -115,7 +115,7 @@ static struct scsi_host_template cy82c69
20687 ATA_BMDMA_SHT(DRV_NAME),
20688 };
20689
20690 -static struct ata_port_operations cy82c693_port_ops = {
20691 +static const struct ata_port_operations cy82c693_port_ops = {
20692 .inherits = &ata_bmdma_port_ops,
20693 .cable_detect = ata_cable_40wire,
20694 .set_piomode = cy82c693_set_piomode,
20695 diff -urNp linux-2.6.34.1/drivers/ata/pata_efar.c linux-2.6.34.1/drivers/ata/pata_efar.c
20696 --- linux-2.6.34.1/drivers/ata/pata_efar.c 2010-07-05 14:24:10.000000000 -0400
20697 +++ linux-2.6.34.1/drivers/ata/pata_efar.c 2010-07-07 09:04:50.000000000 -0400
20698 @@ -238,7 +238,7 @@ static struct scsi_host_template efar_sh
20699 ATA_BMDMA_SHT(DRV_NAME),
20700 };
20701
20702 -static struct ata_port_operations efar_ops = {
20703 +static const struct ata_port_operations efar_ops = {
20704 .inherits = &ata_bmdma_port_ops,
20705 .cable_detect = efar_cable_detect,
20706 .set_piomode = efar_set_piomode,
20707 diff -urNp linux-2.6.34.1/drivers/ata/pata_hpt366.c linux-2.6.34.1/drivers/ata/pata_hpt366.c
20708 --- linux-2.6.34.1/drivers/ata/pata_hpt366.c 2010-07-05 14:24:10.000000000 -0400
20709 +++ linux-2.6.34.1/drivers/ata/pata_hpt366.c 2010-07-07 09:04:50.000000000 -0400
20710 @@ -269,7 +269,7 @@ static struct scsi_host_template hpt36x_
20711 * Configuration for HPT366/68
20712 */
20713
20714 -static struct ata_port_operations hpt366_port_ops = {
20715 +static const struct ata_port_operations hpt366_port_ops = {
20716 .inherits = &ata_bmdma_port_ops,
20717 .cable_detect = hpt36x_cable_detect,
20718 .mode_filter = hpt366_filter,
20719 diff -urNp linux-2.6.34.1/drivers/ata/pata_hpt37x.c linux-2.6.34.1/drivers/ata/pata_hpt37x.c
20720 --- linux-2.6.34.1/drivers/ata/pata_hpt37x.c 2010-07-05 14:24:10.000000000 -0400
20721 +++ linux-2.6.34.1/drivers/ata/pata_hpt37x.c 2010-07-07 09:04:50.000000000 -0400
20722 @@ -564,7 +564,7 @@ static struct scsi_host_template hpt37x_
20723 * Configuration for HPT370
20724 */
20725
20726 -static struct ata_port_operations hpt370_port_ops = {
20727 +static const struct ata_port_operations hpt370_port_ops = {
20728 .inherits = &ata_bmdma_port_ops,
20729
20730 .bmdma_stop = hpt370_bmdma_stop,
20731 @@ -580,7 +580,7 @@ static struct ata_port_operations hpt370
20732 * Configuration for HPT370A. Close to 370 but less filters
20733 */
20734
20735 -static struct ata_port_operations hpt370a_port_ops = {
20736 +static const struct ata_port_operations hpt370a_port_ops = {
20737 .inherits = &hpt370_port_ops,
20738 .mode_filter = hpt370a_filter,
20739 };
20740 @@ -590,7 +590,7 @@ static struct ata_port_operations hpt370
20741 * and DMA mode setting functionality.
20742 */
20743
20744 -static struct ata_port_operations hpt372_port_ops = {
20745 +static const struct ata_port_operations hpt372_port_ops = {
20746 .inherits = &ata_bmdma_port_ops,
20747
20748 .bmdma_stop = hpt37x_bmdma_stop,
20749 @@ -606,7 +606,7 @@ static struct ata_port_operations hpt372
20750 * but we have a different cable detection procedure for function 1.
20751 */
20752
20753 -static struct ata_port_operations hpt374_fn1_port_ops = {
20754 +static const struct ata_port_operations hpt374_fn1_port_ops = {
20755 .inherits = &hpt372_port_ops,
20756 .cable_detect = hpt374_fn1_cable_detect,
20757 .prereset = hpt37x_pre_reset,
20758 diff -urNp linux-2.6.34.1/drivers/ata/pata_hpt3x2n.c linux-2.6.34.1/drivers/ata/pata_hpt3x2n.c
20759 --- linux-2.6.34.1/drivers/ata/pata_hpt3x2n.c 2010-07-05 14:24:10.000000000 -0400
20760 +++ linux-2.6.34.1/drivers/ata/pata_hpt3x2n.c 2010-07-07 09:04:50.000000000 -0400
20761 @@ -331,7 +331,7 @@ static struct scsi_host_template hpt3x2n
20762 * Configuration for HPT3x2n.
20763 */
20764
20765 -static struct ata_port_operations hpt3x2n_port_ops = {
20766 +static const struct ata_port_operations hpt3x2n_port_ops = {
20767 .inherits = &ata_bmdma_port_ops,
20768
20769 .bmdma_stop = hpt3x2n_bmdma_stop,
20770 diff -urNp linux-2.6.34.1/drivers/ata/pata_hpt3x3.c linux-2.6.34.1/drivers/ata/pata_hpt3x3.c
20771 --- linux-2.6.34.1/drivers/ata/pata_hpt3x3.c 2010-07-05 14:24:10.000000000 -0400
20772 +++ linux-2.6.34.1/drivers/ata/pata_hpt3x3.c 2010-07-07 09:04:50.000000000 -0400
20773 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
20774 ATA_BMDMA_SHT(DRV_NAME),
20775 };
20776
20777 -static struct ata_port_operations hpt3x3_port_ops = {
20778 +static const struct ata_port_operations hpt3x3_port_ops = {
20779 .inherits = &ata_bmdma_port_ops,
20780 .cable_detect = ata_cable_40wire,
20781 .set_piomode = hpt3x3_set_piomode,
20782 diff -urNp linux-2.6.34.1/drivers/ata/pata_icside.c linux-2.6.34.1/drivers/ata/pata_icside.c
20783 --- linux-2.6.34.1/drivers/ata/pata_icside.c 2010-07-05 14:24:10.000000000 -0400
20784 +++ linux-2.6.34.1/drivers/ata/pata_icside.c 2010-07-07 09:04:50.000000000 -0400
20785 @@ -320,7 +320,7 @@ static void pata_icside_postreset(struct
20786 }
20787 }
20788
20789 -static struct ata_port_operations pata_icside_port_ops = {
20790 +static const struct ata_port_operations pata_icside_port_ops = {
20791 .inherits = &ata_sff_port_ops,
20792 /* no need to build any PRD tables for DMA */
20793 .qc_prep = ata_noop_qc_prep,
20794 diff -urNp linux-2.6.34.1/drivers/ata/pata_isapnp.c linux-2.6.34.1/drivers/ata/pata_isapnp.c
20795 --- linux-2.6.34.1/drivers/ata/pata_isapnp.c 2010-07-05 14:24:10.000000000 -0400
20796 +++ linux-2.6.34.1/drivers/ata/pata_isapnp.c 2010-07-07 09:04:50.000000000 -0400
20797 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
20798 ATA_PIO_SHT(DRV_NAME),
20799 };
20800
20801 -static struct ata_port_operations isapnp_port_ops = {
20802 +static const struct ata_port_operations isapnp_port_ops = {
20803 .inherits = &ata_sff_port_ops,
20804 .cable_detect = ata_cable_40wire,
20805 };
20806
20807 -static struct ata_port_operations isapnp_noalt_port_ops = {
20808 +static const struct ata_port_operations isapnp_noalt_port_ops = {
20809 .inherits = &ata_sff_port_ops,
20810 .cable_detect = ata_cable_40wire,
20811 /* No altstatus so we don't want to use the lost interrupt poll */
20812 diff -urNp linux-2.6.34.1/drivers/ata/pata_it8213.c linux-2.6.34.1/drivers/ata/pata_it8213.c
20813 --- linux-2.6.34.1/drivers/ata/pata_it8213.c 2010-07-05 14:24:10.000000000 -0400
20814 +++ linux-2.6.34.1/drivers/ata/pata_it8213.c 2010-07-07 09:04:50.000000000 -0400
20815 @@ -233,7 +233,7 @@ static struct scsi_host_template it8213_
20816 };
20817
20818
20819 -static struct ata_port_operations it8213_ops = {
20820 +static const struct ata_port_operations it8213_ops = {
20821 .inherits = &ata_bmdma_port_ops,
20822 .cable_detect = it8213_cable_detect,
20823 .set_piomode = it8213_set_piomode,
20824 diff -urNp linux-2.6.34.1/drivers/ata/pata_it821x.c linux-2.6.34.1/drivers/ata/pata_it821x.c
20825 --- linux-2.6.34.1/drivers/ata/pata_it821x.c 2010-07-05 14:24:10.000000000 -0400
20826 +++ linux-2.6.34.1/drivers/ata/pata_it821x.c 2010-07-07 09:04:50.000000000 -0400
20827 @@ -801,7 +801,7 @@ static struct scsi_host_template it821x_
20828 ATA_BMDMA_SHT(DRV_NAME),
20829 };
20830
20831 -static struct ata_port_operations it821x_smart_port_ops = {
20832 +static const struct ata_port_operations it821x_smart_port_ops = {
20833 .inherits = &ata_bmdma_port_ops,
20834
20835 .check_atapi_dma= it821x_check_atapi_dma,
20836 @@ -815,7 +815,7 @@ static struct ata_port_operations it821x
20837 .port_start = it821x_port_start,
20838 };
20839
20840 -static struct ata_port_operations it821x_passthru_port_ops = {
20841 +static const struct ata_port_operations it821x_passthru_port_ops = {
20842 .inherits = &ata_bmdma_port_ops,
20843
20844 .check_atapi_dma= it821x_check_atapi_dma,
20845 @@ -831,7 +831,7 @@ static struct ata_port_operations it821x
20846 .port_start = it821x_port_start,
20847 };
20848
20849 -static struct ata_port_operations it821x_rdc_port_ops = {
20850 +static const struct ata_port_operations it821x_rdc_port_ops = {
20851 .inherits = &ata_bmdma_port_ops,
20852
20853 .check_atapi_dma= it821x_check_atapi_dma,
20854 diff -urNp linux-2.6.34.1/drivers/ata/pata_ixp4xx_cf.c linux-2.6.34.1/drivers/ata/pata_ixp4xx_cf.c
20855 --- linux-2.6.34.1/drivers/ata/pata_ixp4xx_cf.c 2010-07-05 14:24:10.000000000 -0400
20856 +++ linux-2.6.34.1/drivers/ata/pata_ixp4xx_cf.c 2010-07-07 09:04:50.000000000 -0400
20857 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
20858 ATA_PIO_SHT(DRV_NAME),
20859 };
20860
20861 -static struct ata_port_operations ixp4xx_port_ops = {
20862 +static const struct ata_port_operations ixp4xx_port_ops = {
20863 .inherits = &ata_sff_port_ops,
20864 .sff_data_xfer = ixp4xx_mmio_data_xfer,
20865 .cable_detect = ata_cable_40wire,
20866 diff -urNp linux-2.6.34.1/drivers/ata/pata_jmicron.c linux-2.6.34.1/drivers/ata/pata_jmicron.c
20867 --- linux-2.6.34.1/drivers/ata/pata_jmicron.c 2010-07-05 14:24:10.000000000 -0400
20868 +++ linux-2.6.34.1/drivers/ata/pata_jmicron.c 2010-07-07 09:04:50.000000000 -0400
20869 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
20870 ATA_BMDMA_SHT(DRV_NAME),
20871 };
20872
20873 -static struct ata_port_operations jmicron_ops = {
20874 +static const struct ata_port_operations jmicron_ops = {
20875 .inherits = &ata_bmdma_port_ops,
20876 .prereset = jmicron_pre_reset,
20877 };
20878 diff -urNp linux-2.6.34.1/drivers/ata/pata_legacy.c linux-2.6.34.1/drivers/ata/pata_legacy.c
20879 --- linux-2.6.34.1/drivers/ata/pata_legacy.c 2010-07-05 14:24:10.000000000 -0400
20880 +++ linux-2.6.34.1/drivers/ata/pata_legacy.c 2010-07-07 09:04:50.000000000 -0400
20881 @@ -113,7 +113,7 @@ struct legacy_probe {
20882
20883 struct legacy_controller {
20884 const char *name;
20885 - struct ata_port_operations *ops;
20886 + const struct ata_port_operations *ops;
20887 unsigned int pio_mask;
20888 unsigned int flags;
20889 unsigned int pflags;
20890 @@ -230,12 +230,12 @@ static const struct ata_port_operations
20891 * pio_mask as well.
20892 */
20893
20894 -static struct ata_port_operations simple_port_ops = {
20895 +static const struct ata_port_operations simple_port_ops = {
20896 .inherits = &legacy_base_port_ops,
20897 .sff_data_xfer = ata_sff_data_xfer_noirq,
20898 };
20899
20900 -static struct ata_port_operations legacy_port_ops = {
20901 +static const struct ata_port_operations legacy_port_ops = {
20902 .inherits = &legacy_base_port_ops,
20903 .sff_data_xfer = ata_sff_data_xfer_noirq,
20904 .set_mode = legacy_set_mode,
20905 @@ -331,7 +331,7 @@ static unsigned int pdc_data_xfer_vlb(st
20906 return buflen;
20907 }
20908
20909 -static struct ata_port_operations pdc20230_port_ops = {
20910 +static const struct ata_port_operations pdc20230_port_ops = {
20911 .inherits = &legacy_base_port_ops,
20912 .set_piomode = pdc20230_set_piomode,
20913 .sff_data_xfer = pdc_data_xfer_vlb,
20914 @@ -364,7 +364,7 @@ static void ht6560a_set_piomode(struct a
20915 ioread8(ap->ioaddr.status_addr);
20916 }
20917
20918 -static struct ata_port_operations ht6560a_port_ops = {
20919 +static const struct ata_port_operations ht6560a_port_ops = {
20920 .inherits = &legacy_base_port_ops,
20921 .set_piomode = ht6560a_set_piomode,
20922 };
20923 @@ -407,7 +407,7 @@ static void ht6560b_set_piomode(struct a
20924 ioread8(ap->ioaddr.status_addr);
20925 }
20926
20927 -static struct ata_port_operations ht6560b_port_ops = {
20928 +static const struct ata_port_operations ht6560b_port_ops = {
20929 .inherits = &legacy_base_port_ops,
20930 .set_piomode = ht6560b_set_piomode,
20931 };
20932 @@ -506,7 +506,7 @@ static void opti82c611a_set_piomode(stru
20933 }
20934
20935
20936 -static struct ata_port_operations opti82c611a_port_ops = {
20937 +static const struct ata_port_operations opti82c611a_port_ops = {
20938 .inherits = &legacy_base_port_ops,
20939 .set_piomode = opti82c611a_set_piomode,
20940 };
20941 @@ -616,7 +616,7 @@ static unsigned int opti82c46x_qc_issue(
20942 return ata_sff_qc_issue(qc);
20943 }
20944
20945 -static struct ata_port_operations opti82c46x_port_ops = {
20946 +static const struct ata_port_operations opti82c46x_port_ops = {
20947 .inherits = &legacy_base_port_ops,
20948 .set_piomode = opti82c46x_set_piomode,
20949 .qc_issue = opti82c46x_qc_issue,
20950 @@ -778,20 +778,20 @@ static int qdi_port(struct platform_devi
20951 return 0;
20952 }
20953
20954 -static struct ata_port_operations qdi6500_port_ops = {
20955 +static const struct ata_port_operations qdi6500_port_ops = {
20956 .inherits = &legacy_base_port_ops,
20957 .set_piomode = qdi6500_set_piomode,
20958 .qc_issue = qdi_qc_issue,
20959 .sff_data_xfer = vlb32_data_xfer,
20960 };
20961
20962 -static struct ata_port_operations qdi6580_port_ops = {
20963 +static const struct ata_port_operations qdi6580_port_ops = {
20964 .inherits = &legacy_base_port_ops,
20965 .set_piomode = qdi6580_set_piomode,
20966 .sff_data_xfer = vlb32_data_xfer,
20967 };
20968
20969 -static struct ata_port_operations qdi6580dp_port_ops = {
20970 +static const struct ata_port_operations qdi6580dp_port_ops = {
20971 .inherits = &legacy_base_port_ops,
20972 .set_piomode = qdi6580dp_set_piomode,
20973 .qc_issue = qdi_qc_issue,
20974 @@ -863,7 +863,7 @@ static int winbond_port(struct platform_
20975 return 0;
20976 }
20977
20978 -static struct ata_port_operations winbond_port_ops = {
20979 +static const struct ata_port_operations winbond_port_ops = {
20980 .inherits = &legacy_base_port_ops,
20981 .set_piomode = winbond_set_piomode,
20982 .sff_data_xfer = vlb32_data_xfer,
20983 @@ -986,7 +986,7 @@ static __init int legacy_init_one(struct
20984 int pio_modes = controller->pio_mask;
20985 unsigned long io = probe->port;
20986 u32 mask = (1 << probe->slot);
20987 - struct ata_port_operations *ops = controller->ops;
20988 + const struct ata_port_operations *ops = controller->ops;
20989 struct legacy_data *ld = &legacy_data[probe->slot];
20990 struct ata_host *host = NULL;
20991 struct ata_port *ap;
20992 diff -urNp linux-2.6.34.1/drivers/ata/pata_macio.c linux-2.6.34.1/drivers/ata/pata_macio.c
20993 --- linux-2.6.34.1/drivers/ata/pata_macio.c 2010-07-05 14:24:10.000000000 -0400
20994 +++ linux-2.6.34.1/drivers/ata/pata_macio.c 2010-07-07 09:04:50.000000000 -0400
20995 @@ -916,7 +916,7 @@ static struct scsi_host_template pata_ma
20996 .slave_configure = pata_macio_slave_config,
20997 };
20998
20999 -static struct ata_port_operations pata_macio_ops = {
21000 +static const struct ata_port_operations pata_macio_ops = {
21001 .inherits = &ata_sff_port_ops,
21002
21003 .freeze = pata_macio_freeze,
21004 diff -urNp linux-2.6.34.1/drivers/ata/pata_marvell.c linux-2.6.34.1/drivers/ata/pata_marvell.c
21005 --- linux-2.6.34.1/drivers/ata/pata_marvell.c 2010-07-05 14:24:10.000000000 -0400
21006 +++ linux-2.6.34.1/drivers/ata/pata_marvell.c 2010-07-07 09:04:50.000000000 -0400
21007 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
21008 ATA_BMDMA_SHT(DRV_NAME),
21009 };
21010
21011 -static struct ata_port_operations marvell_ops = {
21012 +static const struct ata_port_operations marvell_ops = {
21013 .inherits = &ata_bmdma_port_ops,
21014 .cable_detect = marvell_cable_detect,
21015 .prereset = marvell_pre_reset,
21016 diff -urNp linux-2.6.34.1/drivers/ata/pata_mpc52xx.c linux-2.6.34.1/drivers/ata/pata_mpc52xx.c
21017 --- linux-2.6.34.1/drivers/ata/pata_mpc52xx.c 2010-07-05 14:24:10.000000000 -0400
21018 +++ linux-2.6.34.1/drivers/ata/pata_mpc52xx.c 2010-07-07 09:04:50.000000000 -0400
21019 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
21020 ATA_PIO_SHT(DRV_NAME),
21021 };
21022
21023 -static struct ata_port_operations mpc52xx_ata_port_ops = {
21024 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
21025 .inherits = &ata_sff_port_ops,
21026 .sff_dev_select = mpc52xx_ata_dev_select,
21027 .set_piomode = mpc52xx_ata_set_piomode,
21028 diff -urNp linux-2.6.34.1/drivers/ata/pata_mpiix.c linux-2.6.34.1/drivers/ata/pata_mpiix.c
21029 --- linux-2.6.34.1/drivers/ata/pata_mpiix.c 2010-07-05 14:24:10.000000000 -0400
21030 +++ linux-2.6.34.1/drivers/ata/pata_mpiix.c 2010-07-07 09:04:50.000000000 -0400
21031 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
21032 ATA_PIO_SHT(DRV_NAME),
21033 };
21034
21035 -static struct ata_port_operations mpiix_port_ops = {
21036 +static const struct ata_port_operations mpiix_port_ops = {
21037 .inherits = &ata_sff_port_ops,
21038 .qc_issue = mpiix_qc_issue,
21039 .cable_detect = ata_cable_40wire,
21040 diff -urNp linux-2.6.34.1/drivers/ata/pata_netcell.c linux-2.6.34.1/drivers/ata/pata_netcell.c
21041 --- linux-2.6.34.1/drivers/ata/pata_netcell.c 2010-07-05 14:24:10.000000000 -0400
21042 +++ linux-2.6.34.1/drivers/ata/pata_netcell.c 2010-07-07 09:04:50.000000000 -0400
21043 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
21044 ATA_BMDMA_SHT(DRV_NAME),
21045 };
21046
21047 -static struct ata_port_operations netcell_ops = {
21048 +static const struct ata_port_operations netcell_ops = {
21049 .inherits = &ata_bmdma_port_ops,
21050 .cable_detect = ata_cable_80wire,
21051 .read_id = netcell_read_id,
21052 diff -urNp linux-2.6.34.1/drivers/ata/pata_ninja32.c linux-2.6.34.1/drivers/ata/pata_ninja32.c
21053 --- linux-2.6.34.1/drivers/ata/pata_ninja32.c 2010-07-05 14:24:10.000000000 -0400
21054 +++ linux-2.6.34.1/drivers/ata/pata_ninja32.c 2010-07-07 09:04:50.000000000 -0400
21055 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
21056 ATA_BMDMA_SHT(DRV_NAME),
21057 };
21058
21059 -static struct ata_port_operations ninja32_port_ops = {
21060 +static const struct ata_port_operations ninja32_port_ops = {
21061 .inherits = &ata_bmdma_port_ops,
21062 .sff_dev_select = ninja32_dev_select,
21063 .cable_detect = ata_cable_40wire,
21064 diff -urNp linux-2.6.34.1/drivers/ata/pata_ns87410.c linux-2.6.34.1/drivers/ata/pata_ns87410.c
21065 --- linux-2.6.34.1/drivers/ata/pata_ns87410.c 2010-07-05 14:24:10.000000000 -0400
21066 +++ linux-2.6.34.1/drivers/ata/pata_ns87410.c 2010-07-07 09:04:50.000000000 -0400
21067 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
21068 ATA_PIO_SHT(DRV_NAME),
21069 };
21070
21071 -static struct ata_port_operations ns87410_port_ops = {
21072 +static const struct ata_port_operations ns87410_port_ops = {
21073 .inherits = &ata_sff_port_ops,
21074 .qc_issue = ns87410_qc_issue,
21075 .cable_detect = ata_cable_40wire,
21076 diff -urNp linux-2.6.34.1/drivers/ata/pata_ns87415.c linux-2.6.34.1/drivers/ata/pata_ns87415.c
21077 --- linux-2.6.34.1/drivers/ata/pata_ns87415.c 2010-07-05 14:24:10.000000000 -0400
21078 +++ linux-2.6.34.1/drivers/ata/pata_ns87415.c 2010-07-07 09:04:50.000000000 -0400
21079 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
21080 }
21081 #endif /* 87560 SuperIO Support */
21082
21083 -static struct ata_port_operations ns87415_pata_ops = {
21084 +static const struct ata_port_operations ns87415_pata_ops = {
21085 .inherits = &ata_bmdma_port_ops,
21086
21087 .check_atapi_dma = ns87415_check_atapi_dma,
21088 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
21089 };
21090
21091 #if defined(CONFIG_SUPERIO)
21092 -static struct ata_port_operations ns87560_pata_ops = {
21093 +static const struct ata_port_operations ns87560_pata_ops = {
21094 .inherits = &ns87415_pata_ops,
21095 .sff_tf_read = ns87560_tf_read,
21096 .sff_check_status = ns87560_check_status,
21097 diff -urNp linux-2.6.34.1/drivers/ata/pata_octeon_cf.c linux-2.6.34.1/drivers/ata/pata_octeon_cf.c
21098 --- linux-2.6.34.1/drivers/ata/pata_octeon_cf.c 2010-07-05 14:24:10.000000000 -0400
21099 +++ linux-2.6.34.1/drivers/ata/pata_octeon_cf.c 2010-07-07 09:04:50.000000000 -0400
21100 @@ -802,6 +802,7 @@ static unsigned int octeon_cf_qc_issue(s
21101 return 0;
21102 }
21103
21104 +/* cannot be const */
21105 static struct ata_port_operations octeon_cf_ops = {
21106 .inherits = &ata_sff_port_ops,
21107 .check_atapi_dma = octeon_cf_check_atapi_dma,
21108 diff -urNp linux-2.6.34.1/drivers/ata/pata_oldpiix.c linux-2.6.34.1/drivers/ata/pata_oldpiix.c
21109 --- linux-2.6.34.1/drivers/ata/pata_oldpiix.c 2010-07-05 14:24:10.000000000 -0400
21110 +++ linux-2.6.34.1/drivers/ata/pata_oldpiix.c 2010-07-07 09:04:50.000000000 -0400
21111 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
21112 ATA_BMDMA_SHT(DRV_NAME),
21113 };
21114
21115 -static struct ata_port_operations oldpiix_pata_ops = {
21116 +static const struct ata_port_operations oldpiix_pata_ops = {
21117 .inherits = &ata_bmdma_port_ops,
21118 .qc_issue = oldpiix_qc_issue,
21119 .cable_detect = ata_cable_40wire,
21120 diff -urNp linux-2.6.34.1/drivers/ata/pata_opti.c linux-2.6.34.1/drivers/ata/pata_opti.c
21121 --- linux-2.6.34.1/drivers/ata/pata_opti.c 2010-07-05 14:24:10.000000000 -0400
21122 +++ linux-2.6.34.1/drivers/ata/pata_opti.c 2010-07-07 09:04:50.000000000 -0400
21123 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
21124 ATA_PIO_SHT(DRV_NAME),
21125 };
21126
21127 -static struct ata_port_operations opti_port_ops = {
21128 +static const struct ata_port_operations opti_port_ops = {
21129 .inherits = &ata_sff_port_ops,
21130 .cable_detect = ata_cable_40wire,
21131 .set_piomode = opti_set_piomode,
21132 diff -urNp linux-2.6.34.1/drivers/ata/pata_optidma.c linux-2.6.34.1/drivers/ata/pata_optidma.c
21133 --- linux-2.6.34.1/drivers/ata/pata_optidma.c 2010-07-05 14:24:10.000000000 -0400
21134 +++ linux-2.6.34.1/drivers/ata/pata_optidma.c 2010-07-07 09:04:50.000000000 -0400
21135 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
21136 ATA_BMDMA_SHT(DRV_NAME),
21137 };
21138
21139 -static struct ata_port_operations optidma_port_ops = {
21140 +static const struct ata_port_operations optidma_port_ops = {
21141 .inherits = &ata_bmdma_port_ops,
21142 .cable_detect = ata_cable_40wire,
21143 .set_piomode = optidma_set_pio_mode,
21144 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
21145 .prereset = optidma_pre_reset,
21146 };
21147
21148 -static struct ata_port_operations optiplus_port_ops = {
21149 +static const struct ata_port_operations optiplus_port_ops = {
21150 .inherits = &optidma_port_ops,
21151 .set_piomode = optiplus_set_pio_mode,
21152 .set_dmamode = optiplus_set_dma_mode,
21153 diff -urNp linux-2.6.34.1/drivers/ata/pata_palmld.c linux-2.6.34.1/drivers/ata/pata_palmld.c
21154 --- linux-2.6.34.1/drivers/ata/pata_palmld.c 2010-07-05 14:24:10.000000000 -0400
21155 +++ linux-2.6.34.1/drivers/ata/pata_palmld.c 2010-07-07 09:04:50.000000000 -0400
21156 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
21157 ATA_PIO_SHT(DRV_NAME),
21158 };
21159
21160 -static struct ata_port_operations palmld_port_ops = {
21161 +static const struct ata_port_operations palmld_port_ops = {
21162 .inherits = &ata_sff_port_ops,
21163 .sff_data_xfer = ata_sff_data_xfer_noirq,
21164 .cable_detect = ata_cable_40wire,
21165 diff -urNp linux-2.6.34.1/drivers/ata/pata_pcmcia.c linux-2.6.34.1/drivers/ata/pata_pcmcia.c
21166 --- linux-2.6.34.1/drivers/ata/pata_pcmcia.c 2010-07-05 14:24:10.000000000 -0400
21167 +++ linux-2.6.34.1/drivers/ata/pata_pcmcia.c 2010-07-07 09:04:50.000000000 -0400
21168 @@ -163,14 +163,14 @@ static struct scsi_host_template pcmcia_
21169 ATA_PIO_SHT(DRV_NAME),
21170 };
21171
21172 -static struct ata_port_operations pcmcia_port_ops = {
21173 +static const struct ata_port_operations pcmcia_port_ops = {
21174 .inherits = &ata_sff_port_ops,
21175 .sff_data_xfer = ata_sff_data_xfer_noirq,
21176 .cable_detect = ata_cable_40wire,
21177 .set_mode = pcmcia_set_mode,
21178 };
21179
21180 -static struct ata_port_operations pcmcia_8bit_port_ops = {
21181 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
21182 .inherits = &ata_sff_port_ops,
21183 .sff_data_xfer = ata_data_xfer_8bit,
21184 .cable_detect = ata_cable_40wire,
21185 @@ -254,7 +254,7 @@ static int pcmcia_init_one(struct pcmcia
21186 unsigned long io_base, ctl_base;
21187 void __iomem *io_addr, *ctl_addr;
21188 int n_ports = 1;
21189 - struct ata_port_operations *ops = &pcmcia_port_ops;
21190 + const struct ata_port_operations *ops = &pcmcia_port_ops;
21191
21192 info = kzalloc(sizeof(*info), GFP_KERNEL);
21193 if (info == NULL)
21194 diff -urNp linux-2.6.34.1/drivers/ata/pata_pdc2027x.c linux-2.6.34.1/drivers/ata/pata_pdc2027x.c
21195 --- linux-2.6.34.1/drivers/ata/pata_pdc2027x.c 2010-07-05 14:24:10.000000000 -0400
21196 +++ linux-2.6.34.1/drivers/ata/pata_pdc2027x.c 2010-07-07 09:04:50.000000000 -0400
21197 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
21198 ATA_BMDMA_SHT(DRV_NAME),
21199 };
21200
21201 -static struct ata_port_operations pdc2027x_pata100_ops = {
21202 +static const struct ata_port_operations pdc2027x_pata100_ops = {
21203 .inherits = &ata_bmdma_port_ops,
21204 .check_atapi_dma = pdc2027x_check_atapi_dma,
21205 .cable_detect = pdc2027x_cable_detect,
21206 .prereset = pdc2027x_prereset,
21207 };
21208
21209 -static struct ata_port_operations pdc2027x_pata133_ops = {
21210 +static const struct ata_port_operations pdc2027x_pata133_ops = {
21211 .inherits = &pdc2027x_pata100_ops,
21212 .mode_filter = pdc2027x_mode_filter,
21213 .set_piomode = pdc2027x_set_piomode,
21214 diff -urNp linux-2.6.34.1/drivers/ata/pata_pdc202xx_old.c linux-2.6.34.1/drivers/ata/pata_pdc202xx_old.c
21215 --- linux-2.6.34.1/drivers/ata/pata_pdc202xx_old.c 2010-07-05 14:24:10.000000000 -0400
21216 +++ linux-2.6.34.1/drivers/ata/pata_pdc202xx_old.c 2010-07-07 09:04:50.000000000 -0400
21217 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
21218 ATA_BMDMA_SHT(DRV_NAME),
21219 };
21220
21221 -static struct ata_port_operations pdc2024x_port_ops = {
21222 +static const struct ata_port_operations pdc2024x_port_ops = {
21223 .inherits = &ata_bmdma_port_ops,
21224
21225 .cable_detect = ata_cable_40wire,
21226 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
21227 .sff_exec_command = pdc202xx_exec_command,
21228 };
21229
21230 -static struct ata_port_operations pdc2026x_port_ops = {
21231 +static const struct ata_port_operations pdc2026x_port_ops = {
21232 .inherits = &pdc2024x_port_ops,
21233
21234 .check_atapi_dma = pdc2026x_check_atapi_dma,
21235 diff -urNp linux-2.6.34.1/drivers/ata/pata_piccolo.c linux-2.6.34.1/drivers/ata/pata_piccolo.c
21236 --- linux-2.6.34.1/drivers/ata/pata_piccolo.c 2010-07-05 14:24:10.000000000 -0400
21237 +++ linux-2.6.34.1/drivers/ata/pata_piccolo.c 2010-07-07 09:04:50.000000000 -0400
21238 @@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sh
21239 ATA_BMDMA_SHT(DRV_NAME),
21240 };
21241
21242 -static struct ata_port_operations tosh_port_ops = {
21243 +static const struct ata_port_operations tosh_port_ops = {
21244 .inherits = &ata_bmdma_port_ops,
21245 .cable_detect = ata_cable_unknown,
21246 .set_piomode = tosh_set_piomode,
21247 diff -urNp linux-2.6.34.1/drivers/ata/pata_platform.c linux-2.6.34.1/drivers/ata/pata_platform.c
21248 --- linux-2.6.34.1/drivers/ata/pata_platform.c 2010-07-05 14:24:10.000000000 -0400
21249 +++ linux-2.6.34.1/drivers/ata/pata_platform.c 2010-07-07 09:04:50.000000000 -0400
21250 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
21251 ATA_PIO_SHT(DRV_NAME),
21252 };
21253
21254 -static struct ata_port_operations pata_platform_port_ops = {
21255 +static const struct ata_port_operations pata_platform_port_ops = {
21256 .inherits = &ata_sff_port_ops,
21257 .sff_data_xfer = ata_sff_data_xfer_noirq,
21258 .cable_detect = ata_cable_unknown,
21259 diff -urNp linux-2.6.34.1/drivers/ata/pata_qdi.c linux-2.6.34.1/drivers/ata/pata_qdi.c
21260 --- linux-2.6.34.1/drivers/ata/pata_qdi.c 2010-07-05 14:24:10.000000000 -0400
21261 +++ linux-2.6.34.1/drivers/ata/pata_qdi.c 2010-07-07 09:04:50.000000000 -0400
21262 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
21263 ATA_PIO_SHT(DRV_NAME),
21264 };
21265
21266 -static struct ata_port_operations qdi6500_port_ops = {
21267 +static const struct ata_port_operations qdi6500_port_ops = {
21268 .inherits = &ata_sff_port_ops,
21269 .qc_issue = qdi_qc_issue,
21270 .sff_data_xfer = qdi_data_xfer,
21271 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
21272 .set_piomode = qdi6500_set_piomode,
21273 };
21274
21275 -static struct ata_port_operations qdi6580_port_ops = {
21276 +static const struct ata_port_operations qdi6580_port_ops = {
21277 .inherits = &qdi6500_port_ops,
21278 .set_piomode = qdi6580_set_piomode,
21279 };
21280 diff -urNp linux-2.6.34.1/drivers/ata/pata_radisys.c linux-2.6.34.1/drivers/ata/pata_radisys.c
21281 --- linux-2.6.34.1/drivers/ata/pata_radisys.c 2010-07-05 14:24:10.000000000 -0400
21282 +++ linux-2.6.34.1/drivers/ata/pata_radisys.c 2010-07-07 09:04:50.000000000 -0400
21283 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
21284 ATA_BMDMA_SHT(DRV_NAME),
21285 };
21286
21287 -static struct ata_port_operations radisys_pata_ops = {
21288 +static const struct ata_port_operations radisys_pata_ops = {
21289 .inherits = &ata_bmdma_port_ops,
21290 .qc_issue = radisys_qc_issue,
21291 .cable_detect = ata_cable_unknown,
21292 diff -urNp linux-2.6.34.1/drivers/ata/pata_rb532_cf.c linux-2.6.34.1/drivers/ata/pata_rb532_cf.c
21293 --- linux-2.6.34.1/drivers/ata/pata_rb532_cf.c 2010-07-05 14:24:10.000000000 -0400
21294 +++ linux-2.6.34.1/drivers/ata/pata_rb532_cf.c 2010-07-07 09:04:50.000000000 -0400
21295 @@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handle
21296 return IRQ_HANDLED;
21297 }
21298
21299 -static struct ata_port_operations rb532_pata_port_ops = {
21300 +static const struct ata_port_operations rb532_pata_port_ops = {
21301 .inherits = &ata_sff_port_ops,
21302 .sff_data_xfer = ata_sff_data_xfer32,
21303 };
21304 diff -urNp linux-2.6.34.1/drivers/ata/pata_rdc.c linux-2.6.34.1/drivers/ata/pata_rdc.c
21305 --- linux-2.6.34.1/drivers/ata/pata_rdc.c 2010-07-05 14:24:10.000000000 -0400
21306 +++ linux-2.6.34.1/drivers/ata/pata_rdc.c 2010-07-07 09:04:50.000000000 -0400
21307 @@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_p
21308 pci_write_config_byte(dev, 0x48, udma_enable);
21309 }
21310
21311 -static struct ata_port_operations rdc_pata_ops = {
21312 +static const struct ata_port_operations rdc_pata_ops = {
21313 .inherits = &ata_bmdma32_port_ops,
21314 .cable_detect = rdc_pata_cable_detect,
21315 .set_piomode = rdc_set_piomode,
21316 diff -urNp linux-2.6.34.1/drivers/ata/pata_rz1000.c linux-2.6.34.1/drivers/ata/pata_rz1000.c
21317 --- linux-2.6.34.1/drivers/ata/pata_rz1000.c 2010-07-05 14:24:10.000000000 -0400
21318 +++ linux-2.6.34.1/drivers/ata/pata_rz1000.c 2010-07-07 09:04:50.000000000 -0400
21319 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
21320 ATA_PIO_SHT(DRV_NAME),
21321 };
21322
21323 -static struct ata_port_operations rz1000_port_ops = {
21324 +static const struct ata_port_operations rz1000_port_ops = {
21325 .inherits = &ata_sff_port_ops,
21326 .cable_detect = ata_cable_40wire,
21327 .set_mode = rz1000_set_mode,
21328 diff -urNp linux-2.6.34.1/drivers/ata/pata_sc1200.c linux-2.6.34.1/drivers/ata/pata_sc1200.c
21329 --- linux-2.6.34.1/drivers/ata/pata_sc1200.c 2010-07-05 14:24:10.000000000 -0400
21330 +++ linux-2.6.34.1/drivers/ata/pata_sc1200.c 2010-07-07 09:04:50.000000000 -0400
21331 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
21332 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
21333 };
21334
21335 -static struct ata_port_operations sc1200_port_ops = {
21336 +static const struct ata_port_operations sc1200_port_ops = {
21337 .inherits = &ata_bmdma_port_ops,
21338 .qc_prep = ata_sff_dumb_qc_prep,
21339 .qc_issue = sc1200_qc_issue,
21340 diff -urNp linux-2.6.34.1/drivers/ata/pata_scc.c linux-2.6.34.1/drivers/ata/pata_scc.c
21341 --- linux-2.6.34.1/drivers/ata/pata_scc.c 2010-07-05 14:24:10.000000000 -0400
21342 +++ linux-2.6.34.1/drivers/ata/pata_scc.c 2010-07-07 09:04:50.000000000 -0400
21343 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
21344 ATA_BMDMA_SHT(DRV_NAME),
21345 };
21346
21347 -static struct ata_port_operations scc_pata_ops = {
21348 +static const struct ata_port_operations scc_pata_ops = {
21349 .inherits = &ata_bmdma_port_ops,
21350
21351 .set_piomode = scc_set_piomode,
21352 diff -urNp linux-2.6.34.1/drivers/ata/pata_sch.c linux-2.6.34.1/drivers/ata/pata_sch.c
21353 --- linux-2.6.34.1/drivers/ata/pata_sch.c 2010-07-05 14:24:10.000000000 -0400
21354 +++ linux-2.6.34.1/drivers/ata/pata_sch.c 2010-07-07 09:04:50.000000000 -0400
21355 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
21356 ATA_BMDMA_SHT(DRV_NAME),
21357 };
21358
21359 -static struct ata_port_operations sch_pata_ops = {
21360 +static const struct ata_port_operations sch_pata_ops = {
21361 .inherits = &ata_bmdma_port_ops,
21362 .cable_detect = ata_cable_unknown,
21363 .set_piomode = sch_set_piomode,
21364 diff -urNp linux-2.6.34.1/drivers/ata/pata_serverworks.c linux-2.6.34.1/drivers/ata/pata_serverworks.c
21365 --- linux-2.6.34.1/drivers/ata/pata_serverworks.c 2010-07-05 14:24:10.000000000 -0400
21366 +++ linux-2.6.34.1/drivers/ata/pata_serverworks.c 2010-07-07 09:04:50.000000000 -0400
21367 @@ -300,7 +300,7 @@ static struct scsi_host_template serverw
21368 ATA_BMDMA_SHT(DRV_NAME),
21369 };
21370
21371 -static struct ata_port_operations serverworks_osb4_port_ops = {
21372 +static const struct ata_port_operations serverworks_osb4_port_ops = {
21373 .inherits = &ata_bmdma_port_ops,
21374 .cable_detect = serverworks_cable_detect,
21375 .mode_filter = serverworks_osb4_filter,
21376 @@ -308,7 +308,7 @@ static struct ata_port_operations server
21377 .set_dmamode = serverworks_set_dmamode,
21378 };
21379
21380 -static struct ata_port_operations serverworks_csb_port_ops = {
21381 +static const struct ata_port_operations serverworks_csb_port_ops = {
21382 .inherits = &serverworks_osb4_port_ops,
21383 .mode_filter = serverworks_csb_filter,
21384 };
21385 diff -urNp linux-2.6.34.1/drivers/ata/pata_sil680.c linux-2.6.34.1/drivers/ata/pata_sil680.c
21386 --- linux-2.6.34.1/drivers/ata/pata_sil680.c 2010-07-05 14:24:10.000000000 -0400
21387 +++ linux-2.6.34.1/drivers/ata/pata_sil680.c 2010-07-07 09:04:50.000000000 -0400
21388 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
21389 ATA_BMDMA_SHT(DRV_NAME),
21390 };
21391
21392 -static struct ata_port_operations sil680_port_ops = {
21393 +static const struct ata_port_operations sil680_port_ops = {
21394 .inherits = &ata_bmdma32_port_ops,
21395 .cable_detect = sil680_cable_detect,
21396 .set_piomode = sil680_set_piomode,
21397 diff -urNp linux-2.6.34.1/drivers/ata/pata_sis.c linux-2.6.34.1/drivers/ata/pata_sis.c
21398 --- linux-2.6.34.1/drivers/ata/pata_sis.c 2010-07-05 14:24:10.000000000 -0400
21399 +++ linux-2.6.34.1/drivers/ata/pata_sis.c 2010-07-07 09:04:50.000000000 -0400
21400 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
21401 ATA_BMDMA_SHT(DRV_NAME),
21402 };
21403
21404 -static struct ata_port_operations sis_133_for_sata_ops = {
21405 +static const struct ata_port_operations sis_133_for_sata_ops = {
21406 .inherits = &ata_bmdma_port_ops,
21407 .set_piomode = sis_133_set_piomode,
21408 .set_dmamode = sis_133_set_dmamode,
21409 .cable_detect = sis_133_cable_detect,
21410 };
21411
21412 -static struct ata_port_operations sis_base_ops = {
21413 +static const struct ata_port_operations sis_base_ops = {
21414 .inherits = &ata_bmdma_port_ops,
21415 .prereset = sis_pre_reset,
21416 };
21417
21418 -static struct ata_port_operations sis_133_ops = {
21419 +static const struct ata_port_operations sis_133_ops = {
21420 .inherits = &sis_base_ops,
21421 .set_piomode = sis_133_set_piomode,
21422 .set_dmamode = sis_133_set_dmamode,
21423 .cable_detect = sis_133_cable_detect,
21424 };
21425
21426 -static struct ata_port_operations sis_133_early_ops = {
21427 +static const struct ata_port_operations sis_133_early_ops = {
21428 .inherits = &sis_base_ops,
21429 .set_piomode = sis_100_set_piomode,
21430 .set_dmamode = sis_133_early_set_dmamode,
21431 .cable_detect = sis_66_cable_detect,
21432 };
21433
21434 -static struct ata_port_operations sis_100_ops = {
21435 +static const struct ata_port_operations sis_100_ops = {
21436 .inherits = &sis_base_ops,
21437 .set_piomode = sis_100_set_piomode,
21438 .set_dmamode = sis_100_set_dmamode,
21439 .cable_detect = sis_66_cable_detect,
21440 };
21441
21442 -static struct ata_port_operations sis_66_ops = {
21443 +static const struct ata_port_operations sis_66_ops = {
21444 .inherits = &sis_base_ops,
21445 .set_piomode = sis_old_set_piomode,
21446 .set_dmamode = sis_66_set_dmamode,
21447 .cable_detect = sis_66_cable_detect,
21448 };
21449
21450 -static struct ata_port_operations sis_old_ops = {
21451 +static const struct ata_port_operations sis_old_ops = {
21452 .inherits = &sis_base_ops,
21453 .set_piomode = sis_old_set_piomode,
21454 .set_dmamode = sis_old_set_dmamode,
21455 diff -urNp linux-2.6.34.1/drivers/ata/pata_sl82c105.c linux-2.6.34.1/drivers/ata/pata_sl82c105.c
21456 --- linux-2.6.34.1/drivers/ata/pata_sl82c105.c 2010-07-05 14:24:10.000000000 -0400
21457 +++ linux-2.6.34.1/drivers/ata/pata_sl82c105.c 2010-07-07 09:04:50.000000000 -0400
21458 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
21459 ATA_BMDMA_SHT(DRV_NAME),
21460 };
21461
21462 -static struct ata_port_operations sl82c105_port_ops = {
21463 +static const struct ata_port_operations sl82c105_port_ops = {
21464 .inherits = &ata_bmdma_port_ops,
21465 .qc_defer = sl82c105_qc_defer,
21466 .bmdma_start = sl82c105_bmdma_start,
21467 diff -urNp linux-2.6.34.1/drivers/ata/pata_triflex.c linux-2.6.34.1/drivers/ata/pata_triflex.c
21468 --- linux-2.6.34.1/drivers/ata/pata_triflex.c 2010-07-05 14:24:10.000000000 -0400
21469 +++ linux-2.6.34.1/drivers/ata/pata_triflex.c 2010-07-07 09:04:50.000000000 -0400
21470 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
21471 ATA_BMDMA_SHT(DRV_NAME),
21472 };
21473
21474 -static struct ata_port_operations triflex_port_ops = {
21475 +static const struct ata_port_operations triflex_port_ops = {
21476 .inherits = &ata_bmdma_port_ops,
21477 .bmdma_start = triflex_bmdma_start,
21478 .bmdma_stop = triflex_bmdma_stop,
21479 diff -urNp linux-2.6.34.1/drivers/ata/pata_via.c linux-2.6.34.1/drivers/ata/pata_via.c
21480 --- linux-2.6.34.1/drivers/ata/pata_via.c 2010-07-05 14:24:10.000000000 -0400
21481 +++ linux-2.6.34.1/drivers/ata/pata_via.c 2010-07-07 09:04:50.000000000 -0400
21482 @@ -441,7 +441,7 @@ static struct scsi_host_template via_sht
21483 ATA_BMDMA_SHT(DRV_NAME),
21484 };
21485
21486 -static struct ata_port_operations via_port_ops = {
21487 +static const struct ata_port_operations via_port_ops = {
21488 .inherits = &ata_bmdma_port_ops,
21489 .cable_detect = via_cable_detect,
21490 .set_piomode = via_set_piomode,
21491 @@ -452,7 +452,7 @@ static struct ata_port_operations via_po
21492 .mode_filter = via_mode_filter,
21493 };
21494
21495 -static struct ata_port_operations via_port_ops_noirq = {
21496 +static const struct ata_port_operations via_port_ops_noirq = {
21497 .inherits = &via_port_ops,
21498 .sff_data_xfer = ata_sff_data_xfer_noirq,
21499 };
21500 diff -urNp linux-2.6.34.1/drivers/ata/pata_winbond.c linux-2.6.34.1/drivers/ata/pata_winbond.c
21501 --- linux-2.6.34.1/drivers/ata/pata_winbond.c 2010-07-05 14:24:10.000000000 -0400
21502 +++ linux-2.6.34.1/drivers/ata/pata_winbond.c 2010-07-07 09:04:50.000000000 -0400
21503 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
21504 ATA_PIO_SHT(DRV_NAME),
21505 };
21506
21507 -static struct ata_port_operations winbond_port_ops = {
21508 +static const struct ata_port_operations winbond_port_ops = {
21509 .inherits = &ata_sff_port_ops,
21510 .sff_data_xfer = winbond_data_xfer,
21511 .cable_detect = ata_cable_40wire,
21512 diff -urNp linux-2.6.34.1/drivers/ata/pdc_adma.c linux-2.6.34.1/drivers/ata/pdc_adma.c
21513 --- linux-2.6.34.1/drivers/ata/pdc_adma.c 2010-07-05 14:24:10.000000000 -0400
21514 +++ linux-2.6.34.1/drivers/ata/pdc_adma.c 2010-07-07 09:04:50.000000000 -0400
21515 @@ -146,7 +146,7 @@ static struct scsi_host_template adma_at
21516 .dma_boundary = ADMA_DMA_BOUNDARY,
21517 };
21518
21519 -static struct ata_port_operations adma_ata_ops = {
21520 +static const struct ata_port_operations adma_ata_ops = {
21521 .inherits = &ata_sff_port_ops,
21522
21523 .lost_interrupt = ATA_OP_NULL,
21524 diff -urNp linux-2.6.34.1/drivers/ata/sata_fsl.c linux-2.6.34.1/drivers/ata/sata_fsl.c
21525 --- linux-2.6.34.1/drivers/ata/sata_fsl.c 2010-07-05 14:24:10.000000000 -0400
21526 +++ linux-2.6.34.1/drivers/ata/sata_fsl.c 2010-07-07 09:04:50.000000000 -0400
21527 @@ -1261,7 +1261,7 @@ static struct scsi_host_template sata_fs
21528 .dma_boundary = ATA_DMA_BOUNDARY,
21529 };
21530
21531 -static struct ata_port_operations sata_fsl_ops = {
21532 +static const struct ata_port_operations sata_fsl_ops = {
21533 .inherits = &sata_pmp_port_ops,
21534
21535 .qc_defer = ata_std_qc_defer,
21536 diff -urNp linux-2.6.34.1/drivers/ata/sata_inic162x.c linux-2.6.34.1/drivers/ata/sata_inic162x.c
21537 --- linux-2.6.34.1/drivers/ata/sata_inic162x.c 2010-07-05 14:24:10.000000000 -0400
21538 +++ linux-2.6.34.1/drivers/ata/sata_inic162x.c 2010-07-07 09:04:50.000000000 -0400
21539 @@ -722,7 +722,7 @@ static int inic_port_start(struct ata_po
21540 return 0;
21541 }
21542
21543 -static struct ata_port_operations inic_port_ops = {
21544 +static const struct ata_port_operations inic_port_ops = {
21545 .inherits = &sata_port_ops,
21546
21547 .check_atapi_dma = inic_check_atapi_dma,
21548 diff -urNp linux-2.6.34.1/drivers/ata/sata_mv.c linux-2.6.34.1/drivers/ata/sata_mv.c
21549 --- linux-2.6.34.1/drivers/ata/sata_mv.c 2010-07-05 14:24:10.000000000 -0400
21550 +++ linux-2.6.34.1/drivers/ata/sata_mv.c 2010-07-07 09:04:50.000000000 -0400
21551 @@ -663,7 +663,7 @@ static struct scsi_host_template mv6_sht
21552 .dma_boundary = MV_DMA_BOUNDARY,
21553 };
21554
21555 -static struct ata_port_operations mv5_ops = {
21556 +static const struct ata_port_operations mv5_ops = {
21557 .inherits = &ata_sff_port_ops,
21558
21559 .lost_interrupt = ATA_OP_NULL,
21560 @@ -685,7 +685,7 @@ static struct ata_port_operations mv5_op
21561 .port_stop = mv_port_stop,
21562 };
21563
21564 -static struct ata_port_operations mv6_ops = {
21565 +static const struct ata_port_operations mv6_ops = {
21566 .inherits = &mv5_ops,
21567 .dev_config = mv6_dev_config,
21568 .scr_read = mv_scr_read,
21569 @@ -705,7 +705,7 @@ static struct ata_port_operations mv6_op
21570 .bmdma_status = mv_bmdma_status,
21571 };
21572
21573 -static struct ata_port_operations mv_iie_ops = {
21574 +static const struct ata_port_operations mv_iie_ops = {
21575 .inherits = &mv6_ops,
21576 .dev_config = ATA_OP_NULL,
21577 .qc_prep = mv_qc_prep_iie,
21578 diff -urNp linux-2.6.34.1/drivers/ata/sata_nv.c linux-2.6.34.1/drivers/ata/sata_nv.c
21579 --- linux-2.6.34.1/drivers/ata/sata_nv.c 2010-07-05 14:24:10.000000000 -0400
21580 +++ linux-2.6.34.1/drivers/ata/sata_nv.c 2010-07-07 09:04:50.000000000 -0400
21581 @@ -465,7 +465,7 @@ static struct scsi_host_template nv_swnc
21582 * cases. Define nv_hardreset() which only kicks in for post-boot
21583 * probing and use it for all variants.
21584 */
21585 -static struct ata_port_operations nv_generic_ops = {
21586 +static const struct ata_port_operations nv_generic_ops = {
21587 .inherits = &ata_bmdma_port_ops,
21588 .lost_interrupt = ATA_OP_NULL,
21589 .scr_read = nv_scr_read,
21590 @@ -473,20 +473,20 @@ static struct ata_port_operations nv_gen
21591 .hardreset = nv_hardreset,
21592 };
21593
21594 -static struct ata_port_operations nv_nf2_ops = {
21595 +static const struct ata_port_operations nv_nf2_ops = {
21596 .inherits = &nv_generic_ops,
21597 .freeze = nv_nf2_freeze,
21598 .thaw = nv_nf2_thaw,
21599 };
21600
21601 -static struct ata_port_operations nv_ck804_ops = {
21602 +static const struct ata_port_operations nv_ck804_ops = {
21603 .inherits = &nv_generic_ops,
21604 .freeze = nv_ck804_freeze,
21605 .thaw = nv_ck804_thaw,
21606 .host_stop = nv_ck804_host_stop,
21607 };
21608
21609 -static struct ata_port_operations nv_adma_ops = {
21610 +static const struct ata_port_operations nv_adma_ops = {
21611 .inherits = &nv_ck804_ops,
21612
21613 .check_atapi_dma = nv_adma_check_atapi_dma,
21614 @@ -510,7 +510,7 @@ static struct ata_port_operations nv_adm
21615 .host_stop = nv_adma_host_stop,
21616 };
21617
21618 -static struct ata_port_operations nv_swncq_ops = {
21619 +static const struct ata_port_operations nv_swncq_ops = {
21620 .inherits = &nv_generic_ops,
21621
21622 .qc_defer = ata_std_qc_defer,
21623 diff -urNp linux-2.6.34.1/drivers/ata/sata_promise.c linux-2.6.34.1/drivers/ata/sata_promise.c
21624 --- linux-2.6.34.1/drivers/ata/sata_promise.c 2010-07-05 14:24:10.000000000 -0400
21625 +++ linux-2.6.34.1/drivers/ata/sata_promise.c 2010-07-07 09:04:50.000000000 -0400
21626 @@ -196,7 +196,7 @@ static const struct ata_port_operations
21627 .error_handler = pdc_error_handler,
21628 };
21629
21630 -static struct ata_port_operations pdc_sata_ops = {
21631 +static const struct ata_port_operations pdc_sata_ops = {
21632 .inherits = &pdc_common_ops,
21633 .cable_detect = pdc_sata_cable_detect,
21634 .freeze = pdc_sata_freeze,
21635 @@ -209,14 +209,14 @@ static struct ata_port_operations pdc_sa
21636
21637 /* First-generation chips need a more restrictive ->check_atapi_dma op,
21638 and ->freeze/thaw that ignore the hotplug controls. */
21639 -static struct ata_port_operations pdc_old_sata_ops = {
21640 +static const struct ata_port_operations pdc_old_sata_ops = {
21641 .inherits = &pdc_sata_ops,
21642 .freeze = pdc_freeze,
21643 .thaw = pdc_thaw,
21644 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
21645 };
21646
21647 -static struct ata_port_operations pdc_pata_ops = {
21648 +static const struct ata_port_operations pdc_pata_ops = {
21649 .inherits = &pdc_common_ops,
21650 .cable_detect = pdc_pata_cable_detect,
21651 .freeze = pdc_freeze,
21652 diff -urNp linux-2.6.34.1/drivers/ata/sata_qstor.c linux-2.6.34.1/drivers/ata/sata_qstor.c
21653 --- linux-2.6.34.1/drivers/ata/sata_qstor.c 2010-07-05 14:24:10.000000000 -0400
21654 +++ linux-2.6.34.1/drivers/ata/sata_qstor.c 2010-07-07 09:04:50.000000000 -0400
21655 @@ -133,7 +133,7 @@ static struct scsi_host_template qs_ata_
21656 .dma_boundary = QS_DMA_BOUNDARY,
21657 };
21658
21659 -static struct ata_port_operations qs_ata_ops = {
21660 +static const struct ata_port_operations qs_ata_ops = {
21661 .inherits = &ata_sff_port_ops,
21662
21663 .check_atapi_dma = qs_check_atapi_dma,
21664 diff -urNp linux-2.6.34.1/drivers/ata/sata_sil.c linux-2.6.34.1/drivers/ata/sata_sil.c
21665 --- linux-2.6.34.1/drivers/ata/sata_sil.c 2010-07-05 14:24:10.000000000 -0400
21666 +++ linux-2.6.34.1/drivers/ata/sata_sil.c 2010-07-07 09:04:50.000000000 -0400
21667 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
21668 .sg_tablesize = ATA_MAX_PRD
21669 };
21670
21671 -static struct ata_port_operations sil_ops = {
21672 +static const struct ata_port_operations sil_ops = {
21673 .inherits = &ata_bmdma32_port_ops,
21674 .dev_config = sil_dev_config,
21675 .set_mode = sil_set_mode,
21676 diff -urNp linux-2.6.34.1/drivers/ata/sata_sil24.c linux-2.6.34.1/drivers/ata/sata_sil24.c
21677 --- linux-2.6.34.1/drivers/ata/sata_sil24.c 2010-07-05 14:24:10.000000000 -0400
21678 +++ linux-2.6.34.1/drivers/ata/sata_sil24.c 2010-07-07 09:04:50.000000000 -0400
21679 @@ -389,7 +389,7 @@ static struct scsi_host_template sil24_s
21680 .dma_boundary = ATA_DMA_BOUNDARY,
21681 };
21682
21683 -static struct ata_port_operations sil24_ops = {
21684 +static const struct ata_port_operations sil24_ops = {
21685 .inherits = &sata_pmp_port_ops,
21686
21687 .qc_defer = sil24_qc_defer,
21688 diff -urNp linux-2.6.34.1/drivers/ata/sata_sis.c linux-2.6.34.1/drivers/ata/sata_sis.c
21689 --- linux-2.6.34.1/drivers/ata/sata_sis.c 2010-07-05 14:24:10.000000000 -0400
21690 +++ linux-2.6.34.1/drivers/ata/sata_sis.c 2010-07-07 09:04:50.000000000 -0400
21691 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
21692 ATA_BMDMA_SHT(DRV_NAME),
21693 };
21694
21695 -static struct ata_port_operations sis_ops = {
21696 +static const struct ata_port_operations sis_ops = {
21697 .inherits = &ata_bmdma_port_ops,
21698 .scr_read = sis_scr_read,
21699 .scr_write = sis_scr_write,
21700 diff -urNp linux-2.6.34.1/drivers/ata/sata_svw.c linux-2.6.34.1/drivers/ata/sata_svw.c
21701 --- linux-2.6.34.1/drivers/ata/sata_svw.c 2010-07-05 14:24:10.000000000 -0400
21702 +++ linux-2.6.34.1/drivers/ata/sata_svw.c 2010-07-07 09:04:50.000000000 -0400
21703 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
21704 };
21705
21706
21707 -static struct ata_port_operations k2_sata_ops = {
21708 +static const struct ata_port_operations k2_sata_ops = {
21709 .inherits = &ata_bmdma_port_ops,
21710 .sff_tf_load = k2_sata_tf_load,
21711 .sff_tf_read = k2_sata_tf_read,
21712 diff -urNp linux-2.6.34.1/drivers/ata/sata_sx4.c linux-2.6.34.1/drivers/ata/sata_sx4.c
21713 --- linux-2.6.34.1/drivers/ata/sata_sx4.c 2010-07-05 14:24:10.000000000 -0400
21714 +++ linux-2.6.34.1/drivers/ata/sata_sx4.c 2010-07-07 09:04:50.000000000 -0400
21715 @@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sat
21716 };
21717
21718 /* TODO: inherit from base port_ops after converting to new EH */
21719 -static struct ata_port_operations pdc_20621_ops = {
21720 +static const struct ata_port_operations pdc_20621_ops = {
21721 .inherits = &ata_sff_port_ops,
21722
21723 .check_atapi_dma = pdc_check_atapi_dma,
21724 diff -urNp linux-2.6.34.1/drivers/ata/sata_uli.c linux-2.6.34.1/drivers/ata/sata_uli.c
21725 --- linux-2.6.34.1/drivers/ata/sata_uli.c 2010-07-05 14:24:10.000000000 -0400
21726 +++ linux-2.6.34.1/drivers/ata/sata_uli.c 2010-07-07 09:04:50.000000000 -0400
21727 @@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht
21728 ATA_BMDMA_SHT(DRV_NAME),
21729 };
21730
21731 -static struct ata_port_operations uli_ops = {
21732 +static const struct ata_port_operations uli_ops = {
21733 .inherits = &ata_bmdma_port_ops,
21734 .scr_read = uli_scr_read,
21735 .scr_write = uli_scr_write,
21736 diff -urNp linux-2.6.34.1/drivers/ata/sata_via.c linux-2.6.34.1/drivers/ata/sata_via.c
21737 --- linux-2.6.34.1/drivers/ata/sata_via.c 2010-07-05 14:24:10.000000000 -0400
21738 +++ linux-2.6.34.1/drivers/ata/sata_via.c 2010-07-07 09:04:50.000000000 -0400
21739 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
21740 ATA_BMDMA_SHT(DRV_NAME),
21741 };
21742
21743 -static struct ata_port_operations svia_base_ops = {
21744 +static const struct ata_port_operations svia_base_ops = {
21745 .inherits = &ata_bmdma_port_ops,
21746 .sff_tf_load = svia_tf_load,
21747 };
21748
21749 -static struct ata_port_operations vt6420_sata_ops = {
21750 +static const struct ata_port_operations vt6420_sata_ops = {
21751 .inherits = &svia_base_ops,
21752 .freeze = svia_noop_freeze,
21753 .prereset = vt6420_prereset,
21754 .bmdma_start = vt6420_bmdma_start,
21755 };
21756
21757 -static struct ata_port_operations vt6421_pata_ops = {
21758 +static const struct ata_port_operations vt6421_pata_ops = {
21759 .inherits = &svia_base_ops,
21760 .cable_detect = vt6421_pata_cable_detect,
21761 .set_piomode = vt6421_set_pio_mode,
21762 .set_dmamode = vt6421_set_dma_mode,
21763 };
21764
21765 -static struct ata_port_operations vt6421_sata_ops = {
21766 +static const struct ata_port_operations vt6421_sata_ops = {
21767 .inherits = &svia_base_ops,
21768 .scr_read = svia_scr_read,
21769 .scr_write = svia_scr_write,
21770 };
21771
21772 -static struct ata_port_operations vt8251_ops = {
21773 +static const struct ata_port_operations vt8251_ops = {
21774 .inherits = &svia_base_ops,
21775 .hardreset = sata_std_hardreset,
21776 .scr_read = vt8251_scr_read,
21777 diff -urNp linux-2.6.34.1/drivers/ata/sata_vsc.c linux-2.6.34.1/drivers/ata/sata_vsc.c
21778 --- linux-2.6.34.1/drivers/ata/sata_vsc.c 2010-07-05 14:24:10.000000000 -0400
21779 +++ linux-2.6.34.1/drivers/ata/sata_vsc.c 2010-07-07 09:04:50.000000000 -0400
21780 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
21781 };
21782
21783
21784 -static struct ata_port_operations vsc_sata_ops = {
21785 +static const struct ata_port_operations vsc_sata_ops = {
21786 .inherits = &ata_bmdma_port_ops,
21787 /* The IRQ handling is not quite standard SFF behaviour so we
21788 cannot use the default lost interrupt handler */
21789 diff -urNp linux-2.6.34.1/drivers/atm/adummy.c linux-2.6.34.1/drivers/atm/adummy.c
21790 --- linux-2.6.34.1/drivers/atm/adummy.c 2010-07-05 14:24:10.000000000 -0400
21791 +++ linux-2.6.34.1/drivers/atm/adummy.c 2010-07-07 09:04:50.000000000 -0400
21792 @@ -78,7 +78,7 @@ adummy_send(struct atm_vcc *vcc, struct
21793 vcc->pop(vcc, skb);
21794 else
21795 dev_kfree_skb_any(skb);
21796 - atomic_inc(&vcc->stats->tx);
21797 + atomic_inc_unchecked(&vcc->stats->tx);
21798
21799 return 0;
21800 }
21801 diff -urNp linux-2.6.34.1/drivers/atm/ambassador.c linux-2.6.34.1/drivers/atm/ambassador.c
21802 --- linux-2.6.34.1/drivers/atm/ambassador.c 2010-07-05 14:24:10.000000000 -0400
21803 +++ linux-2.6.34.1/drivers/atm/ambassador.c 2010-07-07 09:04:50.000000000 -0400
21804 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
21805 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
21806
21807 // VC layer stats
21808 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
21809 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
21810
21811 // free the descriptor
21812 kfree (tx_descr);
21813 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
21814 dump_skb ("<<<", vc, skb);
21815
21816 // VC layer stats
21817 - atomic_inc(&atm_vcc->stats->rx);
21818 + atomic_inc_unchecked(&atm_vcc->stats->rx);
21819 __net_timestamp(skb);
21820 // end of our responsability
21821 atm_vcc->push (atm_vcc, skb);
21822 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
21823 } else {
21824 PRINTK (KERN_INFO, "dropped over-size frame");
21825 // should we count this?
21826 - atomic_inc(&atm_vcc->stats->rx_drop);
21827 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
21828 }
21829
21830 } else {
21831 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
21832 }
21833
21834 if (check_area (skb->data, skb->len)) {
21835 - atomic_inc(&atm_vcc->stats->tx_err);
21836 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
21837 return -ENOMEM; // ?
21838 }
21839
21840 diff -urNp linux-2.6.34.1/drivers/atm/atmtcp.c linux-2.6.34.1/drivers/atm/atmtcp.c
21841 --- linux-2.6.34.1/drivers/atm/atmtcp.c 2010-07-05 14:24:10.000000000 -0400
21842 +++ linux-2.6.34.1/drivers/atm/atmtcp.c 2010-07-07 09:04:50.000000000 -0400
21843 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
21844 if (vcc->pop) vcc->pop(vcc,skb);
21845 else dev_kfree_skb(skb);
21846 if (dev_data) return 0;
21847 - atomic_inc(&vcc->stats->tx_err);
21848 + atomic_inc_unchecked(&vcc->stats->tx_err);
21849 return -ENOLINK;
21850 }
21851 size = skb->len+sizeof(struct atmtcp_hdr);
21852 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
21853 if (!new_skb) {
21854 if (vcc->pop) vcc->pop(vcc,skb);
21855 else dev_kfree_skb(skb);
21856 - atomic_inc(&vcc->stats->tx_err);
21857 + atomic_inc_unchecked(&vcc->stats->tx_err);
21858 return -ENOBUFS;
21859 }
21860 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
21861 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
21862 if (vcc->pop) vcc->pop(vcc,skb);
21863 else dev_kfree_skb(skb);
21864 out_vcc->push(out_vcc,new_skb);
21865 - atomic_inc(&vcc->stats->tx);
21866 - atomic_inc(&out_vcc->stats->rx);
21867 + atomic_inc_unchecked(&vcc->stats->tx);
21868 + atomic_inc_unchecked(&out_vcc->stats->rx);
21869 return 0;
21870 }
21871
21872 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
21873 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
21874 read_unlock(&vcc_sklist_lock);
21875 if (!out_vcc) {
21876 - atomic_inc(&vcc->stats->tx_err);
21877 + atomic_inc_unchecked(&vcc->stats->tx_err);
21878 goto done;
21879 }
21880 skb_pull(skb,sizeof(struct atmtcp_hdr));
21881 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
21882 __net_timestamp(new_skb);
21883 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
21884 out_vcc->push(out_vcc,new_skb);
21885 - atomic_inc(&vcc->stats->tx);
21886 - atomic_inc(&out_vcc->stats->rx);
21887 + atomic_inc_unchecked(&vcc->stats->tx);
21888 + atomic_inc_unchecked(&out_vcc->stats->rx);
21889 done:
21890 if (vcc->pop) vcc->pop(vcc,skb);
21891 else dev_kfree_skb(skb);
21892 diff -urNp linux-2.6.34.1/drivers/atm/eni.c linux-2.6.34.1/drivers/atm/eni.c
21893 --- linux-2.6.34.1/drivers/atm/eni.c 2010-07-05 14:24:10.000000000 -0400
21894 +++ linux-2.6.34.1/drivers/atm/eni.c 2010-07-07 09:04:50.000000000 -0400
21895 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
21896 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
21897 vcc->dev->number);
21898 length = 0;
21899 - atomic_inc(&vcc->stats->rx_err);
21900 + atomic_inc_unchecked(&vcc->stats->rx_err);
21901 }
21902 else {
21903 length = ATM_CELL_SIZE-1; /* no HEC */
21904 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
21905 size);
21906 }
21907 eff = length = 0;
21908 - atomic_inc(&vcc->stats->rx_err);
21909 + atomic_inc_unchecked(&vcc->stats->rx_err);
21910 }
21911 else {
21912 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
21913 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
21914 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
21915 vcc->dev->number,vcc->vci,length,size << 2,descr);
21916 length = eff = 0;
21917 - atomic_inc(&vcc->stats->rx_err);
21918 + atomic_inc_unchecked(&vcc->stats->rx_err);
21919 }
21920 }
21921 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
21922 @@ -771,7 +771,7 @@ rx_dequeued++;
21923 vcc->push(vcc,skb);
21924 pushed++;
21925 }
21926 - atomic_inc(&vcc->stats->rx);
21927 + atomic_inc_unchecked(&vcc->stats->rx);
21928 }
21929 wake_up(&eni_dev->rx_wait);
21930 }
21931 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
21932 PCI_DMA_TODEVICE);
21933 if (vcc->pop) vcc->pop(vcc,skb);
21934 else dev_kfree_skb_irq(skb);
21935 - atomic_inc(&vcc->stats->tx);
21936 + atomic_inc_unchecked(&vcc->stats->tx);
21937 wake_up(&eni_dev->tx_wait);
21938 dma_complete++;
21939 }
21940 diff -urNp linux-2.6.34.1/drivers/atm/firestream.c linux-2.6.34.1/drivers/atm/firestream.c
21941 --- linux-2.6.34.1/drivers/atm/firestream.c 2010-07-05 14:24:10.000000000 -0400
21942 +++ linux-2.6.34.1/drivers/atm/firestream.c 2010-07-07 09:04:50.000000000 -0400
21943 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
21944 }
21945 }
21946
21947 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
21948 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
21949
21950 fs_dprintk (FS_DEBUG_TXMEM, "i");
21951 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
21952 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
21953 #endif
21954 skb_put (skb, qe->p1 & 0xffff);
21955 ATM_SKB(skb)->vcc = atm_vcc;
21956 - atomic_inc(&atm_vcc->stats->rx);
21957 + atomic_inc_unchecked(&atm_vcc->stats->rx);
21958 __net_timestamp(skb);
21959 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
21960 atm_vcc->push (atm_vcc, skb);
21961 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
21962 kfree (pe);
21963 }
21964 if (atm_vcc)
21965 - atomic_inc(&atm_vcc->stats->rx_drop);
21966 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
21967 break;
21968 case 0x1f: /* Reassembly abort: no buffers. */
21969 /* Silently increment error counter. */
21970 if (atm_vcc)
21971 - atomic_inc(&atm_vcc->stats->rx_drop);
21972 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
21973 break;
21974 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
21975 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
21976 diff -urNp linux-2.6.34.1/drivers/atm/fore200e.c linux-2.6.34.1/drivers/atm/fore200e.c
21977 --- linux-2.6.34.1/drivers/atm/fore200e.c 2010-07-05 14:24:10.000000000 -0400
21978 +++ linux-2.6.34.1/drivers/atm/fore200e.c 2010-07-07 09:04:50.000000000 -0400
21979 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
21980 #endif
21981 /* check error condition */
21982 if (*entry->status & STATUS_ERROR)
21983 - atomic_inc(&vcc->stats->tx_err);
21984 + atomic_inc_unchecked(&vcc->stats->tx_err);
21985 else
21986 - atomic_inc(&vcc->stats->tx);
21987 + atomic_inc_unchecked(&vcc->stats->tx);
21988 }
21989 }
21990
21991 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
21992 if (skb == NULL) {
21993 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
21994
21995 - atomic_inc(&vcc->stats->rx_drop);
21996 + atomic_inc_unchecked(&vcc->stats->rx_drop);
21997 return -ENOMEM;
21998 }
21999
22000 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
22001
22002 dev_kfree_skb_any(skb);
22003
22004 - atomic_inc(&vcc->stats->rx_drop);
22005 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22006 return -ENOMEM;
22007 }
22008
22009 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22010
22011 vcc->push(vcc, skb);
22012 - atomic_inc(&vcc->stats->rx);
22013 + atomic_inc_unchecked(&vcc->stats->rx);
22014
22015 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22016
22017 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
22018 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22019 fore200e->atm_dev->number,
22020 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22021 - atomic_inc(&vcc->stats->rx_err);
22022 + atomic_inc_unchecked(&vcc->stats->rx_err);
22023 }
22024 }
22025
22026 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22027 goto retry_here;
22028 }
22029
22030 - atomic_inc(&vcc->stats->tx_err);
22031 + atomic_inc_unchecked(&vcc->stats->tx_err);
22032
22033 fore200e->tx_sat++;
22034 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22035 diff -urNp linux-2.6.34.1/drivers/atm/he.c linux-2.6.34.1/drivers/atm/he.c
22036 --- linux-2.6.34.1/drivers/atm/he.c 2010-07-05 14:24:10.000000000 -0400
22037 +++ linux-2.6.34.1/drivers/atm/he.c 2010-07-07 09:04:50.000000000 -0400
22038 @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22039
22040 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22041 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22042 - atomic_inc(&vcc->stats->rx_drop);
22043 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22044 goto return_host_buffers;
22045 }
22046
22047 @@ -1803,7 +1803,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22048 RBRQ_LEN_ERR(he_dev->rbrq_head)
22049 ? "LEN_ERR" : "",
22050 vcc->vpi, vcc->vci);
22051 - atomic_inc(&vcc->stats->rx_err);
22052 + atomic_inc_unchecked(&vcc->stats->rx_err);
22053 goto return_host_buffers;
22054 }
22055
22056 @@ -1862,7 +1862,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22057 vcc->push(vcc, skb);
22058 spin_lock(&he_dev->global_lock);
22059
22060 - atomic_inc(&vcc->stats->rx);
22061 + atomic_inc_unchecked(&vcc->stats->rx);
22062
22063 return_host_buffers:
22064 ++pdus_assembled;
22065 @@ -2207,7 +2207,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22066 tpd->vcc->pop(tpd->vcc, tpd->skb);
22067 else
22068 dev_kfree_skb_any(tpd->skb);
22069 - atomic_inc(&tpd->vcc->stats->tx_err);
22070 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22071 }
22072 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22073 return;
22074 @@ -2619,7 +2619,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22075 vcc->pop(vcc, skb);
22076 else
22077 dev_kfree_skb_any(skb);
22078 - atomic_inc(&vcc->stats->tx_err);
22079 + atomic_inc_unchecked(&vcc->stats->tx_err);
22080 return -EINVAL;
22081 }
22082
22083 @@ -2630,7 +2630,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22084 vcc->pop(vcc, skb);
22085 else
22086 dev_kfree_skb_any(skb);
22087 - atomic_inc(&vcc->stats->tx_err);
22088 + atomic_inc_unchecked(&vcc->stats->tx_err);
22089 return -EINVAL;
22090 }
22091 #endif
22092 @@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22093 vcc->pop(vcc, skb);
22094 else
22095 dev_kfree_skb_any(skb);
22096 - atomic_inc(&vcc->stats->tx_err);
22097 + atomic_inc_unchecked(&vcc->stats->tx_err);
22098 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22099 return -ENOMEM;
22100 }
22101 @@ -2684,7 +2684,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22102 vcc->pop(vcc, skb);
22103 else
22104 dev_kfree_skb_any(skb);
22105 - atomic_inc(&vcc->stats->tx_err);
22106 + atomic_inc_unchecked(&vcc->stats->tx_err);
22107 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22108 return -ENOMEM;
22109 }
22110 @@ -2715,7 +2715,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22111 __enqueue_tpd(he_dev, tpd, cid);
22112 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22113
22114 - atomic_inc(&vcc->stats->tx);
22115 + atomic_inc_unchecked(&vcc->stats->tx);
22116
22117 return 0;
22118 }
22119 diff -urNp linux-2.6.34.1/drivers/atm/horizon.c linux-2.6.34.1/drivers/atm/horizon.c
22120 --- linux-2.6.34.1/drivers/atm/horizon.c 2010-07-05 14:24:10.000000000 -0400
22121 +++ linux-2.6.34.1/drivers/atm/horizon.c 2010-07-07 09:04:51.000000000 -0400
22122 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22123 {
22124 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22125 // VC layer stats
22126 - atomic_inc(&vcc->stats->rx);
22127 + atomic_inc_unchecked(&vcc->stats->rx);
22128 __net_timestamp(skb);
22129 // end of our responsability
22130 vcc->push (vcc, skb);
22131 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22132 dev->tx_iovec = NULL;
22133
22134 // VC layer stats
22135 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22136 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22137
22138 // free the skb
22139 hrz_kfree_skb (skb);
22140 diff -urNp linux-2.6.34.1/drivers/atm/idt77252.c linux-2.6.34.1/drivers/atm/idt77252.c
22141 --- linux-2.6.34.1/drivers/atm/idt77252.c 2010-07-05 14:24:10.000000000 -0400
22142 +++ linux-2.6.34.1/drivers/atm/idt77252.c 2010-07-07 09:04:51.000000000 -0400
22143 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22144 else
22145 dev_kfree_skb(skb);
22146
22147 - atomic_inc(&vcc->stats->tx);
22148 + atomic_inc_unchecked(&vcc->stats->tx);
22149 }
22150
22151 atomic_dec(&scq->used);
22152 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22153 if ((sb = dev_alloc_skb(64)) == NULL) {
22154 printk("%s: Can't allocate buffers for aal0.\n",
22155 card->name);
22156 - atomic_add(i, &vcc->stats->rx_drop);
22157 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22158 break;
22159 }
22160 if (!atm_charge(vcc, sb->truesize)) {
22161 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22162 card->name);
22163 - atomic_add(i - 1, &vcc->stats->rx_drop);
22164 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22165 dev_kfree_skb(sb);
22166 break;
22167 }
22168 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22169 ATM_SKB(sb)->vcc = vcc;
22170 __net_timestamp(sb);
22171 vcc->push(vcc, sb);
22172 - atomic_inc(&vcc->stats->rx);
22173 + atomic_inc_unchecked(&vcc->stats->rx);
22174
22175 cell += ATM_CELL_PAYLOAD;
22176 }
22177 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22178 "(CDC: %08x)\n",
22179 card->name, len, rpp->len, readl(SAR_REG_CDC));
22180 recycle_rx_pool_skb(card, rpp);
22181 - atomic_inc(&vcc->stats->rx_err);
22182 + atomic_inc_unchecked(&vcc->stats->rx_err);
22183 return;
22184 }
22185 if (stat & SAR_RSQE_CRC) {
22186 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22187 recycle_rx_pool_skb(card, rpp);
22188 - atomic_inc(&vcc->stats->rx_err);
22189 + atomic_inc_unchecked(&vcc->stats->rx_err);
22190 return;
22191 }
22192 if (skb_queue_len(&rpp->queue) > 1) {
22193 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22194 RXPRINTK("%s: Can't alloc RX skb.\n",
22195 card->name);
22196 recycle_rx_pool_skb(card, rpp);
22197 - atomic_inc(&vcc->stats->rx_err);
22198 + atomic_inc_unchecked(&vcc->stats->rx_err);
22199 return;
22200 }
22201 if (!atm_charge(vcc, skb->truesize)) {
22202 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22203 __net_timestamp(skb);
22204
22205 vcc->push(vcc, skb);
22206 - atomic_inc(&vcc->stats->rx);
22207 + atomic_inc_unchecked(&vcc->stats->rx);
22208
22209 return;
22210 }
22211 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22212 __net_timestamp(skb);
22213
22214 vcc->push(vcc, skb);
22215 - atomic_inc(&vcc->stats->rx);
22216 + atomic_inc_unchecked(&vcc->stats->rx);
22217
22218 if (skb->truesize > SAR_FB_SIZE_3)
22219 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22220 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22221 if (vcc->qos.aal != ATM_AAL0) {
22222 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22223 card->name, vpi, vci);
22224 - atomic_inc(&vcc->stats->rx_drop);
22225 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22226 goto drop;
22227 }
22228
22229 if ((sb = dev_alloc_skb(64)) == NULL) {
22230 printk("%s: Can't allocate buffers for AAL0.\n",
22231 card->name);
22232 - atomic_inc(&vcc->stats->rx_err);
22233 + atomic_inc_unchecked(&vcc->stats->rx_err);
22234 goto drop;
22235 }
22236
22237 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22238 ATM_SKB(sb)->vcc = vcc;
22239 __net_timestamp(sb);
22240 vcc->push(vcc, sb);
22241 - atomic_inc(&vcc->stats->rx);
22242 + atomic_inc_unchecked(&vcc->stats->rx);
22243
22244 drop:
22245 skb_pull(queue, 64);
22246 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22247
22248 if (vc == NULL) {
22249 printk("%s: NULL connection in send().\n", card->name);
22250 - atomic_inc(&vcc->stats->tx_err);
22251 + atomic_inc_unchecked(&vcc->stats->tx_err);
22252 dev_kfree_skb(skb);
22253 return -EINVAL;
22254 }
22255 if (!test_bit(VCF_TX, &vc->flags)) {
22256 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22257 - atomic_inc(&vcc->stats->tx_err);
22258 + atomic_inc_unchecked(&vcc->stats->tx_err);
22259 dev_kfree_skb(skb);
22260 return -EINVAL;
22261 }
22262 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22263 break;
22264 default:
22265 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22266 - atomic_inc(&vcc->stats->tx_err);
22267 + atomic_inc_unchecked(&vcc->stats->tx_err);
22268 dev_kfree_skb(skb);
22269 return -EINVAL;
22270 }
22271
22272 if (skb_shinfo(skb)->nr_frags != 0) {
22273 printk("%s: No scatter-gather yet.\n", card->name);
22274 - atomic_inc(&vcc->stats->tx_err);
22275 + atomic_inc_unchecked(&vcc->stats->tx_err);
22276 dev_kfree_skb(skb);
22277 return -EINVAL;
22278 }
22279 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22280
22281 err = queue_skb(card, vc, skb, oam);
22282 if (err) {
22283 - atomic_inc(&vcc->stats->tx_err);
22284 + atomic_inc_unchecked(&vcc->stats->tx_err);
22285 dev_kfree_skb(skb);
22286 return err;
22287 }
22288 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22289 skb = dev_alloc_skb(64);
22290 if (!skb) {
22291 printk("%s: Out of memory in send_oam().\n", card->name);
22292 - atomic_inc(&vcc->stats->tx_err);
22293 + atomic_inc_unchecked(&vcc->stats->tx_err);
22294 return -ENOMEM;
22295 }
22296 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22297 diff -urNp linux-2.6.34.1/drivers/atm/iphase.c linux-2.6.34.1/drivers/atm/iphase.c
22298 --- linux-2.6.34.1/drivers/atm/iphase.c 2010-07-05 14:24:10.000000000 -0400
22299 +++ linux-2.6.34.1/drivers/atm/iphase.c 2010-07-07 09:04:51.000000000 -0400
22300 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22301 status = (u_short) (buf_desc_ptr->desc_mode);
22302 if (status & (RX_CER | RX_PTE | RX_OFL))
22303 {
22304 - atomic_inc(&vcc->stats->rx_err);
22305 + atomic_inc_unchecked(&vcc->stats->rx_err);
22306 IF_ERR(printk("IA: bad packet, dropping it");)
22307 if (status & RX_CER) {
22308 IF_ERR(printk(" cause: packet CRC error\n");)
22309 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22310 len = dma_addr - buf_addr;
22311 if (len > iadev->rx_buf_sz) {
22312 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22313 - atomic_inc(&vcc->stats->rx_err);
22314 + atomic_inc_unchecked(&vcc->stats->rx_err);
22315 goto out_free_desc;
22316 }
22317
22318 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22319 ia_vcc = INPH_IA_VCC(vcc);
22320 if (ia_vcc == NULL)
22321 {
22322 - atomic_inc(&vcc->stats->rx_err);
22323 + atomic_inc_unchecked(&vcc->stats->rx_err);
22324 dev_kfree_skb_any(skb);
22325 atm_return(vcc, atm_guess_pdu2truesize(len));
22326 goto INCR_DLE;
22327 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22328 if ((length > iadev->rx_buf_sz) || (length >
22329 (skb->len - sizeof(struct cpcs_trailer))))
22330 {
22331 - atomic_inc(&vcc->stats->rx_err);
22332 + atomic_inc_unchecked(&vcc->stats->rx_err);
22333 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22334 length, skb->len);)
22335 dev_kfree_skb_any(skb);
22336 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22337
22338 IF_RX(printk("rx_dle_intr: skb push");)
22339 vcc->push(vcc,skb);
22340 - atomic_inc(&vcc->stats->rx);
22341 + atomic_inc_unchecked(&vcc->stats->rx);
22342 iadev->rx_pkt_cnt++;
22343 }
22344 INCR_DLE:
22345 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22346 {
22347 struct k_sonet_stats *stats;
22348 stats = &PRIV(_ia_dev[board])->sonet_stats;
22349 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22350 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22351 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22352 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22353 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22354 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22355 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22356 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22357 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22358 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22359 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22360 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22361 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22362 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22363 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22364 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22365 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22366 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22367 }
22368 ia_cmds.status = 0;
22369 break;
22370 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22371 if ((desc == 0) || (desc > iadev->num_tx_desc))
22372 {
22373 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22374 - atomic_inc(&vcc->stats->tx);
22375 + atomic_inc_unchecked(&vcc->stats->tx);
22376 if (vcc->pop)
22377 vcc->pop(vcc, skb);
22378 else
22379 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22380 ATM_DESC(skb) = vcc->vci;
22381 skb_queue_tail(&iadev->tx_dma_q, skb);
22382
22383 - atomic_inc(&vcc->stats->tx);
22384 + atomic_inc_unchecked(&vcc->stats->tx);
22385 iadev->tx_pkt_cnt++;
22386 /* Increment transaction counter */
22387 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22388
22389 #if 0
22390 /* add flow control logic */
22391 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22392 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22393 if (iavcc->vc_desc_cnt > 10) {
22394 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22395 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22396 diff -urNp linux-2.6.34.1/drivers/atm/lanai.c linux-2.6.34.1/drivers/atm/lanai.c
22397 --- linux-2.6.34.1/drivers/atm/lanai.c 2010-07-05 14:24:10.000000000 -0400
22398 +++ linux-2.6.34.1/drivers/atm/lanai.c 2010-07-07 09:04:51.000000000 -0400
22399 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22400 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22401 lanai_endtx(lanai, lvcc);
22402 lanai_free_skb(lvcc->tx.atmvcc, skb);
22403 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22404 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22405 }
22406
22407 /* Try to fill the buffer - don't call unless there is backlog */
22408 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22409 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22410 __net_timestamp(skb);
22411 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22412 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22413 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22414 out:
22415 lvcc->rx.buf.ptr = end;
22416 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22417 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22418 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22419 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22420 lanai->stats.service_rxnotaal5++;
22421 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22422 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22423 return 0;
22424 }
22425 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22426 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22427 int bytes;
22428 read_unlock(&vcc_sklist_lock);
22429 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22430 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22431 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22432 lvcc->stats.x.aal5.service_trash++;
22433 bytes = (SERVICE_GET_END(s) * 16) -
22434 (((unsigned long) lvcc->rx.buf.ptr) -
22435 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22436 }
22437 if (s & SERVICE_STREAM) {
22438 read_unlock(&vcc_sklist_lock);
22439 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22440 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22441 lvcc->stats.x.aal5.service_stream++;
22442 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22443 "PDU on VCI %d!\n", lanai->number, vci);
22444 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22445 return 0;
22446 }
22447 DPRINTK("got rx crc error on vci %d\n", vci);
22448 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22449 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22450 lvcc->stats.x.aal5.service_rxcrc++;
22451 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22452 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22453 diff -urNp linux-2.6.34.1/drivers/atm/nicstar.c linux-2.6.34.1/drivers/atm/nicstar.c
22454 --- linux-2.6.34.1/drivers/atm/nicstar.c 2010-07-05 14:24:10.000000000 -0400
22455 +++ linux-2.6.34.1/drivers/atm/nicstar.c 2010-07-07 09:04:51.000000000 -0400
22456 @@ -1722,7 +1722,7 @@ static int ns_send(struct atm_vcc *vcc,
22457 if ((vc = (vc_map *) vcc->dev_data) == NULL)
22458 {
22459 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
22460 - atomic_inc(&vcc->stats->tx_err);
22461 + atomic_inc_unchecked(&vcc->stats->tx_err);
22462 dev_kfree_skb_any(skb);
22463 return -EINVAL;
22464 }
22465 @@ -1730,7 +1730,7 @@ static int ns_send(struct atm_vcc *vcc,
22466 if (!vc->tx)
22467 {
22468 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
22469 - atomic_inc(&vcc->stats->tx_err);
22470 + atomic_inc_unchecked(&vcc->stats->tx_err);
22471 dev_kfree_skb_any(skb);
22472 return -EINVAL;
22473 }
22474 @@ -1738,7 +1738,7 @@ static int ns_send(struct atm_vcc *vcc,
22475 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
22476 {
22477 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
22478 - atomic_inc(&vcc->stats->tx_err);
22479 + atomic_inc_unchecked(&vcc->stats->tx_err);
22480 dev_kfree_skb_any(skb);
22481 return -EINVAL;
22482 }
22483 @@ -1746,7 +1746,7 @@ static int ns_send(struct atm_vcc *vcc,
22484 if (skb_shinfo(skb)->nr_frags != 0)
22485 {
22486 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22487 - atomic_inc(&vcc->stats->tx_err);
22488 + atomic_inc_unchecked(&vcc->stats->tx_err);
22489 dev_kfree_skb_any(skb);
22490 return -EINVAL;
22491 }
22492 @@ -1791,11 +1791,11 @@ static int ns_send(struct atm_vcc *vcc,
22493
22494 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
22495 {
22496 - atomic_inc(&vcc->stats->tx_err);
22497 + atomic_inc_unchecked(&vcc->stats->tx_err);
22498 dev_kfree_skb_any(skb);
22499 return -EIO;
22500 }
22501 - atomic_inc(&vcc->stats->tx);
22502 + atomic_inc_unchecked(&vcc->stats->tx);
22503
22504 return 0;
22505 }
22506 @@ -2110,14 +2110,14 @@ static void dequeue_rx(ns_dev *card, ns_
22507 {
22508 printk("nicstar%d: Can't allocate buffers for aal0.\n",
22509 card->index);
22510 - atomic_add(i,&vcc->stats->rx_drop);
22511 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
22512 break;
22513 }
22514 if (!atm_charge(vcc, sb->truesize))
22515 {
22516 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
22517 card->index);
22518 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
22519 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
22520 dev_kfree_skb_any(sb);
22521 break;
22522 }
22523 @@ -2132,7 +2132,7 @@ static void dequeue_rx(ns_dev *card, ns_
22524 ATM_SKB(sb)->vcc = vcc;
22525 __net_timestamp(sb);
22526 vcc->push(vcc, sb);
22527 - atomic_inc(&vcc->stats->rx);
22528 + atomic_inc_unchecked(&vcc->stats->rx);
22529 cell += ATM_CELL_PAYLOAD;
22530 }
22531
22532 @@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev *card, ns_
22533 if (iovb == NULL)
22534 {
22535 printk("nicstar%d: Out of iovec buffers.\n", card->index);
22536 - atomic_inc(&vcc->stats->rx_drop);
22537 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22538 recycle_rx_buf(card, skb);
22539 return;
22540 }
22541 @@ -2181,7 +2181,7 @@ static void dequeue_rx(ns_dev *card, ns_
22542 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
22543 {
22544 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22545 - atomic_inc(&vcc->stats->rx_err);
22546 + atomic_inc_unchecked(&vcc->stats->rx_err);
22547 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
22548 NS_SKB(iovb)->iovcnt = 0;
22549 iovb->len = 0;
22550 @@ -2201,7 +2201,7 @@ static void dequeue_rx(ns_dev *card, ns_
22551 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
22552 card->index);
22553 which_list(card, skb);
22554 - atomic_inc(&vcc->stats->rx_err);
22555 + atomic_inc_unchecked(&vcc->stats->rx_err);
22556 recycle_rx_buf(card, skb);
22557 vc->rx_iov = NULL;
22558 recycle_iov_buf(card, iovb);
22559 @@ -2215,7 +2215,7 @@ static void dequeue_rx(ns_dev *card, ns_
22560 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
22561 card->index);
22562 which_list(card, skb);
22563 - atomic_inc(&vcc->stats->rx_err);
22564 + atomic_inc_unchecked(&vcc->stats->rx_err);
22565 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
22566 NS_SKB(iovb)->iovcnt);
22567 vc->rx_iov = NULL;
22568 @@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev *card, ns_
22569 printk(" - PDU size mismatch.\n");
22570 else
22571 printk(".\n");
22572 - atomic_inc(&vcc->stats->rx_err);
22573 + atomic_inc_unchecked(&vcc->stats->rx_err);
22574 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
22575 NS_SKB(iovb)->iovcnt);
22576 vc->rx_iov = NULL;
22577 @@ -2255,7 +2255,7 @@ static void dequeue_rx(ns_dev *card, ns_
22578 if (!atm_charge(vcc, skb->truesize))
22579 {
22580 push_rxbufs(card, skb);
22581 - atomic_inc(&vcc->stats->rx_drop);
22582 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22583 }
22584 else
22585 {
22586 @@ -2267,7 +2267,7 @@ static void dequeue_rx(ns_dev *card, ns_
22587 ATM_SKB(skb)->vcc = vcc;
22588 __net_timestamp(skb);
22589 vcc->push(vcc, skb);
22590 - atomic_inc(&vcc->stats->rx);
22591 + atomic_inc_unchecked(&vcc->stats->rx);
22592 }
22593 }
22594 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
22595 @@ -2282,7 +2282,7 @@ static void dequeue_rx(ns_dev *card, ns_
22596 if (!atm_charge(vcc, sb->truesize))
22597 {
22598 push_rxbufs(card, sb);
22599 - atomic_inc(&vcc->stats->rx_drop);
22600 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22601 }
22602 else
22603 {
22604 @@ -2294,7 +2294,7 @@ static void dequeue_rx(ns_dev *card, ns_
22605 ATM_SKB(sb)->vcc = vcc;
22606 __net_timestamp(sb);
22607 vcc->push(vcc, sb);
22608 - atomic_inc(&vcc->stats->rx);
22609 + atomic_inc_unchecked(&vcc->stats->rx);
22610 }
22611
22612 push_rxbufs(card, skb);
22613 @@ -2305,7 +2305,7 @@ static void dequeue_rx(ns_dev *card, ns_
22614 if (!atm_charge(vcc, skb->truesize))
22615 {
22616 push_rxbufs(card, skb);
22617 - atomic_inc(&vcc->stats->rx_drop);
22618 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22619 }
22620 else
22621 {
22622 @@ -2319,7 +2319,7 @@ static void dequeue_rx(ns_dev *card, ns_
22623 ATM_SKB(skb)->vcc = vcc;
22624 __net_timestamp(skb);
22625 vcc->push(vcc, skb);
22626 - atomic_inc(&vcc->stats->rx);
22627 + atomic_inc_unchecked(&vcc->stats->rx);
22628 }
22629
22630 push_rxbufs(card, sb);
22631 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev *card, ns_
22632 if (hb == NULL)
22633 {
22634 printk("nicstar%d: Out of huge buffers.\n", card->index);
22635 - atomic_inc(&vcc->stats->rx_drop);
22636 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22637 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
22638 NS_SKB(iovb)->iovcnt);
22639 vc->rx_iov = NULL;
22640 @@ -2392,7 +2392,7 @@ static void dequeue_rx(ns_dev *card, ns_
22641 }
22642 else
22643 dev_kfree_skb_any(hb);
22644 - atomic_inc(&vcc->stats->rx_drop);
22645 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22646 }
22647 else
22648 {
22649 @@ -2426,7 +2426,7 @@ static void dequeue_rx(ns_dev *card, ns_
22650 #endif /* NS_USE_DESTRUCTORS */
22651 __net_timestamp(hb);
22652 vcc->push(vcc, hb);
22653 - atomic_inc(&vcc->stats->rx);
22654 + atomic_inc_unchecked(&vcc->stats->rx);
22655 }
22656 }
22657
22658 diff -urNp linux-2.6.34.1/drivers/atm/solos-pci.c linux-2.6.34.1/drivers/atm/solos-pci.c
22659 --- linux-2.6.34.1/drivers/atm/solos-pci.c 2010-07-05 14:24:10.000000000 -0400
22660 +++ linux-2.6.34.1/drivers/atm/solos-pci.c 2010-07-07 09:04:51.000000000 -0400
22661 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
22662 }
22663 atm_charge(vcc, skb->truesize);
22664 vcc->push(vcc, skb);
22665 - atomic_inc(&vcc->stats->rx);
22666 + atomic_inc_unchecked(&vcc->stats->rx);
22667 break;
22668
22669 case PKT_STATUS:
22670 @@ -1018,7 +1018,7 @@ static uint32_t fpga_tx(struct solos_car
22671 vcc = SKB_CB(oldskb)->vcc;
22672
22673 if (vcc) {
22674 - atomic_inc(&vcc->stats->tx);
22675 + atomic_inc_unchecked(&vcc->stats->tx);
22676 solos_pop(vcc, oldskb);
22677 } else
22678 dev_kfree_skb_irq(oldskb);
22679 diff -urNp linux-2.6.34.1/drivers/atm/suni.c linux-2.6.34.1/drivers/atm/suni.c
22680 --- linux-2.6.34.1/drivers/atm/suni.c 2010-07-05 14:24:10.000000000 -0400
22681 +++ linux-2.6.34.1/drivers/atm/suni.c 2010-07-07 09:04:51.000000000 -0400
22682 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22683
22684
22685 #define ADD_LIMITED(s,v) \
22686 - atomic_add((v),&stats->s); \
22687 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22688 + atomic_add_unchecked((v),&stats->s); \
22689 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22690
22691
22692 static void suni_hz(unsigned long from_timer)
22693 diff -urNp linux-2.6.34.1/drivers/atm/uPD98402.c linux-2.6.34.1/drivers/atm/uPD98402.c
22694 --- linux-2.6.34.1/drivers/atm/uPD98402.c 2010-07-05 14:24:10.000000000 -0400
22695 +++ linux-2.6.34.1/drivers/atm/uPD98402.c 2010-07-07 09:04:51.000000000 -0400
22696 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22697 struct sonet_stats tmp;
22698 int error = 0;
22699
22700 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22701 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22702 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22703 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22704 if (zero && !error) {
22705 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22706
22707
22708 #define ADD_LIMITED(s,v) \
22709 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22710 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22711 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22712 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22713 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22714 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22715
22716
22717 static void stat_event(struct atm_dev *dev)
22718 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22719 if (reason & uPD98402_INT_PFM) stat_event(dev);
22720 if (reason & uPD98402_INT_PCO) {
22721 (void) GET(PCOCR); /* clear interrupt cause */
22722 - atomic_add(GET(HECCT),
22723 + atomic_add_unchecked(GET(HECCT),
22724 &PRIV(dev)->sonet_stats.uncorr_hcs);
22725 }
22726 if ((reason & uPD98402_INT_RFO) &&
22727 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22728 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22729 uPD98402_INT_LOS),PIMR); /* enable them */
22730 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22731 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22732 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22733 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22734 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22735 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22736 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22737 return 0;
22738 }
22739
22740 diff -urNp linux-2.6.34.1/drivers/atm/zatm.c linux-2.6.34.1/drivers/atm/zatm.c
22741 --- linux-2.6.34.1/drivers/atm/zatm.c 2010-07-05 14:24:10.000000000 -0400
22742 +++ linux-2.6.34.1/drivers/atm/zatm.c 2010-07-07 09:04:51.000000000 -0400
22743 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22744 }
22745 if (!size) {
22746 dev_kfree_skb_irq(skb);
22747 - if (vcc) atomic_inc(&vcc->stats->rx_err);
22748 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
22749 continue;
22750 }
22751 if (!atm_charge(vcc,skb->truesize)) {
22752 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22753 skb->len = size;
22754 ATM_SKB(skb)->vcc = vcc;
22755 vcc->push(vcc,skb);
22756 - atomic_inc(&vcc->stats->rx);
22757 + atomic_inc_unchecked(&vcc->stats->rx);
22758 }
22759 zout(pos & 0xffff,MTA(mbx));
22760 #if 0 /* probably a stupid idea */
22761 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
22762 skb_queue_head(&zatm_vcc->backlog,skb);
22763 break;
22764 }
22765 - atomic_inc(&vcc->stats->tx);
22766 + atomic_inc_unchecked(&vcc->stats->tx);
22767 wake_up(&zatm_vcc->tx_wait);
22768 }
22769
22770 diff -urNp linux-2.6.34.1/drivers/char/agp/frontend.c linux-2.6.34.1/drivers/char/agp/frontend.c
22771 --- linux-2.6.34.1/drivers/char/agp/frontend.c 2010-07-05 14:24:10.000000000 -0400
22772 +++ linux-2.6.34.1/drivers/char/agp/frontend.c 2010-07-07 09:04:51.000000000 -0400
22773 @@ -818,7 +818,7 @@ static int agpioc_reserve_wrap(struct ag
22774 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
22775 return -EFAULT;
22776
22777 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
22778 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
22779 return -EFAULT;
22780
22781 client = agp_find_client_by_pid(reserve.pid);
22782 diff -urNp linux-2.6.34.1/drivers/char/agp/intel-agp.c linux-2.6.34.1/drivers/char/agp/intel-agp.c
22783 --- linux-2.6.34.1/drivers/char/agp/intel-agp.c 2010-07-05 14:24:10.000000000 -0400
22784 +++ linux-2.6.34.1/drivers/char/agp/intel-agp.c 2010-07-07 09:04:51.000000000 -0400
22785 @@ -2697,7 +2697,7 @@ static struct pci_device_id agp_intel_pc
22786 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
22787 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
22788 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
22789 - { }
22790 + { 0, 0, 0, 0, 0, 0, 0 }
22791 };
22792
22793 MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
22794 diff -urNp linux-2.6.34.1/drivers/char/hpet.c linux-2.6.34.1/drivers/char/hpet.c
22795 --- linux-2.6.34.1/drivers/char/hpet.c 2010-07-05 14:24:10.000000000 -0400
22796 +++ linux-2.6.34.1/drivers/char/hpet.c 2010-07-07 09:04:51.000000000 -0400
22797 @@ -429,7 +429,7 @@ static int hpet_release(struct inode *in
22798 return 0;
22799 }
22800
22801 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
22802 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
22803
22804 static int
22805 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
22806 @@ -549,7 +549,7 @@ static inline unsigned long hpet_time_di
22807 }
22808
22809 static int
22810 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
22811 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
22812 {
22813 struct hpet_timer __iomem *timer;
22814 struct hpet __iomem *hpet;
22815 @@ -994,7 +994,7 @@ static struct acpi_driver hpet_acpi_driv
22816 },
22817 };
22818
22819 -static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
22820 +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL };
22821
22822 static int __init hpet_init(void)
22823 {
22824 diff -urNp linux-2.6.34.1/drivers/char/hvc_console.h linux-2.6.34.1/drivers/char/hvc_console.h
22825 --- linux-2.6.34.1/drivers/char/hvc_console.h 2010-07-05 14:24:10.000000000 -0400
22826 +++ linux-2.6.34.1/drivers/char/hvc_console.h 2010-07-07 09:04:51.000000000 -0400
22827 @@ -82,6 +82,7 @@ extern int hvc_instantiate(uint32_t vter
22828 /* register a vterm for hvc tty operation (module_init or hotplug add) */
22829 extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
22830 const struct hv_ops *ops, int outbuf_size);
22831 +
22832 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
22833 extern int hvc_remove(struct hvc_struct *hp);
22834
22835 diff -urNp linux-2.6.34.1/drivers/char/hvcs.c linux-2.6.34.1/drivers/char/hvcs.c
22836 --- linux-2.6.34.1/drivers/char/hvcs.c 2010-07-05 14:24:10.000000000 -0400
22837 +++ linux-2.6.34.1/drivers/char/hvcs.c 2010-07-07 09:04:51.000000000 -0400
22838 @@ -270,7 +270,7 @@ struct hvcs_struct {
22839 unsigned int index;
22840
22841 struct tty_struct *tty;
22842 - int open_count;
22843 + atomic_t open_count;
22844
22845 /*
22846 * Used to tell the driver kernel_thread what operations need to take
22847 @@ -420,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
22848
22849 spin_lock_irqsave(&hvcsd->lock, flags);
22850
22851 - if (hvcsd->open_count > 0) {
22852 + if (atomic_read(&hvcsd->open_count) > 0) {
22853 spin_unlock_irqrestore(&hvcsd->lock, flags);
22854 printk(KERN_INFO "HVCS: vterm state unchanged. "
22855 "The hvcs device node is still in use.\n");
22856 @@ -1136,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
22857 if ((retval = hvcs_partner_connect(hvcsd)))
22858 goto error_release;
22859
22860 - hvcsd->open_count = 1;
22861 + atomic_set(&hvcsd->open_count, 1);
22862 hvcsd->tty = tty;
22863 tty->driver_data = hvcsd;
22864
22865 @@ -1170,7 +1170,7 @@ fast_open:
22866
22867 spin_lock_irqsave(&hvcsd->lock, flags);
22868 kref_get(&hvcsd->kref);
22869 - hvcsd->open_count++;
22870 + atomic_inc(&hvcsd->open_count);
22871 hvcsd->todo_mask |= HVCS_SCHED_READ;
22872 spin_unlock_irqrestore(&hvcsd->lock, flags);
22873
22874 @@ -1214,7 +1214,7 @@ static void hvcs_close(struct tty_struct
22875 hvcsd = tty->driver_data;
22876
22877 spin_lock_irqsave(&hvcsd->lock, flags);
22878 - if (--hvcsd->open_count == 0) {
22879 + if (atomic_dec_and_test(&hvcsd->open_count)) {
22880
22881 vio_disable_interrupts(hvcsd->vdev);
22882
22883 @@ -1240,10 +1240,10 @@ static void hvcs_close(struct tty_struct
22884 free_irq(irq, hvcsd);
22885 kref_put(&hvcsd->kref, destroy_hvcs_struct);
22886 return;
22887 - } else if (hvcsd->open_count < 0) {
22888 + } else if (atomic_read(&hvcsd->open_count) < 0) {
22889 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
22890 " is missmanaged.\n",
22891 - hvcsd->vdev->unit_address, hvcsd->open_count);
22892 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->open_count));
22893 }
22894
22895 spin_unlock_irqrestore(&hvcsd->lock, flags);
22896 @@ -1259,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
22897
22898 spin_lock_irqsave(&hvcsd->lock, flags);
22899 /* Preserve this so that we know how many kref refs to put */
22900 - temp_open_count = hvcsd->open_count;
22901 + temp_open_count = atomic_read(&hvcsd->open_count);
22902
22903 /*
22904 * Don't kref put inside the spinlock because the destruction
22905 @@ -1274,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
22906 hvcsd->tty->driver_data = NULL;
22907 hvcsd->tty = NULL;
22908
22909 - hvcsd->open_count = 0;
22910 + atomic_set(&hvcsd->open_count, 0);
22911
22912 /* This will drop any buffered data on the floor which is OK in a hangup
22913 * scenario. */
22914 @@ -1345,7 +1345,7 @@ static int hvcs_write(struct tty_struct
22915 * the middle of a write operation? This is a crummy place to do this
22916 * but we want to keep it all in the spinlock.
22917 */
22918 - if (hvcsd->open_count <= 0) {
22919 + if (atomic_read(&hvcsd->open_count) <= 0) {
22920 spin_unlock_irqrestore(&hvcsd->lock, flags);
22921 return -ENODEV;
22922 }
22923 @@ -1419,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
22924 {
22925 struct hvcs_struct *hvcsd = tty->driver_data;
22926
22927 - if (!hvcsd || hvcsd->open_count <= 0)
22928 + if (!hvcsd || atomic_read(&hvcsd->open_count) <= 0)
22929 return 0;
22930
22931 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
22932 diff -urNp linux-2.6.34.1/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.34.1/drivers/char/ipmi/ipmi_msghandler.c
22933 --- linux-2.6.34.1/drivers/char/ipmi/ipmi_msghandler.c 2010-07-05 14:24:10.000000000 -0400
22934 +++ linux-2.6.34.1/drivers/char/ipmi/ipmi_msghandler.c 2010-07-07 09:04:51.000000000 -0400
22935 @@ -414,7 +414,7 @@ struct ipmi_smi {
22936 struct proc_dir_entry *proc_dir;
22937 char proc_dir_name[10];
22938
22939 - atomic_t stats[IPMI_NUM_STATS];
22940 + atomic_unchecked_t stats[IPMI_NUM_STATS];
22941
22942 /*
22943 * run_to_completion duplicate of smb_info, smi_info
22944 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
22945
22946
22947 #define ipmi_inc_stat(intf, stat) \
22948 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
22949 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
22950 #define ipmi_get_stat(intf, stat) \
22951 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
22952 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
22953
22954 static int is_lan_addr(struct ipmi_addr *addr)
22955 {
22956 @@ -2818,7 +2818,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
22957 INIT_LIST_HEAD(&intf->cmd_rcvrs);
22958 init_waitqueue_head(&intf->waitq);
22959 for (i = 0; i < IPMI_NUM_STATS; i++)
22960 - atomic_set(&intf->stats[i], 0);
22961 + atomic_set_unchecked(&intf->stats[i], 0);
22962
22963 intf->proc_dir = NULL;
22964
22965 diff -urNp linux-2.6.34.1/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.34.1/drivers/char/ipmi/ipmi_si_intf.c
22966 --- linux-2.6.34.1/drivers/char/ipmi/ipmi_si_intf.c 2010-07-05 14:24:10.000000000 -0400
22967 +++ linux-2.6.34.1/drivers/char/ipmi/ipmi_si_intf.c 2010-07-07 09:04:51.000000000 -0400
22968 @@ -278,7 +278,7 @@ struct smi_info {
22969 unsigned char slave_addr;
22970
22971 /* Counters and things for the proc filesystem. */
22972 - atomic_t stats[SI_NUM_STATS];
22973 + atomic_unchecked_t stats[SI_NUM_STATS];
22974
22975 struct task_struct *thread;
22976
22977 @@ -286,9 +286,9 @@ struct smi_info {
22978 };
22979
22980 #define smi_inc_stat(smi, stat) \
22981 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
22982 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
22983 #define smi_get_stat(smi, stat) \
22984 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
22985 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
22986
22987 #define SI_MAX_PARMS 4
22988
22989 @@ -3088,7 +3088,7 @@ static int try_smi_init(struct smi_info
22990 atomic_set(&new_smi->req_events, 0);
22991 new_smi->run_to_completion = 0;
22992 for (i = 0; i < SI_NUM_STATS; i++)
22993 - atomic_set(&new_smi->stats[i], 0);
22994 + atomic_set_unchecked(&new_smi->stats[i], 0);
22995
22996 new_smi->interrupt_disabled = 0;
22997 atomic_set(&new_smi->stop_operation, 0);
22998 diff -urNp linux-2.6.34.1/drivers/char/keyboard.c linux-2.6.34.1/drivers/char/keyboard.c
22999 --- linux-2.6.34.1/drivers/char/keyboard.c 2010-07-05 14:24:10.000000000 -0400
23000 +++ linux-2.6.34.1/drivers/char/keyboard.c 2010-07-07 09:04:51.000000000 -0400
23001 @@ -652,6 +652,16 @@ static void k_spec(struct vc_data *vc, u
23002 kbd->kbdmode == VC_MEDIUMRAW) &&
23003 value != KVAL(K_SAK))
23004 return; /* SAK is allowed even in raw mode */
23005 +
23006 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
23007 + {
23008 + void *func = fn_handler[value];
23009 + if (func == fn_show_state || func == fn_show_ptregs ||
23010 + func == fn_show_mem)
23011 + return;
23012 + }
23013 +#endif
23014 +
23015 fn_handler[value](vc);
23016 }
23017
23018 @@ -1407,7 +1417,7 @@ static const struct input_device_id kbd_
23019 .evbit = { BIT_MASK(EV_SND) },
23020 },
23021
23022 - { }, /* Terminating entry */
23023 + { 0 }, /* Terminating entry */
23024 };
23025
23026 MODULE_DEVICE_TABLE(input, kbd_ids);
23027 diff -urNp linux-2.6.34.1/drivers/char/mem.c linux-2.6.34.1/drivers/char/mem.c
23028 --- linux-2.6.34.1/drivers/char/mem.c 2010-07-05 14:24:10.000000000 -0400
23029 +++ linux-2.6.34.1/drivers/char/mem.c 2010-07-07 09:04:51.000000000 -0400
23030 @@ -18,6 +18,7 @@
23031 #include <linux/raw.h>
23032 #include <linux/tty.h>
23033 #include <linux/capability.h>
23034 +#include <linux/security.h>
23035 #include <linux/ptrace.h>
23036 #include <linux/device.h>
23037 #include <linux/highmem.h>
23038 @@ -34,6 +35,10 @@
23039 # include <linux/efi.h>
23040 #endif
23041
23042 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23043 +extern struct file_operations grsec_fops;
23044 +#endif
23045 +
23046 static inline unsigned long size_inside_page(unsigned long start,
23047 unsigned long size)
23048 {
23049 @@ -161,6 +166,11 @@ static ssize_t write_mem(struct file *fi
23050 if (!valid_phys_addr_range(p, count))
23051 return -EFAULT;
23052
23053 +#ifdef CONFIG_GRKERNSEC_KMEM
23054 + gr_handle_mem_write();
23055 + return -EPERM;
23056 +#endif
23057 +
23058 written = 0;
23059
23060 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
23061 @@ -316,6 +326,11 @@ static int mmap_mem(struct file *file, s
23062 &vma->vm_page_prot))
23063 return -EINVAL;
23064
23065 +#ifdef CONFIG_GRKERNSEC_KMEM
23066 + if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma))
23067 + return -EPERM;
23068 +#endif
23069 +
23070 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
23071 size,
23072 vma->vm_page_prot);
23073 @@ -530,6 +545,11 @@ static ssize_t write_kmem(struct file *f
23074 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
23075 int err = 0;
23076
23077 +#ifdef CONFIG_GRKERNSEC_KMEM
23078 + gr_handle_kmem_write();
23079 + return -EPERM;
23080 +#endif
23081 +
23082 if (p < (unsigned long) high_memory) {
23083 unsigned long to_write = min_t(unsigned long, count,
23084 (unsigned long)high_memory - p);
23085 @@ -731,6 +751,16 @@ static loff_t memory_lseek(struct file *
23086
23087 static int open_port(struct inode * inode, struct file * filp)
23088 {
23089 +#ifdef CONFIG_GRKERNSEC_KMEM
23090 + gr_handle_open_port();
23091 + return -EPERM;
23092 +#endif
23093 +
23094 + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
23095 +}
23096 +
23097 +static int open_mem(struct inode * inode, struct file * filp)
23098 +{
23099 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
23100 }
23101
23102 @@ -738,7 +768,6 @@ static int open_port(struct inode * inod
23103 #define full_lseek null_lseek
23104 #define write_zero write_null
23105 #define read_full read_zero
23106 -#define open_mem open_port
23107 #define open_kmem open_mem
23108 #define open_oldmem open_mem
23109
23110 @@ -854,6 +883,9 @@ static const struct memdev {
23111 #ifdef CONFIG_CRASH_DUMP
23112 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23113 #endif
23114 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23115 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23116 +#endif
23117 };
23118
23119 static int memory_open(struct inode *inode, struct file *filp)
23120 diff -urNp linux-2.6.34.1/drivers/char/n_tty.c linux-2.6.34.1/drivers/char/n_tty.c
23121 --- linux-2.6.34.1/drivers/char/n_tty.c 2010-07-05 14:24:10.000000000 -0400
23122 +++ linux-2.6.34.1/drivers/char/n_tty.c 2010-07-07 09:04:51.000000000 -0400
23123 @@ -2105,6 +2105,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
23124 {
23125 *ops = tty_ldisc_N_TTY;
23126 ops->owner = NULL;
23127 - ops->refcount = ops->flags = 0;
23128 + atomic_set(&ops->refcount, 0);
23129 + ops->flags = 0;
23130 }
23131 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
23132 diff -urNp linux-2.6.34.1/drivers/char/nvram.c linux-2.6.34.1/drivers/char/nvram.c
23133 --- linux-2.6.34.1/drivers/char/nvram.c 2010-07-05 14:24:10.000000000 -0400
23134 +++ linux-2.6.34.1/drivers/char/nvram.c 2010-07-07 09:04:51.000000000 -0400
23135 @@ -245,7 +245,7 @@ static ssize_t nvram_read(struct file *f
23136
23137 spin_unlock_irq(&rtc_lock);
23138
23139 - if (copy_to_user(buf, contents, tmp - contents))
23140 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23141 return -EFAULT;
23142
23143 *ppos = i;
23144 @@ -430,7 +430,10 @@ static const struct file_operations nvra
23145 static struct miscdevice nvram_dev = {
23146 NVRAM_MINOR,
23147 "nvram",
23148 - &nvram_fops
23149 + &nvram_fops,
23150 + {NULL, NULL},
23151 + NULL,
23152 + NULL
23153 };
23154
23155 static int __init nvram_init(void)
23156 diff -urNp linux-2.6.34.1/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.34.1/drivers/char/pcmcia/ipwireless/tty.c
23157 --- linux-2.6.34.1/drivers/char/pcmcia/ipwireless/tty.c 2010-07-05 14:24:10.000000000 -0400
23158 +++ linux-2.6.34.1/drivers/char/pcmcia/ipwireless/tty.c 2010-07-07 09:04:51.000000000 -0400
23159 @@ -51,7 +51,7 @@ struct ipw_tty {
23160 int tty_type;
23161 struct ipw_network *network;
23162 struct tty_struct *linux_tty;
23163 - int open_count;
23164 + atomic_t open_count;
23165 unsigned int control_lines;
23166 struct mutex ipw_tty_mutex;
23167 int tx_bytes_queued;
23168 @@ -127,10 +127,10 @@ static int ipw_open(struct tty_struct *l
23169 mutex_unlock(&tty->ipw_tty_mutex);
23170 return -ENODEV;
23171 }
23172 - if (tty->open_count == 0)
23173 + if (atomic_read(&tty->open_count) == 0)
23174 tty->tx_bytes_queued = 0;
23175
23176 - tty->open_count++;
23177 + atomic_inc(&tty->open_count);
23178
23179 tty->linux_tty = linux_tty;
23180 linux_tty->driver_data = tty;
23181 @@ -146,9 +146,7 @@ static int ipw_open(struct tty_struct *l
23182
23183 static void do_ipw_close(struct ipw_tty *tty)
23184 {
23185 - tty->open_count--;
23186 -
23187 - if (tty->open_count == 0) {
23188 + if (atomic_dec_return(&tty->open_count) == 0) {
23189 struct tty_struct *linux_tty = tty->linux_tty;
23190
23191 if (linux_tty != NULL) {
23192 @@ -169,7 +167,7 @@ static void ipw_hangup(struct tty_struct
23193 return;
23194
23195 mutex_lock(&tty->ipw_tty_mutex);
23196 - if (tty->open_count == 0) {
23197 + if (atomic_read(&tty->open_count) == 0) {
23198 mutex_unlock(&tty->ipw_tty_mutex);
23199 return;
23200 }
23201 @@ -198,7 +196,7 @@ void ipwireless_tty_received(struct ipw_
23202 return;
23203 }
23204
23205 - if (!tty->open_count) {
23206 + if (!atomic_read(&tty->open_count)) {
23207 mutex_unlock(&tty->ipw_tty_mutex);
23208 return;
23209 }
23210 @@ -240,7 +238,7 @@ static int ipw_write(struct tty_struct *
23211 return -ENODEV;
23212
23213 mutex_lock(&tty->ipw_tty_mutex);
23214 - if (!tty->open_count) {
23215 + if (!atomic_read(&tty->open_count)) {
23216 mutex_unlock(&tty->ipw_tty_mutex);
23217 return -EINVAL;
23218 }
23219 @@ -280,7 +278,7 @@ static int ipw_write_room(struct tty_str
23220 if (!tty)
23221 return -ENODEV;
23222
23223 - if (!tty->open_count)
23224 + if (!atomic_read(&tty->open_count))
23225 return -EINVAL;
23226
23227 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
23228 @@ -322,7 +320,7 @@ static int ipw_chars_in_buffer(struct tt
23229 if (!tty)
23230 return 0;
23231
23232 - if (!tty->open_count)
23233 + if (!atomic_read(&tty->open_count))
23234 return 0;
23235
23236 return tty->tx_bytes_queued;
23237 @@ -403,7 +401,7 @@ static int ipw_tiocmget(struct tty_struc
23238 if (!tty)
23239 return -ENODEV;
23240
23241 - if (!tty->open_count)
23242 + if (!atomic_read(&tty->open_count))
23243 return -EINVAL;
23244
23245 return get_control_lines(tty);
23246 @@ -419,7 +417,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
23247 if (!tty)
23248 return -ENODEV;
23249
23250 - if (!tty->open_count)
23251 + if (!atomic_read(&tty->open_count))
23252 return -EINVAL;
23253
23254 return set_control_lines(tty, set, clear);
23255 @@ -433,7 +431,7 @@ static int ipw_ioctl(struct tty_struct *
23256 if (!tty)
23257 return -ENODEV;
23258
23259 - if (!tty->open_count)
23260 + if (!atomic_read(&tty->open_count))
23261 return -EINVAL;
23262
23263 /* FIXME: Exactly how is the tty object locked here .. */
23264 @@ -591,7 +589,7 @@ void ipwireless_tty_free(struct ipw_tty
23265 against a parallel ioctl etc */
23266 mutex_lock(&ttyj->ipw_tty_mutex);
23267 }
23268 - while (ttyj->open_count)
23269 + while (atomic_read(&ttyj->open_count))
23270 do_ipw_close(ttyj);
23271 ipwireless_disassociate_network_ttys(network,
23272 ttyj->channel_idx);
23273 diff -urNp linux-2.6.34.1/drivers/char/pty.c linux-2.6.34.1/drivers/char/pty.c
23274 --- linux-2.6.34.1/drivers/char/pty.c 2010-07-05 14:24:10.000000000 -0400
23275 +++ linux-2.6.34.1/drivers/char/pty.c 2010-07-07 09:04:51.000000000 -0400
23276 @@ -677,7 +677,18 @@ static int ptmx_open(struct inode *inode
23277 return ret;
23278 }
23279
23280 -static struct file_operations ptmx_fops;
23281 +static const struct file_operations ptmx_fops = {
23282 + .llseek = no_llseek,
23283 + .read = tty_read,
23284 + .write = tty_write,
23285 + .poll = tty_poll,
23286 + .unlocked_ioctl = tty_ioctl,
23287 + .compat_ioctl = tty_compat_ioctl,
23288 + .open = ptmx_open,
23289 + .release = tty_release,
23290 + .fasync = tty_fasync,
23291 +};
23292 +
23293
23294 static void __init unix98_pty_init(void)
23295 {
23296 @@ -731,9 +742,6 @@ static void __init unix98_pty_init(void)
23297 register_sysctl_table(pty_root_table);
23298
23299 /* Now create the /dev/ptmx special device */
23300 - tty_default_fops(&ptmx_fops);
23301 - ptmx_fops.open = ptmx_open;
23302 -
23303 cdev_init(&ptmx_cdev, &ptmx_fops);
23304 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
23305 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
23306 diff -urNp linux-2.6.34.1/drivers/char/random.c linux-2.6.34.1/drivers/char/random.c
23307 --- linux-2.6.34.1/drivers/char/random.c 2010-07-05 14:24:10.000000000 -0400
23308 +++ linux-2.6.34.1/drivers/char/random.c 2010-07-07 09:04:51.000000000 -0400
23309 @@ -254,8 +254,13 @@
23310 /*
23311 * Configuration information
23312 */
23313 +#ifdef CONFIG_GRKERNSEC_RANDNET
23314 +#define INPUT_POOL_WORDS 512
23315 +#define OUTPUT_POOL_WORDS 128
23316 +#else
23317 #define INPUT_POOL_WORDS 128
23318 #define OUTPUT_POOL_WORDS 32
23319 +#endif
23320 #define SEC_XFER_SIZE 512
23321
23322 /*
23323 @@ -292,10 +297,17 @@ static struct poolinfo {
23324 int poolwords;
23325 int tap1, tap2, tap3, tap4, tap5;
23326 } poolinfo_table[] = {
23327 +#ifdef CONFIG_GRKERNSEC_RANDNET
23328 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23329 + { 512, 411, 308, 208, 104, 1 },
23330 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23331 + { 128, 103, 76, 51, 25, 1 },
23332 +#else
23333 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23334 { 128, 103, 76, 51, 25, 1 },
23335 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23336 { 32, 26, 20, 14, 7, 1 },
23337 +#endif
23338 #if 0
23339 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23340 { 2048, 1638, 1231, 819, 411, 1 },
23341 @@ -903,7 +915,7 @@ static ssize_t extract_entropy_user(stru
23342
23343 extract_buf(r, tmp);
23344 i = min_t(int, nbytes, EXTRACT_SIZE);
23345 - if (copy_to_user(buf, tmp, i)) {
23346 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23347 ret = -EFAULT;
23348 break;
23349 }
23350 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23351 #include <linux/sysctl.h>
23352
23353 static int min_read_thresh = 8, min_write_thresh;
23354 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23355 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23356 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23357 static char sysctl_bootid[16];
23358
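
In the random.c hunk above, the extract path now refuses to copy more bytes to userspace than the on-stack tmp buffer holds; i is already clamped with min_t(), so the new test reads like a defensive invariant check. A userspace sketch of the same bounded-copy guard, with memcpy() standing in for copy_to_user() and an illustrative EXTRACT_SIZE:

#include <stdio.h>
#include <string.h>

#define EXTRACT_SIZE 10                     /* size of the on-stack scratch buffer */

int main(void)
{
        unsigned char tmp[EXTRACT_SIZE];
        unsigned char user_buf[64];
        size_t nbytes = sizeof(user_buf);   /* caller-chosen request */
        size_t i = nbytes < EXTRACT_SIZE ? nbytes : EXTRACT_SIZE;

        memset(tmp, 0xaa, sizeof(tmp));

        /* mirrors the added guard: never copy more than tmp actually holds */
        if (i > sizeof(tmp)) {
                puts("rejected oversized copy");
                return 1;
        }
        memcpy(user_buf, tmp, i);           /* memcpy stands in for copy_to_user() */
        printf("copied %zu bytes\n", i);
        return 0;
}
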
23359 diff -urNp linux-2.6.34.1/drivers/char/sonypi.c linux-2.6.34.1/drivers/char/sonypi.c
23360 --- linux-2.6.34.1/drivers/char/sonypi.c 2010-07-05 14:24:10.000000000 -0400
23361 +++ linux-2.6.34.1/drivers/char/sonypi.c 2010-07-07 09:04:51.000000000 -0400
23362 @@ -491,7 +491,7 @@ static struct sonypi_device {
23363 spinlock_t fifo_lock;
23364 wait_queue_head_t fifo_proc_list;
23365 struct fasync_struct *fifo_async;
23366 - int open_count;
23367 + atomic_t open_count;
23368 int model;
23369 struct input_dev *input_jog_dev;
23370 struct input_dev *input_key_dev;
23371 @@ -898,7 +898,7 @@ static int sonypi_misc_fasync(int fd, st
23372 static int sonypi_misc_release(struct inode *inode, struct file *file)
23373 {
23374 mutex_lock(&sonypi_device.lock);
23375 - sonypi_device.open_count--;
23376 + atomic_dec(&sonypi_device.open_count);
23377 mutex_unlock(&sonypi_device.lock);
23378 return 0;
23379 }
23380 @@ -907,9 +907,9 @@ static int sonypi_misc_open(struct inode
23381 {
23382 mutex_lock(&sonypi_device.lock);
23383 /* Flush input queue on first open */
23384 - if (!sonypi_device.open_count)
23385 + if (!atomic_read(&sonypi_device.open_count))
23386 kfifo_reset(&sonypi_device.fifo);
23387 - sonypi_device.open_count++;
23388 + atomic_inc(&sonypi_device.open_count);
23389 mutex_unlock(&sonypi_device.lock);
23390
23391 return 0;
23392 diff -urNp linux-2.6.34.1/drivers/char/tpm/tpm_bios.c linux-2.6.34.1/drivers/char/tpm/tpm_bios.c
23393 --- linux-2.6.34.1/drivers/char/tpm/tpm_bios.c 2010-07-05 14:24:10.000000000 -0400
23394 +++ linux-2.6.34.1/drivers/char/tpm/tpm_bios.c 2010-07-07 09:04:51.000000000 -0400
23395 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23396 event = addr;
23397
23398 if ((event->event_type == 0 && event->event_size == 0) ||
23399 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23400 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
23401 return NULL;
23402
23403 return addr;
23404 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
23405 return NULL;
23406
23407 if ((event->event_type == 0 && event->event_size == 0) ||
23408 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
23409 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
23410 return NULL;
23411
23412 (*pos)++;
23413 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
23414 int i;
23415
23416 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
23417 - seq_putc(m, data[i]);
23418 + if (!seq_putc(m, data[i]))
23419 + return -EFAULT;
23420
23421 return 0;
23422 }
23423 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
23424 log->bios_event_log_end = log->bios_event_log + len;
23425
23426 virt = acpi_os_map_memory(start, len);
23427 + if (!virt) {
23428 + kfree(log->bios_event_log);
23429 + log->bios_event_log = NULL;
23430 + return -EFAULT;
23431 + }
23432
23433 memcpy(log->bios_event_log, virt, len);
23434
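
The first two tpm_bios.c hunks above rewrite the event-log bounds check so a huge, attacker-influenced event_size can no longer wrap the unsigned sum addr + header + event_size back below limit; comparing event_size against the remaining room avoids the overflow (assuming, as the surrounding iteration appears to guarantee, that addr plus the header is still below limit). A small demonstration with illustrative values only, not real TPM log addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uintptr_t addr  = 0x1000;
        uintptr_t limit = 0x2000;
        size_t    hdr   = 32;                    /* plays the role of sizeof(struct tcpa_event) */
        size_t    event_size = SIZE_MAX - 16;    /* attacker-supplied, absurdly large */

        /* old form: the unsigned sum wraps around and slips under the limit */
        int old_accepts = (addr + hdr + event_size) < limit;

        /* new form: compare the size against the room that is actually left */
        int new_accepts = event_size < limit - addr - hdr;

        printf("old check accepts bogus size: %d\n", old_accepts);  /* prints 1 */
        printf("new check accepts bogus size: %d\n", new_accepts);  /* prints 0 */
        return 0;
}
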
23435 diff -urNp linux-2.6.34.1/drivers/char/tty_io.c linux-2.6.34.1/drivers/char/tty_io.c
23436 --- linux-2.6.34.1/drivers/char/tty_io.c 2010-07-05 14:24:10.000000000 -0400
23437 +++ linux-2.6.34.1/drivers/char/tty_io.c 2010-07-07 09:04:51.000000000 -0400
23438 @@ -136,20 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
23439 DEFINE_MUTEX(tty_mutex);
23440 EXPORT_SYMBOL(tty_mutex);
23441
23442 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
23443 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
23444 ssize_t redirected_tty_write(struct file *, const char __user *,
23445 size_t, loff_t *);
23446 -static unsigned int tty_poll(struct file *, poll_table *);
23447 static int tty_open(struct inode *, struct file *);
23448 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
23449 -#ifdef CONFIG_COMPAT
23450 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
23451 - unsigned long arg);
23452 -#else
23453 -#define tty_compat_ioctl NULL
23454 -#endif
23455 -static int tty_fasync(int fd, struct file *filp, int on);
23456 static void release_tty(struct tty_struct *tty, int idx);
23457 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
23458 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
23459 @@ -871,7 +861,7 @@ EXPORT_SYMBOL(start_tty);
23460 * read calls may be outstanding in parallel.
23461 */
23462
23463 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
23464 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
23465 loff_t *ppos)
23466 {
23467 int i;
23468 @@ -899,6 +889,8 @@ static ssize_t tty_read(struct file *fil
23469 return i;
23470 }
23471
23472 +EXPORT_SYMBOL(tty_read);
23473 +
23474 void tty_write_unlock(struct tty_struct *tty)
23475 {
23476 mutex_unlock(&tty->atomic_write_lock);
23477 @@ -1048,7 +1040,7 @@ void tty_write_message(struct tty_struct
23478 * write method will not be invoked in parallel for each device.
23479 */
23480
23481 -static ssize_t tty_write(struct file *file, const char __user *buf,
23482 +ssize_t tty_write(struct file *file, const char __user *buf,
23483 size_t count, loff_t *ppos)
23484 {
23485 struct tty_struct *tty;
23486 @@ -1075,6 +1067,8 @@ static ssize_t tty_write(struct file *fi
23487 return ret;
23488 }
23489
23490 +EXPORT_SYMBOL(tty_write);
23491 +
23492 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
23493 size_t count, loff_t *ppos)
23494 {
23495 @@ -1897,6 +1891,8 @@ got_driver:
23496
23497
23498
23499 +EXPORT_SYMBOL(tty_release);
23500 +
23501 /**
23502 * tty_poll - check tty status
23503 * @filp: file being polled
23504 @@ -1909,7 +1905,7 @@ got_driver:
23505 * may be re-entered freely by other callers.
23506 */
23507
23508 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
23509 +unsigned int tty_poll(struct file *filp, poll_table *wait)
23510 {
23511 struct tty_struct *tty;
23512 struct tty_ldisc *ld;
23513 @@ -1926,7 +1922,9 @@ static unsigned int tty_poll(struct file
23514 return ret;
23515 }
23516
23517 -static int tty_fasync(int fd, struct file *filp, int on)
23518 +EXPORT_SYMBOL(tty_poll);
23519 +
23520 +int tty_fasync(int fd, struct file *filp, int on)
23521 {
23522 struct tty_struct *tty;
23523 unsigned long flags;
23524 @@ -1970,6 +1968,8 @@ out:
23525 return retval;
23526 }
23527
23528 +EXPORT_SYMBOL(tty_fasync);
23529 +
23530 /**
23531 * tiocsti - fake input character
23532 * @tty: tty to fake input into
23533 @@ -2602,8 +2602,10 @@ long tty_ioctl(struct file *file, unsign
23534 return retval;
23535 }
23536
23537 +EXPORT_SYMBOL(tty_ioctl);
23538 +
23539 #ifdef CONFIG_COMPAT
23540 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
23541 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
23542 unsigned long arg)
23543 {
23544 struct inode *inode = file->f_dentry->d_inode;
23545 @@ -2627,6 +2629,9 @@ static long tty_compat_ioctl(struct file
23546
23547 return retval;
23548 }
23549 +
23550 +EXPORT_SYMBOL(tty_compat_ioctl);
23551 +
23552 #endif
23553
23554 /*
23555 @@ -3070,11 +3075,6 @@ struct tty_struct *get_current_tty(void)
23556 }
23557 EXPORT_SYMBOL_GPL(get_current_tty);
23558
23559 -void tty_default_fops(struct file_operations *fops)
23560 -{
23561 - *fops = tty_fops;
23562 -}
23563 -
23564 /*
23565 * Initialize the console device. This is called *early*, so
23566 * we can't necessarily depend on lots of kernel help here.
23567 diff -urNp linux-2.6.34.1/drivers/char/tty_ldisc.c linux-2.6.34.1/drivers/char/tty_ldisc.c
23568 --- linux-2.6.34.1/drivers/char/tty_ldisc.c 2010-07-05 14:24:10.000000000 -0400
23569 +++ linux-2.6.34.1/drivers/char/tty_ldisc.c 2010-07-07 09:04:51.000000000 -0400
23570 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *
23571 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
23572 struct tty_ldisc_ops *ldo = ld->ops;
23573
23574 - ldo->refcount--;
23575 + atomic_dec(&ldo->refcount);
23576 module_put(ldo->owner);
23577 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
23578
23579 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
23580 spin_lock_irqsave(&tty_ldisc_lock, flags);
23581 tty_ldiscs[disc] = new_ldisc;
23582 new_ldisc->num = disc;
23583 - new_ldisc->refcount = 0;
23584 + atomic_set(&new_ldisc->refcount, 0);
23585 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
23586
23587 return ret;
23588 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
23589 return -EINVAL;
23590
23591 spin_lock_irqsave(&tty_ldisc_lock, flags);
23592 - if (tty_ldiscs[disc]->refcount)
23593 + if (atomic_read(&tty_ldiscs[disc]->refcount))
23594 ret = -EBUSY;
23595 else
23596 tty_ldiscs[disc] = NULL;
23597 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
23598 if (ldops) {
23599 ret = ERR_PTR(-EAGAIN);
23600 if (try_module_get(ldops->owner)) {
23601 - ldops->refcount++;
23602 + atomic_inc(&ldops->refcount);
23603 ret = ldops;
23604 }
23605 }
23606 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
23607 unsigned long flags;
23608
23609 spin_lock_irqsave(&tty_ldisc_lock, flags);
23610 - ldops->refcount--;
23611 + atomic_dec(&ldops->refcount);
23612 module_put(ldops->owner);
23613 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
23614 }
23615 diff -urNp linux-2.6.34.1/drivers/char/vt_ioctl.c linux-2.6.34.1/drivers/char/vt_ioctl.c
23616 --- linux-2.6.34.1/drivers/char/vt_ioctl.c 2010-07-05 14:24:10.000000000 -0400
23617 +++ linux-2.6.34.1/drivers/char/vt_ioctl.c 2010-07-07 09:04:51.000000000 -0400
23618 @@ -226,6 +226,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
23619 case KDSKBENT:
23620 if (!perm)
23621 return -EPERM;
23622 +
23623 +#ifdef CONFIG_GRKERNSEC
23624 + if (!capable(CAP_SYS_TTY_CONFIG))
23625 + return -EPERM;
23626 +#endif
23627 +
23628 if (!i && v == K_NOSUCHMAP) {
23629 /* deallocate map */
23630 key_map = key_maps[s];
23631 @@ -366,6 +372,13 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
23632 goto reterr;
23633 }
23634
23635 +#ifdef CONFIG_GRKERNSEC
23636 + if (!capable(CAP_SYS_TTY_CONFIG)) {
23637 + ret = -EPERM;
23638 + goto reterr;
23639 + }
23640 +#endif
23641 +
23642 q = func_table[i];
23643 first_free = funcbufptr + (funcbufsize - funcbufleft);
23644 for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
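
The vt_ioctl.c hunks above add an extra CAP_SYS_TTY_CONFIG requirement around keymap and function-key updates, compiled in only under CONFIG_GRKERNSEC. A toy sketch of the same compile-time-optional privilege gate, with a made-up config symbol and a flag standing in for capable():

#include <stdio.h>

#define CONFIG_EXTRA_HARDENING              /* stands in for CONFIG_GRKERNSEC */

static int caller_has_tty_config_cap;       /* pretend result of capable(CAP_SYS_TTY_CONFIG) */

static int set_keymap_entry(void)
{
#ifdef CONFIG_EXTRA_HARDENING
        /* extra, compile-time-optional privilege check, as in the hunks above */
        if (!caller_has_tty_config_cap)
                return -1;                  /* the kernel returns -EPERM here */
#endif
        puts("keymap entry updated");
        return 0;
}

int main(void)
{
        printf("without capability: %d\n", set_keymap_entry());
        caller_has_tty_config_cap = 1;
        printf("with capability:    %d\n", set_keymap_entry());
        return 0;
}
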
23645 diff -urNp linux-2.6.34.1/drivers/cpuidle/sysfs.c linux-2.6.34.1/drivers/cpuidle/sysfs.c
23646 --- linux-2.6.34.1/drivers/cpuidle/sysfs.c 2010-07-05 14:24:10.000000000 -0400
23647 +++ linux-2.6.34.1/drivers/cpuidle/sysfs.c 2010-07-07 09:04:51.000000000 -0400
23648 @@ -299,7 +299,7 @@ static struct kobj_type ktype_state_cpui
23649 .release = cpuidle_state_sysfs_release,
23650 };
23651
23652 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
23653 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
23654 {
23655 kobject_put(&device->kobjs[i]->kobj);
23656 wait_for_completion(&device->kobjs[i]->kobj_unregister);
23657 diff -urNp linux-2.6.34.1/drivers/edac/edac_core.h linux-2.6.34.1/drivers/edac/edac_core.h
23658 --- linux-2.6.34.1/drivers/edac/edac_core.h 2010-07-05 14:24:10.000000000 -0400
23659 +++ linux-2.6.34.1/drivers/edac/edac_core.h 2010-07-07 09:04:51.000000000 -0400
23660 @@ -100,11 +100,11 @@ extern const char *edac_mem_types[];
23661
23662 #else /* !CONFIG_EDAC_DEBUG */
23663
23664 -#define debugf0( ... )
23665 -#define debugf1( ... )
23666 -#define debugf2( ... )
23667 -#define debugf3( ... )
23668 -#define debugf4( ... )
23669 +#define debugf0( ... ) do {} while (0)
23670 +#define debugf1( ... ) do {} while (0)
23671 +#define debugf2( ... ) do {} while (0)
23672 +#define debugf3( ... ) do {} while (0)
23673 +#define debugf4( ... ) do {} while (0)
23674
23675 #endif /* !CONFIG_EDAC_DEBUG */
23676
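
The edac_core.h hunk above gives the compiled-out debugfN() macros a do {} while (0) body instead of an empty expansion, so they behave like a single real statement in every build: the trailing semicolon stays mandatory and "if (cond) debugf0(...);" no longer leaves a bare empty statement behind. A minimal illustration:

#include <stdio.h>

/*
 * No-op variant of a debug macro.  With a completely empty expansion,
 * "if (verbose) debugf0(...);" would compile to "if (verbose) ;" and
 * trigger empty-body warnings; the do {} while (0) form keeps it a
 * proper single statement in both debug and non-debug builds.
 */
#define debugf0(...) do {} while (0)

int main(void)
{
        int verbose = 0;

        if (verbose)
                debugf0("would log level 0 output here\n");
        else
                puts("debug output compiled out");
        return 0;
}
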
23677 diff -urNp linux-2.6.34.1/drivers/firewire/core-cdev.c linux-2.6.34.1/drivers/firewire/core-cdev.c
23678 --- linux-2.6.34.1/drivers/firewire/core-cdev.c 2010-07-05 14:24:10.000000000 -0400
23679 +++ linux-2.6.34.1/drivers/firewire/core-cdev.c 2010-07-07 09:04:51.000000000 -0400
23680 @@ -1195,8 +1195,7 @@ static int init_iso_resource(struct clie
23681 int ret;
23682
23683 if ((request->channels == 0 && request->bandwidth == 0) ||
23684 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
23685 - request->bandwidth < 0)
23686 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
23687 return -EINVAL;
23688
23689 r = kmalloc(sizeof(*r), GFP_KERNEL);
23690 diff -urNp linux-2.6.34.1/drivers/firmware/dmi_scan.c linux-2.6.34.1/drivers/firmware/dmi_scan.c
23691 --- linux-2.6.34.1/drivers/firmware/dmi_scan.c 2010-07-05 14:24:10.000000000 -0400
23692 +++ linux-2.6.34.1/drivers/firmware/dmi_scan.c 2010-07-07 09:04:51.000000000 -0400
23693 @@ -387,11 +387,6 @@ void __init dmi_scan_machine(void)
23694 }
23695 }
23696 else {
23697 - /*
23698 - * no iounmap() for that ioremap(); it would be a no-op, but
23699 - * it's so early in setup that sucker gets confused into doing
23700 - * what it shouldn't if we actually call it.
23701 - */
23702 p = dmi_ioremap(0xF0000, 0x10000);
23703 if (p == NULL)
23704 goto error;
23705 diff -urNp linux-2.6.34.1/drivers/gpu/drm/drm_drv.c linux-2.6.34.1/drivers/gpu/drm/drm_drv.c
23706 --- linux-2.6.34.1/drivers/gpu/drm/drm_drv.c 2010-07-05 14:24:10.000000000 -0400
23707 +++ linux-2.6.34.1/drivers/gpu/drm/drm_drv.c 2010-07-07 09:04:51.000000000 -0400
23708 @@ -449,7 +449,7 @@ long drm_ioctl(struct file *filp,
23709
23710 dev = file_priv->minor->dev;
23711 atomic_inc(&dev->ioctl_count);
23712 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
23713 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
23714 ++file_priv->ioctl_count;
23715
23716 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
23717 diff -urNp linux-2.6.34.1/drivers/gpu/drm/drm_fops.c linux-2.6.34.1/drivers/gpu/drm/drm_fops.c
23718 --- linux-2.6.34.1/drivers/gpu/drm/drm_fops.c 2010-07-05 14:24:10.000000000 -0400
23719 +++ linux-2.6.34.1/drivers/gpu/drm/drm_fops.c 2010-07-07 09:04:51.000000000 -0400
23720 @@ -67,7 +67,7 @@ static int drm_setup(struct drm_device *
23721 }
23722
23723 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
23724 - atomic_set(&dev->counts[i], 0);
23725 + atomic_set_unchecked(&dev->counts[i], 0);
23726
23727 dev->sigdata.lock = NULL;
23728
23729 @@ -131,9 +131,9 @@ int drm_open(struct inode *inode, struct
23730
23731 retcode = drm_open_helper(inode, filp, dev);
23732 if (!retcode) {
23733 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
23734 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
23735 spin_lock(&dev->count_lock);
23736 - if (!dev->open_count++) {
23737 + if (atomic_inc_return(&dev->open_count) == 1) {
23738 spin_unlock(&dev->count_lock);
23739 retcode = drm_setup(dev);
23740 goto out;
23741 @@ -475,7 +475,7 @@ int drm_release(struct inode *inode, str
23742
23743 lock_kernel();
23744
23745 - DRM_DEBUG("open_count = %d\n", dev->open_count);
23746 + DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
23747
23748 if (dev->driver->preclose)
23749 dev->driver->preclose(dev, file_priv);
23750 @@ -487,7 +487,7 @@ int drm_release(struct inode *inode, str
23751 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
23752 task_pid_nr(current),
23753 (long)old_encode_dev(file_priv->minor->device),
23754 - dev->open_count);
23755 + atomic_read(&dev->open_count));
23756
23757 /* if the master has gone away we can't do anything with the lock */
23758 if (file_priv->minor->master)
23759 @@ -568,9 +568,9 @@ int drm_release(struct inode *inode, str
23760 * End inline drm_release
23761 */
23762
23763 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
23764 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
23765 spin_lock(&dev->count_lock);
23766 - if (!--dev->open_count) {
23767 + if (atomic_dec_and_test(&dev->open_count)) {
23768 if (atomic_read(&dev->ioctl_count)) {
23769 DRM_ERROR("Device busy: %d\n",
23770 atomic_read(&dev->ioctl_count));
23771 diff -urNp linux-2.6.34.1/drivers/gpu/drm/drm_ioctl.c linux-2.6.34.1/drivers/gpu/drm/drm_ioctl.c
23772 --- linux-2.6.34.1/drivers/gpu/drm/drm_ioctl.c 2010-07-05 14:24:10.000000000 -0400
23773 +++ linux-2.6.34.1/drivers/gpu/drm/drm_ioctl.c 2010-07-07 09:04:51.000000000 -0400
23774 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
23775 stats->data[i].value =
23776 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
23777 else
23778 - stats->data[i].value = atomic_read(&dev->counts[i]);
23779 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
23780 stats->data[i].type = dev->types[i];
23781 }
23782
23783 diff -urNp linux-2.6.34.1/drivers/gpu/drm/drm_lock.c linux-2.6.34.1/drivers/gpu/drm/drm_lock.c
23784 --- linux-2.6.34.1/drivers/gpu/drm/drm_lock.c 2010-07-05 14:24:10.000000000 -0400
23785 +++ linux-2.6.34.1/drivers/gpu/drm/drm_lock.c 2010-07-07 09:04:51.000000000 -0400
23786 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
23787 if (drm_lock_take(&master->lock, lock->context)) {
23788 master->lock.file_priv = file_priv;
23789 master->lock.lock_time = jiffies;
23790 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
23791 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
23792 break; /* Got lock */
23793 }
23794
23795 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
23796 return -EINVAL;
23797 }
23798
23799 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
23800 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
23801
23802 /* kernel_context_switch isn't used by any of the x86 drm
23803 * modules but is required by the Sparc driver.
23804 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i810/i810_dma.c linux-2.6.34.1/drivers/gpu/drm/i810/i810_dma.c
23805 --- linux-2.6.34.1/drivers/gpu/drm/i810/i810_dma.c 2010-07-05 14:24:10.000000000 -0400
23806 +++ linux-2.6.34.1/drivers/gpu/drm/i810/i810_dma.c 2010-07-07 09:04:51.000000000 -0400
23807 @@ -953,8 +953,8 @@ static int i810_dma_vertex(struct drm_de
23808 dma->buflist[vertex->idx],
23809 vertex->discard, vertex->used);
23810
23811 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
23812 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
23813 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
23814 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
23815 sarea_priv->last_enqueue = dev_priv->counter - 1;
23816 sarea_priv->last_dispatch = (int)hw_status[5];
23817
23818 @@ -1116,8 +1116,8 @@ static int i810_dma_mc(struct drm_device
23819 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
23820 mc->last_render);
23821
23822 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
23823 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
23824 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
23825 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
23826 sarea_priv->last_enqueue = dev_priv->counter - 1;
23827 sarea_priv->last_dispatch = (int)hw_status[5];
23828
23829 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo.h linux-2.6.34.1/drivers/gpu/drm/i915/dvo.h
23830 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo.h 2010-07-05 14:24:10.000000000 -0400
23831 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo.h 2010-07-07 09:04:51.000000000 -0400
23832 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
23833 *
23834 * \return singly-linked list of modes or NULL if no modes found.
23835 */
23836 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
23837 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
23838
23839 /**
23840 * Clean up driver-specific bits of the output
23841 */
23842 - void (*destroy) (struct intel_dvo_device *dvo);
23843 + void (* const destroy) (struct intel_dvo_device *dvo);
23844
23845 /**
23846 * Debugging hook to dump device registers to log file
23847 */
23848 - void (*dump_regs)(struct intel_dvo_device *dvo);
23849 + void (* const dump_regs)(struct intel_dvo_device *dvo);
23850 };
23851
23852 -extern struct intel_dvo_dev_ops sil164_ops;
23853 -extern struct intel_dvo_dev_ops ch7xxx_ops;
23854 -extern struct intel_dvo_dev_ops ivch_ops;
23855 -extern struct intel_dvo_dev_ops tfp410_ops;
23856 -extern struct intel_dvo_dev_ops ch7017_ops;
23857 +extern const struct intel_dvo_dev_ops sil164_ops;
23858 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
23859 +extern const struct intel_dvo_dev_ops ivch_ops;
23860 +extern const struct intel_dvo_dev_ops tfp410_ops;
23861 +extern const struct intel_dvo_dev_ops ch7017_ops;
23862
23863 #endif /* _INTEL_DVO_H */
23864 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7017.c
23865 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7017.c 2010-07-05 14:24:10.000000000 -0400
23866 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7017.c 2010-07-07 09:04:51.000000000 -0400
23867 @@ -444,7 +444,7 @@ static void ch7017_destroy(struct intel_
23868 }
23869 }
23870
23871 -struct intel_dvo_dev_ops ch7017_ops = {
23872 +const struct intel_dvo_dev_ops ch7017_ops = {
23873 .init = ch7017_init,
23874 .detect = ch7017_detect,
23875 .mode_valid = ch7017_mode_valid,
23876 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7xxx.c
23877 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-07-05 14:24:10.000000000 -0400
23878 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-07-07 09:04:51.000000000 -0400
23879 @@ -358,7 +358,7 @@ static void ch7xxx_destroy(struct intel_
23880 }
23881 }
23882
23883 -struct intel_dvo_dev_ops ch7xxx_ops = {
23884 +const struct intel_dvo_dev_ops ch7xxx_ops = {
23885 .init = ch7xxx_init,
23886 .detect = ch7xxx_detect,
23887 .mode_valid = ch7xxx_mode_valid,
23888 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ivch.c
23889 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ivch.c 2010-07-05 14:24:10.000000000 -0400
23890 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo_ivch.c 2010-07-07 09:04:51.000000000 -0400
23891 @@ -431,7 +431,7 @@ static void ivch_destroy(struct intel_dv
23892 }
23893 }
23894
23895 -struct intel_dvo_dev_ops ivch_ops= {
23896 +const struct intel_dvo_dev_ops ivch_ops= {
23897 .init = ivch_init,
23898 .dpms = ivch_dpms,
23899 .save = ivch_save,
23900 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.34.1/drivers/gpu/drm/i915/dvo_sil164.c
23901 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo_sil164.c 2010-07-05 14:24:10.000000000 -0400
23902 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo_sil164.c 2010-07-07 09:04:51.000000000 -0400
23903 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
23904 }
23905 }
23906
23907 -struct intel_dvo_dev_ops sil164_ops = {
23908 +const struct intel_dvo_dev_ops sil164_ops = {
23909 .init = sil164_init,
23910 .detect = sil164_detect,
23911 .mode_valid = sil164_mode_valid,
23912 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.34.1/drivers/gpu/drm/i915/dvo_tfp410.c
23913 --- linux-2.6.34.1/drivers/gpu/drm/i915/dvo_tfp410.c 2010-07-05 14:24:10.000000000 -0400
23914 +++ linux-2.6.34.1/drivers/gpu/drm/i915/dvo_tfp410.c 2010-07-07 09:04:51.000000000 -0400
23915 @@ -325,7 +325,7 @@ static void tfp410_destroy(struct intel_
23916 }
23917 }
23918
23919 -struct intel_dvo_dev_ops tfp410_ops = {
23920 +const struct intel_dvo_dev_ops tfp410_ops = {
23921 .init = tfp410_init,
23922 .detect = tfp410_detect,
23923 .mode_valid = tfp410_mode_valid,
23924 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/i915_dma.c linux-2.6.34.1/drivers/gpu/drm/i915/i915_dma.c
23925 --- linux-2.6.34.1/drivers/gpu/drm/i915/i915_dma.c 2010-07-05 14:24:10.000000000 -0400
23926 +++ linux-2.6.34.1/drivers/gpu/drm/i915/i915_dma.c 2010-07-07 09:04:51.000000000 -0400
23927 @@ -1416,7 +1416,7 @@ static bool i915_switcheroo_can_switch(s
23928 bool can_switch;
23929
23930 spin_lock(&dev->count_lock);
23931 - can_switch = (dev->open_count == 0);
23932 + can_switch = (atomic_read(&dev->open_count) == 0);
23933 spin_unlock(&dev->count_lock);
23934 return can_switch;
23935 }
23936 diff -urNp linux-2.6.34.1/drivers/gpu/drm/i915/i915_drv.c linux-2.6.34.1/drivers/gpu/drm/i915/i915_drv.c
23937 --- linux-2.6.34.1/drivers/gpu/drm/i915/i915_drv.c 2010-07-05 14:24:10.000000000 -0400
23938 +++ linux-2.6.34.1/drivers/gpu/drm/i915/i915_drv.c 2010-07-07 09:04:51.000000000 -0400
23939 @@ -484,7 +484,7 @@ const struct dev_pm_ops i915_pm_ops = {
23940 .restore = i915_pm_resume,
23941 };
23942
23943 -static struct vm_operations_struct i915_gem_vm_ops = {
23944 +static const struct vm_operations_struct i915_gem_vm_ops = {
23945 .fault = i915_gem_fault,
23946 .open = drm_gem_vm_open,
23947 .close = drm_gem_vm_close,
23948 diff -urNp linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_backlight.c linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_backlight.c
23949 --- linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-07-05 14:24:10.000000000 -0400
23950 +++ linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-07-07 09:04:51.000000000 -0400
23951 @@ -58,7 +58,7 @@ static int nv40_set_intensity(struct bac
23952 return 0;
23953 }
23954
23955 -static struct backlight_ops nv40_bl_ops = {
23956 +static const struct backlight_ops nv40_bl_ops = {
23957 .options = BL_CORE_SUSPENDRESUME,
23958 .get_brightness = nv40_get_intensity,
23959 .update_status = nv40_set_intensity,
23960 @@ -81,7 +81,7 @@ static int nv50_set_intensity(struct bac
23961 return 0;
23962 }
23963
23964 -static struct backlight_ops nv50_bl_ops = {
23965 +static const struct backlight_ops nv50_bl_ops = {
23966 .options = BL_CORE_SUSPENDRESUME,
23967 .get_brightness = nv50_get_intensity,
23968 .update_status = nv50_set_intensity,
23969 diff -urNp linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_state.c
23970 --- linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_state.c 2010-07-05 14:24:10.000000000 -0400
23971 +++ linux-2.6.34.1/drivers/gpu/drm/nouveau/nouveau_state.c 2010-07-07 09:04:51.000000000 -0400
23972 @@ -391,7 +391,7 @@ static bool nouveau_switcheroo_can_switc
23973 bool can_switch;
23974
23975 spin_lock(&dev->count_lock);
23976 - can_switch = (dev->open_count == 0);
23977 + can_switch = (atomic_read(&dev->open_count) == 0);
23978 spin_unlock(&dev->count_lock);
23979 return can_switch;
23980 }
23981 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.34.1/drivers/gpu/drm/radeon/mkregtable.c
23982 --- linux-2.6.34.1/drivers/gpu/drm/radeon/mkregtable.c 2010-07-05 14:24:10.000000000 -0400
23983 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/mkregtable.c 2010-07-07 09:04:51.000000000 -0400
23984 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
23985 regex_t mask_rex;
23986 regmatch_t match[4];
23987 char buf[1024];
23988 - size_t end;
23989 + long end;
23990 int len;
23991 int done = 0;
23992 int r;
23993 unsigned o;
23994 struct offset *offset;
23995 char last_reg_s[10];
23996 - int last_reg;
23997 + unsigned long last_reg;
23998
23999 if (regcomp
24000 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24001 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_atombios.c
24002 --- linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_atombios.c 2010-07-05 14:24:10.000000000 -0400
24003 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_atombios.c 2010-07-07 09:04:51.000000000 -0400
24004 @@ -658,14 +658,14 @@ static uint16_t atombios_get_connector_o
24005 }
24006 }
24007
24008 -struct bios_connector {
24009 +static struct bios_connector {
24010 bool valid;
24011 uint16_t line_mux;
24012 uint16_t devices;
24013 int connector_type;
24014 struct radeon_i2c_bus_rec ddc_bus;
24015 struct radeon_hpd hpd;
24016 -};
24017 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
24018
24019 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
24020 drm_device
24021 @@ -681,7 +681,6 @@ bool radeon_get_atom_connector_info_from
24022 uint8_t dac;
24023 union atom_supported_devices *supported_devices;
24024 int i, j, max_device;
24025 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
24026
24027 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24028 return false;
24029 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_device.c
24030 --- linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_device.c 2010-07-05 14:24:10.000000000 -0400
24031 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_device.c 2010-07-07 09:04:51.000000000 -0400
24032 @@ -560,7 +560,7 @@ static bool radeon_switcheroo_can_switch
24033 bool can_switch;
24034
24035 spin_lock(&dev->count_lock);
24036 - can_switch = (dev->open_count == 0);
24037 + can_switch = (atomic_read(&dev->open_count) == 0);
24038 spin_unlock(&dev->count_lock);
24039 return can_switch;
24040 }
24041 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_display.c
24042 --- linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_display.c 2010-07-05 14:24:10.000000000 -0400
24043 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_display.c 2010-07-07 09:04:51.000000000 -0400
24044 @@ -560,7 +560,7 @@ static void radeon_compute_pll_legacy(st
24045
24046 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
24047 error = freq - current_freq;
24048 - error = error < 0 ? 0xffffffff : error;
24049 + error = (int32_t)error < 0 ? 0xffffffff : error;
24050 } else
24051 error = abs(current_freq - freq);
24052 vco_diff = abs(vco - best_vco);
24053 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_state.c
24054 --- linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_state.c 2010-07-05 14:24:10.000000000 -0400
24055 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_state.c 2010-07-07 09:04:51.000000000 -0400
24056 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
24057 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
24058 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
24059
24060 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
24061 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
24062 sarea_priv->nbox * sizeof(depth_boxes[0])))
24063 return -EFAULT;
24064
24065 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
24066 {
24067 drm_radeon_private_t *dev_priv = dev->dev_private;
24068 drm_radeon_getparam_t *param = data;
24069 - int value;
24070 + int value = 0;
24071
24072 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
24073
24074 diff -urNp linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_ttm.c
24075 --- linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_ttm.c 2010-07-05 14:24:10.000000000 -0400
24076 +++ linux-2.6.34.1/drivers/gpu/drm/radeon/radeon_ttm.c 2010-07-07 09:04:51.000000000 -0400
24077 @@ -565,27 +565,10 @@ void radeon_ttm_fini(struct radeon_devic
24078 DRM_INFO("radeon: ttm finalized\n");
24079 }
24080
24081 -static struct vm_operations_struct radeon_ttm_vm_ops;
24082 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
24083 -
24084 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
24085 -{
24086 - struct ttm_buffer_object *bo;
24087 - int r;
24088 -
24089 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
24090 - if (bo == NULL) {
24091 - return VM_FAULT_NOPAGE;
24092 - }
24093 - r = ttm_vm_ops->fault(vma, vmf);
24094 - return r;
24095 -}
24096 -
24097 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
24098 {
24099 struct drm_file *file_priv;
24100 struct radeon_device *rdev;
24101 - int r;
24102
24103 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
24104 return drm_mmap(filp, vma);
24105 @@ -593,20 +576,9 @@ int radeon_mmap(struct file *filp, struc
24106
24107 file_priv = (struct drm_file *)filp->private_data;
24108 rdev = file_priv->minor->dev->dev_private;
24109 - if (rdev == NULL) {
24110 + if (!rdev)
24111 return -EINVAL;
24112 - }
24113 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
24114 - if (unlikely(r != 0)) {
24115 - return r;
24116 - }
24117 - if (unlikely(ttm_vm_ops == NULL)) {
24118 - ttm_vm_ops = vma->vm_ops;
24119 - radeon_ttm_vm_ops = *ttm_vm_ops;
24120 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
24121 - }
24122 - vma->vm_ops = &radeon_ttm_vm_ops;
24123 - return 0;
24124 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
24125 }
24126
24127
24128 diff -urNp linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo.c
24129 --- linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo.c 2010-07-05 14:24:10.000000000 -0400
24130 +++ linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo.c 2010-07-07 09:04:51.000000000 -0400
24131 @@ -47,7 +47,7 @@
24132 #include <linux/module.h>
24133
24134 #define TTM_ASSERT_LOCKED(param)
24135 -#define TTM_DEBUG(fmt, arg...)
24136 +#define TTM_DEBUG(fmt, arg...) do {} while (0)
24137 #define TTM_BO_HASH_ORDER 13
24138
24139 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
24140 diff -urNp linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo_vm.c
24141 --- linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-07-05 14:24:10.000000000 -0400
24142 +++ linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-07-07 09:04:51.000000000 -0400
24143 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
24144 {
24145 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
24146 vma->vm_private_data;
24147 - struct ttm_bo_device *bdev = bo->bdev;
24148 + struct ttm_bo_device *bdev;
24149 unsigned long bus_base;
24150 unsigned long bus_offset;
24151 unsigned long bus_size;
24152 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
24153 unsigned long address = (unsigned long)vmf->virtual_address;
24154 int retval = VM_FAULT_NOPAGE;
24155
24156 + if (!bo)
24157 + return VM_FAULT_NOPAGE;
24158 + bdev = bo->bdev;
24159 +
24160 /*
24161 * Work around locking order reversal in fault / nopfn
24162 * between mmap_sem and bo_reserve: Perform a trylock operation
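
The ttm_bo_vm.c hunk above defers the bo->bdev load until after the NULL check on the fault's private data, instead of dereferencing the pointer in the declaration and only checking it afterwards. A small stand-alone sketch of that reordering, with invented stand-in types:

#include <stdio.h>
#include <stddef.h>

struct fake_bo  { int id; };
struct fake_vma { struct fake_bo *private_data; };

/*
 * Hypothetical fault handler mirroring the reordering above: the field
 * of *bo is read only after the NULL check, rather than being loaded in
 * the initialiser and checked later.
 */
static int fake_fault(struct fake_vma *v)
{
        struct fake_bo *bo = v->private_data;
        int id;

        if (!bo)
                return -1;              /* stands in for VM_FAULT_NOPAGE */
        id = bo->id;                    /* safe: bo is known to be non-NULL here */

        printf("handling fault for object %d\n", id);
        return 0;
}

int main(void)
{
        struct fake_bo  b      = { .id = 7 };
        struct fake_vma mapped = { .private_data = &b };
        struct fake_vma orphan = { .private_data = NULL };

        fake_fault(&mapped);
        printf("orphan mapping: %d\n", fake_fault(&orphan));
        return 0;
}
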
24163 diff -urNp linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_global.c
24164 --- linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_global.c 2010-07-05 14:24:10.000000000 -0400
24165 +++ linux-2.6.34.1/drivers/gpu/drm/ttm/ttm_global.c 2010-07-07 09:04:51.000000000 -0400
24166 @@ -36,7 +36,7 @@
24167 struct ttm_global_item {
24168 struct mutex mutex;
24169 void *object;
24170 - int refcount;
24171 + atomic_t refcount;
24172 };
24173
24174 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
24175 @@ -49,7 +49,7 @@ void ttm_global_init(void)
24176 struct ttm_global_item *item = &glob[i];
24177 mutex_init(&item->mutex);
24178 item->object = NULL;
24179 - item->refcount = 0;
24180 + atomic_set(&item->refcount, 0);
24181 }
24182 }
24183
24184 @@ -59,7 +59,7 @@ void ttm_global_release(void)
24185 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
24186 struct ttm_global_item *item = &glob[i];
24187 BUG_ON(item->object != NULL);
24188 - BUG_ON(item->refcount != 0);
24189 + BUG_ON(atomic_read(&item->refcount) != 0);
24190 }
24191 }
24192
24193 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
24194 void *object;
24195
24196 mutex_lock(&item->mutex);
24197 - if (item->refcount == 0) {
24198 + if (atomic_read(&item->refcount) == 0) {
24199 item->object = kzalloc(ref->size, GFP_KERNEL);
24200 if (unlikely(item->object == NULL)) {
24201 ret = -ENOMEM;
24202 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
24203 goto out_err;
24204
24205 }
24206 - ++item->refcount;
24207 + atomic_inc(&item->refcount);
24208 ref->object = item->object;
24209 object = item->object;
24210 mutex_unlock(&item->mutex);
24211 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
24212 struct ttm_global_item *item = &glob[ref->global_type];
24213
24214 mutex_lock(&item->mutex);
24215 - BUG_ON(item->refcount == 0);
24216 + BUG_ON(atomic_read(&item->refcount) == 0);
24217 BUG_ON(ref->object != item->object);
24218 - if (--item->refcount == 0) {
24219 + if (atomic_dec_and_test(&item->refcount)) {
24220 ref->release(ref);
24221 item->object = NULL;
24222 }
24223 diff -urNp linux-2.6.34.1/drivers/hid/usbhid/hiddev.c linux-2.6.34.1/drivers/hid/usbhid/hiddev.c
24224 --- linux-2.6.34.1/drivers/hid/usbhid/hiddev.c 2010-07-05 14:24:10.000000000 -0400
24225 +++ linux-2.6.34.1/drivers/hid/usbhid/hiddev.c 2010-07-07 09:04:51.000000000 -0400
24226 @@ -615,7 +615,7 @@ static long hiddev_ioctl(struct file *fi
24227 return put_user(HID_VERSION, (int __user *)arg);
24228
24229 case HIDIOCAPPLICATION:
24230 - if (arg < 0 || arg >= hid->maxapplication)
24231 + if (arg >= hid->maxapplication)
24232 return -EINVAL;
24233
24234 for (i = 0; i < hid->maxcollection; i++)
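
The hiddev.c hunk above drops the "arg < 0" half of the range check because the ioctl argument is unsigned, so that comparison can never be true; only the upper bound does any work. A short illustration with arbitrary values:

#include <stdio.h>

int main(void)
{
        unsigned long arg = 5;              /* ioctl argument, always unsigned */
        unsigned int maxapplication = 3;

        /*
         * "arg < 0" can never hold for an unsigned value (gcc flags it
         * with -Wtype-limits), so the upper bound is the only real test,
         * which is all the hunk above keeps.
         */
        if (arg >= maxapplication)
                puts("rejected: out of range");
        else
                puts("accepted");
        return 0;
}
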
24235 diff -urNp linux-2.6.34.1/drivers/hwmon/k8temp.c linux-2.6.34.1/drivers/hwmon/k8temp.c
24236 --- linux-2.6.34.1/drivers/hwmon/k8temp.c 2010-07-05 14:24:10.000000000 -0400
24237 +++ linux-2.6.34.1/drivers/hwmon/k8temp.c 2010-07-07 09:04:51.000000000 -0400
24238 @@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n
24239
24240 static const struct pci_device_id k8temp_ids[] = {
24241 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
24242 - { 0 },
24243 + { 0, 0, 0, 0, 0, 0, 0 },
24244 };
24245
24246 MODULE_DEVICE_TABLE(pci, k8temp_ids);
24247 diff -urNp linux-2.6.34.1/drivers/hwmon/sis5595.c linux-2.6.34.1/drivers/hwmon/sis5595.c
24248 --- linux-2.6.34.1/drivers/hwmon/sis5595.c 2010-07-05 14:24:10.000000000 -0400
24249 +++ linux-2.6.34.1/drivers/hwmon/sis5595.c 2010-07-07 09:04:51.000000000 -0400
24250 @@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_upda
24251
24252 static const struct pci_device_id sis5595_pci_ids[] = {
24253 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
24254 - { 0, }
24255 + { 0, 0, 0, 0, 0, 0, 0 }
24256 };
24257
24258 MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
24259 diff -urNp linux-2.6.34.1/drivers/hwmon/via686a.c linux-2.6.34.1/drivers/hwmon/via686a.c
24260 --- linux-2.6.34.1/drivers/hwmon/via686a.c 2010-07-05 14:24:10.000000000 -0400
24261 +++ linux-2.6.34.1/drivers/hwmon/via686a.c 2010-07-07 09:04:51.000000000 -0400
24262 @@ -769,7 +769,7 @@ static struct via686a_data *via686a_upda
24263
24264 static const struct pci_device_id via686a_pci_ids[] = {
24265 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
24266 - { 0, }
24267 + { 0, 0, 0, 0, 0, 0, 0 }
24268 };
24269
24270 MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
24271 diff -urNp linux-2.6.34.1/drivers/hwmon/vt8231.c linux-2.6.34.1/drivers/hwmon/vt8231.c
24272 --- linux-2.6.34.1/drivers/hwmon/vt8231.c 2010-07-05 14:24:10.000000000 -0400
24273 +++ linux-2.6.34.1/drivers/hwmon/vt8231.c 2010-07-07 09:04:51.000000000 -0400
24274 @@ -699,7 +699,7 @@ static struct platform_driver vt8231_dri
24275
24276 static const struct pci_device_id vt8231_pci_ids[] = {
24277 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
24278 - { 0, }
24279 + { 0, 0, 0, 0, 0, 0, 0 }
24280 };
24281
24282 MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
24283 diff -urNp linux-2.6.34.1/drivers/hwmon/w83791d.c linux-2.6.34.1/drivers/hwmon/w83791d.c
24284 --- linux-2.6.34.1/drivers/hwmon/w83791d.c 2010-07-05 14:24:10.000000000 -0400
24285 +++ linux-2.6.34.1/drivers/hwmon/w83791d.c 2010-07-07 09:04:51.000000000 -0400
24286 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
24287 struct i2c_board_info *info);
24288 static int w83791d_remove(struct i2c_client *client);
24289
24290 -static int w83791d_read(struct i2c_client *client, u8 register);
24291 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
24292 +static int w83791d_read(struct i2c_client *client, u8 reg);
24293 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
24294 static struct w83791d_data *w83791d_update_device(struct device *dev);
24295
24296 #ifdef DEBUG
24297 diff -urNp linux-2.6.34.1/drivers/i2c/busses/i2c-i801.c linux-2.6.34.1/drivers/i2c/busses/i2c-i801.c
24298 --- linux-2.6.34.1/drivers/i2c/busses/i2c-i801.c 2010-07-05 14:24:10.000000000 -0400
24299 +++ linux-2.6.34.1/drivers/i2c/busses/i2c-i801.c 2010-07-07 09:04:51.000000000 -0400
24300 @@ -582,7 +582,7 @@ static const struct pci_device_id i801_i
24301 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
24302 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
24303 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
24304 - { 0, }
24305 + { 0, 0, 0, 0, 0, 0, 0 }
24306 };
24307
24308 MODULE_DEVICE_TABLE (pci, i801_ids);
24309 diff -urNp linux-2.6.34.1/drivers/i2c/busses/i2c-piix4.c linux-2.6.34.1/drivers/i2c/busses/i2c-piix4.c
24310 --- linux-2.6.34.1/drivers/i2c/busses/i2c-piix4.c 2010-07-05 14:24:10.000000000 -0400
24311 +++ linux-2.6.34.1/drivers/i2c/busses/i2c-piix4.c 2010-07-07 09:04:52.000000000 -0400
24312 @@ -124,7 +124,7 @@ static struct dmi_system_id __devinitdat
24313 .ident = "IBM",
24314 .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
24315 },
24316 - { },
24317 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24318 };
24319
24320 static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
24321 @@ -491,7 +491,7 @@ static const struct pci_device_id piix4_
24322 PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
24323 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
24324 PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
24325 - { 0, }
24326 + { 0, 0, 0, 0, 0, 0, 0 }
24327 };
24328
24329 MODULE_DEVICE_TABLE (pci, piix4_ids);
24330 diff -urNp linux-2.6.34.1/drivers/i2c/busses/i2c-sis630.c linux-2.6.34.1/drivers/i2c/busses/i2c-sis630.c
24331 --- linux-2.6.34.1/drivers/i2c/busses/i2c-sis630.c 2010-07-05 14:24:10.000000000 -0400
24332 +++ linux-2.6.34.1/drivers/i2c/busses/i2c-sis630.c 2010-07-07 09:04:52.000000000 -0400
24333 @@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter
24334 static const struct pci_device_id sis630_ids[] __devinitconst = {
24335 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
24336 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
24337 - { 0, }
24338 + { 0, 0, 0, 0, 0, 0, 0 }
24339 };
24340
24341 MODULE_DEVICE_TABLE (pci, sis630_ids);
24342 diff -urNp linux-2.6.34.1/drivers/i2c/busses/i2c-sis96x.c linux-2.6.34.1/drivers/i2c/busses/i2c-sis96x.c
24343 --- linux-2.6.34.1/drivers/i2c/busses/i2c-sis96x.c 2010-07-05 14:24:10.000000000 -0400
24344 +++ linux-2.6.34.1/drivers/i2c/busses/i2c-sis96x.c 2010-07-07 09:04:52.000000000 -0400
24345 @@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter
24346
24347 static const struct pci_device_id sis96x_ids[] = {
24348 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
24349 - { 0, }
24350 + { 0, 0, 0, 0, 0, 0, 0 }
24351 };
24352
24353 MODULE_DEVICE_TABLE (pci, sis96x_ids);
24354 diff -urNp linux-2.6.34.1/drivers/ide/ide-cd.c linux-2.6.34.1/drivers/ide/ide-cd.c
24355 --- linux-2.6.34.1/drivers/ide/ide-cd.c 2010-07-05 14:24:10.000000000 -0400
24356 +++ linux-2.6.34.1/drivers/ide/ide-cd.c 2010-07-07 09:04:52.000000000 -0400
24357 @@ -766,7 +766,7 @@ static void cdrom_do_block_pc(ide_drive_
24358 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
24359 if ((unsigned long)buf & alignment
24360 || blk_rq_bytes(rq) & q->dma_pad_mask
24361 - || object_is_on_stack(buf))
24362 + || object_starts_on_stack(buf))
24363 drive->dma = 0;
24364 }
24365 }
24366 diff -urNp linux-2.6.34.1/drivers/ieee1394/dv1394.c linux-2.6.34.1/drivers/ieee1394/dv1394.c
24367 --- linux-2.6.34.1/drivers/ieee1394/dv1394.c 2010-07-05 14:24:10.000000000 -0400
24368 +++ linux-2.6.34.1/drivers/ieee1394/dv1394.c 2010-07-07 09:04:52.000000000 -0400
24369 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
24370 based upon DIF section and sequence
24371 */
24372
24373 -static void inline
24374 +static inline void
24375 frame_put_packet (struct frame *f, struct packet *p)
24376 {
24377 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
24378 @@ -2178,7 +2178,7 @@ static const struct ieee1394_device_id d
24379 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
24380 .version = AVC_SW_VERSION_ENTRY & 0xffffff
24381 },
24382 - { }
24383 + { 0, 0, 0, 0, 0, 0 }
24384 };
24385
24386 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
24387 diff -urNp linux-2.6.34.1/drivers/ieee1394/eth1394.c linux-2.6.34.1/drivers/ieee1394/eth1394.c
24388 --- linux-2.6.34.1/drivers/ieee1394/eth1394.c 2010-07-05 14:24:10.000000000 -0400
24389 +++ linux-2.6.34.1/drivers/ieee1394/eth1394.c 2010-07-07 09:04:52.000000000 -0400
24390 @@ -446,7 +446,7 @@ static const struct ieee1394_device_id e
24391 .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
24392 .version = ETHER1394_GASP_VERSION,
24393 },
24394 - {}
24395 + { 0, 0, 0, 0, 0, 0 }
24396 };
24397
24398 MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
24399 diff -urNp linux-2.6.34.1/drivers/ieee1394/hosts.c linux-2.6.34.1/drivers/ieee1394/hosts.c
24400 --- linux-2.6.34.1/drivers/ieee1394/hosts.c 2010-07-05 14:24:10.000000000 -0400
24401 +++ linux-2.6.34.1/drivers/ieee1394/hosts.c 2010-07-07 09:04:52.000000000 -0400
24402 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
24403 }
24404
24405 static struct hpsb_host_driver dummy_driver = {
24406 + .name = "dummy",
24407 .transmit_packet = dummy_transmit_packet,
24408 .devctl = dummy_devctl,
24409 .isoctl = dummy_isoctl
24410 diff -urNp linux-2.6.34.1/drivers/ieee1394/ohci1394.c linux-2.6.34.1/drivers/ieee1394/ohci1394.c
24411 --- linux-2.6.34.1/drivers/ieee1394/ohci1394.c 2010-07-05 14:24:10.000000000 -0400
24412 +++ linux-2.6.34.1/drivers/ieee1394/ohci1394.c 2010-07-07 09:04:52.000000000 -0400
24413 @@ -148,9 +148,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
24414 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
24415
24416 /* Module Parameters */
24417 -static int phys_dma = 1;
24418 +static int phys_dma;
24419 module_param(phys_dma, int, 0444);
24420 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
24421 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
24422
24423 static void dma_trm_tasklet(unsigned long data);
24424 static void dma_trm_reset(struct dma_trm_ctx *d);
24425 @@ -3445,7 +3445,7 @@ static struct pci_device_id ohci1394_pci
24426 .subvendor = PCI_ANY_ID,
24427 .subdevice = PCI_ANY_ID,
24428 },
24429 - { 0, },
24430 + { 0, 0, 0, 0, 0, 0, 0 },
24431 };
24432
24433 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
24434 diff -urNp linux-2.6.34.1/drivers/ieee1394/raw1394.c linux-2.6.34.1/drivers/ieee1394/raw1394.c
24435 --- linux-2.6.34.1/drivers/ieee1394/raw1394.c 2010-07-05 14:24:10.000000000 -0400
24436 +++ linux-2.6.34.1/drivers/ieee1394/raw1394.c 2010-07-07 09:04:52.000000000 -0400
24437 @@ -3002,7 +3002,7 @@ static const struct ieee1394_device_id r
24438 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
24439 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
24440 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
24441 - {}
24442 + { 0, 0, 0, 0, 0, 0 }
24443 };
24444
24445 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
24446 diff -urNp linux-2.6.34.1/drivers/ieee1394/sbp2.c linux-2.6.34.1/drivers/ieee1394/sbp2.c
24447 --- linux-2.6.34.1/drivers/ieee1394/sbp2.c 2010-07-05 14:24:10.000000000 -0400
24448 +++ linux-2.6.34.1/drivers/ieee1394/sbp2.c 2010-07-07 09:04:52.000000000 -0400
24449 @@ -289,7 +289,7 @@ static const struct ieee1394_device_id s
24450 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
24451 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
24452 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
24453 - {}
24454 + { 0, 0, 0, 0, 0, 0 }
24455 };
24456 MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
24457
24458 @@ -2110,7 +2110,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
24459 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
24460 MODULE_LICENSE("GPL");
24461
24462 -static int sbp2_module_init(void)
24463 +static int __init sbp2_module_init(void)
24464 {
24465 int ret;
24466
24467 diff -urNp linux-2.6.34.1/drivers/ieee1394/video1394.c linux-2.6.34.1/drivers/ieee1394/video1394.c
24468 --- linux-2.6.34.1/drivers/ieee1394/video1394.c 2010-07-05 14:24:10.000000000 -0400
24469 +++ linux-2.6.34.1/drivers/ieee1394/video1394.c 2010-07-07 09:04:52.000000000 -0400
24470 @@ -1311,7 +1311,7 @@ static const struct ieee1394_device_id v
24471 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
24472 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
24473 },
24474 - { }
24475 + { 0, 0, 0, 0, 0, 0 }
24476 };
24477
24478 MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
24479 diff -urNp linux-2.6.34.1/drivers/infiniband/core/cm.c linux-2.6.34.1/drivers/infiniband/core/cm.c
24480 --- linux-2.6.34.1/drivers/infiniband/core/cm.c 2010-07-05 14:24:10.000000000 -0400
24481 +++ linux-2.6.34.1/drivers/infiniband/core/cm.c 2010-07-07 09:04:52.000000000 -0400
24482 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
24483
24484 struct cm_counter_group {
24485 struct kobject obj;
24486 - atomic_long_t counter[CM_ATTR_COUNT];
24487 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
24488 };
24489
24490 struct cm_counter_attribute {
24491 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
24492 struct ib_mad_send_buf *msg = NULL;
24493 int ret;
24494
24495 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24496 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24497 counter[CM_REQ_COUNTER]);
24498
24499 /* Quick state check to discard duplicate REQs. */
24500 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
24501 if (!cm_id_priv)
24502 return;
24503
24504 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24505 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24506 counter[CM_REP_COUNTER]);
24507 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
24508 if (ret)
24509 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
24510 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
24511 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
24512 spin_unlock_irq(&cm_id_priv->lock);
24513 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24514 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24515 counter[CM_RTU_COUNTER]);
24516 goto out;
24517 }
24518 @@ -2111,7 +2111,7 @@ static int cm_dreq_handler(struct cm_wor
24519 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
24520 dreq_msg->local_comm_id);
24521 if (!cm_id_priv) {
24522 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24523 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24524 counter[CM_DREQ_COUNTER]);
24525 cm_issue_drep(work->port, work->mad_recv_wc);
24526 return -EINVAL;
24527 @@ -2132,7 +2132,7 @@ static int cm_dreq_handler(struct cm_wor
24528 case IB_CM_MRA_REP_RCVD:
24529 break;
24530 case IB_CM_TIMEWAIT:
24531 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24532 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24533 counter[CM_DREQ_COUNTER]);
24534 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
24535 goto unlock;
24536 @@ -2146,7 +2146,7 @@ static int cm_dreq_handler(struct cm_wor
24537 cm_free_msg(msg);
24538 goto deref;
24539 case IB_CM_DREQ_RCVD:
24540 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24541 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24542 counter[CM_DREQ_COUNTER]);
24543 goto unlock;
24544 default:
24545 @@ -2502,7 +2502,7 @@ static int cm_mra_handler(struct cm_work
24546 ib_modify_mad(cm_id_priv->av.port->mad_agent,
24547 cm_id_priv->msg, timeout)) {
24548 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
24549 - atomic_long_inc(&work->port->
24550 + atomic_long_inc_unchecked(&work->port->
24551 counter_group[CM_RECV_DUPLICATES].
24552 counter[CM_MRA_COUNTER]);
24553 goto out;
24554 @@ -2511,7 +2511,7 @@ static int cm_mra_handler(struct cm_work
24555 break;
24556 case IB_CM_MRA_REQ_RCVD:
24557 case IB_CM_MRA_REP_RCVD:
24558 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24559 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24560 counter[CM_MRA_COUNTER]);
24561 /* fall through */
24562 default:
24563 @@ -2673,7 +2673,7 @@ static int cm_lap_handler(struct cm_work
24564 case IB_CM_LAP_IDLE:
24565 break;
24566 case IB_CM_MRA_LAP_SENT:
24567 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24568 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24569 counter[CM_LAP_COUNTER]);
24570 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
24571 goto unlock;
24572 @@ -2689,7 +2689,7 @@ static int cm_lap_handler(struct cm_work
24573 cm_free_msg(msg);
24574 goto deref;
24575 case IB_CM_LAP_RCVD:
24576 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24577 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24578 counter[CM_LAP_COUNTER]);
24579 goto unlock;
24580 default:
24581 @@ -2973,7 +2973,7 @@ static int cm_sidr_req_handler(struct cm
24582 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
24583 if (cur_cm_id_priv) {
24584 spin_unlock_irq(&cm.lock);
24585 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
24586 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
24587 counter[CM_SIDR_REQ_COUNTER]);
24588 goto out; /* Duplicate message. */
24589 }
24590 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
24591 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
24592 msg->retries = 1;
24593
24594 - atomic_long_add(1 + msg->retries,
24595 + atomic_long_add_unchecked(1 + msg->retries,
24596 &port->counter_group[CM_XMIT].counter[attr_index]);
24597 if (msg->retries)
24598 - atomic_long_add(msg->retries,
24599 + atomic_long_add_unchecked(msg->retries,
24600 &port->counter_group[CM_XMIT_RETRIES].
24601 counter[attr_index]);
24602
24603 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
24604 }
24605
24606 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
24607 - atomic_long_inc(&port->counter_group[CM_RECV].
24608 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
24609 counter[attr_id - CM_ATTR_ID_OFFSET]);
24610
24611 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
24612 @@ -3595,7 +3595,7 @@ static ssize_t cm_show_counter(struct ko
24613 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
24614
24615 return sprintf(buf, "%ld\n",
24616 - atomic_long_read(&group->counter[cm_attr->index]));
24617 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
24618 }
24619
24620 static const struct sysfs_ops cm_counter_ops = {
24621 diff -urNp linux-2.6.34.1/drivers/input/keyboard/atkbd.c linux-2.6.34.1/drivers/input/keyboard/atkbd.c
24622 --- linux-2.6.34.1/drivers/input/keyboard/atkbd.c 2010-07-05 14:24:10.000000000 -0400
24623 +++ linux-2.6.34.1/drivers/input/keyboard/atkbd.c 2010-07-07 09:04:52.000000000 -0400
24624 @@ -1240,7 +1240,7 @@ static struct serio_device_id atkbd_seri
24625 .id = SERIO_ANY,
24626 .extra = SERIO_ANY,
24627 },
24628 - { 0 }
24629 + { 0, 0, 0, 0 }
24630 };
24631
24632 MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
24633 diff -urNp linux-2.6.34.1/drivers/input/mouse/lifebook.c linux-2.6.34.1/drivers/input/mouse/lifebook.c
24634 --- linux-2.6.34.1/drivers/input/mouse/lifebook.c 2010-07-05 14:24:10.000000000 -0400
24635 +++ linux-2.6.34.1/drivers/input/mouse/lifebook.c 2010-07-07 09:04:52.000000000 -0400
24636 @@ -123,7 +123,7 @@ static const struct dmi_system_id __init
24637 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"),
24638 },
24639 },
24640 - { }
24641 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
24642 };
24643
24644 void __init lifebook_module_init(void)
24645 diff -urNp linux-2.6.34.1/drivers/input/mouse/psmouse-base.c linux-2.6.34.1/drivers/input/mouse/psmouse-base.c
24646 --- linux-2.6.34.1/drivers/input/mouse/psmouse-base.c 2010-07-05 14:24:10.000000000 -0400
24647 +++ linux-2.6.34.1/drivers/input/mouse/psmouse-base.c 2010-07-07 09:04:52.000000000 -0400
24648 @@ -1460,7 +1460,7 @@ static struct serio_device_id psmouse_se
24649 .id = SERIO_ANY,
24650 .extra = SERIO_ANY,
24651 },
24652 - { 0 }
24653 + { 0, 0, 0, 0 }
24654 };
24655
24656 MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
24657 diff -urNp linux-2.6.34.1/drivers/input/mouse/synaptics.c linux-2.6.34.1/drivers/input/mouse/synaptics.c
24658 --- linux-2.6.34.1/drivers/input/mouse/synaptics.c 2010-07-05 14:24:10.000000000 -0400
24659 +++ linux-2.6.34.1/drivers/input/mouse/synaptics.c 2010-07-07 09:04:52.000000000 -0400
24660 @@ -458,7 +458,7 @@ static void synaptics_process_packet(str
24661 break;
24662 case 2:
24663 if (SYN_MODEL_PEN(priv->model_id))
24664 - ; /* Nothing, treat a pen as a single finger */
24665 + break; /* Nothing, treat a pen as a single finger */
24666 break;
24667 case 4 ... 15:
24668 if (SYN_CAP_PALMDETECT(priv->capabilities))
24669 @@ -680,7 +680,6 @@ static const struct dmi_system_id __init
24670 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
24671 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
24672 },
24673 -
24674 },
24675 {
24676 /* Toshiba Portege M300 */
24677 @@ -689,9 +688,8 @@ static const struct dmi_system_id __init
24678 DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
24679 DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
24680 },
24681 -
24682 },
24683 - { }
24684 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24685 #endif
24686 };
24687
24688 diff -urNp linux-2.6.34.1/drivers/input/mousedev.c linux-2.6.34.1/drivers/input/mousedev.c
24689 --- linux-2.6.34.1/drivers/input/mousedev.c 2010-07-05 14:24:10.000000000 -0400
24690 +++ linux-2.6.34.1/drivers/input/mousedev.c 2010-07-07 09:04:52.000000000 -0400
24691 @@ -754,7 +754,7 @@ static ssize_t mousedev_read(struct file
24692
24693 spin_unlock_irq(&client->packet_lock);
24694
24695 - if (copy_to_user(buffer, data, count))
24696 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
24697 return -EFAULT;
24698
24699 return count;
24700 @@ -1051,7 +1051,7 @@ static struct input_handler mousedev_han
24701
24702 #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
24703 static struct miscdevice psaux_mouse = {
24704 - PSMOUSE_MINOR, "psaux", &mousedev_fops
24705 + PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL
24706 };
24707 static int psaux_registered;
24708 #endif
24709 diff -urNp linux-2.6.34.1/drivers/input/serio/i8042-x86ia64io.h linux-2.6.34.1/drivers/input/serio/i8042-x86ia64io.h
24710 --- linux-2.6.34.1/drivers/input/serio/i8042-x86ia64io.h 2010-07-05 14:24:10.000000000 -0400
24711 +++ linux-2.6.34.1/drivers/input/serio/i8042-x86ia64io.h 2010-07-07 09:04:52.000000000 -0400
24712 @@ -172,7 +172,7 @@ static const struct dmi_system_id __init
24713 DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
24714 },
24715 },
24716 - { }
24717 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24718 };
24719
24720 /*
24721 @@ -402,7 +402,7 @@ static const struct dmi_system_id __init
24722 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
24723 },
24724 },
24725 - { }
24726 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24727 };
24728
24729 static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
24730 @@ -476,7 +476,7 @@ static const struct dmi_system_id __init
24731 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
24732 },
24733 },
24734 - { }
24735 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24736 };
24737
24738 #ifdef CONFIG_PNP
24739 @@ -495,7 +495,7 @@ static const struct dmi_system_id __init
24740 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
24741 },
24742 },
24743 - { }
24744 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24745 };
24746
24747 static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
24748 @@ -519,7 +519,7 @@ static const struct dmi_system_id __init
24749 DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
24750 },
24751 },
24752 - { }
24753 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24754 };
24755 #endif
24756
24757 @@ -593,7 +593,7 @@ static const struct dmi_system_id __init
24758 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
24759 },
24760 },
24761 - { }
24762 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
24763 };
24764
24765 #endif /* CONFIG_X86 */
24766 diff -urNp linux-2.6.34.1/drivers/input/serio/serio_raw.c linux-2.6.34.1/drivers/input/serio/serio_raw.c
24767 --- linux-2.6.34.1/drivers/input/serio/serio_raw.c 2010-07-05 14:24:10.000000000 -0400
24768 +++ linux-2.6.34.1/drivers/input/serio/serio_raw.c 2010-07-07 09:04:52.000000000 -0400
24769 @@ -376,7 +376,7 @@ static struct serio_device_id serio_raw_
24770 .id = SERIO_ANY,
24771 .extra = SERIO_ANY,
24772 },
24773 - { 0 }
24774 + { 0, 0, 0, 0 }
24775 };
24776
24777 MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids);
24778 diff -urNp linux-2.6.34.1/drivers/isdn/gigaset/common.c linux-2.6.34.1/drivers/isdn/gigaset/common.c
24779 --- linux-2.6.34.1/drivers/isdn/gigaset/common.c 2010-07-05 14:24:10.000000000 -0400
24780 +++ linux-2.6.34.1/drivers/isdn/gigaset/common.c 2010-07-07 09:04:52.000000000 -0400
24781 @@ -733,7 +733,7 @@ struct cardstate *gigaset_initcs(struct
24782 cs->commands_pending = 0;
24783 cs->cur_at_seq = 0;
24784 cs->gotfwver = -1;
24785 - cs->open_count = 0;
24786 + atomic_set(&cs->open_count, 0);
24787 cs->dev = NULL;
24788 cs->tty = NULL;
24789 cs->tty_dev = NULL;
24790 diff -urNp linux-2.6.34.1/drivers/isdn/gigaset/gigaset.h linux-2.6.34.1/drivers/isdn/gigaset/gigaset.h
24791 --- linux-2.6.34.1/drivers/isdn/gigaset/gigaset.h 2010-07-05 14:24:10.000000000 -0400
24792 +++ linux-2.6.34.1/drivers/isdn/gigaset/gigaset.h 2010-07-07 09:04:52.000000000 -0400
24793 @@ -443,7 +443,7 @@ struct cardstate {
24794 spinlock_t cmdlock;
24795 unsigned curlen, cmdbytes;
24796
24797 - unsigned open_count;
24798 + atomic_t open_count;
24799 struct tty_struct *tty;
24800 struct tasklet_struct if_wake_tasklet;
24801 unsigned control_state;
24802 diff -urNp linux-2.6.34.1/drivers/isdn/gigaset/interface.c linux-2.6.34.1/drivers/isdn/gigaset/interface.c
24803 --- linux-2.6.34.1/drivers/isdn/gigaset/interface.c 2010-07-05 14:24:10.000000000 -0400
24804 +++ linux-2.6.34.1/drivers/isdn/gigaset/interface.c 2010-07-07 09:04:52.000000000 -0400
24805 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
24806 return -ERESTARTSYS;
24807 tty->driver_data = cs;
24808
24809 - ++cs->open_count;
24810 -
24811 - if (cs->open_count == 1) {
24812 + if (atomic_inc_return(&cs->open_count) == 1) {
24813 spin_lock_irqsave(&cs->lock, flags);
24814 cs->tty = tty;
24815 spin_unlock_irqrestore(&cs->lock, flags);
24816 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
24817
24818 if (!cs->connected)
24819 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
24820 - else if (!cs->open_count)
24821 + else if (!atomic_read(&cs->open_count))
24822 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24823 else {
24824 - if (!--cs->open_count) {
24825 + if (!atomic_dec_return(&cs->open_count)) {
24826 spin_lock_irqsave(&cs->lock, flags);
24827 cs->tty = NULL;
24828 spin_unlock_irqrestore(&cs->lock, flags);
24829 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
24830 if (!cs->connected) {
24831 gig_dbg(DEBUG_IF, "not connected");
24832 retval = -ENODEV;
24833 - } else if (!cs->open_count)
24834 + } else if (!atomic_read(&cs->open_count))
24835 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24836 else {
24837 retval = 0;
24838 @@ -355,7 +353,7 @@ static int if_write(struct tty_struct *t
24839 if (!cs->connected) {
24840 gig_dbg(DEBUG_IF, "not connected");
24841 retval = -ENODEV;
24842 - } else if (!cs->open_count)
24843 + } else if (!atomic_read(&cs->open_count))
24844 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24845 else if (cs->mstate != MS_LOCKED) {
24846 dev_warn(cs->dev, "can't write to unlocked device\n");
24847 @@ -389,7 +387,7 @@ static int if_write_room(struct tty_stru
24848 if (!cs->connected) {
24849 gig_dbg(DEBUG_IF, "not connected");
24850 retval = -ENODEV;
24851 - } else if (!cs->open_count)
24852 + } else if (!atomic_read(&cs->open_count))
24853 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24854 else if (cs->mstate != MS_LOCKED) {
24855 dev_warn(cs->dev, "can't write to unlocked device\n");
24856 @@ -419,7 +417,7 @@ static int if_chars_in_buffer(struct tty
24857
24858 if (!cs->connected)
24859 gig_dbg(DEBUG_IF, "not connected");
24860 - else if (!cs->open_count)
24861 + else if (!atomic_read(&cs->open_count))
24862 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24863 else if (cs->mstate != MS_LOCKED)
24864 dev_warn(cs->dev, "can't write to unlocked device\n");
24865 @@ -447,7 +445,7 @@ static void if_throttle(struct tty_struc
24866
24867 if (!cs->connected)
24868 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
24869 - else if (!cs->open_count)
24870 + else if (!atomic_read(&cs->open_count))
24871 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24872 else
24873 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
24874 @@ -471,7 +469,7 @@ static void if_unthrottle(struct tty_str
24875
24876 if (!cs->connected)
24877 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
24878 - else if (!cs->open_count)
24879 + else if (!atomic_read(&cs->open_count))
24880 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24881 else
24882 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
24883 @@ -502,7 +500,7 @@ static void if_set_termios(struct tty_st
24884 goto out;
24885 }
24886
24887 - if (!cs->open_count) {
24888 + if (!atomic_read(&cs->open_count)) {
24889 dev_warn(cs->dev, "%s: device not opened\n", __func__);
24890 goto out;
24891 }
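The gigaset hunks above turn a plain int open_count, which was incremented and tested in separate steps, into an atomic_t driven by atomic_inc_return()/atomic_dec_return(), so "first opener" and "last closer" are decided by the value the atomic operation itself returns. A minimal userspace sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t and invented function names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void device_open(void)
{
        /* atomic_fetch_add returns the old value, like atomic_inc_return - 1 */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                printf("first open: attach tty, start device\n");
}

static void device_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
                printf("last close: detach tty, stop device\n");
}

int main(void)
{
        device_open();
        device_open();
        device_close();
        device_close();
        return 0;
}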
24892 diff -urNp linux-2.6.34.1/drivers/isdn/hardware/avm/b1.c linux-2.6.34.1/drivers/isdn/hardware/avm/b1.c
24893 --- linux-2.6.34.1/drivers/isdn/hardware/avm/b1.c 2010-07-05 14:24:10.000000000 -0400
24894 +++ linux-2.6.34.1/drivers/isdn/hardware/avm/b1.c 2010-07-07 09:04:52.000000000 -0400
24895 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
24896 }
24897 if (left) {
24898 if (t4file->user) {
24899 - if (copy_from_user(buf, dp, left))
24900 + if (left > sizeof(buf) || copy_from_user(buf, dp, left))
24901 return -EFAULT;
24902 } else {
24903 memcpy(buf, dp, left);
24904 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
24905 }
24906 if (left) {
24907 if (config->user) {
24908 - if (copy_from_user(buf, dp, left))
24909 + if (left > sizeof(buf) || copy_from_user(buf, dp, left))
24910 return -EFAULT;
24911 } else {
24912 memcpy(buf, dp, left);
24913 diff -urNp linux-2.6.34.1/drivers/isdn/icn/icn.c linux-2.6.34.1/drivers/isdn/icn/icn.c
24914 --- linux-2.6.34.1/drivers/isdn/icn/icn.c 2010-07-05 14:24:10.000000000 -0400
24915 +++ linux-2.6.34.1/drivers/isdn/icn/icn.c 2010-07-07 09:04:52.000000000 -0400
24916 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
24917 if (count > len)
24918 count = len;
24919 if (user) {
24920 - if (copy_from_user(msg, buf, count))
24921 + if (count > sizeof(msg) || copy_from_user(msg, buf, count))
24922 return -EFAULT;
24923 } else
24924 memcpy(msg, buf, count);
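The copy_from_user() guards added above for b1.c and icn.c, like the copy_to_user() guards in mousedev.c and radio-cadet.c earlier, clamp the requested length against the fixed-size destination before any bytes move, so an oversized request is refused instead of overrunning the buffer. A standalone sketch of the pattern, where memcpy stands in for the user-copy helpers and MSG_SIZE/write_cmd are invented names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MSG_SIZE 256

static int write_cmd(const char *src, size_t count)
{
        char msg[MSG_SIZE];

        if (count > sizeof(msg))        /* would overflow msg[]: refuse */
                return -EFAULT;
        memcpy(msg, src, count);        /* stands in for copy_from_user() */
        /* ... hand msg[] to the hardware ... */
        return (int)count;
}

int main(void)
{
        char big[512] = { 0 };

        printf("short copy -> %d\n", write_cmd("ATD", 4));
        printf("oversized  -> %d\n", write_cmd(big, sizeof(big)));
        return 0;
}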
24925 diff -urNp linux-2.6.34.1/drivers/lguest/core.c linux-2.6.34.1/drivers/lguest/core.c
24926 --- linux-2.6.34.1/drivers/lguest/core.c 2010-07-05 14:24:10.000000000 -0400
24927 +++ linux-2.6.34.1/drivers/lguest/core.c 2010-07-07 09:04:52.000000000 -0400
24928 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
24929 * it's worked so far. The end address needs +1 because __get_vm_area
24930 * allocates an extra guard page, so we need space for that.
24931 */
24932 +
24933 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24934 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
24935 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
24936 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
24937 +#else
24938 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
24939 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
24940 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
24941 +#endif
24942 +
24943 if (!switcher_vma) {
24944 err = -ENOMEM;
24945 printk("lguest: could not map switcher pages high\n");
24946 diff -urNp linux-2.6.34.1/drivers/macintosh/via-pmu-backlight.c linux-2.6.34.1/drivers/macintosh/via-pmu-backlight.c
24947 --- linux-2.6.34.1/drivers/macintosh/via-pmu-backlight.c 2010-07-05 14:24:10.000000000 -0400
24948 +++ linux-2.6.34.1/drivers/macintosh/via-pmu-backlight.c 2010-07-07 09:04:52.000000000 -0400
24949 @@ -15,7 +15,7 @@
24950
24951 #define MAX_PMU_LEVEL 0xFF
24952
24953 -static struct backlight_ops pmu_backlight_data;
24954 +static const struct backlight_ops pmu_backlight_data;
24955 static DEFINE_SPINLOCK(pmu_backlight_lock);
24956 static int sleeping, uses_pmu_bl;
24957 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
24958 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
24959 return bd->props.brightness;
24960 }
24961
24962 -static struct backlight_ops pmu_backlight_data = {
24963 +static const struct backlight_ops pmu_backlight_data = {
24964 .get_brightness = pmu_backlight_get_brightness,
24965 .update_status = pmu_backlight_update_status,
24966
24967 diff -urNp linux-2.6.34.1/drivers/macintosh/via-pmu.c linux-2.6.34.1/drivers/macintosh/via-pmu.c
24968 --- linux-2.6.34.1/drivers/macintosh/via-pmu.c 2010-07-05 14:24:10.000000000 -0400
24969 +++ linux-2.6.34.1/drivers/macintosh/via-pmu.c 2010-07-07 09:04:52.000000000 -0400
24970 @@ -2254,7 +2254,7 @@ static int pmu_sleep_valid(suspend_state
24971 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
24972 }
24973
24974 -static struct platform_suspend_ops pmu_pm_ops = {
24975 +static const struct platform_suspend_ops pmu_pm_ops = {
24976 .enter = powerbook_sleep,
24977 .valid = pmu_sleep_valid,
24978 };
24979 diff -urNp linux-2.6.34.1/drivers/md/bitmap.c linux-2.6.34.1/drivers/md/bitmap.c
24980 --- linux-2.6.34.1/drivers/md/bitmap.c 2010-07-05 14:24:10.000000000 -0400
24981 +++ linux-2.6.34.1/drivers/md/bitmap.c 2010-07-07 09:04:52.000000000 -0400
24982 @@ -58,7 +58,7 @@
24983 # if DEBUG > 0
24984 # define PRINTK(x...) printk(KERN_DEBUG x)
24985 # else
24986 -# define PRINTK(x...)
24987 +# define PRINTK(x...) do {} while (0)
24988 # endif
24989 #endif
24990
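The empty PRINTK() body above (and MPT_CHECK_LOGGING()/IWL_DEBUG() further down) gains a "do {} while (0)" expansion because that form is a single statement that insists on its trailing semicolon, so the non-debug build parses exactly like the debug build. Illustrative sketch with made-up macro names:

#include <stdio.h>

#define LOG_EMPTY(fmt, ...)                     /* expands to nothing     */
#define LOG_SAFE(fmt, ...)  do {} while (0)     /* one complete statement */

static void do_cleanup(void) { puts("cleanup"); }

int main(void)
{
        int err = 0;

        /* With LOG_EMPTY and a forgotten ';' the next line silently
         * becomes the if-body:
         *
         *     if (err)
         *             LOG_EMPTY("failed")
         *     do_cleanup();        <- would now run only when err != 0
         *
         * The do-while form turns the same typo into a compile error. */
        if (err)
                LOG_SAFE("failed: %d", err);
        do_cleanup();                            /* always runs */
        return 0;
}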
24991 diff -urNp linux-2.6.34.1/drivers/md/dm-table.c linux-2.6.34.1/drivers/md/dm-table.c
24992 --- linux-2.6.34.1/drivers/md/dm-table.c 2010-07-05 14:24:10.000000000 -0400
24993 +++ linux-2.6.34.1/drivers/md/dm-table.c 2010-07-07 09:04:52.000000000 -0400
24994 @@ -363,7 +363,7 @@ static int device_area_is_invalid(struct
24995 if (!dev_size)
24996 return 0;
24997
24998 - if ((start >= dev_size) || (start + len > dev_size)) {
24999 + if ((start >= dev_size) || (len > dev_size - start)) {
25000 DMWARN("%s: %s too small for target: "
25001 "start=%llu, len=%llu, dev_size=%llu",
25002 dm_device_name(ti->table->md), bdevname(bdev, b),
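The dm-table check above is rewritten from "start + len > dev_size" to "len > dev_size - start" because the addition is done in an unsigned sector type and can wrap; once the short-circuited first clause has established start < dev_size, the subtraction cannot underflow and the comparison cannot be fooled. A standalone demonstration, with sector_t as a stand-in typedef:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool range_is_invalid(sector_t start, sector_t len, sector_t dev_size)
{
        return start >= dev_size || len > dev_size - start;
}

int main(void)
{
        sector_t dev_size = 1000;
        sector_t start = 10, len = UINT64_MAX - 5;   /* start + len wraps to 4 */

        printf("naive:   %s\n",
               (start >= dev_size || start + len > dev_size)
               ? "rejected" : "ACCEPTED");           /* wrap sneaks through */
        printf("patched: %s\n",
               range_is_invalid(start, len, dev_size)
               ? "rejected" : "ACCEPTED");
        return 0;
}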
25003 diff -urNp linux-2.6.34.1/drivers/md/md.c linux-2.6.34.1/drivers/md/md.c
25004 --- linux-2.6.34.1/drivers/md/md.c 2010-07-05 14:24:10.000000000 -0400
25005 +++ linux-2.6.34.1/drivers/md/md.c 2010-07-07 09:04:52.000000000 -0400
25006 @@ -6265,7 +6265,7 @@ static int md_seq_show(struct seq_file *
25007 chunk_kb ? "KB" : "B");
25008 if (bitmap->file) {
25009 seq_printf(seq, ", file: ");
25010 - seq_path(seq, &bitmap->file->f_path, " \t\n");
25011 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
25012 }
25013
25014 seq_printf(seq, "\n");
25015 @@ -6359,7 +6359,7 @@ static int is_mddev_idle(mddev_t *mddev,
25016 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
25017 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
25018 (int)part_stat_read(&disk->part0, sectors[1]) -
25019 - atomic_read(&disk->sync_io);
25020 + atomic_read_unchecked(&disk->sync_io);
25021 /* sync IO will cause sync_io to increase before the disk_stats
25022 * as sync_io is counted when a request starts, and
25023 * disk_stats is counted when it completes.
25024 diff -urNp linux-2.6.34.1/drivers/md/md.h linux-2.6.34.1/drivers/md/md.h
25025 --- linux-2.6.34.1/drivers/md/md.h 2010-07-05 14:24:10.000000000 -0400
25026 +++ linux-2.6.34.1/drivers/md/md.h 2010-07-07 09:04:52.000000000 -0400
25027 @@ -328,7 +328,7 @@ static inline void rdev_dec_pending(mdk_
25028
25029 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
25030 {
25031 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
25032 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
25033 }
25034
25035 struct mdk_personality
25036 diff -urNp linux-2.6.34.1/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.34.1/drivers/media/dvb/dvb-core/dvbdev.c
25037 --- linux-2.6.34.1/drivers/media/dvb/dvb-core/dvbdev.c 2010-07-05 14:24:10.000000000 -0400
25038 +++ linux-2.6.34.1/drivers/media/dvb/dvb-core/dvbdev.c 2010-07-07 09:04:52.000000000 -0400
25039 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
25040 const struct dvb_device *template, void *priv, int type)
25041 {
25042 struct dvb_device *dvbdev;
25043 + /* cannot be const, see this function */
25044 struct file_operations *dvbdevfops;
25045 struct device *clsdev;
25046 int minor;
25047 diff -urNp linux-2.6.34.1/drivers/media/radio/radio-cadet.c linux-2.6.34.1/drivers/media/radio/radio-cadet.c
25048 --- linux-2.6.34.1/drivers/media/radio/radio-cadet.c 2010-07-05 14:24:10.000000000 -0400
25049 +++ linux-2.6.34.1/drivers/media/radio/radio-cadet.c 2010-07-07 09:04:52.000000000 -0400
25050 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
25051 while (i < count && dev->rdsin != dev->rdsout)
25052 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
25053
25054 - if (copy_to_user(data, readbuf, i))
25055 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
25056 return -EFAULT;
25057 return i;
25058 }
25059 diff -urNp linux-2.6.34.1/drivers/message/fusion/mptdebug.h linux-2.6.34.1/drivers/message/fusion/mptdebug.h
25060 --- linux-2.6.34.1/drivers/message/fusion/mptdebug.h 2010-07-05 14:24:10.000000000 -0400
25061 +++ linux-2.6.34.1/drivers/message/fusion/mptdebug.h 2010-07-07 09:04:52.000000000 -0400
25062 @@ -71,7 +71,7 @@
25063 CMD; \
25064 }
25065 #else
25066 -#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
25067 +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0)
25068 #endif
25069
25070
25071 diff -urNp linux-2.6.34.1/drivers/message/fusion/mptsas.c linux-2.6.34.1/drivers/message/fusion/mptsas.c
25072 --- linux-2.6.34.1/drivers/message/fusion/mptsas.c 2010-07-05 14:24:10.000000000 -0400
25073 +++ linux-2.6.34.1/drivers/message/fusion/mptsas.c 2010-07-07 09:04:52.000000000 -0400
25074 @@ -437,6 +437,23 @@ mptsas_is_end_device(struct mptsas_devin
25075 return 0;
25076 }
25077
25078 +static inline void
25079 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
25080 +{
25081 + if (phy_info->port_details) {
25082 + phy_info->port_details->rphy = rphy;
25083 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
25084 + ioc->name, rphy));
25085 + }
25086 +
25087 + if (rphy) {
25088 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
25089 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
25090 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
25091 + ioc->name, rphy, rphy->dev.release));
25092 + }
25093 +}
25094 +
25095 /* no mutex */
25096 static void
25097 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
25098 @@ -475,23 +492,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
25099 return NULL;
25100 }
25101
25102 -static inline void
25103 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
25104 -{
25105 - if (phy_info->port_details) {
25106 - phy_info->port_details->rphy = rphy;
25107 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
25108 - ioc->name, rphy));
25109 - }
25110 -
25111 - if (rphy) {
25112 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
25113 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
25114 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
25115 - ioc->name, rphy, rphy->dev.release));
25116 - }
25117 -}
25118 -
25119 static inline struct sas_port *
25120 mptsas_get_port(struct mptsas_phyinfo *phy_info)
25121 {
25122 diff -urNp linux-2.6.34.1/drivers/message/i2o/i2o_proc.c linux-2.6.34.1/drivers/message/i2o/i2o_proc.c
25123 --- linux-2.6.34.1/drivers/message/i2o/i2o_proc.c 2010-07-05 14:24:10.000000000 -0400
25124 +++ linux-2.6.34.1/drivers/message/i2o/i2o_proc.c 2010-07-07 09:04:52.000000000 -0400
25125 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
25126 "Array Controller Device"
25127 };
25128
25129 -static char *chtostr(u8 * chars, int n)
25130 -{
25131 - char tmp[256];
25132 - tmp[0] = 0;
25133 - return strncat(tmp, (char *)chars, n);
25134 -}
25135 -
25136 static int i2o_report_query_status(struct seq_file *seq, int block_status,
25137 char *group)
25138 {
25139 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
25140
25141 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
25142 seq_printf(seq, "%-#8x", ddm_table.module_id);
25143 - seq_printf(seq, "%-29s",
25144 - chtostr(ddm_table.module_name_version, 28));
25145 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
25146 seq_printf(seq, "%9d ", ddm_table.data_size);
25147 seq_printf(seq, "%8d", ddm_table.code_size);
25148
25149 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
25150
25151 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
25152 seq_printf(seq, "%-#8x", dst->module_id);
25153 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
25154 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
25155 + seq_printf(seq, "%-.28s", dst->module_name_version);
25156 + seq_printf(seq, "%-.8s", dst->date);
25157 seq_printf(seq, "%8d ", dst->module_size);
25158 seq_printf(seq, "%8d ", dst->mpb_size);
25159 seq_printf(seq, "0x%04x", dst->module_flags);
25160 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
25161 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
25162 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
25163 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
25164 - seq_printf(seq, "Vendor info : %s\n",
25165 - chtostr((u8 *) (work32 + 2), 16));
25166 - seq_printf(seq, "Product info : %s\n",
25167 - chtostr((u8 *) (work32 + 6), 16));
25168 - seq_printf(seq, "Description : %s\n",
25169 - chtostr((u8 *) (work32 + 10), 16));
25170 - seq_printf(seq, "Product rev. : %s\n",
25171 - chtostr((u8 *) (work32 + 14), 8));
25172 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
25173 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
25174 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
25175 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
25176
25177 seq_printf(seq, "Serial number : ");
25178 print_serial_number(seq, (u8 *) (work32 + 16),
25179 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
25180 }
25181
25182 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
25183 - seq_printf(seq, "Module name : %s\n",
25184 - chtostr(result.module_name, 24));
25185 - seq_printf(seq, "Module revision : %s\n",
25186 - chtostr(result.module_rev, 8));
25187 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
25188 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
25189
25190 seq_printf(seq, "Serial number : ");
25191 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
25192 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
25193 return 0;
25194 }
25195
25196 - seq_printf(seq, "Device name : %s\n",
25197 - chtostr(result.device_name, 64));
25198 - seq_printf(seq, "Service name : %s\n",
25199 - chtostr(result.service_name, 64));
25200 - seq_printf(seq, "Physical name : %s\n",
25201 - chtostr(result.physical_location, 64));
25202 - seq_printf(seq, "Instance number : %s\n",
25203 - chtostr(result.instance_number, 4));
25204 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
25205 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
25206 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
25207 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
25208
25209 return 0;
25210 }
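The i2o_proc hunks above delete chtostr(), which returned a pointer into its own 256-byte stack frame (invalid the moment the function returned) while strncat'ing from unterminated data, and switch the callers to seq_printf precision specifiers such as %-.28s, which read at most that many bytes and need no temporary. A small standalone illustration of the precision form:

#include <stdio.h>

int main(void)
{
        /* fixed-width firmware field, deliberately not NUL-terminated */
        const char module_name_version[28] =
                "i2o_block  01.30  2010/07/07";

        printf("%-.28s\n", module_name_version);      /* fixed precision   */
        printf("%.*s\n", 8, "20100707-extra-bytes");  /* runtime precision */
        return 0;
}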
25211 diff -urNp linux-2.6.34.1/drivers/misc/kgdbts.c linux-2.6.34.1/drivers/misc/kgdbts.c
25212 --- linux-2.6.34.1/drivers/misc/kgdbts.c 2010-07-05 14:24:10.000000000 -0400
25213 +++ linux-2.6.34.1/drivers/misc/kgdbts.c 2010-07-07 09:04:52.000000000 -0400
25214 @@ -118,7 +118,7 @@
25215 } while (0)
25216 #define MAX_CONFIG_LEN 40
25217
25218 -static struct kgdb_io kgdbts_io_ops;
25219 +static const struct kgdb_io kgdbts_io_ops;
25220 static char get_buf[BUFMAX];
25221 static int get_buf_cnt;
25222 static char put_buf[BUFMAX];
25223 @@ -1114,7 +1114,7 @@ static void kgdbts_post_exp_handler(void
25224 module_put(THIS_MODULE);
25225 }
25226
25227 -static struct kgdb_io kgdbts_io_ops = {
25228 +static const struct kgdb_io kgdbts_io_ops = {
25229 .name = "kgdbts",
25230 .read_char = kgdbts_get_char,
25231 .write_char = kgdbts_put_char,
25232 diff -urNp linux-2.6.34.1/drivers/misc/sgi-gru/gruhandles.c linux-2.6.34.1/drivers/misc/sgi-gru/gruhandles.c
25233 --- linux-2.6.34.1/drivers/misc/sgi-gru/gruhandles.c 2010-07-05 14:24:10.000000000 -0400
25234 +++ linux-2.6.34.1/drivers/misc/sgi-gru/gruhandles.c 2010-07-07 09:04:52.000000000 -0400
25235 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
25236 unsigned long nsec;
25237
25238 nsec = CLKS2NSEC(clks);
25239 - atomic_long_inc(&mcs_op_statistics[op].count);
25240 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
25241 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
25242 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
25243 if (mcs_op_statistics[op].max < nsec)
25244 mcs_op_statistics[op].max = nsec;
25245 }
25246 diff -urNp linux-2.6.34.1/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.34.1/drivers/misc/sgi-gru/gruprocfs.c
25247 --- linux-2.6.34.1/drivers/misc/sgi-gru/gruprocfs.c 2010-07-05 14:24:10.000000000 -0400
25248 +++ linux-2.6.34.1/drivers/misc/sgi-gru/gruprocfs.c 2010-07-07 09:04:52.000000000 -0400
25249 @@ -32,9 +32,9 @@
25250
25251 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
25252
25253 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
25254 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
25255 {
25256 - unsigned long val = atomic_long_read(v);
25257 + unsigned long val = atomic_long_read_unchecked(v);
25258
25259 seq_printf(s, "%16lu %s\n", val, id);
25260 }
25261 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
25262
25263 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
25264 for (op = 0; op < mcsop_last; op++) {
25265 - count = atomic_long_read(&mcs_op_statistics[op].count);
25266 - total = atomic_long_read(&mcs_op_statistics[op].total);
25267 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
25268 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
25269 max = mcs_op_statistics[op].max;
25270 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
25271 count ? total / count : 0, max);
25272 diff -urNp linux-2.6.34.1/drivers/misc/sgi-gru/grutables.h linux-2.6.34.1/drivers/misc/sgi-gru/grutables.h
25273 --- linux-2.6.34.1/drivers/misc/sgi-gru/grutables.h 2010-07-05 14:24:10.000000000 -0400
25274 +++ linux-2.6.34.1/drivers/misc/sgi-gru/grutables.h 2010-07-07 09:04:52.000000000 -0400
25275 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
25276 * GRU statistics.
25277 */
25278 struct gru_stats_s {
25279 - atomic_long_t vdata_alloc;
25280 - atomic_long_t vdata_free;
25281 - atomic_long_t gts_alloc;
25282 - atomic_long_t gts_free;
25283 - atomic_long_t gms_alloc;
25284 - atomic_long_t gms_free;
25285 - atomic_long_t gts_double_allocate;
25286 - atomic_long_t assign_context;
25287 - atomic_long_t assign_context_failed;
25288 - atomic_long_t free_context;
25289 - atomic_long_t load_user_context;
25290 - atomic_long_t load_kernel_context;
25291 - atomic_long_t lock_kernel_context;
25292 - atomic_long_t unlock_kernel_context;
25293 - atomic_long_t steal_user_context;
25294 - atomic_long_t steal_kernel_context;
25295 - atomic_long_t steal_context_failed;
25296 - atomic_long_t nopfn;
25297 - atomic_long_t asid_new;
25298 - atomic_long_t asid_next;
25299 - atomic_long_t asid_wrap;
25300 - atomic_long_t asid_reuse;
25301 - atomic_long_t intr;
25302 - atomic_long_t intr_cbr;
25303 - atomic_long_t intr_tfh;
25304 - atomic_long_t intr_spurious;
25305 - atomic_long_t intr_mm_lock_failed;
25306 - atomic_long_t call_os;
25307 - atomic_long_t call_os_wait_queue;
25308 - atomic_long_t user_flush_tlb;
25309 - atomic_long_t user_unload_context;
25310 - atomic_long_t user_exception;
25311 - atomic_long_t set_context_option;
25312 - atomic_long_t check_context_retarget_intr;
25313 - atomic_long_t check_context_unload;
25314 - atomic_long_t tlb_dropin;
25315 - atomic_long_t tlb_preload_page;
25316 - atomic_long_t tlb_dropin_fail_no_asid;
25317 - atomic_long_t tlb_dropin_fail_upm;
25318 - atomic_long_t tlb_dropin_fail_invalid;
25319 - atomic_long_t tlb_dropin_fail_range_active;
25320 - atomic_long_t tlb_dropin_fail_idle;
25321 - atomic_long_t tlb_dropin_fail_fmm;
25322 - atomic_long_t tlb_dropin_fail_no_exception;
25323 - atomic_long_t tfh_stale_on_fault;
25324 - atomic_long_t mmu_invalidate_range;
25325 - atomic_long_t mmu_invalidate_page;
25326 - atomic_long_t flush_tlb;
25327 - atomic_long_t flush_tlb_gru;
25328 - atomic_long_t flush_tlb_gru_tgh;
25329 - atomic_long_t flush_tlb_gru_zero_asid;
25330 -
25331 - atomic_long_t copy_gpa;
25332 - atomic_long_t read_gpa;
25333 -
25334 - atomic_long_t mesq_receive;
25335 - atomic_long_t mesq_receive_none;
25336 - atomic_long_t mesq_send;
25337 - atomic_long_t mesq_send_failed;
25338 - atomic_long_t mesq_noop;
25339 - atomic_long_t mesq_send_unexpected_error;
25340 - atomic_long_t mesq_send_lb_overflow;
25341 - atomic_long_t mesq_send_qlimit_reached;
25342 - atomic_long_t mesq_send_amo_nacked;
25343 - atomic_long_t mesq_send_put_nacked;
25344 - atomic_long_t mesq_page_overflow;
25345 - atomic_long_t mesq_qf_locked;
25346 - atomic_long_t mesq_qf_noop_not_full;
25347 - atomic_long_t mesq_qf_switch_head_failed;
25348 - atomic_long_t mesq_qf_unexpected_error;
25349 - atomic_long_t mesq_noop_unexpected_error;
25350 - atomic_long_t mesq_noop_lb_overflow;
25351 - atomic_long_t mesq_noop_qlimit_reached;
25352 - atomic_long_t mesq_noop_amo_nacked;
25353 - atomic_long_t mesq_noop_put_nacked;
25354 - atomic_long_t mesq_noop_page_overflow;
25355 + atomic_long_unchecked_t vdata_alloc;
25356 + atomic_long_unchecked_t vdata_free;
25357 + atomic_long_unchecked_t gts_alloc;
25358 + atomic_long_unchecked_t gts_free;
25359 + atomic_long_unchecked_t gms_alloc;
25360 + atomic_long_unchecked_t gms_free;
25361 + atomic_long_unchecked_t gts_double_allocate;
25362 + atomic_long_unchecked_t assign_context;
25363 + atomic_long_unchecked_t assign_context_failed;
25364 + atomic_long_unchecked_t free_context;
25365 + atomic_long_unchecked_t load_user_context;
25366 + atomic_long_unchecked_t load_kernel_context;
25367 + atomic_long_unchecked_t lock_kernel_context;
25368 + atomic_long_unchecked_t unlock_kernel_context;
25369 + atomic_long_unchecked_t steal_user_context;
25370 + atomic_long_unchecked_t steal_kernel_context;
25371 + atomic_long_unchecked_t steal_context_failed;
25372 + atomic_long_unchecked_t nopfn;
25373 + atomic_long_unchecked_t asid_new;
25374 + atomic_long_unchecked_t asid_next;
25375 + atomic_long_unchecked_t asid_wrap;
25376 + atomic_long_unchecked_t asid_reuse;
25377 + atomic_long_unchecked_t intr;
25378 + atomic_long_unchecked_t intr_cbr;
25379 + atomic_long_unchecked_t intr_tfh;
25380 + atomic_long_unchecked_t intr_spurious;
25381 + atomic_long_unchecked_t intr_mm_lock_failed;
25382 + atomic_long_unchecked_t call_os;
25383 + atomic_long_unchecked_t call_os_wait_queue;
25384 + atomic_long_unchecked_t user_flush_tlb;
25385 + atomic_long_unchecked_t user_unload_context;
25386 + atomic_long_unchecked_t user_exception;
25387 + atomic_long_unchecked_t set_context_option;
25388 + atomic_long_unchecked_t check_context_retarget_intr;
25389 + atomic_long_unchecked_t check_context_unload;
25390 + atomic_long_unchecked_t tlb_dropin;
25391 + atomic_long_unchecked_t tlb_preload_page;
25392 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
25393 + atomic_long_unchecked_t tlb_dropin_fail_upm;
25394 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
25395 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
25396 + atomic_long_unchecked_t tlb_dropin_fail_idle;
25397 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
25398 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
25399 + atomic_long_unchecked_t tfh_stale_on_fault;
25400 + atomic_long_unchecked_t mmu_invalidate_range;
25401 + atomic_long_unchecked_t mmu_invalidate_page;
25402 + atomic_long_unchecked_t flush_tlb;
25403 + atomic_long_unchecked_t flush_tlb_gru;
25404 + atomic_long_unchecked_t flush_tlb_gru_tgh;
25405 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
25406 +
25407 + atomic_long_unchecked_t copy_gpa;
25408 + atomic_long_unchecked_t read_gpa;
25409 +
25410 + atomic_long_unchecked_t mesq_receive;
25411 + atomic_long_unchecked_t mesq_receive_none;
25412 + atomic_long_unchecked_t mesq_send;
25413 + atomic_long_unchecked_t mesq_send_failed;
25414 + atomic_long_unchecked_t mesq_noop;
25415 + atomic_long_unchecked_t mesq_send_unexpected_error;
25416 + atomic_long_unchecked_t mesq_send_lb_overflow;
25417 + atomic_long_unchecked_t mesq_send_qlimit_reached;
25418 + atomic_long_unchecked_t mesq_send_amo_nacked;
25419 + atomic_long_unchecked_t mesq_send_put_nacked;
25420 + atomic_long_unchecked_t mesq_page_overflow;
25421 + atomic_long_unchecked_t mesq_qf_locked;
25422 + atomic_long_unchecked_t mesq_qf_noop_not_full;
25423 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
25424 + atomic_long_unchecked_t mesq_qf_unexpected_error;
25425 + atomic_long_unchecked_t mesq_noop_unexpected_error;
25426 + atomic_long_unchecked_t mesq_noop_lb_overflow;
25427 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
25428 + atomic_long_unchecked_t mesq_noop_amo_nacked;
25429 + atomic_long_unchecked_t mesq_noop_put_nacked;
25430 + atomic_long_unchecked_t mesq_noop_page_overflow;
25431
25432 };
25433
25434 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
25435 tghop_invalidate, mcsop_last};
25436
25437 struct mcs_op_statistic {
25438 - atomic_long_t count;
25439 - atomic_long_t total;
25440 + atomic_long_unchecked_t count;
25441 + atomic_long_unchecked_t total;
25442 unsigned long max;
25443 };
25444
25445 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
25446
25447 #define STAT(id) do { \
25448 if (gru_options & OPT_STATS) \
25449 - atomic_long_inc(&gru_stats.id); \
25450 + atomic_long_inc_unchecked(&gru_stats.id); \
25451 } while (0)
25452
25453 #ifdef CONFIG_SGI_GRU_DEBUG
25454 diff -urNp linux-2.6.34.1/drivers/mtd/devices/doc2000.c linux-2.6.34.1/drivers/mtd/devices/doc2000.c
25455 --- linux-2.6.34.1/drivers/mtd/devices/doc2000.c 2010-07-05 14:24:10.000000000 -0400
25456 +++ linux-2.6.34.1/drivers/mtd/devices/doc2000.c 2010-07-07 09:04:52.000000000 -0400
25457 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
25458
25459 /* The ECC will not be calculated correctly if less than 512 is written */
25460 /* DBB-
25461 - if (len != 0x200 && eccbuf)
25462 + if (len != 0x200)
25463 printk(KERN_WARNING
25464 "ECC needs a full sector write (adr: %lx size %lx)\n",
25465 (long) to, (long) len);
25466 diff -urNp linux-2.6.34.1/drivers/mtd/devices/doc2001.c linux-2.6.34.1/drivers/mtd/devices/doc2001.c
25467 --- linux-2.6.34.1/drivers/mtd/devices/doc2001.c 2010-07-05 14:24:10.000000000 -0400
25468 +++ linux-2.6.34.1/drivers/mtd/devices/doc2001.c 2010-07-07 09:04:52.000000000 -0400
25469 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
25470 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
25471
25472 /* Don't allow read past end of device */
25473 - if (from >= this->totlen)
25474 + if (from >= this->totlen || !len)
25475 return -EINVAL;
25476
25477 /* Don't allow a single read to cross a 512-byte block boundary */
25478 diff -urNp linux-2.6.34.1/drivers/mtd/ubi/build.c linux-2.6.34.1/drivers/mtd/ubi/build.c
25479 --- linux-2.6.34.1/drivers/mtd/ubi/build.c 2010-07-05 14:24:10.000000000 -0400
25480 +++ linux-2.6.34.1/drivers/mtd/ubi/build.c 2010-07-07 09:04:52.000000000 -0400
25481 @@ -1296,7 +1296,7 @@ module_exit(ubi_exit);
25482 static int __init bytes_str_to_int(const char *str)
25483 {
25484 char *endp;
25485 - unsigned long result;
25486 + unsigned long result, scale = 1;
25487
25488 result = simple_strtoul(str, &endp, 0);
25489 if (str == endp || result >= INT_MAX) {
25490 @@ -1307,11 +1307,11 @@ static int __init bytes_str_to_int(const
25491
25492 switch (*endp) {
25493 case 'G':
25494 - result *= 1024;
25495 + scale *= 1024;
25496 case 'M':
25497 - result *= 1024;
25498 + scale *= 1024;
25499 case 'K':
25500 - result *= 1024;
25501 + scale *= 1024;
25502 if (endp[1] == 'i' && endp[2] == 'B')
25503 endp += 2;
25504 case '\0':
25505 @@ -1322,7 +1322,13 @@ static int __init bytes_str_to_int(const
25506 return -EINVAL;
25507 }
25508
25509 - return result;
25510 + if ((intoverflow_t)result*scale >= INT_MAX) {
25511 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
25512 + str);
25513 + return -EINVAL;
25514 + }
25515 +
25516 + return result*scale;
25517 }
25518
25519 /**
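The UBI hunk above keeps the K/M/G fall-through but accumulates the multiplier in a separate scale and checks result*scale against INT_MAX in a wider type before narrowing, so an oversized module parameter is rejected instead of silently overflowing int. intoverflow_t is a wide type this patch defines elsewhere; the sketch below substitutes unsigned long long and plain strtoul:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int bytes_str_to_int(const char *str)
{
        char *endp;
        unsigned long result, scale = 1;

        result = strtoul(str, &endp, 0);
        if (endp == str || result >= INT_MAX)
                return -1;

        switch (*endp) {
        case 'G':
                scale *= 1024;          /* fall through */
        case 'M':
                scale *= 1024;          /* fall through */
        case 'K':
                scale *= 1024;
                if (endp[1] == 'i' && endp[2] == 'B')
                        endp += 2;
                /* fall through */
        case '\0':
                break;
        default:
                return -1;
        }

        /* widen before multiplying, as the patch does with intoverflow_t */
        if ((unsigned long long)result * scale >= INT_MAX)
                return -1;

        return (int)(result * scale);
}

int main(void)
{
        printf("%d\n", bytes_str_to_int("512KiB"));   /* 524288        */
        printf("%d\n", bytes_str_to_int("8G"));       /* rejected: -1  */
        return 0;
}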
25520 diff -urNp linux-2.6.34.1/drivers/net/e1000e/82571.c linux-2.6.34.1/drivers/net/e1000e/82571.c
25521 --- linux-2.6.34.1/drivers/net/e1000e/82571.c 2010-07-05 14:24:10.000000000 -0400
25522 +++ linux-2.6.34.1/drivers/net/e1000e/82571.c 2010-07-07 09:04:52.000000000 -0400
25523 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_82571(s
25524 {
25525 struct e1000_hw *hw = &adapter->hw;
25526 struct e1000_mac_info *mac = &hw->mac;
25527 + /* cannot be const */
25528 struct e1000_mac_operations *func = &mac->ops;
25529 u32 swsm = 0;
25530 u32 swsm2 = 0;
25531 @@ -1692,7 +1693,7 @@ static void e1000_clear_hw_cntrs_82571(s
25532 er32(ICRXDMTC);
25533 }
25534
25535 -static struct e1000_mac_operations e82571_mac_ops = {
25536 +static const struct e1000_mac_operations e82571_mac_ops = {
25537 /* .check_mng_mode: mac type dependent */
25538 /* .check_for_link: media type dependent */
25539 .id_led_init = e1000e_id_led_init,
25540 @@ -1714,7 +1715,7 @@ static struct e1000_mac_operations e8257
25541 .read_mac_addr = e1000_read_mac_addr_82571,
25542 };
25543
25544 -static struct e1000_phy_operations e82_phy_ops_igp = {
25545 +static const struct e1000_phy_operations e82_phy_ops_igp = {
25546 .acquire = e1000_get_hw_semaphore_82571,
25547 .check_polarity = e1000_check_polarity_igp,
25548 .check_reset_block = e1000e_check_reset_block_generic,
25549 @@ -1732,7 +1733,7 @@ static struct e1000_phy_operations e82_p
25550 .cfg_on_link_up = NULL,
25551 };
25552
25553 -static struct e1000_phy_operations e82_phy_ops_m88 = {
25554 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
25555 .acquire = e1000_get_hw_semaphore_82571,
25556 .check_polarity = e1000_check_polarity_m88,
25557 .check_reset_block = e1000e_check_reset_block_generic,
25558 @@ -1750,7 +1751,7 @@ static struct e1000_phy_operations e82_p
25559 .cfg_on_link_up = NULL,
25560 };
25561
25562 -static struct e1000_phy_operations e82_phy_ops_bm = {
25563 +static const struct e1000_phy_operations e82_phy_ops_bm = {
25564 .acquire = e1000_get_hw_semaphore_82571,
25565 .check_polarity = e1000_check_polarity_m88,
25566 .check_reset_block = e1000e_check_reset_block_generic,
25567 @@ -1768,7 +1769,7 @@ static struct e1000_phy_operations e82_p
25568 .cfg_on_link_up = NULL,
25569 };
25570
25571 -static struct e1000_nvm_operations e82571_nvm_ops = {
25572 +static const struct e1000_nvm_operations e82571_nvm_ops = {
25573 .acquire = e1000_acquire_nvm_82571,
25574 .read = e1000e_read_nvm_eerd,
25575 .release = e1000_release_nvm_82571,
25576 diff -urNp linux-2.6.34.1/drivers/net/e1000e/e1000.h linux-2.6.34.1/drivers/net/e1000e/e1000.h
25577 --- linux-2.6.34.1/drivers/net/e1000e/e1000.h 2010-07-05 14:24:10.000000000 -0400
25578 +++ linux-2.6.34.1/drivers/net/e1000e/e1000.h 2010-07-07 09:04:52.000000000 -0400
25579 @@ -379,9 +379,9 @@ struct e1000_info {
25580 u32 pba;
25581 u32 max_hw_frame_size;
25582 s32 (*get_variants)(struct e1000_adapter *);
25583 - struct e1000_mac_operations *mac_ops;
25584 - struct e1000_phy_operations *phy_ops;
25585 - struct e1000_nvm_operations *nvm_ops;
25586 + const struct e1000_mac_operations *mac_ops;
25587 + const struct e1000_phy_operations *phy_ops;
25588 + const struct e1000_nvm_operations *nvm_ops;
25589 };
25590
25591 /* hardware capability, feature, and workaround flags */
25592 diff -urNp linux-2.6.34.1/drivers/net/e1000e/es2lan.c linux-2.6.34.1/drivers/net/e1000e/es2lan.c
25593 --- linux-2.6.34.1/drivers/net/e1000e/es2lan.c 2010-07-05 14:24:10.000000000 -0400
25594 +++ linux-2.6.34.1/drivers/net/e1000e/es2lan.c 2010-07-07 09:04:52.000000000 -0400
25595 @@ -205,6 +205,7 @@ static s32 e1000_init_mac_params_80003es
25596 {
25597 struct e1000_hw *hw = &adapter->hw;
25598 struct e1000_mac_info *mac = &hw->mac;
25599 + /* cannot be const */
25600 struct e1000_mac_operations *func = &mac->ops;
25601
25602 /* Set media type */
25603 @@ -1430,7 +1431,7 @@ static void e1000_clear_hw_cntrs_80003es
25604 er32(ICRXDMTC);
25605 }
25606
25607 -static struct e1000_mac_operations es2_mac_ops = {
25608 +static const struct e1000_mac_operations es2_mac_ops = {
25609 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
25610 .id_led_init = e1000e_id_led_init,
25611 .check_mng_mode = e1000e_check_mng_mode_generic,
25612 @@ -1452,7 +1453,7 @@ static struct e1000_mac_operations es2_m
25613 .setup_led = e1000e_setup_led_generic,
25614 };
25615
25616 -static struct e1000_phy_operations es2_phy_ops = {
25617 +static const struct e1000_phy_operations es2_phy_ops = {
25618 .acquire = e1000_acquire_phy_80003es2lan,
25619 .check_polarity = e1000_check_polarity_m88,
25620 .check_reset_block = e1000e_check_reset_block_generic,
25621 @@ -1470,7 +1471,7 @@ static struct e1000_phy_operations es2_p
25622 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
25623 };
25624
25625 -static struct e1000_nvm_operations es2_nvm_ops = {
25626 +static const struct e1000_nvm_operations es2_nvm_ops = {
25627 .acquire = e1000_acquire_nvm_80003es2lan,
25628 .read = e1000e_read_nvm_eerd,
25629 .release = e1000_release_nvm_80003es2lan,
25630 diff -urNp linux-2.6.34.1/drivers/net/e1000e/hw.h linux-2.6.34.1/drivers/net/e1000e/hw.h
25631 --- linux-2.6.34.1/drivers/net/e1000e/hw.h 2010-07-05 14:24:10.000000000 -0400
25632 +++ linux-2.6.34.1/drivers/net/e1000e/hw.h 2010-07-07 09:04:52.000000000 -0400
25633 @@ -788,13 +788,13 @@ struct e1000_phy_operations {
25634
25635 /* Function pointers for the NVM. */
25636 struct e1000_nvm_operations {
25637 - s32 (*acquire)(struct e1000_hw *);
25638 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
25639 - void (*release)(struct e1000_hw *);
25640 - s32 (*update)(struct e1000_hw *);
25641 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
25642 - s32 (*validate)(struct e1000_hw *);
25643 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
25644 + s32 (* const acquire)(struct e1000_hw *);
25645 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
25646 + void (* const release)(struct e1000_hw *);
25647 + s32 (* const update)(struct e1000_hw *);
25648 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
25649 + s32 (* const validate)(struct e1000_hw *);
25650 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
25651 };
25652
25653 struct e1000_mac_info {
25654 @@ -873,6 +873,7 @@ struct e1000_phy_info {
25655 };
25656
25657 struct e1000_nvm_info {
25658 + /* cannot be const */
25659 struct e1000_nvm_operations ops;
25660
25661 enum e1000_nvm_type type;
25662 diff -urNp linux-2.6.34.1/drivers/net/e1000e/ich8lan.c linux-2.6.34.1/drivers/net/e1000e/ich8lan.c
25663 --- linux-2.6.34.1/drivers/net/e1000e/ich8lan.c 2010-07-05 14:24:10.000000000 -0400
25664 +++ linux-2.6.34.1/drivers/net/e1000e/ich8lan.c 2010-07-07 09:04:52.000000000 -0400
25665 @@ -3371,7 +3371,7 @@ static void e1000_clear_hw_cntrs_ich8lan
25666 }
25667 }
25668
25669 -static struct e1000_mac_operations ich8_mac_ops = {
25670 +static const struct e1000_mac_operations ich8_mac_ops = {
25671 .id_led_init = e1000e_id_led_init,
25672 .check_mng_mode = e1000_check_mng_mode_ich8lan,
25673 .check_for_link = e1000_check_for_copper_link_ich8lan,
25674 @@ -3390,7 +3390,7 @@ static struct e1000_mac_operations ich8_
25675 /* id_led_init dependent on mac type */
25676 };
25677
25678 -static struct e1000_phy_operations ich8_phy_ops = {
25679 +static const struct e1000_phy_operations ich8_phy_ops = {
25680 .acquire = e1000_acquire_swflag_ich8lan,
25681 .check_reset_block = e1000_check_reset_block_ich8lan,
25682 .commit = NULL,
25683 @@ -3404,7 +3404,7 @@ static struct e1000_phy_operations ich8_
25684 .write_reg = e1000e_write_phy_reg_igp,
25685 };
25686
25687 -static struct e1000_nvm_operations ich8_nvm_ops = {
25688 +static const struct e1000_nvm_operations ich8_nvm_ops = {
25689 .acquire = e1000_acquire_nvm_ich8lan,
25690 .read = e1000_read_nvm_ich8lan,
25691 .release = e1000_release_nvm_ich8lan,
25692 diff -urNp linux-2.6.34.1/drivers/net/igb/e1000_82575.c linux-2.6.34.1/drivers/net/igb/e1000_82575.c
25693 --- linux-2.6.34.1/drivers/net/igb/e1000_82575.c 2010-07-05 14:24:10.000000000 -0400
25694 +++ linux-2.6.34.1/drivers/net/igb/e1000_82575.c 2010-07-07 09:04:52.000000000 -0400
25695 @@ -1610,7 +1610,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
25696 return ret_val;
25697 }
25698
25699 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
25700 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
25701 .init_hw = igb_init_hw_82575,
25702 .check_for_link = igb_check_for_link_82575,
25703 .rar_set = igb_rar_set,
25704 @@ -1618,13 +1618,13 @@ static struct e1000_mac_operations e1000
25705 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
25706 };
25707
25708 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
25709 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
25710 .acquire = igb_acquire_phy_82575,
25711 .get_cfg_done = igb_get_cfg_done_82575,
25712 .release = igb_release_phy_82575,
25713 };
25714
25715 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
25716 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
25717 .acquire = igb_acquire_nvm_82575,
25718 .read = igb_read_nvm_eerd,
25719 .release = igb_release_nvm_82575,
25720 diff -urNp linux-2.6.34.1/drivers/net/igb/e1000_hw.h linux-2.6.34.1/drivers/net/igb/e1000_hw.h
25721 --- linux-2.6.34.1/drivers/net/igb/e1000_hw.h 2010-07-05 14:24:10.000000000 -0400
25722 +++ linux-2.6.34.1/drivers/net/igb/e1000_hw.h 2010-07-07 09:04:52.000000000 -0400
25723 @@ -317,17 +317,17 @@ struct e1000_phy_operations {
25724 };
25725
25726 struct e1000_nvm_operations {
25727 - s32 (*acquire)(struct e1000_hw *);
25728 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
25729 - void (*release)(struct e1000_hw *);
25730 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
25731 + s32 (* const acquire)(struct e1000_hw *);
25732 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
25733 + void (* const release)(struct e1000_hw *);
25734 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
25735 };
25736
25737 struct e1000_info {
25738 s32 (*get_invariants)(struct e1000_hw *);
25739 - struct e1000_mac_operations *mac_ops;
25740 - struct e1000_phy_operations *phy_ops;
25741 - struct e1000_nvm_operations *nvm_ops;
25742 + const struct e1000_mac_operations *mac_ops;
25743 + const struct e1000_phy_operations *phy_ops;
25744 + const struct e1000_nvm_operations *nvm_ops;
25745 };
25746
25747 extern const struct e1000_info e1000_82575_info;
25748 @@ -406,6 +406,7 @@ struct e1000_phy_info {
25749 };
25750
25751 struct e1000_nvm_info {
25752 + /* cannot be const */
25753 struct e1000_nvm_operations ops;
25754
25755 enum e1000_nvm_type type;
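The e1000e and igb hunks above, like the earlier backlight_ops, platform_suspend_ops and kgdb_io changes, mark ops tables that are only ever read as const and make the pointers that refer to them const, so the function-pointer tables end up in read-only data; the "/* cannot be const */" comments flag the one case (the ops copied into e1000_nvm_info and adjusted at init time) that has to stay writable. A rough standalone sketch of the pattern, with invented struct and function names:

#include <stdio.h>

struct nic_ops {
        int  (*reset)(void);
        void (*get_mac)(unsigned char mac[6]);
};

static int  demo_reset(void)                   { return 0; }
static void demo_get_mac(unsigned char mac[6]) { mac[0] = 0x02; }

/* const: plain writes are rejected by the compiler and the table is
 * typically placed in a read-only section by the linker */
static const struct nic_ops demo_ops = {
        .reset   = demo_reset,
        .get_mac = demo_get_mac,
};

struct nic_info {
        const struct nic_ops *ops;      /* points at the const table */
};

int main(void)
{
        struct nic_info info = { .ops = &demo_ops };
        unsigned char mac[6] = { 0 };

        info.ops->get_mac(mac);
        printf("reset=%d mac[0]=%02x\n", info.ops->reset(), mac[0]);
        return 0;
}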
25756 diff -urNp linux-2.6.34.1/drivers/net/irda/vlsi_ir.c linux-2.6.34.1/drivers/net/irda/vlsi_ir.c
25757 --- linux-2.6.34.1/drivers/net/irda/vlsi_ir.c 2010-07-05 14:24:10.000000000 -0400
25758 +++ linux-2.6.34.1/drivers/net/irda/vlsi_ir.c 2010-07-07 09:04:52.000000000 -0400
25759 @@ -907,13 +907,12 @@ static netdev_tx_t vlsi_hard_start_xmit(
25760 /* no race - tx-ring already empty */
25761 vlsi_set_baud(idev, iobase);
25762 netif_wake_queue(ndev);
25763 - }
25764 - else
25765 - ;
25766 + } else {
25767 /* keep the speed change pending like it would
25768 * for any len>0 packet. tx completion interrupt
25769 * will apply it when the tx ring becomes empty.
25770 */
25771 + }
25772 spin_unlock_irqrestore(&idev->lock, flags);
25773 dev_kfree_skb_any(skb);
25774 return NETDEV_TX_OK;
25775 diff -urNp linux-2.6.34.1/drivers/net/pcnet32.c linux-2.6.34.1/drivers/net/pcnet32.c
25776 --- linux-2.6.34.1/drivers/net/pcnet32.c 2010-07-05 14:24:10.000000000 -0400
25777 +++ linux-2.6.34.1/drivers/net/pcnet32.c 2010-07-07 09:04:52.000000000 -0400
25778 @@ -82,7 +82,7 @@ static int cards_found;
25779 /*
25780 * VLB I/O addresses
25781 */
25782 -static unsigned int pcnet32_portlist[] __initdata =
25783 +static unsigned int pcnet32_portlist[] __devinitdata =
25784 { 0x300, 0x320, 0x340, 0x360, 0 };
25785
25786 static int pcnet32_debug;
25787 diff -urNp linux-2.6.34.1/drivers/net/ppp_generic.c linux-2.6.34.1/drivers/net/ppp_generic.c
25788 --- linux-2.6.34.1/drivers/net/ppp_generic.c 2010-07-05 14:24:10.000000000 -0400
25789 +++ linux-2.6.34.1/drivers/net/ppp_generic.c 2010-07-07 09:04:53.000000000 -0400
25790 @@ -992,7 +992,6 @@ ppp_net_ioctl(struct net_device *dev, st
25791 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
25792 struct ppp_stats stats;
25793 struct ppp_comp_stats cstats;
25794 - char *vers;
25795
25796 switch (cmd) {
25797 case SIOCGPPPSTATS:
25798 @@ -1014,8 +1013,7 @@ ppp_net_ioctl(struct net_device *dev, st
25799 break;
25800
25801 case SIOCGPPPVER:
25802 - vers = PPP_VERSION;
25803 - if (copy_to_user(addr, vers, strlen(vers) + 1))
25804 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
25805 break;
25806 err = 0;
25807 break;
25808 diff -urNp linux-2.6.34.1/drivers/net/tg3.h linux-2.6.34.1/drivers/net/tg3.h
25809 --- linux-2.6.34.1/drivers/net/tg3.h 2010-07-05 14:24:10.000000000 -0400
25810 +++ linux-2.6.34.1/drivers/net/tg3.h 2010-07-07 09:04:53.000000000 -0400
25811 @@ -133,6 +133,7 @@
25812 #define CHIPREV_ID_5750_A0 0x4000
25813 #define CHIPREV_ID_5750_A1 0x4001
25814 #define CHIPREV_ID_5750_A3 0x4003
25815 +#define CHIPREV_ID_5750_C1 0x4201
25816 #define CHIPREV_ID_5750_C2 0x4202
25817 #define CHIPREV_ID_5752_A0_HW 0x5000
25818 #define CHIPREV_ID_5752_A0 0x6000
25819 diff -urNp linux-2.6.34.1/drivers/net/tulip/de4x5.c linux-2.6.34.1/drivers/net/tulip/de4x5.c
25820 --- linux-2.6.34.1/drivers/net/tulip/de4x5.c 2010-07-05 14:24:10.000000000 -0400
25821 +++ linux-2.6.34.1/drivers/net/tulip/de4x5.c 2010-07-07 09:04:53.000000000 -0400
25822 @@ -5470,7 +5470,7 @@ de4x5_ioctl(struct net_device *dev, stru
25823 for (i=0; i<ETH_ALEN; i++) {
25824 tmp.addr[i] = dev->dev_addr[i];
25825 }
25826 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
25827 + if (ioc->len > sizeof(tmp.addr) || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
25828 break;
25829
25830 case DE4X5_SET_HWADDR: /* Set the hardware address */
25831 @@ -5510,7 +5510,7 @@ de4x5_ioctl(struct net_device *dev, stru
25832 spin_lock_irqsave(&lp->lock, flags);
25833 memcpy(&statbuf, &lp->pktStats, ioc->len);
25834 spin_unlock_irqrestore(&lp->lock, flags);
25835 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
25836 + if (ioc->len > sizeof(statbuf) || copy_to_user(ioc->data, &statbuf, ioc->len))
25837 return -EFAULT;
25838 break;
25839 }
25840 diff -urNp linux-2.6.34.1/drivers/net/usb/hso.c linux-2.6.34.1/drivers/net/usb/hso.c
25841 --- linux-2.6.34.1/drivers/net/usb/hso.c 2010-07-05 14:24:10.000000000 -0400
25842 +++ linux-2.6.34.1/drivers/net/usb/hso.c 2010-07-07 09:04:53.000000000 -0400
25843 @@ -258,7 +258,7 @@ struct hso_serial {
25844
25845 /* from usb_serial_port */
25846 struct tty_struct *tty;
25847 - int open_count;
25848 + atomic_t open_count;
25849 spinlock_t serial_lock;
25850
25851 int (*write_data) (struct hso_serial *serial);
25852 @@ -1200,7 +1200,7 @@ static void put_rxbuf_data_and_resubmit_
25853 struct urb *urb;
25854
25855 urb = serial->rx_urb[0];
25856 - if (serial->open_count > 0) {
25857 + if (atomic_read(&serial->open_count) > 0) {
25858 count = put_rxbuf_data(urb, serial);
25859 if (count == -1)
25860 return;
25861 @@ -1236,7 +1236,7 @@ static void hso_std_serial_read_bulk_cal
25862 DUMP1(urb->transfer_buffer, urb->actual_length);
25863
25864 /* Anyone listening? */
25865 - if (serial->open_count == 0)
25866 + if (atomic_read(&serial->open_count) == 0)
25867 return;
25868
25869 if (status == 0) {
25870 @@ -1331,8 +1331,7 @@ static int hso_serial_open(struct tty_st
25871 spin_unlock_irq(&serial->serial_lock);
25872
25873 /* check for port already opened, if not set the termios */
25874 - serial->open_count++;
25875 - if (serial->open_count == 1) {
25876 + if (atomic_inc_return(&serial->open_count) == 1) {
25877 tty->low_latency = 1;
25878 serial->rx_state = RX_IDLE;
25879 /* Force default termio settings */
25880 @@ -1345,7 +1344,7 @@ static int hso_serial_open(struct tty_st
25881 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
25882 if (result) {
25883 hso_stop_serial_device(serial->parent);
25884 - serial->open_count--;
25885 + atomic_dec(&serial->open_count);
25886 kref_put(&serial->parent->ref, hso_serial_ref_free);
25887 }
25888 } else {
25889 @@ -1382,10 +1381,10 @@ static void hso_serial_close(struct tty_
25890
25891 /* reset the rts and dtr */
25892 /* do the actual close */
25893 - serial->open_count--;
25894 + atomic_dec(&serial->open_count);
25895
25896 - if (serial->open_count <= 0) {
25897 - serial->open_count = 0;
25898 + if (atomic_read(&serial->open_count) <= 0) {
25899 + atomic_set(&serial->open_count, 0);
25900 spin_lock_irq(&serial->serial_lock);
25901 if (serial->tty == tty) {
25902 serial->tty->driver_data = NULL;
25903 @@ -1467,7 +1466,7 @@ static void hso_serial_set_termios(struc
25904
25905 /* the actual setup */
25906 spin_lock_irqsave(&serial->serial_lock, flags);
25907 - if (serial->open_count)
25908 + if (atomic_read(&serial->open_count))
25909 _hso_serial_set_termios(tty, old);
25910 else
25911 tty->termios = old;
25912 @@ -1930,7 +1929,7 @@ static void intr_callback(struct urb *ur
25913 D1("Pending read interrupt on port %d\n", i);
25914 spin_lock(&serial->serial_lock);
25915 if (serial->rx_state == RX_IDLE &&
25916 - serial->open_count > 0) {
25917 + atomic_read(&serial->open_count) > 0) {
25918 /* Setup and send a ctrl req read on
25919 * port i */
25920 if (!serial->rx_urb_filled[0]) {
25921 @@ -3121,7 +3120,7 @@ static int hso_resume(struct usb_interfa
25922 /* Start all serial ports */
25923 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
25924 if (serial_table[i] && (serial_table[i]->interface == iface)) {
25925 - if (dev2ser(serial_table[i])->open_count) {
25926 + if (atomic_read(&dev2ser(serial_table[i])->open_count)) {
25927 result =
25928 hso_start_serial_device(serial_table[i], GFP_NOIO);
25929 hso_kick_transmit(dev2ser(serial_table[i]));
25930 diff -urNp linux-2.6.34.1/drivers/net/wireless/b43/debugfs.c linux-2.6.34.1/drivers/net/wireless/b43/debugfs.c
25931 --- linux-2.6.34.1/drivers/net/wireless/b43/debugfs.c 2010-07-05 14:24:10.000000000 -0400
25932 +++ linux-2.6.34.1/drivers/net/wireless/b43/debugfs.c 2010-07-07 09:04:53.000000000 -0400
25933 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
25934 struct b43_debugfs_fops {
25935 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
25936 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
25937 - struct file_operations fops;
25938 + const struct file_operations fops;
25939 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
25940 size_t file_struct_offset;
25941 };
25942 diff -urNp linux-2.6.34.1/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.34.1/drivers/net/wireless/b43legacy/debugfs.c
25943 --- linux-2.6.34.1/drivers/net/wireless/b43legacy/debugfs.c 2010-07-05 14:24:10.000000000 -0400
25944 +++ linux-2.6.34.1/drivers/net/wireless/b43legacy/debugfs.c 2010-07-07 09:04:53.000000000 -0400
25945 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
25946 struct b43legacy_debugfs_fops {
25947 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
25948 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
25949 - struct file_operations fops;
25950 + const struct file_operations fops;
25951 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
25952 size_t file_struct_offset;
25953 /* Take wl->irq_lock before calling read/write? */
25954 diff -urNp linux-2.6.34.1/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.34.1/drivers/net/wireless/iwlwifi/iwl-debug.h
25955 --- linux-2.6.34.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-07-05 14:24:10.000000000 -0400
25956 +++ linux-2.6.34.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-07-07 09:04:53.000000000 -0400
25957 @@ -68,8 +68,8 @@ do {
25958 } while (0)
25959
25960 #else
25961 -#define IWL_DEBUG(__priv, level, fmt, args...)
25962 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
25963 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
25964 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
25965 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
25966 void *p, u32 len)
25967 {}
25968 diff -urNp linux-2.6.34.1/drivers/net/wireless/libertas/debugfs.c linux-2.6.34.1/drivers/net/wireless/libertas/debugfs.c
25969 --- linux-2.6.34.1/drivers/net/wireless/libertas/debugfs.c 2010-07-05 14:24:10.000000000 -0400
25970 +++ linux-2.6.34.1/drivers/net/wireless/libertas/debugfs.c 2010-07-07 09:04:53.000000000 -0400
25971 @@ -718,7 +718,7 @@ out_unlock:
25972 struct lbs_debugfs_files {
25973 const char *name;
25974 int perm;
25975 - struct file_operations fops;
25976 + const struct file_operations fops;
25977 };
25978
25979 static const struct lbs_debugfs_files debugfs_files[] = {
25980 diff -urNp linux-2.6.34.1/drivers/net/wireless/rndis_wlan.c linux-2.6.34.1/drivers/net/wireless/rndis_wlan.c
25981 --- linux-2.6.34.1/drivers/net/wireless/rndis_wlan.c 2010-07-05 14:24:10.000000000 -0400
25982 +++ linux-2.6.34.1/drivers/net/wireless/rndis_wlan.c 2010-07-07 09:04:53.000000000 -0400
25983 @@ -1185,7 +1185,7 @@ static int set_rts_threshold(struct usbn
25984
25985 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
25986
25987 - if (rts_threshold < 0 || rts_threshold > 2347)
25988 + if (rts_threshold > 2347)
25989 rts_threshold = 2347;
25990
25991 tmp = cpu_to_le32(rts_threshold);
25992 diff -urNp linux-2.6.34.1/drivers/oprofile/buffer_sync.c linux-2.6.34.1/drivers/oprofile/buffer_sync.c
25993 --- linux-2.6.34.1/drivers/oprofile/buffer_sync.c 2010-07-05 14:24:10.000000000 -0400
25994 +++ linux-2.6.34.1/drivers/oprofile/buffer_sync.c 2010-07-07 09:04:53.000000000 -0400
25995 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
25996 if (cookie == NO_COOKIE)
25997 offset = pc;
25998 if (cookie == INVALID_COOKIE) {
25999 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
26000 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
26001 offset = pc;
26002 }
26003 if (cookie != last_cookie) {
26004 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
26005 /* add userspace sample */
26006
26007 if (!mm) {
26008 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
26009 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
26010 return 0;
26011 }
26012
26013 cookie = lookup_dcookie(mm, s->eip, &offset);
26014
26015 if (cookie == INVALID_COOKIE) {
26016 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
26017 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
26018 return 0;
26019 }
26020
26021 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
26022 /* ignore backtraces if failed to add a sample */
26023 if (state == sb_bt_start) {
26024 state = sb_bt_ignore;
26025 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
26026 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
26027 }
26028 }
26029 release_mm(mm);
26030 diff -urNp linux-2.6.34.1/drivers/oprofile/event_buffer.c linux-2.6.34.1/drivers/oprofile/event_buffer.c
26031 --- linux-2.6.34.1/drivers/oprofile/event_buffer.c 2010-07-05 14:24:10.000000000 -0400
26032 +++ linux-2.6.34.1/drivers/oprofile/event_buffer.c 2010-07-07 09:04:53.000000000 -0400
26033 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
26034 }
26035
26036 if (buffer_pos == buffer_size) {
26037 - atomic_inc(&oprofile_stats.event_lost_overflow);
26038 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
26039 return;
26040 }
26041
26042 diff -urNp linux-2.6.34.1/drivers/oprofile/oprof.c linux-2.6.34.1/drivers/oprofile/oprof.c
26043 --- linux-2.6.34.1/drivers/oprofile/oprof.c 2010-07-05 14:24:10.000000000 -0400
26044 +++ linux-2.6.34.1/drivers/oprofile/oprof.c 2010-07-07 09:04:53.000000000 -0400
26045 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
26046 if (oprofile_ops.switch_events())
26047 return;
26048
26049 - atomic_inc(&oprofile_stats.multiplex_counter);
26050 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
26051 start_switch_worker();
26052 }
26053
26054 diff -urNp linux-2.6.34.1/drivers/oprofile/oprofile_stats.c linux-2.6.34.1/drivers/oprofile/oprofile_stats.c
26055 --- linux-2.6.34.1/drivers/oprofile/oprofile_stats.c 2010-07-05 14:24:10.000000000 -0400
26056 +++ linux-2.6.34.1/drivers/oprofile/oprofile_stats.c 2010-07-07 09:04:53.000000000 -0400
26057 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
26058 cpu_buf->sample_invalid_eip = 0;
26059 }
26060
26061 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
26062 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
26063 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
26064 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
26065 - atomic_set(&oprofile_stats.multiplex_counter, 0);
26066 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
26067 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
26068 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
26069 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
26070 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
26071 }
26072
26073
26074 diff -urNp linux-2.6.34.1/drivers/oprofile/oprofile_stats.h linux-2.6.34.1/drivers/oprofile/oprofile_stats.h
26075 --- linux-2.6.34.1/drivers/oprofile/oprofile_stats.h 2010-07-05 14:24:10.000000000 -0400
26076 +++ linux-2.6.34.1/drivers/oprofile/oprofile_stats.h 2010-07-07 09:04:53.000000000 -0400
26077 @@ -13,11 +13,11 @@
26078 #include <asm/atomic.h>
26079
26080 struct oprofile_stat_struct {
26081 - atomic_t sample_lost_no_mm;
26082 - atomic_t sample_lost_no_mapping;
26083 - atomic_t bt_lost_no_mapping;
26084 - atomic_t event_lost_overflow;
26085 - atomic_t multiplex_counter;
26086 + atomic_unchecked_t sample_lost_no_mm;
26087 + atomic_unchecked_t sample_lost_no_mapping;
26088 + atomic_unchecked_t bt_lost_no_mapping;
26089 + atomic_unchecked_t event_lost_overflow;
26090 + atomic_unchecked_t multiplex_counter;
26091 };
26092
26093 extern struct oprofile_stat_struct oprofile_stats;
26094 diff -urNp linux-2.6.34.1/drivers/oprofile/oprofilefs.c linux-2.6.34.1/drivers/oprofile/oprofilefs.c
26095 --- linux-2.6.34.1/drivers/oprofile/oprofilefs.c 2010-07-05 14:24:10.000000000 -0400
26096 +++ linux-2.6.34.1/drivers/oprofile/oprofilefs.c 2010-07-07 09:04:53.000000000 -0400
26097 @@ -187,7 +187,7 @@ static const struct file_operations atom
26098
26099
26100 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
26101 - char const *name, atomic_t *val)
26102 + char const *name, atomic_unchecked_t *val)
26103 {
26104 struct dentry *d = __oprofilefs_create_file(sb, root, name,
26105 &atomic_ro_fops, 0444);
26106 diff -urNp linux-2.6.34.1/drivers/parport/procfs.c linux-2.6.34.1/drivers/parport/procfs.c
26107 --- linux-2.6.34.1/drivers/parport/procfs.c 2010-07-05 14:24:10.000000000 -0400
26108 +++ linux-2.6.34.1/drivers/parport/procfs.c 2010-07-07 09:04:53.000000000 -0400
26109 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
26110
26111 *ppos += len;
26112
26113 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
26114 + return (len > sizeof(buffer) || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
26115 }
26116
26117 #ifdef CONFIG_PARPORT_1284
26118 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
26119
26120 *ppos += len;
26121
26122 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
26123 + return (len > sizeof(buffer) || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
26124 }
26125 #endif /* IEEE1284.3 support. */
26126
26127 diff -urNp linux-2.6.34.1/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.34.1/drivers/pci/hotplug/acpiphp_glue.c
26128 --- linux-2.6.34.1/drivers/pci/hotplug/acpiphp_glue.c 2010-07-05 14:24:10.000000000 -0400
26129 +++ linux-2.6.34.1/drivers/pci/hotplug/acpiphp_glue.c 2010-07-07 09:04:53.000000000 -0400
26130 @@ -110,7 +110,7 @@ static int post_dock_fixups(struct notif
26131 }
26132
26133
26134 -static struct acpi_dock_ops acpiphp_dock_ops = {
26135 +static const struct acpi_dock_ops acpiphp_dock_ops = {
26136 .handler = handle_hotplug_event_func,
26137 };
26138
26139 diff -urNp linux-2.6.34.1/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.34.1/drivers/pci/hotplug/cpqphp_nvram.c
26140 --- linux-2.6.34.1/drivers/pci/hotplug/cpqphp_nvram.c 2010-07-05 14:24:10.000000000 -0400
26141 +++ linux-2.6.34.1/drivers/pci/hotplug/cpqphp_nvram.c 2010-07-07 09:04:53.000000000 -0400
26142 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
26143
26144 void compaq_nvram_init (void __iomem *rom_start)
26145 {
26146 +
26147 +#ifndef CONFIG_PAX_KERNEXEC
26148 if (rom_start) {
26149 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
26150 }
26151 +#endif
26152 +
26153 dbg("int15 entry = %p\n", compaq_int15_entry_point);
26154
26155 /* initialize our int15 lock */
26156 diff -urNp linux-2.6.34.1/drivers/pci/intel-iommu.c linux-2.6.34.1/drivers/pci/intel-iommu.c
26157 --- linux-2.6.34.1/drivers/pci/intel-iommu.c 2010-07-05 14:24:10.000000000 -0400
26158 +++ linux-2.6.34.1/drivers/pci/intel-iommu.c 2010-07-07 09:04:53.000000000 -0400
26159 @@ -2940,7 +2940,7 @@ static int intel_mapping_error(struct de
26160 return !dma_addr;
26161 }
26162
26163 -struct dma_map_ops intel_dma_ops = {
26164 +const struct dma_map_ops intel_dma_ops = {
26165 .alloc_coherent = intel_alloc_coherent,
26166 .free_coherent = intel_free_coherent,
26167 .map_sg = intel_map_sg,
26168 diff -urNp linux-2.6.34.1/drivers/pci/pcie/portdrv_pci.c linux-2.6.34.1/drivers/pci/pcie/portdrv_pci.c
26169 --- linux-2.6.34.1/drivers/pci/pcie/portdrv_pci.c 2010-07-05 14:24:10.000000000 -0400
26170 +++ linux-2.6.34.1/drivers/pci/pcie/portdrv_pci.c 2010-07-07 09:04:53.000000000 -0400
26171 @@ -250,7 +250,7 @@ static void pcie_portdrv_err_resume(stru
26172 static const struct pci_device_id port_pci_ids[] = { {
26173 /* handle any PCI-Express port */
26174 PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
26175 - }, { /* end: all zeroes */ }
26176 + }, { 0, 0, 0, 0, 0, 0, 0 }
26177 };
26178 MODULE_DEVICE_TABLE(pci, port_pci_ids);
26179
26180 diff -urNp linux-2.6.34.1/drivers/pci/probe.c linux-2.6.34.1/drivers/pci/probe.c
26181 --- linux-2.6.34.1/drivers/pci/probe.c 2010-07-05 14:24:10.000000000 -0400
26182 +++ linux-2.6.34.1/drivers/pci/probe.c 2010-07-07 09:04:53.000000000 -0400
26183 @@ -63,14 +63,14 @@ static ssize_t pci_bus_show_cpuaffinity(
26184 return ret;
26185 }
26186
26187 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
26188 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
26189 struct device_attribute *attr,
26190 char *buf)
26191 {
26192 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
26193 }
26194
26195 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
26196 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
26197 struct device_attribute *attr,
26198 char *buf)
26199 {
26200 diff -urNp linux-2.6.34.1/drivers/pci/proc.c linux-2.6.34.1/drivers/pci/proc.c
26201 --- linux-2.6.34.1/drivers/pci/proc.c 2010-07-05 14:24:10.000000000 -0400
26202 +++ linux-2.6.34.1/drivers/pci/proc.c 2010-07-07 09:04:53.000000000 -0400
26203 @@ -481,7 +481,16 @@ static const struct file_operations proc
26204 static int __init pci_proc_init(void)
26205 {
26206 struct pci_dev *dev = NULL;
26207 +
26208 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
26209 +#ifdef CONFIG_GRKERNSEC_PROC_USER
26210 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
26211 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
26212 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
26213 +#endif
26214 +#else
26215 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
26216 +#endif
26217 proc_create("devices", 0, proc_bus_pci_dir,
26218 &proc_bus_pci_dev_operations);
26219 proc_initialized = 1;
26220 diff -urNp linux-2.6.34.1/drivers/pcmcia/ti113x.h linux-2.6.34.1/drivers/pcmcia/ti113x.h
26221 --- linux-2.6.34.1/drivers/pcmcia/ti113x.h 2010-07-05 14:24:10.000000000 -0400
26222 +++ linux-2.6.34.1/drivers/pcmcia/ti113x.h 2010-07-07 09:04:53.000000000 -0400
26223 @@ -936,7 +936,7 @@ static struct pci_device_id ene_tune_tbl
26224 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
26225 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
26226
26227 - {}
26228 + { 0, 0, 0, 0, 0, 0, 0 }
26229 };
26230
26231 static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
26232 diff -urNp linux-2.6.34.1/drivers/pcmcia/yenta_socket.c linux-2.6.34.1/drivers/pcmcia/yenta_socket.c
26233 --- linux-2.6.34.1/drivers/pcmcia/yenta_socket.c 2010-07-05 14:24:10.000000000 -0400
26234 +++ linux-2.6.34.1/drivers/pcmcia/yenta_socket.c 2010-07-07 09:04:53.000000000 -0400
26235 @@ -1437,7 +1437,7 @@ static struct pci_device_id yenta_table[
26236
26237 /* match any cardbus bridge */
26238 CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT),
26239 - { /* all zeroes */ }
26240 + { 0, 0, 0, 0, 0, 0, 0 }
26241 };
26242 MODULE_DEVICE_TABLE(pci, yenta_table);
26243
26244 diff -urNp linux-2.6.34.1/drivers/platform/x86/acer-wmi.c linux-2.6.34.1/drivers/platform/x86/acer-wmi.c
26245 --- linux-2.6.34.1/drivers/platform/x86/acer-wmi.c 2010-07-05 14:24:10.000000000 -0400
26246 +++ linux-2.6.34.1/drivers/platform/x86/acer-wmi.c 2010-07-07 09:04:53.000000000 -0400
26247 @@ -916,7 +916,7 @@ static int update_bl_status(struct backl
26248 return 0;
26249 }
26250
26251 -static struct backlight_ops acer_bl_ops = {
26252 +static const struct backlight_ops acer_bl_ops = {
26253 .get_brightness = read_brightness,
26254 .update_status = update_bl_status,
26255 };
26256 diff -urNp linux-2.6.34.1/drivers/platform/x86/asus-laptop.c linux-2.6.34.1/drivers/platform/x86/asus-laptop.c
26257 --- linux-2.6.34.1/drivers/platform/x86/asus-laptop.c 2010-07-05 14:24:10.000000000 -0400
26258 +++ linux-2.6.34.1/drivers/platform/x86/asus-laptop.c 2010-07-07 09:04:53.000000000 -0400
26259 @@ -621,7 +621,7 @@ static int update_bl_status(struct backl
26260 return asus_lcd_set(asus, value);
26261 }
26262
26263 -static struct backlight_ops asusbl_ops = {
26264 +static const struct backlight_ops asusbl_ops = {
26265 .get_brightness = asus_read_brightness,
26266 .update_status = update_bl_status,
26267 };
26268 diff -urNp linux-2.6.34.1/drivers/platform/x86/asus_acpi.c linux-2.6.34.1/drivers/platform/x86/asus_acpi.c
26269 --- linux-2.6.34.1/drivers/platform/x86/asus_acpi.c 2010-07-05 14:24:10.000000000 -0400
26270 +++ linux-2.6.34.1/drivers/platform/x86/asus_acpi.c 2010-07-07 09:04:53.000000000 -0400
26271 @@ -1464,7 +1464,7 @@ static int asus_hotk_remove(struct acpi_
26272 return 0;
26273 }
26274
26275 -static struct backlight_ops asus_backlight_data = {
26276 +static const struct backlight_ops asus_backlight_data = {
26277 .get_brightness = read_brightness,
26278 .update_status = set_brightness_status,
26279 };
26280 diff -urNp linux-2.6.34.1/drivers/platform/x86/compal-laptop.c linux-2.6.34.1/drivers/platform/x86/compal-laptop.c
26281 --- linux-2.6.34.1/drivers/platform/x86/compal-laptop.c 2010-07-05 14:24:10.000000000 -0400
26282 +++ linux-2.6.34.1/drivers/platform/x86/compal-laptop.c 2010-07-07 09:04:53.000000000 -0400
26283 @@ -168,7 +168,7 @@ static int bl_update_status(struct backl
26284 return set_lcd_level(b->props.brightness);
26285 }
26286
26287 -static struct backlight_ops compalbl_ops = {
26288 +static const struct backlight_ops compalbl_ops = {
26289 .get_brightness = bl_get_brightness,
26290 .update_status = bl_update_status,
26291 };
26292 diff -urNp linux-2.6.34.1/drivers/platform/x86/dell-laptop.c linux-2.6.34.1/drivers/platform/x86/dell-laptop.c
26293 --- linux-2.6.34.1/drivers/platform/x86/dell-laptop.c 2010-07-05 14:24:10.000000000 -0400
26294 +++ linux-2.6.34.1/drivers/platform/x86/dell-laptop.c 2010-07-07 09:04:53.000000000 -0400
26295 @@ -462,7 +462,7 @@ out:
26296 return buffer->output[1];
26297 }
26298
26299 -static struct backlight_ops dell_ops = {
26300 +static const struct backlight_ops dell_ops = {
26301 .get_brightness = dell_get_intensity,
26302 .update_status = dell_send_intensity,
26303 };
26304 diff -urNp linux-2.6.34.1/drivers/platform/x86/eeepc-laptop.c linux-2.6.34.1/drivers/platform/x86/eeepc-laptop.c
26305 --- linux-2.6.34.1/drivers/platform/x86/eeepc-laptop.c 2010-07-05 14:24:10.000000000 -0400
26306 +++ linux-2.6.34.1/drivers/platform/x86/eeepc-laptop.c 2010-07-07 09:04:53.000000000 -0400
26307 @@ -1114,7 +1114,7 @@ static int update_bl_status(struct backl
26308 return set_brightness(bd, bd->props.brightness);
26309 }
26310
26311 -static struct backlight_ops eeepcbl_ops = {
26312 +static const struct backlight_ops eeepcbl_ops = {
26313 .get_brightness = read_brightness,
26314 .update_status = update_bl_status,
26315 };
26316 diff -urNp linux-2.6.34.1/drivers/platform/x86/fujitsu-laptop.c linux-2.6.34.1/drivers/platform/x86/fujitsu-laptop.c
26317 --- linux-2.6.34.1/drivers/platform/x86/fujitsu-laptop.c 2010-07-05 14:24:10.000000000 -0400
26318 +++ linux-2.6.34.1/drivers/platform/x86/fujitsu-laptop.c 2010-07-07 09:04:53.000000000 -0400
26319 @@ -437,7 +437,7 @@ static int bl_update_status(struct backl
26320 return ret;
26321 }
26322
26323 -static struct backlight_ops fujitsubl_ops = {
26324 +static const struct backlight_ops fujitsubl_ops = {
26325 .get_brightness = bl_get_brightness,
26326 .update_status = bl_update_status,
26327 };
26328 diff -urNp linux-2.6.34.1/drivers/platform/x86/msi-laptop.c linux-2.6.34.1/drivers/platform/x86/msi-laptop.c
26329 --- linux-2.6.34.1/drivers/platform/x86/msi-laptop.c 2010-07-05 14:24:10.000000000 -0400
26330 +++ linux-2.6.34.1/drivers/platform/x86/msi-laptop.c 2010-07-07 09:04:53.000000000 -0400
26331 @@ -254,7 +254,7 @@ static int bl_update_status(struct backl
26332 return set_lcd_level(b->props.brightness);
26333 }
26334
26335 -static struct backlight_ops msibl_ops = {
26336 +static const struct backlight_ops msibl_ops = {
26337 .get_brightness = bl_get_brightness,
26338 .update_status = bl_update_status,
26339 };
26340 diff -urNp linux-2.6.34.1/drivers/platform/x86/sony-laptop.c linux-2.6.34.1/drivers/platform/x86/sony-laptop.c
26341 --- linux-2.6.34.1/drivers/platform/x86/sony-laptop.c 2010-07-05 14:24:10.000000000 -0400
26342 +++ linux-2.6.34.1/drivers/platform/x86/sony-laptop.c 2010-07-07 09:04:53.000000000 -0400
26343 @@ -857,7 +857,7 @@ static int sony_backlight_get_brightness
26344 }
26345
26346 static struct backlight_device *sony_backlight_device;
26347 -static struct backlight_ops sony_backlight_ops = {
26348 +static const struct backlight_ops sony_backlight_ops = {
26349 .update_status = sony_backlight_update_status,
26350 .get_brightness = sony_backlight_get_brightness,
26351 };
26352 diff -urNp linux-2.6.34.1/drivers/platform/x86/thinkpad_acpi.c linux-2.6.34.1/drivers/platform/x86/thinkpad_acpi.c
26353 --- linux-2.6.34.1/drivers/platform/x86/thinkpad_acpi.c 2010-07-05 14:24:10.000000000 -0400
26354 +++ linux-2.6.34.1/drivers/platform/x86/thinkpad_acpi.c 2010-07-07 09:04:53.000000000 -0400
26355 @@ -6132,7 +6132,7 @@ static void tpacpi_brightness_notify_cha
26356 BACKLIGHT_UPDATE_HOTKEY);
26357 }
26358
26359 -static struct backlight_ops ibm_backlight_data = {
26360 +static const struct backlight_ops ibm_backlight_data = {
26361 .get_brightness = brightness_get,
26362 .update_status = brightness_update_status,
26363 };
26364 diff -urNp linux-2.6.34.1/drivers/platform/x86/toshiba_acpi.c linux-2.6.34.1/drivers/platform/x86/toshiba_acpi.c
26365 --- linux-2.6.34.1/drivers/platform/x86/toshiba_acpi.c 2010-07-05 14:24:10.000000000 -0400
26366 +++ linux-2.6.34.1/drivers/platform/x86/toshiba_acpi.c 2010-07-07 09:04:53.000000000 -0400
26367 @@ -741,7 +741,7 @@ static acpi_status remove_device(void)
26368 return AE_OK;
26369 }
26370
26371 -static struct backlight_ops toshiba_backlight_data = {
26372 +static const struct backlight_ops toshiba_backlight_data = {
26373 .get_brightness = get_lcd,
26374 .update_status = set_lcd_status,
26375 };
26376 diff -urNp linux-2.6.34.1/drivers/pnp/pnpbios/bioscalls.c linux-2.6.34.1/drivers/pnp/pnpbios/bioscalls.c
26377 --- linux-2.6.34.1/drivers/pnp/pnpbios/bioscalls.c 2010-07-05 14:24:10.000000000 -0400
26378 +++ linux-2.6.34.1/drivers/pnp/pnpbios/bioscalls.c 2010-07-07 09:04:53.000000000 -0400
26379 @@ -59,7 +59,7 @@ do { \
26380 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
26381 } while(0)
26382
26383 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
26384 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
26385 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
26386
26387 /*
26388 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
26389
26390 cpu = get_cpu();
26391 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
26392 +
26393 + pax_open_kernel();
26394 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
26395 + pax_close_kernel();
26396
26397 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
26398 spin_lock_irqsave(&pnp_bios_lock, flags);
26399 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
26400 :"memory");
26401 spin_unlock_irqrestore(&pnp_bios_lock, flags);
26402
26403 + pax_open_kernel();
26404 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
26405 + pax_close_kernel();
26406 +
26407 put_cpu();
26408
26409 /* If we get here and this is set then the PnP BIOS faulted on us. */
26410 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
26411 return status;
26412 }
26413
26414 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
26415 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
26416 {
26417 int i;
26418
26419 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
26420 pnp_bios_callpoint.offset = header->fields.pm16offset;
26421 pnp_bios_callpoint.segment = PNP_CS16;
26422
26423 + pax_open_kernel();
26424 +
26425 for_each_possible_cpu(i) {
26426 struct desc_struct *gdt = get_cpu_gdt_table(i);
26427 if (!gdt)
26428 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
26429 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
26430 (unsigned long)__va(header->fields.pm16dseg));
26431 }
26432 +
26433 + pax_close_kernel();
26434 }
26435 diff -urNp linux-2.6.34.1/drivers/pnp/quirks.c linux-2.6.34.1/drivers/pnp/quirks.c
26436 --- linux-2.6.34.1/drivers/pnp/quirks.c 2010-07-05 14:24:10.000000000 -0400
26437 +++ linux-2.6.34.1/drivers/pnp/quirks.c 2010-07-07 09:04:53.000000000 -0400
26438 @@ -322,7 +322,7 @@ static struct pnp_fixup pnp_fixups[] = {
26439 /* PnP resources that might overlap PCI BARs */
26440 {"PNP0c01", quirk_system_pci_resources},
26441 {"PNP0c02", quirk_system_pci_resources},
26442 - {""}
26443 + {"", NULL}
26444 };
26445
26446 void pnp_fixup_device(struct pnp_dev *dev)
26447 diff -urNp linux-2.6.34.1/drivers/pnp/resource.c linux-2.6.34.1/drivers/pnp/resource.c
26448 --- linux-2.6.34.1/drivers/pnp/resource.c 2010-07-05 14:24:10.000000000 -0400
26449 +++ linux-2.6.34.1/drivers/pnp/resource.c 2010-07-07 09:04:53.000000000 -0400
26450 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
26451 return 1;
26452
26453 /* check if the resource is valid */
26454 - if (*irq < 0 || *irq > 15)
26455 + if (*irq > 15)
26456 return 0;
26457
26458 /* check if the resource is reserved */
26459 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
26460 return 1;
26461
26462 /* check if the resource is valid */
26463 - if (*dma < 0 || *dma == 4 || *dma > 7)
26464 + if (*dma == 4 || *dma > 7)
26465 return 0;
26466
26467 /* check if the resource is reserved */
26468 diff -urNp linux-2.6.34.1/drivers/s390/cio/qdio_debug.c linux-2.6.34.1/drivers/s390/cio/qdio_debug.c
26469 --- linux-2.6.34.1/drivers/s390/cio/qdio_debug.c 2010-07-05 14:24:10.000000000 -0400
26470 +++ linux-2.6.34.1/drivers/s390/cio/qdio_debug.c 2010-07-07 09:04:53.000000000 -0400
26471 @@ -233,7 +233,7 @@ static int qperf_seq_open(struct inode *
26472 filp->f_path.dentry->d_inode->i_private);
26473 }
26474
26475 -static struct file_operations debugfs_perf_fops = {
26476 +static const struct file_operations debugfs_perf_fops = {
26477 .owner = THIS_MODULE,
26478 .open = qperf_seq_open,
26479 .read = seq_read,
26480 diff -urNp linux-2.6.34.1/drivers/scsi/ipr.c linux-2.6.34.1/drivers/scsi/ipr.c
26481 --- linux-2.6.34.1/drivers/scsi/ipr.c 2010-07-05 14:24:10.000000000 -0400
26482 +++ linux-2.6.34.1/drivers/scsi/ipr.c 2010-07-07 09:04:53.000000000 -0400
26483 @@ -6055,7 +6055,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
26484 return true;
26485 }
26486
26487 -static struct ata_port_operations ipr_sata_ops = {
26488 +static const struct ata_port_operations ipr_sata_ops = {
26489 .phy_reset = ipr_ata_phy_reset,
26490 .hardreset = ipr_sata_reset,
26491 .post_internal_cmd = ipr_ata_post_internal,
26492 diff -urNp linux-2.6.34.1/drivers/scsi/libfc/fc_exch.c linux-2.6.34.1/drivers/scsi/libfc/fc_exch.c
26493 --- linux-2.6.34.1/drivers/scsi/libfc/fc_exch.c 2010-07-05 14:24:10.000000000 -0400
26494 +++ linux-2.6.34.1/drivers/scsi/libfc/fc_exch.c 2010-07-07 09:04:53.000000000 -0400
26495 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
26496 * all together if not used XXX
26497 */
26498 struct {
26499 - atomic_t no_free_exch;
26500 - atomic_t no_free_exch_xid;
26501 - atomic_t xid_not_found;
26502 - atomic_t xid_busy;
26503 - atomic_t seq_not_found;
26504 - atomic_t non_bls_resp;
26505 + atomic_unchecked_t no_free_exch;
26506 + atomic_unchecked_t no_free_exch_xid;
26507 + atomic_unchecked_t xid_not_found;
26508 + atomic_unchecked_t xid_busy;
26509 + atomic_unchecked_t seq_not_found;
26510 + atomic_unchecked_t non_bls_resp;
26511 } stats;
26512 };
26513 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
26514 @@ -671,7 +671,7 @@ static struct fc_exch *fc_exch_em_alloc(
26515 /* allocate memory for exchange */
26516 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
26517 if (!ep) {
26518 - atomic_inc(&mp->stats.no_free_exch);
26519 + atomic_inc_unchecked(&mp->stats.no_free_exch);
26520 goto out;
26521 }
26522 memset(ep, 0, sizeof(*ep));
26523 @@ -718,7 +718,7 @@ out:
26524 return ep;
26525 err:
26526 spin_unlock_bh(&pool->lock);
26527 - atomic_inc(&mp->stats.no_free_exch_xid);
26528 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
26529 mempool_free(ep, mp->ep_pool);
26530 return NULL;
26531 }
26532 @@ -868,7 +868,7 @@ static enum fc_pf_rjt_reason fc_seq_look
26533 xid = ntohs(fh->fh_ox_id); /* we originated exch */
26534 ep = fc_exch_find(mp, xid);
26535 if (!ep) {
26536 - atomic_inc(&mp->stats.xid_not_found);
26537 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26538 reject = FC_RJT_OX_ID;
26539 goto out;
26540 }
26541 @@ -898,7 +898,7 @@ static enum fc_pf_rjt_reason fc_seq_look
26542 ep = fc_exch_find(mp, xid);
26543 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
26544 if (ep) {
26545 - atomic_inc(&mp->stats.xid_busy);
26546 + atomic_inc_unchecked(&mp->stats.xid_busy);
26547 reject = FC_RJT_RX_ID;
26548 goto rel;
26549 }
26550 @@ -909,7 +909,7 @@ static enum fc_pf_rjt_reason fc_seq_look
26551 }
26552 xid = ep->xid; /* get our XID */
26553 } else if (!ep) {
26554 - atomic_inc(&mp->stats.xid_not_found);
26555 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26556 reject = FC_RJT_RX_ID; /* XID not found */
26557 goto out;
26558 }
26559 @@ -930,7 +930,7 @@ static enum fc_pf_rjt_reason fc_seq_look
26560 } else {
26561 sp = &ep->seq;
26562 if (sp->id != fh->fh_seq_id) {
26563 - atomic_inc(&mp->stats.seq_not_found);
26564 + atomic_inc_unchecked(&mp->stats.seq_not_found);
26565 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
26566 goto rel;
26567 }
26568 @@ -1317,22 +1317,22 @@ static void fc_exch_recv_seq_resp(struct
26569
26570 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
26571 if (!ep) {
26572 - atomic_inc(&mp->stats.xid_not_found);
26573 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26574 goto out;
26575 }
26576 if (ep->esb_stat & ESB_ST_COMPLETE) {
26577 - atomic_inc(&mp->stats.xid_not_found);
26578 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26579 goto out;
26580 }
26581 if (ep->rxid == FC_XID_UNKNOWN)
26582 ep->rxid = ntohs(fh->fh_rx_id);
26583 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
26584 - atomic_inc(&mp->stats.xid_not_found);
26585 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26586 goto rel;
26587 }
26588 if (ep->did != ntoh24(fh->fh_s_id) &&
26589 ep->did != FC_FID_FLOGI) {
26590 - atomic_inc(&mp->stats.xid_not_found);
26591 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26592 goto rel;
26593 }
26594 sof = fr_sof(fp);
26595 @@ -1343,7 +1343,7 @@ static void fc_exch_recv_seq_resp(struct
26596 } else {
26597 sp = &ep->seq;
26598 if (sp->id != fh->fh_seq_id) {
26599 - atomic_inc(&mp->stats.seq_not_found);
26600 + atomic_inc_unchecked(&mp->stats.seq_not_found);
26601 goto rel;
26602 }
26603 }
26604 @@ -1406,9 +1406,9 @@ static void fc_exch_recv_resp(struct fc_
26605 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
26606
26607 if (!sp)
26608 - atomic_inc(&mp->stats.xid_not_found);
26609 + atomic_inc_unchecked(&mp->stats.xid_not_found);
26610 else
26611 - atomic_inc(&mp->stats.non_bls_resp);
26612 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
26613
26614 fc_frame_free(fp);
26615 }
26616 diff -urNp linux-2.6.34.1/drivers/scsi/libsas/sas_ata.c linux-2.6.34.1/drivers/scsi/libsas/sas_ata.c
26617 --- linux-2.6.34.1/drivers/scsi/libsas/sas_ata.c 2010-07-05 14:24:10.000000000 -0400
26618 +++ linux-2.6.34.1/drivers/scsi/libsas/sas_ata.c 2010-07-07 09:04:53.000000000 -0400
26619 @@ -344,7 +344,7 @@ static int sas_ata_scr_read(struct ata_l
26620 }
26621 }
26622
26623 -static struct ata_port_operations sas_sata_ops = {
26624 +static const struct ata_port_operations sas_sata_ops = {
26625 .phy_reset = sas_ata_phy_reset,
26626 .post_internal_cmd = sas_ata_post_internal,
26627 .qc_prep = ata_noop_qc_prep,
26628 diff -urNp linux-2.6.34.1/drivers/scsi/mpt2sas/mpt2sas_debug.h linux-2.6.34.1/drivers/scsi/mpt2sas/mpt2sas_debug.h
26629 --- linux-2.6.34.1/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-07-05 14:24:10.000000000 -0400
26630 +++ linux-2.6.34.1/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-07-07 09:04:53.000000000 -0400
26631 @@ -79,7 +79,7 @@
26632 CMD; \
26633 }
26634 #else
26635 -#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
26636 +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0)
26637 #endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
26638
26639
26640 diff -urNp linux-2.6.34.1/drivers/scsi/scsi_logging.h linux-2.6.34.1/drivers/scsi/scsi_logging.h
26641 --- linux-2.6.34.1/drivers/scsi/scsi_logging.h 2010-07-05 14:24:10.000000000 -0400
26642 +++ linux-2.6.34.1/drivers/scsi/scsi_logging.h 2010-07-07 09:04:53.000000000 -0400
26643 @@ -51,7 +51,7 @@ do { \
26644 } while (0); \
26645 } while (0)
26646 #else
26647 -#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
26648 +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0)
26649 #endif /* CONFIG_SCSI_LOGGING */
26650
26651 /*
26652 diff -urNp linux-2.6.34.1/drivers/scsi/sg.c linux-2.6.34.1/drivers/scsi/sg.c
26653 --- linux-2.6.34.1/drivers/scsi/sg.c 2010-07-05 14:24:10.000000000 -0400
26654 +++ linux-2.6.34.1/drivers/scsi/sg.c 2010-07-07 09:04:53.000000000 -0400
26655 @@ -2291,7 +2291,7 @@ struct sg_proc_leaf {
26656 const struct file_operations * fops;
26657 };
26658
26659 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
26660 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
26661 {"allow_dio", &adio_fops},
26662 {"debug", &debug_fops},
26663 {"def_reserved_size", &dressz_fops},
26664 @@ -2306,7 +2306,7 @@ sg_proc_init(void)
26665 {
26666 int k, mask;
26667 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
26668 - struct sg_proc_leaf * leaf;
26669 + const struct sg_proc_leaf * leaf;
26670
26671 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
26672 if (!sg_proc_sgp)
26673 diff -urNp linux-2.6.34.1/drivers/serial/8250_pci.c linux-2.6.34.1/drivers/serial/8250_pci.c
26674 --- linux-2.6.34.1/drivers/serial/8250_pci.c 2010-07-05 14:24:10.000000000 -0400
26675 +++ linux-2.6.34.1/drivers/serial/8250_pci.c 2010-07-07 09:04:53.000000000 -0400
26676 @@ -3693,7 +3693,7 @@ static struct pci_device_id serial_pci_t
26677 PCI_ANY_ID, PCI_ANY_ID,
26678 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
26679 0xffff00, pbn_default },
26680 - { 0, }
26681 + { 0, 0, 0, 0, 0, 0, 0 }
26682 };
26683
26684 static struct pci_driver serial_pci_driver = {
26685 diff -urNp linux-2.6.34.1/drivers/serial/kgdboc.c linux-2.6.34.1/drivers/serial/kgdboc.c
26686 --- linux-2.6.34.1/drivers/serial/kgdboc.c 2010-07-05 14:24:10.000000000 -0400
26687 +++ linux-2.6.34.1/drivers/serial/kgdboc.c 2010-07-07 09:04:53.000000000 -0400
26688 @@ -18,7 +18,7 @@
26689
26690 #define MAX_CONFIG_LEN 40
26691
26692 -static struct kgdb_io kgdboc_io_ops;
26693 +static const struct kgdb_io kgdboc_io_ops;
26694
26695 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
26696 static int configured = -1;
26697 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
26698 module_put(THIS_MODULE);
26699 }
26700
26701 -static struct kgdb_io kgdboc_io_ops = {
26702 +static const struct kgdb_io kgdboc_io_ops = {
26703 .name = "kgdboc",
26704 .read_char = kgdboc_get_char,
26705 .write_char = kgdboc_put_char,
26706 diff -urNp linux-2.6.34.1/drivers/staging/comedi/comedi_fops.c linux-2.6.34.1/drivers/staging/comedi/comedi_fops.c
26707 --- linux-2.6.34.1/drivers/staging/comedi/comedi_fops.c 2010-07-05 14:24:10.000000000 -0400
26708 +++ linux-2.6.34.1/drivers/staging/comedi/comedi_fops.c 2010-07-07 09:04:53.000000000 -0400
26709 @@ -1384,7 +1384,7 @@ void comedi_unmap(struct vm_area_struct
26710 mutex_unlock(&dev->mutex);
26711 }
26712
26713 -static struct vm_operations_struct comedi_vm_ops = {
26714 +static const struct vm_operations_struct comedi_vm_ops = {
26715 .close = comedi_unmap,
26716 };
26717
26718 diff -urNp linux-2.6.34.1/drivers/staging/dream/pmem.c linux-2.6.34.1/drivers/staging/dream/pmem.c
26719 --- linux-2.6.34.1/drivers/staging/dream/pmem.c 2010-07-05 14:24:10.000000000 -0400
26720 +++ linux-2.6.34.1/drivers/staging/dream/pmem.c 2010-07-07 09:04:53.000000000 -0400
26721 @@ -175,7 +175,7 @@ static int pmem_mmap(struct file *, stru
26722 static int pmem_open(struct inode *, struct file *);
26723 static long pmem_ioctl(struct file *, unsigned int, unsigned long);
26724
26725 -struct file_operations pmem_fops = {
26726 +const struct file_operations pmem_fops = {
26727 .release = pmem_release,
26728 .mmap = pmem_mmap,
26729 .open = pmem_open,
26730 @@ -1201,7 +1201,7 @@ static ssize_t debug_read(struct file *f
26731 return simple_read_from_buffer(buf, count, ppos, buffer, n);
26732 }
26733
26734 -static struct file_operations debug_fops = {
26735 +static const struct file_operations debug_fops = {
26736 .read = debug_read,
26737 .open = debug_open,
26738 };
26739 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.34.1/drivers/staging/dream/qdsp5/adsp_driver.c
26740 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/adsp_driver.c 2010-07-05 14:24:10.000000000 -0400
26741 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/adsp_driver.c 2010-07-07 09:04:54.000000000 -0400
26742 @@ -577,7 +577,7 @@ static struct adsp_device *inode_to_devi
26743 static dev_t adsp_devno;
26744 static struct class *adsp_class;
26745
26746 -static struct file_operations adsp_fops = {
26747 +static const struct file_operations adsp_fops = {
26748 .owner = THIS_MODULE,
26749 .open = adsp_open,
26750 .unlocked_ioctl = adsp_ioctl,
26751 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_aac.c
26752 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_aac.c 2010-07-05 14:24:10.000000000 -0400
26753 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_aac.c 2010-07-07 09:04:54.000000000 -0400
26754 @@ -1023,7 +1023,7 @@ done:
26755 return rc;
26756 }
26757
26758 -static struct file_operations audio_aac_fops = {
26759 +static const struct file_operations audio_aac_fops = {
26760 .owner = THIS_MODULE,
26761 .open = audio_open,
26762 .release = audio_release,
26763 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_amrnb.c
26764 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-07-05 14:24:10.000000000 -0400
26765 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-07-07 09:04:54.000000000 -0400
26766 @@ -834,7 +834,7 @@ done:
26767 return rc;
26768 }
26769
26770 -static struct file_operations audio_amrnb_fops = {
26771 +static const struct file_operations audio_amrnb_fops = {
26772 .owner = THIS_MODULE,
26773 .open = audamrnb_open,
26774 .release = audamrnb_release,
26775 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_evrc.c
26776 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_evrc.c 2010-07-05 14:24:10.000000000 -0400
26777 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_evrc.c 2010-07-07 09:04:54.000000000 -0400
26778 @@ -806,7 +806,7 @@ dma_fail:
26779 return rc;
26780 }
26781
26782 -static struct file_operations audio_evrc_fops = {
26783 +static const struct file_operations audio_evrc_fops = {
26784 .owner = THIS_MODULE,
26785 .open = audevrc_open,
26786 .release = audevrc_release,
26787 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_in.c
26788 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_in.c 2010-07-05 14:24:10.000000000 -0400
26789 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_in.c 2010-07-07 09:04:54.000000000 -0400
26790 @@ -914,7 +914,7 @@ static int audpre_open(struct inode *ino
26791 return 0;
26792 }
26793
26794 -static struct file_operations audio_fops = {
26795 +static const struct file_operations audio_fops = {
26796 .owner = THIS_MODULE,
26797 .open = audio_in_open,
26798 .release = audio_in_release,
26799 @@ -923,7 +923,7 @@ static struct file_operations audio_fops
26800 .unlocked_ioctl = audio_in_ioctl,
26801 };
26802
26803 -static struct file_operations audpre_fops = {
26804 +static const struct file_operations audpre_fops = {
26805 .owner = THIS_MODULE,
26806 .open = audpre_open,
26807 .unlocked_ioctl = audpre_ioctl,
26808 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_mp3.c
26809 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_mp3.c 2010-07-05 14:24:10.000000000 -0400
26810 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_mp3.c 2010-07-07 09:04:54.000000000 -0400
26811 @@ -941,7 +941,7 @@ done:
26812 return rc;
26813 }
26814
26815 -static struct file_operations audio_mp3_fops = {
26816 +static const struct file_operations audio_mp3_fops = {
26817 .owner = THIS_MODULE,
26818 .open = audio_open,
26819 .release = audio_release,
26820 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_out.c
26821 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_out.c 2010-07-05 14:24:10.000000000 -0400
26822 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_out.c 2010-07-07 09:04:54.000000000 -0400
26823 @@ -807,7 +807,7 @@ static int audpp_open(struct inode *inod
26824 return 0;
26825 }
26826
26827 -static struct file_operations audio_fops = {
26828 +static const struct file_operations audio_fops = {
26829 .owner = THIS_MODULE,
26830 .open = audio_open,
26831 .release = audio_release,
26832 @@ -816,7 +816,7 @@ static struct file_operations audio_fops
26833 .unlocked_ioctl = audio_ioctl,
26834 };
26835
26836 -static struct file_operations audpp_fops = {
26837 +static const struct file_operations audpp_fops = {
26838 .owner = THIS_MODULE,
26839 .open = audpp_open,
26840 .unlocked_ioctl = audpp_ioctl,
26841 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_qcelp.c
26842 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-07-05 14:24:10.000000000 -0400
26843 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-07-07 09:04:54.000000000 -0400
26844 @@ -817,7 +817,7 @@ err:
26845 return rc;
26846 }
26847
26848 -static struct file_operations audio_qcelp_fops = {
26849 +static const struct file_operations audio_qcelp_fops = {
26850 .owner = THIS_MODULE,
26851 .open = audqcelp_open,
26852 .release = audqcelp_release,
26853 diff -urNp linux-2.6.34.1/drivers/staging/dream/qdsp5/snd.c linux-2.6.34.1/drivers/staging/dream/qdsp5/snd.c
26854 --- linux-2.6.34.1/drivers/staging/dream/qdsp5/snd.c 2010-07-05 14:24:10.000000000 -0400
26855 +++ linux-2.6.34.1/drivers/staging/dream/qdsp5/snd.c 2010-07-07 09:04:54.000000000 -0400
26856 @@ -242,7 +242,7 @@ err:
26857 return rc;
26858 }
26859
26860 -static struct file_operations snd_fops = {
26861 +static const struct file_operations snd_fops = {
26862 .owner = THIS_MODULE,
26863 .open = snd_open,
26864 .release = snd_release,
26865 diff -urNp linux-2.6.34.1/drivers/staging/dream/smd/smd_qmi.c linux-2.6.34.1/drivers/staging/dream/smd/smd_qmi.c
26866 --- linux-2.6.34.1/drivers/staging/dream/smd/smd_qmi.c 2010-07-05 14:24:10.000000000 -0400
26867 +++ linux-2.6.34.1/drivers/staging/dream/smd/smd_qmi.c 2010-07-07 09:04:54.000000000 -0400
26868 @@ -788,7 +788,7 @@ static int qmi_release(struct inode *ip,
26869 return 0;
26870 }
26871
26872 -static struct file_operations qmi_fops = {
26873 +static const struct file_operations qmi_fops = {
26874 .owner = THIS_MODULE,
26875 .read = qmi_read,
26876 .write = qmi_write,
26877 diff -urNp linux-2.6.34.1/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.34.1/drivers/staging/dream/smd/smd_rpcrouter_device.c
26878 --- linux-2.6.34.1/drivers/staging/dream/smd/smd_rpcrouter_device.c 2010-07-05 14:24:10.000000000 -0400
26879 +++ linux-2.6.34.1/drivers/staging/dream/smd/smd_rpcrouter_device.c 2010-07-07 09:04:54.000000000 -0400
26880 @@ -215,7 +215,7 @@ static long rpcrouter_ioctl(struct file
26881 return rc;
26882 }
26883
26884 -static struct file_operations rpcrouter_server_fops = {
26885 +static const struct file_operations rpcrouter_server_fops = {
26886 .owner = THIS_MODULE,
26887 .open = rpcrouter_open,
26888 .release = rpcrouter_release,
26889 @@ -225,7 +225,7 @@ static struct file_operations rpcrouter_
26890 .unlocked_ioctl = rpcrouter_ioctl,
26891 };
26892
26893 -static struct file_operations rpcrouter_router_fops = {
26894 +static const struct file_operations rpcrouter_router_fops = {
26895 .owner = THIS_MODULE,
26896 .open = rpcrouter_open,
26897 .release = rpcrouter_release,
26898 diff -urNp linux-2.6.34.1/drivers/staging/dt3155/dt3155_drv.c linux-2.6.34.1/drivers/staging/dt3155/dt3155_drv.c
26899 --- linux-2.6.34.1/drivers/staging/dt3155/dt3155_drv.c 2010-07-05 14:24:10.000000000 -0400
26900 +++ linux-2.6.34.1/drivers/staging/dt3155/dt3155_drv.c 2010-07-07 09:04:54.000000000 -0400
26901 @@ -841,7 +841,7 @@ static unsigned int dt3155_poll (struct
26902 * needed by init_module
26903 * register_chrdev
26904 *****************************************************/
26905 -static struct file_operations dt3155_fops = {
26906 +static const struct file_operations dt3155_fops = {
26907 read: dt3155_read,
26908 ioctl: dt3155_ioctl,
26909 mmap: dt3155_mmap,
26910 diff -urNp linux-2.6.34.1/drivers/staging/go7007/go7007-v4l2.c linux-2.6.34.1/drivers/staging/go7007/go7007-v4l2.c
26911 --- linux-2.6.34.1/drivers/staging/go7007/go7007-v4l2.c 2010-07-05 14:24:10.000000000 -0400
26912 +++ linux-2.6.34.1/drivers/staging/go7007/go7007-v4l2.c 2010-07-07 09:04:54.000000000 -0400
26913 @@ -1675,7 +1675,7 @@ static int go7007_vm_fault(struct vm_are
26914 return 0;
26915 }
26916
26917 -static struct vm_operations_struct go7007_vm_ops = {
26918 +static const struct vm_operations_struct go7007_vm_ops = {
26919 .open = go7007_vm_open,
26920 .close = go7007_vm_close,
26921 .fault = go7007_vm_fault,
26922 diff -urNp linux-2.6.34.1/drivers/staging/hv/Hv.c linux-2.6.34.1/drivers/staging/hv/Hv.c
26923 --- linux-2.6.34.1/drivers/staging/hv/Hv.c 2010-07-05 14:24:10.000000000 -0400
26924 +++ linux-2.6.34.1/drivers/staging/hv/Hv.c 2010-07-07 09:04:54.000000000 -0400
26925 @@ -162,7 +162,7 @@ static u64 HvDoHypercall(u64 Control, vo
26926 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
26927 u32 outputAddressHi = outputAddress >> 32;
26928 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
26929 - volatile void *hypercallPage = gHvContext.HypercallPage;
26930 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
26931
26932 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
26933 Control, Input, Output);
26934 diff -urNp linux-2.6.34.1/drivers/staging/hv/blkvsc_drv.c linux-2.6.34.1/drivers/staging/hv/blkvsc_drv.c
26935 --- linux-2.6.34.1/drivers/staging/hv/blkvsc_drv.c 2010-07-05 14:24:10.000000000 -0400
26936 +++ linux-2.6.34.1/drivers/staging/hv/blkvsc_drv.c 2010-07-07 09:04:54.000000000 -0400
26937 @@ -155,7 +155,7 @@ static int blkvsc_ringbuffer_size = BLKV
26938 /* The one and only one */
26939 static struct blkvsc_driver_context g_blkvsc_drv;
26940
26941 -static struct block_device_operations block_ops = {
26942 +static const struct block_device_operations block_ops = {
26943 .owner = THIS_MODULE,
26944 .open = blkvsc_open,
26945 .release = blkvsc_release,
26946 diff -urNp linux-2.6.34.1/drivers/staging/panel/panel.c linux-2.6.34.1/drivers/staging/panel/panel.c
26947 --- linux-2.6.34.1/drivers/staging/panel/panel.c 2010-07-05 14:24:10.000000000 -0400
26948 +++ linux-2.6.34.1/drivers/staging/panel/panel.c 2010-07-07 09:04:54.000000000 -0400
26949 @@ -1304,7 +1304,7 @@ static int lcd_release(struct inode *ino
26950 return 0;
26951 }
26952
26953 -static struct file_operations lcd_fops = {
26954 +static const struct file_operations lcd_fops = {
26955 .write = lcd_write,
26956 .open = lcd_open,
26957 .release = lcd_release,
26958 @@ -1564,7 +1564,7 @@ static int keypad_release(struct inode *
26959 return 0;
26960 }
26961
26962 -static struct file_operations keypad_fops = {
26963 +static const struct file_operations keypad_fops = {
26964 .read = keypad_read, /* read */
26965 .open = keypad_open, /* open */
26966 .release = keypad_release, /* close */
26967 diff -urNp linux-2.6.34.1/drivers/staging/phison/phison.c linux-2.6.34.1/drivers/staging/phison/phison.c
26968 --- linux-2.6.34.1/drivers/staging/phison/phison.c 2010-07-05 14:24:10.000000000 -0400
26969 +++ linux-2.6.34.1/drivers/staging/phison/phison.c 2010-07-07 09:04:54.000000000 -0400
26970 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
26971 ATA_BMDMA_SHT(DRV_NAME),
26972 };
26973
26974 -static struct ata_port_operations phison_ops = {
26975 +static const struct ata_port_operations phison_ops = {
26976 .inherits = &ata_bmdma_port_ops,
26977 .prereset = phison_pre_reset,
26978 };
26979 diff -urNp linux-2.6.34.1/drivers/staging/poch/poch.c linux-2.6.34.1/drivers/staging/poch/poch.c
26980 --- linux-2.6.34.1/drivers/staging/poch/poch.c 2010-07-05 14:24:10.000000000 -0400
26981 +++ linux-2.6.34.1/drivers/staging/poch/poch.c 2010-07-07 09:04:54.000000000 -0400
26982 @@ -1033,7 +1033,7 @@ static int poch_ioctl(struct inode *inod
26983 return 0;
26984 }
26985
26986 -static struct file_operations poch_fops = {
26987 +static const struct file_operations poch_fops = {
26988 .owner = THIS_MODULE,
26989 .open = poch_open,
26990 .release = poch_release,
26991 diff -urNp linux-2.6.34.1/drivers/staging/pohmelfs/inode.c linux-2.6.34.1/drivers/staging/pohmelfs/inode.c
26992 --- linux-2.6.34.1/drivers/staging/pohmelfs/inode.c 2010-07-05 14:24:10.000000000 -0400
26993 +++ linux-2.6.34.1/drivers/staging/pohmelfs/inode.c 2010-07-07 09:04:54.000000000 -0400
26994 @@ -1854,7 +1854,7 @@ static int pohmelfs_fill_super(struct su
26995 mutex_init(&psb->mcache_lock);
26996 psb->mcache_root = RB_ROOT;
26997 psb->mcache_timeout = msecs_to_jiffies(5000);
26998 - atomic_long_set(&psb->mcache_gen, 0);
26999 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
27000
27001 psb->trans_max_pages = 100;
27002
27003 diff -urNp linux-2.6.34.1/drivers/staging/pohmelfs/mcache.c linux-2.6.34.1/drivers/staging/pohmelfs/mcache.c
27004 --- linux-2.6.34.1/drivers/staging/pohmelfs/mcache.c 2010-07-05 14:24:10.000000000 -0400
27005 +++ linux-2.6.34.1/drivers/staging/pohmelfs/mcache.c 2010-07-07 09:04:54.000000000 -0400
27006 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
27007 m->data = data;
27008 m->start = start;
27009 m->size = size;
27010 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
27011 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
27012
27013 mutex_lock(&psb->mcache_lock);
27014 err = pohmelfs_mcache_insert(psb, m);
27015 diff -urNp linux-2.6.34.1/drivers/staging/pohmelfs/netfs.h linux-2.6.34.1/drivers/staging/pohmelfs/netfs.h
27016 --- linux-2.6.34.1/drivers/staging/pohmelfs/netfs.h 2010-07-05 14:24:10.000000000 -0400
27017 +++ linux-2.6.34.1/drivers/staging/pohmelfs/netfs.h 2010-07-07 09:04:54.000000000 -0400
27018 @@ -571,7 +571,7 @@ struct pohmelfs_config;
27019 struct pohmelfs_sb {
27020 struct rb_root mcache_root;
27021 struct mutex mcache_lock;
27022 - atomic_long_t mcache_gen;
27023 + atomic_long_unchecked_t mcache_gen;
27024 unsigned long mcache_timeout;
27025
27026 unsigned int idx;
27027 diff -urNp linux-2.6.34.1/drivers/staging/ramzswap/ramzswap_drv.c linux-2.6.34.1/drivers/staging/ramzswap/ramzswap_drv.c
27028 --- linux-2.6.34.1/drivers/staging/ramzswap/ramzswap_drv.c 2010-07-05 14:24:10.000000000 -0400
27029 +++ linux-2.6.34.1/drivers/staging/ramzswap/ramzswap_drv.c 2010-07-07 09:04:54.000000000 -0400
27030 @@ -1295,7 +1295,7 @@ out:
27031 return ret;
27032 }
27033
27034 -static struct block_device_operations ramzswap_devops = {
27035 +static const struct block_device_operations ramzswap_devops = {
27036 .ioctl = ramzswap_ioctl,
27037 .owner = THIS_MODULE,
27038 };
27039 diff -urNp linux-2.6.34.1/drivers/staging/rtl8192u/ieee80211/proc.c linux-2.6.34.1/drivers/staging/rtl8192u/ieee80211/proc.c
27040 --- linux-2.6.34.1/drivers/staging/rtl8192u/ieee80211/proc.c 2010-07-05 14:24:10.000000000 -0400
27041 +++ linux-2.6.34.1/drivers/staging/rtl8192u/ieee80211/proc.c 2010-07-07 09:04:54.000000000 -0400
27042 @@ -99,7 +99,7 @@ static int crypto_info_open(struct inode
27043 return seq_open(file, &crypto_seq_ops);
27044 }
27045
27046 -static struct file_operations proc_crypto_ops = {
27047 +static const struct file_operations proc_crypto_ops = {
27048 .open = crypto_info_open,
27049 .read = seq_read,
27050 .llseek = seq_lseek,
27051 diff -urNp linux-2.6.34.1/drivers/staging/samsung-laptop/samsung-laptop.c linux-2.6.34.1/drivers/staging/samsung-laptop/samsung-laptop.c
27052 --- linux-2.6.34.1/drivers/staging/samsung-laptop/samsung-laptop.c 2010-07-05 14:24:10.000000000 -0400
27053 +++ linux-2.6.34.1/drivers/staging/samsung-laptop/samsung-laptop.c 2010-07-07 09:04:54.000000000 -0400
27054 @@ -269,7 +269,7 @@ static int update_status(struct backligh
27055 return 0;
27056 }
27057
27058 -static struct backlight_ops backlight_ops = {
27059 +static const struct backlight_ops backlight_ops = {
27060 .get_brightness = get_brightness,
27061 .update_status = update_status,
27062 };
27063 diff -urNp linux-2.6.34.1/drivers/staging/sep/sep_driver.c linux-2.6.34.1/drivers/staging/sep/sep_driver.c
27064 --- linux-2.6.34.1/drivers/staging/sep/sep_driver.c 2010-07-05 14:24:10.000000000 -0400
27065 +++ linux-2.6.34.1/drivers/staging/sep/sep_driver.c 2010-07-07 09:04:54.000000000 -0400
27066 @@ -2610,7 +2610,7 @@ static struct pci_driver sep_pci_driver
27067 static dev_t sep_devno;
27068
27069 /* the files operations structure of the driver */
27070 -static struct file_operations sep_file_operations = {
27071 +static const struct file_operations sep_file_operations = {
27072 .owner = THIS_MODULE,
27073 .unlocked_ioctl = sep_ioctl,
27074 .poll = sep_poll,
27075 diff -urNp linux-2.6.34.1/drivers/staging/vme/devices/vme_user.c linux-2.6.34.1/drivers/staging/vme/devices/vme_user.c
27076 --- linux-2.6.34.1/drivers/staging/vme/devices/vme_user.c 2010-07-05 14:24:10.000000000 -0400
27077 +++ linux-2.6.34.1/drivers/staging/vme/devices/vme_user.c 2010-07-07 09:04:54.000000000 -0400
27078 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
27079 static int __init vme_user_probe(struct device *, int, int);
27080 static int __exit vme_user_remove(struct device *, int, int);
27081
27082 -static struct file_operations vme_user_fops = {
27083 +static const struct file_operations vme_user_fops = {
27084 .open = vme_user_open,
27085 .release = vme_user_release,
27086 .read = vme_user_read,
27087 diff -urNp linux-2.6.34.1/drivers/usb/atm/usbatm.c linux-2.6.34.1/drivers/usb/atm/usbatm.c
27088 --- linux-2.6.34.1/drivers/usb/atm/usbatm.c 2010-07-05 14:24:10.000000000 -0400
27089 +++ linux-2.6.34.1/drivers/usb/atm/usbatm.c 2010-07-07 09:04:54.000000000 -0400
27090 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
27091 if (printk_ratelimit())
27092 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
27093 __func__, vpi, vci);
27094 - atomic_inc(&vcc->stats->rx_err);
27095 + atomic_inc_unchecked(&vcc->stats->rx_err);
27096 return;
27097 }
27098
27099 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
27100 if (length > ATM_MAX_AAL5_PDU) {
27101 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
27102 __func__, length, vcc);
27103 - atomic_inc(&vcc->stats->rx_err);
27104 + atomic_inc_unchecked(&vcc->stats->rx_err);
27105 goto out;
27106 }
27107
27108 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
27109 if (sarb->len < pdu_length) {
27110 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
27111 __func__, pdu_length, sarb->len, vcc);
27112 - atomic_inc(&vcc->stats->rx_err);
27113 + atomic_inc_unchecked(&vcc->stats->rx_err);
27114 goto out;
27115 }
27116
27117 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
27118 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
27119 __func__, vcc);
27120 - atomic_inc(&vcc->stats->rx_err);
27121 + atomic_inc_unchecked(&vcc->stats->rx_err);
27122 goto out;
27123 }
27124
27125 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
27126 if (printk_ratelimit())
27127 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
27128 __func__, length);
27129 - atomic_inc(&vcc->stats->rx_drop);
27130 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27131 goto out;
27132 }
27133
27134 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
27135
27136 vcc->push(vcc, skb);
27137
27138 - atomic_inc(&vcc->stats->rx);
27139 + atomic_inc_unchecked(&vcc->stats->rx);
27140 out:
27141 skb_trim(sarb, 0);
27142 }
27143 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
27144 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
27145
27146 usbatm_pop(vcc, skb);
27147 - atomic_inc(&vcc->stats->tx);
27148 + atomic_inc_unchecked(&vcc->stats->tx);
27149
27150 skb = skb_dequeue(&instance->sndqueue);
27151 }
27152 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
27153 if (!left--)
27154 return sprintf(page,
27155 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
27156 - atomic_read(&atm_dev->stats.aal5.tx),
27157 - atomic_read(&atm_dev->stats.aal5.tx_err),
27158 - atomic_read(&atm_dev->stats.aal5.rx),
27159 - atomic_read(&atm_dev->stats.aal5.rx_err),
27160 - atomic_read(&atm_dev->stats.aal5.rx_drop));
27161 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
27162 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
27163 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
27164 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
27165 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
27166
27167 if (!left--) {
27168 if (instance->disconnected)
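
The atomic_inc_unchecked()/atomic_read_unchecked() conversions in the usbatm hunks above follow the PaX REFCOUNT scheme: the regular atomic_t operations gain overflow detection (to catch reference-count overflows), so counters that are allowed to wrap, such as these ATM statistics, are moved to an "unchecked" variant. The real kernel primitives are per-architecture assembly defined elsewhere in the patch; the userspace analogue below only illustrates the split, with a simplified saturation check standing in for the hardware overflow test:

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_demo_t;            /* checked   */
typedef struct { int counter; } atomic_unchecked_demo_t;  /* unchecked */

static void atomic_inc_demo(atomic_demo_t *v)
{
    if (v->counter == INT_MAX) {
        /* real REFCOUNT code reports the event and saturates */
        fprintf(stderr, "refcount overflow caught, not incrementing\n");
        return;
    }
    v->counter++;
}

static void atomic_inc_unchecked_demo(atomic_unchecked_demo_t *v)
{
    v->counter++;   /* plain increment; wrapping is acceptable here */
}

int main(void)
{
    atomic_demo_t ref = { INT_MAX };          /* about to overflow  */
    atomic_unchecked_demo_t rx_err = { 0 };   /* statistics counter */

    atomic_inc_demo(&ref);
    atomic_inc_unchecked_demo(&rx_err);
    printf("ref=%d rx_err=%d\n", ref.counter, rx_err.counter);
    return 0;
}
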
27169 diff -urNp linux-2.6.34.1/drivers/usb/class/cdc-acm.c linux-2.6.34.1/drivers/usb/class/cdc-acm.c
27170 --- linux-2.6.34.1/drivers/usb/class/cdc-acm.c 2010-07-05 14:24:10.000000000 -0400
27171 +++ linux-2.6.34.1/drivers/usb/class/cdc-acm.c 2010-07-07 09:04:54.000000000 -0400
27172 @@ -1618,7 +1618,7 @@ static const struct usb_device_id acm_id
27173 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
27174 USB_CDC_ACM_PROTO_AT_CDMA) },
27175
27176 - { }
27177 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
27178 };
27179
27180 MODULE_DEVICE_TABLE(usb, acm_ids);
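
The "terminating entry" hunks here and in the other device-ID tables below replace the empty-brace sentinel with an entry whose fields are all written out as zero. Both forms initialize the same all-zero object; the patch simply makes the zeros explicit. A standalone sketch of the idiom (made-up table):

#include <stdio.h>

struct demo_id {
    unsigned short vendor;
    unsigned short product;
    unsigned long  driver_info;
};

static const struct demo_id demo_ids[] = {
    { 0x0482, 0x0010, 1 },
    { 0x04b8, 0x0202, 2 },
    { 0, 0, 0 }                 /* terminating entry, same object as { } */
};

int main(void)
{
    for (const struct demo_id *id = demo_ids; id->vendor; id++)
        printf("%04x:%04x -> %lu\n", id->vendor, id->product, id->driver_info);
    return 0;
}
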
27181 diff -urNp linux-2.6.34.1/drivers/usb/class/usblp.c linux-2.6.34.1/drivers/usb/class/usblp.c
27182 --- linux-2.6.34.1/drivers/usb/class/usblp.c 2010-07-05 14:24:10.000000000 -0400
27183 +++ linux-2.6.34.1/drivers/usb/class/usblp.c 2010-07-07 09:04:54.000000000 -0400
27184 @@ -226,7 +226,7 @@ static const struct quirk_printer_struct
27185 { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
27186 { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
27187 { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
27188 - { 0, 0 }
27189 + { 0, 0, 0 }
27190 };
27191
27192 static int usblp_wwait(struct usblp *usblp, int nonblock);
27193 @@ -1398,7 +1398,7 @@ static const struct usb_device_id usblp_
27194 { USB_INTERFACE_INFO(7, 1, 2) },
27195 { USB_INTERFACE_INFO(7, 1, 3) },
27196 { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */
27197 - { } /* Terminating entry */
27198 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
27199 };
27200
27201 MODULE_DEVICE_TABLE (usb, usblp_ids);
27202 diff -urNp linux-2.6.34.1/drivers/usb/core/hcd.c linux-2.6.34.1/drivers/usb/core/hcd.c
27203 --- linux-2.6.34.1/drivers/usb/core/hcd.c 2010-07-05 14:24:10.000000000 -0400
27204 +++ linux-2.6.34.1/drivers/usb/core/hcd.c 2010-07-07 09:04:54.000000000 -0400
27205 @@ -2316,7 +2316,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
27206
27207 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
27208
27209 -struct usb_mon_operations *mon_ops;
27210 +const struct usb_mon_operations *mon_ops;
27211
27212 /*
27213 * The registration is unlocked.
27214 @@ -2326,7 +2326,7 @@ struct usb_mon_operations *mon_ops;
27215 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
27216 */
27217
27218 -int usb_mon_register (struct usb_mon_operations *ops)
27219 +int usb_mon_register (const struct usb_mon_operations *ops)
27220 {
27221
27222 if (mon_ops)
27223 diff -urNp linux-2.6.34.1/drivers/usb/core/hcd.h linux-2.6.34.1/drivers/usb/core/hcd.h
27224 --- linux-2.6.34.1/drivers/usb/core/hcd.h 2010-07-05 14:24:10.000000000 -0400
27225 +++ linux-2.6.34.1/drivers/usb/core/hcd.h 2010-07-07 09:04:54.000000000 -0400
27226 @@ -506,13 +506,13 @@ static inline void usbfs_cleanup(void) {
27227 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
27228
27229 struct usb_mon_operations {
27230 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
27231 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
27232 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
27233 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
27234 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
27235 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
27236 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
27237 };
27238
27239 -extern struct usb_mon_operations *mon_ops;
27240 +extern const struct usb_mon_operations *mon_ops;
27241
27242 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
27243 {
27244 @@ -534,7 +534,7 @@ static inline void usbmon_urb_complete(s
27245 (*mon_ops->urb_complete)(bus, urb, status);
27246 }
27247
27248 -int usb_mon_register(struct usb_mon_operations *ops);
27249 +int usb_mon_register(const struct usb_mon_operations *ops);
27250 void usb_mon_deregister(void);
27251
27252 #else
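
Besides constifying whole instances, the hcd.h hunk above marks the individual function-pointer members of usb_mon_operations as '(* const ...)'. That way even a non-const instance of the struct cannot have its callbacks rebound after initialization. A small sketch with a hypothetical struct:

#include <stdio.h>

struct demo_mon_ops {
    void (* const urb_submit)(int urb);   /* the pointer itself is const */
};

static void real_submit(int urb) { printf("submit urb %d\n", urb); }

static struct demo_mon_ops mon_ops_demo = { .urb_submit = real_submit };

int main(void)
{
    /* mon_ops_demo.urb_submit = NULL;   <-- compile error: const member */
    mon_ops_demo.urb_submit(7);
    return 0;
}
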
27253 diff -urNp linux-2.6.34.1/drivers/usb/core/hub.c linux-2.6.34.1/drivers/usb/core/hub.c
27254 --- linux-2.6.34.1/drivers/usb/core/hub.c 2010-07-05 14:24:10.000000000 -0400
27255 +++ linux-2.6.34.1/drivers/usb/core/hub.c 2010-07-07 09:04:54.000000000 -0400
27256 @@ -3437,7 +3437,7 @@ static const struct usb_device_id hub_id
27257 .bDeviceClass = USB_CLASS_HUB},
27258 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
27259 .bInterfaceClass = USB_CLASS_HUB},
27260 - { } /* Terminating entry */
27261 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
27262 };
27263
27264 MODULE_DEVICE_TABLE (usb, hub_id_table);
27265 diff -urNp linux-2.6.34.1/drivers/usb/core/message.c linux-2.6.34.1/drivers/usb/core/message.c
27266 --- linux-2.6.34.1/drivers/usb/core/message.c 2010-07-05 14:24:10.000000000 -0400
27267 +++ linux-2.6.34.1/drivers/usb/core/message.c 2010-07-07 09:04:54.000000000 -0400
27268 @@ -884,8 +884,8 @@ char *usb_cache_string(struct usb_device
27269 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
27270 if (buf) {
27271 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
27272 - if (len > 0) {
27273 - smallbuf = kmalloc(++len, GFP_NOIO);
27274 + if (len++ > 0) {
27275 + smallbuf = kmalloc(len, GFP_NOIO);
27276 if (!smallbuf)
27277 return buf;
27278 memcpy(smallbuf, buf, len);
27279 diff -urNp linux-2.6.34.1/drivers/usb/host/ehci-pci.c linux-2.6.34.1/drivers/usb/host/ehci-pci.c
27280 --- linux-2.6.34.1/drivers/usb/host/ehci-pci.c 2010-07-05 14:24:10.000000000 -0400
27281 +++ linux-2.6.34.1/drivers/usb/host/ehci-pci.c 2010-07-07 09:04:54.000000000 -0400
27282 @@ -415,7 +415,7 @@ static const struct pci_device_id pci_id
27283 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
27284 .driver_data = (unsigned long) &ehci_pci_hc_driver,
27285 },
27286 - { /* end: all zeroes */ }
27287 + { 0, 0, 0, 0, 0, 0, 0 }
27288 };
27289 MODULE_DEVICE_TABLE(pci, pci_ids);
27290
27291 diff -urNp linux-2.6.34.1/drivers/usb/host/uhci-hcd.c linux-2.6.34.1/drivers/usb/host/uhci-hcd.c
27292 --- linux-2.6.34.1/drivers/usb/host/uhci-hcd.c 2010-07-05 14:24:10.000000000 -0400
27293 +++ linux-2.6.34.1/drivers/usb/host/uhci-hcd.c 2010-07-07 09:04:54.000000000 -0400
27294 @@ -941,7 +941,7 @@ static const struct pci_device_id uhci_p
27295 /* handle any USB UHCI controller */
27296 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
27297 .driver_data = (unsigned long) &uhci_driver,
27298 - }, { /* end: all zeroes */ }
27299 + }, { 0, 0, 0, 0, 0, 0, 0 }
27300 };
27301
27302 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
27303 diff -urNp linux-2.6.34.1/drivers/usb/mon/mon_main.c linux-2.6.34.1/drivers/usb/mon/mon_main.c
27304 --- linux-2.6.34.1/drivers/usb/mon/mon_main.c 2010-07-05 14:24:10.000000000 -0400
27305 +++ linux-2.6.34.1/drivers/usb/mon/mon_main.c 2010-07-07 09:04:54.000000000 -0400
27306 @@ -239,7 +239,7 @@ static struct notifier_block mon_nb = {
27307 /*
27308 * Ops
27309 */
27310 -static struct usb_mon_operations mon_ops_0 = {
27311 +static const struct usb_mon_operations mon_ops_0 = {
27312 .urb_submit = mon_submit,
27313 .urb_submit_error = mon_submit_error,
27314 .urb_complete = mon_complete,
27315 diff -urNp linux-2.6.34.1/drivers/usb/storage/debug.h linux-2.6.34.1/drivers/usb/storage/debug.h
27316 --- linux-2.6.34.1/drivers/usb/storage/debug.h 2010-07-05 14:24:10.000000000 -0400
27317 +++ linux-2.6.34.1/drivers/usb/storage/debug.h 2010-07-07 09:04:54.000000000 -0400
27318 @@ -54,9 +54,9 @@ void usb_stor_show_sense( unsigned char
27319 #define US_DEBUGPX(x...) printk( x )
27320 #define US_DEBUG(x) x
27321 #else
27322 -#define US_DEBUGP(x...)
27323 -#define US_DEBUGPX(x...)
27324 -#define US_DEBUG(x)
27325 +#define US_DEBUGP(x...) do {} while (0)
27326 +#define US_DEBUGPX(x...) do {} while (0)
27327 +#define US_DEBUG(x) do {} while (0)
27328 #endif
27329
27330 #endif
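
The debug.h hunk above (like the DPRINTK change in fbmon.c further down) turns macros that used to expand to nothing into 'do { } while (0)'. With an empty expansion, a use such as 'if (err) US_DEBUGP("...");' collapses to 'if (err) ;', which compilers flag as an empty body and which is easy to misread; the do/while form keeps the macro a single real statement that must be terminated with a semicolon. A sketch mirroring the kernel's GNU-style variadic macros (hypothetical names, builds with gcc/clang):

#include <stdio.h>

#define DBG_OLD(x...)                       /* old: expands to nothing */
#define DBG_NEW(x...) do { } while (0)      /* patched style           */

int main(void)
{
    int err = 1;

    if (err)
        DBG_NEW("bailing out\n");           /* a proper single statement */
    else
        printf("no error\n");

    return 0;
}
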
27331 diff -urNp linux-2.6.34.1/drivers/usb/storage/usb.c linux-2.6.34.1/drivers/usb/storage/usb.c
27332 --- linux-2.6.34.1/drivers/usb/storage/usb.c 2010-07-05 14:24:10.000000000 -0400
27333 +++ linux-2.6.34.1/drivers/usb/storage/usb.c 2010-07-07 09:04:54.000000000 -0400
27334 @@ -122,7 +122,7 @@ MODULE_PARM_DESC(quirks, "supplemental l
27335
27336 static struct us_unusual_dev us_unusual_dev_list[] = {
27337 # include "unusual_devs.h"
27338 - { } /* Terminating entry */
27339 + { NULL, NULL, 0, 0, NULL } /* Terminating entry */
27340 };
27341
27342 #undef UNUSUAL_DEV
27343 diff -urNp linux-2.6.34.1/drivers/usb/storage/usual-tables.c linux-2.6.34.1/drivers/usb/storage/usual-tables.c
27344 --- linux-2.6.34.1/drivers/usb/storage/usual-tables.c 2010-07-05 14:24:10.000000000 -0400
27345 +++ linux-2.6.34.1/drivers/usb/storage/usual-tables.c 2010-07-07 09:04:54.000000000 -0400
27346 @@ -48,7 +48,7 @@
27347
27348 struct usb_device_id usb_storage_usb_ids[] = {
27349 # include "unusual_devs.h"
27350 - { } /* Terminating entry */
27351 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
27352 };
27353 EXPORT_SYMBOL_GPL(usb_storage_usb_ids);
27354
27355 diff -urNp linux-2.6.34.1/drivers/uwb/wlp/messages.c linux-2.6.34.1/drivers/uwb/wlp/messages.c
27356 --- linux-2.6.34.1/drivers/uwb/wlp/messages.c 2010-07-05 14:24:10.000000000 -0400
27357 +++ linux-2.6.34.1/drivers/uwb/wlp/messages.c 2010-07-07 09:04:54.000000000 -0400
27358 @@ -920,7 +920,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
27359 size_t len = skb->len;
27360 size_t used;
27361 ssize_t result;
27362 - struct wlp_nonce enonce, rnonce;
27363 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
27364 enum wlp_assc_error assc_err;
27365 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
27366 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
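
The wlp_parse_f0() hunk above zero-initializes the on-stack nonce structures ('= {{0}}' because the first member is an array), presumably to avoid working with, or later formatting into the string buffers, indeterminate stack bytes if parsing stops partway. A tiny sketch of what that initializer does (hypothetical layout):

#include <stdio.h>
#include <string.h>

struct demo_nonce {
    unsigned char data[16];       /* array first => doubled braces */
};

int main(void)
{
    struct demo_nonce enonce = {{0}};        /* every byte is zero   */
    static const unsigned char zero[16];     /* all-zero reference   */

    printf("all zero: %s\n",
           memcmp(enonce.data, zero, sizeof(zero)) == 0 ? "yes" : "no");
    return 0;
}
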
27367 diff -urNp linux-2.6.34.1/drivers/video/atmel_lcdfb.c linux-2.6.34.1/drivers/video/atmel_lcdfb.c
27368 --- linux-2.6.34.1/drivers/video/atmel_lcdfb.c 2010-07-05 14:24:10.000000000 -0400
27369 +++ linux-2.6.34.1/drivers/video/atmel_lcdfb.c 2010-07-07 09:04:54.000000000 -0400
27370 @@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struc
27371 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
27372 }
27373
27374 -static struct backlight_ops atmel_lcdc_bl_ops = {
27375 +static const struct backlight_ops atmel_lcdc_bl_ops = {
27376 .update_status = atmel_bl_update_status,
27377 .get_brightness = atmel_bl_get_brightness,
27378 };
27379 diff -urNp linux-2.6.34.1/drivers/video/aty/aty128fb.c linux-2.6.34.1/drivers/video/aty/aty128fb.c
27380 --- linux-2.6.34.1/drivers/video/aty/aty128fb.c 2010-07-05 14:24:10.000000000 -0400
27381 +++ linux-2.6.34.1/drivers/video/aty/aty128fb.c 2010-07-07 09:04:54.000000000 -0400
27382 @@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(stru
27383 return bd->props.brightness;
27384 }
27385
27386 -static struct backlight_ops aty128_bl_data = {
27387 +static const struct backlight_ops aty128_bl_data = {
27388 .get_brightness = aty128_bl_get_brightness,
27389 .update_status = aty128_bl_update_status,
27390 };
27391 diff -urNp linux-2.6.34.1/drivers/video/aty/atyfb_base.c linux-2.6.34.1/drivers/video/aty/atyfb_base.c
27392 --- linux-2.6.34.1/drivers/video/aty/atyfb_base.c 2010-07-05 14:24:10.000000000 -0400
27393 +++ linux-2.6.34.1/drivers/video/aty/atyfb_base.c 2010-07-07 09:04:54.000000000 -0400
27394 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
27395 return bd->props.brightness;
27396 }
27397
27398 -static struct backlight_ops aty_bl_data = {
27399 +static const struct backlight_ops aty_bl_data = {
27400 .get_brightness = aty_bl_get_brightness,
27401 .update_status = aty_bl_update_status,
27402 };
27403 diff -urNp linux-2.6.34.1/drivers/video/aty/radeon_backlight.c linux-2.6.34.1/drivers/video/aty/radeon_backlight.c
27404 --- linux-2.6.34.1/drivers/video/aty/radeon_backlight.c 2010-07-05 14:24:10.000000000 -0400
27405 +++ linux-2.6.34.1/drivers/video/aty/radeon_backlight.c 2010-07-07 09:04:54.000000000 -0400
27406 @@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(stru
27407 return bd->props.brightness;
27408 }
27409
27410 -static struct backlight_ops radeon_bl_data = {
27411 +static const struct backlight_ops radeon_bl_data = {
27412 .get_brightness = radeon_bl_get_brightness,
27413 .update_status = radeon_bl_update_status,
27414 };
27415 diff -urNp linux-2.6.34.1/drivers/video/backlight/88pm860x_bl.c linux-2.6.34.1/drivers/video/backlight/88pm860x_bl.c
27416 --- linux-2.6.34.1/drivers/video/backlight/88pm860x_bl.c 2010-07-05 14:24:10.000000000 -0400
27417 +++ linux-2.6.34.1/drivers/video/backlight/88pm860x_bl.c 2010-07-07 09:04:54.000000000 -0400
27418 @@ -155,7 +155,7 @@ out:
27419 return -EINVAL;
27420 }
27421
27422 -static struct backlight_ops pm860x_backlight_ops = {
27423 +static const struct backlight_ops pm860x_backlight_ops = {
27424 .options = BL_CORE_SUSPENDRESUME,
27425 .update_status = pm860x_backlight_update_status,
27426 .get_brightness = pm860x_backlight_get_brightness,
27427 diff -urNp linux-2.6.34.1/drivers/video/backlight/max8925_bl.c linux-2.6.34.1/drivers/video/backlight/max8925_bl.c
27428 --- linux-2.6.34.1/drivers/video/backlight/max8925_bl.c 2010-07-05 14:24:10.000000000 -0400
27429 +++ linux-2.6.34.1/drivers/video/backlight/max8925_bl.c 2010-07-07 09:04:54.000000000 -0400
27430 @@ -92,7 +92,7 @@ static int max8925_backlight_get_brightn
27431 return ret;
27432 }
27433
27434 -static struct backlight_ops max8925_backlight_ops = {
27435 +static const struct backlight_ops max8925_backlight_ops = {
27436 .options = BL_CORE_SUSPENDRESUME,
27437 .update_status = max8925_backlight_update_status,
27438 .get_brightness = max8925_backlight_get_brightness,
27439 diff -urNp linux-2.6.34.1/drivers/video/fbcmap.c linux-2.6.34.1/drivers/video/fbcmap.c
27440 --- linux-2.6.34.1/drivers/video/fbcmap.c 2010-07-05 14:24:10.000000000 -0400
27441 +++ linux-2.6.34.1/drivers/video/fbcmap.c 2010-07-07 09:04:54.000000000 -0400
27442 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
27443 rc = -ENODEV;
27444 goto out;
27445 }
27446 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
27447 - !info->fbops->fb_setcmap)) {
27448 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
27449 rc = -EINVAL;
27450 goto out1;
27451 }
27452 diff -urNp linux-2.6.34.1/drivers/video/fbmem.c linux-2.6.34.1/drivers/video/fbmem.c
27453 --- linux-2.6.34.1/drivers/video/fbmem.c 2010-07-05 14:24:10.000000000 -0400
27454 +++ linux-2.6.34.1/drivers/video/fbmem.c 2010-07-07 09:04:54.000000000 -0400
27455 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
27456 image->dx += image->width + 8;
27457 }
27458 } else if (rotate == FB_ROTATE_UD) {
27459 - for (x = 0; x < num && image->dx >= 0; x++) {
27460 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
27461 info->fbops->fb_imageblit(info, image);
27462 image->dx -= image->width + 8;
27463 }
27464 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
27465 image->dy += image->height + 8;
27466 }
27467 } else if (rotate == FB_ROTATE_CCW) {
27468 - for (x = 0; x < num && image->dy >= 0; x++) {
27469 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
27470 info->fbops->fb_imageblit(info, image);
27471 image->dy -= image->height + 8;
27472 }
27473 @@ -1119,7 +1119,7 @@ static long do_fb_ioctl(struct fb_info *
27474 return -EFAULT;
27475 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
27476 return -EINVAL;
27477 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
27478 + if (con2fb.framebuffer >= FB_MAX)
27479 return -EINVAL;
27480 if (!registered_fb[con2fb.framebuffer])
27481 request_module("fb%d", con2fb.framebuffer);
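
The fbcmap.c and fbmem.c hunks above deal with the same signedness pitfall: the fields involved are unsigned, so a '< 0' test is always false (the dead check in fb_set_user_cmap is dropped) and a '>= 0' loop condition is always true, so an explicit signed cast is added where "went below zero" is the actual intent. A standalone illustration (plain int stands in for __s32, variables invented):

#include <stdio.h>

int main(void)
{
    unsigned int dx = 5;

    dx -= 8;                      /* wraps to a huge unsigned value    */

    if (dx >= 0)                  /* always true for an unsigned type  */
        printf("unsigned view: dx looks non-negative (%u)\n", dx);

    if ((int)dx < 0)              /* the cast recovers the real intent */
        printf("signed view:   dx went below zero (%d)\n", (int)dx);

    return 0;
}
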
27482 diff -urNp linux-2.6.34.1/drivers/video/fbmon.c linux-2.6.34.1/drivers/video/fbmon.c
27483 --- linux-2.6.34.1/drivers/video/fbmon.c 2010-07-05 14:24:10.000000000 -0400
27484 +++ linux-2.6.34.1/drivers/video/fbmon.c 2010-07-07 09:04:54.000000000 -0400
27485 @@ -46,7 +46,7 @@
27486 #ifdef DEBUG
27487 #define DPRINTK(fmt, args...) printk(fmt,## args)
27488 #else
27489 -#define DPRINTK(fmt, args...)
27490 +#define DPRINTK(fmt, args...) do {} while (0)
27491 #endif
27492
27493 #define FBMON_FIX_HEADER 1
27494 diff -urNp linux-2.6.34.1/drivers/video/i810/i810_accel.c linux-2.6.34.1/drivers/video/i810/i810_accel.c
27495 --- linux-2.6.34.1/drivers/video/i810/i810_accel.c 2010-07-05 14:24:10.000000000 -0400
27496 +++ linux-2.6.34.1/drivers/video/i810/i810_accel.c 2010-07-07 09:04:54.000000000 -0400
27497 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
27498 }
27499 }
27500 printk("ringbuffer lockup!!!\n");
27501 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
27502 i810_report_error(mmio);
27503 par->dev_flags |= LOCKUP;
27504 info->pixmap.scan_align = 1;
27505 diff -urNp linux-2.6.34.1/drivers/video/i810/i810_main.c linux-2.6.34.1/drivers/video/i810/i810_main.c
27506 --- linux-2.6.34.1/drivers/video/i810/i810_main.c 2010-07-05 14:24:10.000000000 -0400
27507 +++ linux-2.6.34.1/drivers/video/i810/i810_main.c 2010-07-07 09:04:54.000000000 -0400
27508 @@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t
27509 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
27510 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC,
27511 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
27512 - { 0 },
27513 + { 0, 0, 0, 0, 0, 0, 0 },
27514 };
27515
27516 static struct pci_driver i810fb_driver = {
27517 diff -urNp linux-2.6.34.1/drivers/video/modedb.c linux-2.6.34.1/drivers/video/modedb.c
27518 --- linux-2.6.34.1/drivers/video/modedb.c 2010-07-05 14:24:10.000000000 -0400
27519 +++ linux-2.6.34.1/drivers/video/modedb.c 2010-07-07 09:04:54.000000000 -0400
27520 @@ -40,240 +40,240 @@ static const struct fb_videomode modedb[
27521 {
27522 /* 640x400 @ 70 Hz, 31.5 kHz hsync */
27523 NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
27524 - 0, FB_VMODE_NONINTERLACED
27525 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27526 }, {
27527 /* 640x480 @ 60 Hz, 31.5 kHz hsync */
27528 NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
27529 - 0, FB_VMODE_NONINTERLACED
27530 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27531 }, {
27532 /* 800x600 @ 56 Hz, 35.15 kHz hsync */
27533 NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
27534 - 0, FB_VMODE_NONINTERLACED
27535 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27536 }, {
27537 /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
27538 NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
27539 - 0, FB_VMODE_INTERLACED
27540 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
27541 }, {
27542 /* 640x400 @ 85 Hz, 37.86 kHz hsync */
27543 NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
27544 - FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27545 + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27546 }, {
27547 /* 640x480 @ 72 Hz, 36.5 kHz hsync */
27548 NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
27549 - 0, FB_VMODE_NONINTERLACED
27550 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27551 }, {
27552 /* 640x480 @ 75 Hz, 37.50 kHz hsync */
27553 NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
27554 - 0, FB_VMODE_NONINTERLACED
27555 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27556 }, {
27557 /* 800x600 @ 60 Hz, 37.8 kHz hsync */
27558 NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
27559 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27560 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27561 }, {
27562 /* 640x480 @ 85 Hz, 43.27 kHz hsync */
27563 NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
27564 - 0, FB_VMODE_NONINTERLACED
27565 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27566 }, {
27567 /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
27568 NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
27569 - 0, FB_VMODE_INTERLACED
27570 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
27571 }, {
27572 /* 800x600 @ 72 Hz, 48.0 kHz hsync */
27573 NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
27574 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27575 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27576 }, {
27577 /* 1024x768 @ 60 Hz, 48.4 kHz hsync */
27578 NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
27579 - 0, FB_VMODE_NONINTERLACED
27580 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27581 }, {
27582 /* 640x480 @ 100 Hz, 53.01 kHz hsync */
27583 NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
27584 - 0, FB_VMODE_NONINTERLACED
27585 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27586 }, {
27587 /* 1152x864 @ 60 Hz, 53.5 kHz hsync */
27588 NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
27589 - 0, FB_VMODE_NONINTERLACED
27590 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27591 }, {
27592 /* 800x600 @ 85 Hz, 55.84 kHz hsync */
27593 NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
27594 - 0, FB_VMODE_NONINTERLACED
27595 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27596 }, {
27597 /* 1024x768 @ 70 Hz, 56.5 kHz hsync */
27598 NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
27599 - 0, FB_VMODE_NONINTERLACED
27600 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27601 }, {
27602 /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
27603 NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
27604 - 0, FB_VMODE_INTERLACED
27605 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
27606 }, {
27607 /* 800x600 @ 100 Hz, 64.02 kHz hsync */
27608 NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
27609 - 0, FB_VMODE_NONINTERLACED
27610 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27611 }, {
27612 /* 1024x768 @ 76 Hz, 62.5 kHz hsync */
27613 NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
27614 - 0, FB_VMODE_NONINTERLACED
27615 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27616 }, {
27617 /* 1152x864 @ 70 Hz, 62.4 kHz hsync */
27618 NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
27619 - 0, FB_VMODE_NONINTERLACED
27620 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27621 }, {
27622 /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
27623 NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
27624 - 0, FB_VMODE_NONINTERLACED
27625 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27626 }, {
27627 /* 1400x1050 @ 60Hz, 63.9 kHz hsync */
27628 NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
27629 - 0, FB_VMODE_NONINTERLACED
27630 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27631 }, {
27632 /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
27633 NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
27634 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27635 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27636 }, {
27637 /* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
27638 NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
27639 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27640 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27641 }, {
27642 /* 1024x768 @ 85 Hz, 70.24 kHz hsync */
27643 NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
27644 - 0, FB_VMODE_NONINTERLACED
27645 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27646 }, {
27647 /* 1152x864 @ 78 Hz, 70.8 kHz hsync */
27648 NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
27649 - 0, FB_VMODE_NONINTERLACED
27650 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27651 }, {
27652 /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
27653 NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
27654 - 0, FB_VMODE_NONINTERLACED
27655 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27656 }, {
27657 /* 1600x1200 @ 60Hz, 75.00 kHz hsync */
27658 NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
27659 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27660 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27661 }, {
27662 /* 1152x864 @ 84 Hz, 76.0 kHz hsync */
27663 NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
27664 - 0, FB_VMODE_NONINTERLACED
27665 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27666 }, {
27667 /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
27668 NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
27669 - 0, FB_VMODE_NONINTERLACED
27670 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27671 }, {
27672 /* 1024x768 @ 100Hz, 80.21 kHz hsync */
27673 NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
27674 - 0, FB_VMODE_NONINTERLACED
27675 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27676 }, {
27677 /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
27678 NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
27679 - 0, FB_VMODE_NONINTERLACED
27680 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27681 }, {
27682 /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
27683 NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
27684 - 0, FB_VMODE_NONINTERLACED
27685 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27686 }, {
27687 /* 1152x864 @ 100 Hz, 89.62 kHz hsync */
27688 NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
27689 - 0, FB_VMODE_NONINTERLACED
27690 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27691 }, {
27692 /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
27693 NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
27694 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27695 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27696 }, {
27697 /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
27698 NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
27699 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27700 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27701 }, {
27702 /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
27703 NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
27704 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27705 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27706 }, {
27707 /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
27708 NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
27709 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27710 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27711 }, {
27712 /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
27713 NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
27714 - 0, FB_VMODE_NONINTERLACED
27715 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27716 }, {
27717 /* 1800x1440 @ 64Hz, 96.15 kHz hsync */
27718 NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
27719 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27720 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27721 }, {
27722 /* 1800x1440 @ 70Hz, 104.52 kHz hsync */
27723 NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
27724 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27725 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27726 }, {
27727 /* 512x384 @ 78 Hz, 31.50 kHz hsync */
27728 NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
27729 - 0, FB_VMODE_NONINTERLACED
27730 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27731 }, {
27732 /* 512x384 @ 85 Hz, 34.38 kHz hsync */
27733 NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
27734 - 0, FB_VMODE_NONINTERLACED
27735 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27736 }, {
27737 /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
27738 NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
27739 - 0, FB_VMODE_DOUBLE
27740 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27741 }, {
27742 /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
27743 NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
27744 - 0, FB_VMODE_DOUBLE
27745 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27746 }, {
27747 /* 320x240 @ 72 Hz, 36.5 kHz hsync */
27748 NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
27749 - 0, FB_VMODE_DOUBLE
27750 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27751 }, {
27752 /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
27753 NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
27754 - 0, FB_VMODE_DOUBLE
27755 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27756 }, {
27757 /* 400x300 @ 60 Hz, 37.8 kHz hsync */
27758 NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
27759 - 0, FB_VMODE_DOUBLE
27760 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27761 }, {
27762 /* 400x300 @ 72 Hz, 48.0 kHz hsync */
27763 NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
27764 - 0, FB_VMODE_DOUBLE
27765 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27766 }, {
27767 /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
27768 NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
27769 - 0, FB_VMODE_DOUBLE
27770 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27771 }, {
27772 /* 480x300 @ 60 Hz, 37.8 kHz hsync */
27773 NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
27774 - 0, FB_VMODE_DOUBLE
27775 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27776 }, {
27777 /* 480x300 @ 63 Hz, 39.6 kHz hsync */
27778 NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
27779 - 0, FB_VMODE_DOUBLE
27780 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27781 }, {
27782 /* 480x300 @ 72 Hz, 48.0 kHz hsync */
27783 NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
27784 - 0, FB_VMODE_DOUBLE
27785 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
27786 }, {
27787 /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
27788 NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
27789 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
27790 - FB_VMODE_NONINTERLACED
27791 + FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27792 }, {
27793 /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
27794 NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
27795 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
27796 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27797 }, {
27798 /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
27799 NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
27800 - 0, FB_VMODE_NONINTERLACED
27801 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27802 }, {
27803 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
27804 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
27805 - 0, FB_VMODE_NONINTERLACED
27806 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
27807 }, {
27808 /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
27809 NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
27810 - 0, FB_VMODE_INTERLACED
27811 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
27812 }, {
27813 /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
27814 NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
27815 - 0, FB_VMODE_INTERLACED
27816 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
27817 },
27818 };
27819
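
The modedb hunk above appends a final flag value (FB_MODE_IS_UNKNOWN, which expands to 0) to every positional initializer, so the last member is written explicitly instead of being left to implicit zero-initialization. The resulting objects are unchanged; only the source is more explicit. A minimal sketch with an invented struct:

#include <stdio.h>

struct demo_mode {
    int refresh;
    int xres;
    int yres;
    int flag;                         /* plays the role of fb_videomode.flag */
};

#define MODE_IS_UNKNOWN 0             /* stand-in for FB_MODE_IS_UNKNOWN */

static const struct demo_mode implicit_mode = { 60, 640, 480 };
static const struct demo_mode explicit_mode = { 60, 640, 480, MODE_IS_UNKNOWN };

int main(void)
{
    printf("implicit.flag=%d explicit.flag=%d\n",
           implicit_mode.flag, explicit_mode.flag);
    return 0;
}
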
27820 diff -urNp linux-2.6.34.1/drivers/video/nvidia/nv_backlight.c linux-2.6.34.1/drivers/video/nvidia/nv_backlight.c
27821 --- linux-2.6.34.1/drivers/video/nvidia/nv_backlight.c 2010-07-05 14:24:10.000000000 -0400
27822 +++ linux-2.6.34.1/drivers/video/nvidia/nv_backlight.c 2010-07-07 09:04:54.000000000 -0400
27823 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
27824 return bd->props.brightness;
27825 }
27826
27827 -static struct backlight_ops nvidia_bl_ops = {
27828 +static const struct backlight_ops nvidia_bl_ops = {
27829 .get_brightness = nvidia_bl_get_brightness,
27830 .update_status = nvidia_bl_update_status,
27831 };
27832 diff -urNp linux-2.6.34.1/drivers/video/omap2/displays/panel-taal.c linux-2.6.34.1/drivers/video/omap2/displays/panel-taal.c
27833 --- linux-2.6.34.1/drivers/video/omap2/displays/panel-taal.c 2010-07-05 14:24:10.000000000 -0400
27834 +++ linux-2.6.34.1/drivers/video/omap2/displays/panel-taal.c 2010-07-07 09:04:54.000000000 -0400
27835 @@ -316,7 +316,7 @@ static int taal_bl_get_intensity(struct
27836 return 0;
27837 }
27838
27839 -static struct backlight_ops taal_bl_ops = {
27840 +static const struct backlight_ops taal_bl_ops = {
27841 .get_brightness = taal_bl_get_intensity,
27842 .update_status = taal_bl_update_status,
27843 };
27844 diff -urNp linux-2.6.34.1/drivers/video/riva/fbdev.c linux-2.6.34.1/drivers/video/riva/fbdev.c
27845 --- linux-2.6.34.1/drivers/video/riva/fbdev.c 2010-07-05 14:24:10.000000000 -0400
27846 +++ linux-2.6.34.1/drivers/video/riva/fbdev.c 2010-07-07 09:04:54.000000000 -0400
27847 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
27848 return bd->props.brightness;
27849 }
27850
27851 -static struct backlight_ops riva_bl_ops = {
27852 +static const struct backlight_ops riva_bl_ops = {
27853 .get_brightness = riva_bl_get_brightness,
27854 .update_status = riva_bl_update_status,
27855 };
27856 diff -urNp linux-2.6.34.1/drivers/video/uvesafb.c linux-2.6.34.1/drivers/video/uvesafb.c
27857 --- linux-2.6.34.1/drivers/video/uvesafb.c 2010-07-05 14:24:10.000000000 -0400
27858 +++ linux-2.6.34.1/drivers/video/uvesafb.c 2010-07-07 09:04:54.000000000 -0400
27859 @@ -19,6 +19,7 @@
27860 #include <linux/io.h>
27861 #include <linux/mutex.h>
27862 #include <linux/slab.h>
27863 +#include <linux/moduleloader.h>
27864 #include <video/edid.h>
27865 #include <video/uvesafb.h>
27866 #ifdef CONFIG_X86
27867 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
27868 NULL,
27869 };
27870
27871 - return call_usermodehelper(v86d_path, argv, envp, 1);
27872 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
27873 }
27874
27875 /*
27876 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
27877 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
27878 par->pmi_setpal = par->ypan = 0;
27879 } else {
27880 +
27881 +#ifdef CONFIG_PAX_KERNEXEC
27882 +#ifdef CONFIG_MODULES
27883 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
27884 +#endif
27885 + if (!par->pmi_code) {
27886 + par->pmi_setpal = par->ypan = 0;
27887 + return 0;
27888 + }
27889 +#endif
27890 +
27891 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
27892 + task->t.regs.edi);
27893 +
27894 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
27895 + pax_open_kernel();
27896 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
27897 + pax_close_kernel();
27898 +
27899 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
27900 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
27901 +#else
27902 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
27903 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
27904 +#endif
27905 +
27906 printk(KERN_INFO "uvesafb: protected mode interface info at "
27907 "%04x:%04x\n",
27908 (u16)task->t.regs.es, (u16)task->t.regs.edi);
27909 @@ -1800,6 +1823,11 @@ out:
27910 if (par->vbe_modes)
27911 kfree(par->vbe_modes);
27912
27913 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
27914 + if (par->pmi_code)
27915 + module_free_exec(NULL, par->pmi_code);
27916 +#endif
27917 +
27918 framebuffer_release(info);
27919 return err;
27920 }
27921 @@ -1826,6 +1854,12 @@ static int uvesafb_remove(struct platfor
27922 kfree(par->vbe_state_orig);
27923 if (par->vbe_state_saved)
27924 kfree(par->vbe_state_saved);
27925 +
27926 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
27927 + if (par->pmi_code)
27928 + module_free_exec(NULL, par->pmi_code);
27929 +#endif
27930 +
27931 }
27932
27933 framebuffer_release(info);
27934 diff -urNp linux-2.6.34.1/drivers/video/vesafb.c linux-2.6.34.1/drivers/video/vesafb.c
27935 --- linux-2.6.34.1/drivers/video/vesafb.c 2010-07-05 14:24:10.000000000 -0400
27936 +++ linux-2.6.34.1/drivers/video/vesafb.c 2010-07-07 09:04:54.000000000 -0400
27937 @@ -9,6 +9,7 @@
27938 */
27939
27940 #include <linux/module.h>
27941 +#include <linux/moduleloader.h>
27942 #include <linux/kernel.h>
27943 #include <linux/errno.h>
27944 #include <linux/string.h>
27945 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
27946 static int vram_total __initdata; /* Set total amount of memory */
27947 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
27948 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
27949 -static void (*pmi_start)(void) __read_mostly;
27950 -static void (*pmi_pal) (void) __read_mostly;
27951 +static void (*pmi_start)(void) __read_only;
27952 +static void (*pmi_pal) (void) __read_only;
27953 static int depth __read_mostly;
27954 static int vga_compat __read_mostly;
27955 /* --------------------------------------------------------------------- */
27956 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
27957 unsigned int size_vmode;
27958 unsigned int size_remap;
27959 unsigned int size_total;
27960 + void *pmi_code = NULL;
27961
27962 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
27963 return -ENODEV;
27964 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
27965 size_remap = size_total;
27966 vesafb_fix.smem_len = size_remap;
27967
27968 -#ifndef __i386__
27969 - screen_info.vesapm_seg = 0;
27970 -#endif
27971 -
27972 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
27973 printk(KERN_WARNING
27974 "vesafb: cannot reserve video memory at 0x%lx\n",
27975 @@ -314,9 +312,21 @@ static int __init vesafb_probe(struct pl
27976 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
27977 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
27978
27979 +#ifdef __i386__
27980 +
27981 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
27982 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
27983 + if (!pmi_code)
27984 +#elif !defined(CONFIG_PAX_KERNEXEC)
27985 + if (0)
27986 +#endif
27987 +
27988 +#endif
27989 + screen_info.vesapm_seg = 0;
27990 +
27991 if (screen_info.vesapm_seg) {
27992 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
27993 - screen_info.vesapm_seg,screen_info.vesapm_off);
27994 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
27995 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
27996 }
27997
27998 if (screen_info.vesapm_seg < 0xc000)
27999 @@ -324,9 +334,25 @@ static int __init vesafb_probe(struct pl
28000
28001 if (ypan || pmi_setpal) {
28002 unsigned short *pmi_base;
28003 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
28004 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
28005 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
28006 +
28007 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
28008 +
28009 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28010 + pax_open_kernel();
28011 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
28012 +#else
28013 + pmi_code = pmi_base;
28014 +#endif
28015 +
28016 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
28017 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
28018 +
28019 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28020 + pmi_start = ktva_ktla(pmi_start);
28021 + pmi_pal = ktva_ktla(pmi_pal);
28022 + pax_close_kernel();
28023 +#endif
28024 +
28025 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
28026 if (pmi_base[3]) {
28027 printk(KERN_INFO "vesafb: pmi: ports = ");
28028 @@ -468,6 +494,11 @@ static int __init vesafb_probe(struct pl
28029 info->node, info->fix.id);
28030 return 0;
28031 err:
28032 +
28033 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28034 + module_free_exec(NULL, pmi_code);
28035 +#endif
28036 +
28037 if (info->screen_base)
28038 iounmap(info->screen_base);
28039 framebuffer_release(info);
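
The uvesafb and vesafb changes above share one pattern: under PAX_KERNEXEC the BIOS protected-mode interface code can no longer be executed where it was mapped, so the patch allocates executable module space (module_alloc_exec), copies the PMI code into it between pax_open_kernel()/pax_close_kernel(), and re-aims pmi_start/pmi_pal at the copy (translated with ktva_ktla). Those helpers are grsecurity-specific; the userspace analogue below only illustrates the general "copy the code, then map it executable, then call through the new pointers" idea, and the machine-code blob is x86/x86-64 Linux only:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* stand-in "PMI code": mov eax, 42; ret */
    static const unsigned char blob[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

    /* 1. writable staging area (module_alloc_exec's role) */
    void *code = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (code == MAP_FAILED)
        return 1;

    /* 2. copy the code in (the pax_open_kernel/memcpy/pax_close_kernel step) */
    memcpy(code, blob, sizeof(blob));

    /* 3. make it read+execute and call through the relocated pointer */
    if (mprotect(code, 4096, PROT_READ | PROT_EXEC) != 0)
        return 1;

    int (*pmi_start)(void) = (int (*)(void))code;
    printf("copied code returned %d\n", pmi_start());

    munmap(code, 4096);
    return 0;
}
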
28040 diff -urNp linux-2.6.34.1/fs/9p/vfs_inode.c linux-2.6.34.1/fs/9p/vfs_inode.c
28041 --- linux-2.6.34.1/fs/9p/vfs_inode.c 2010-07-05 14:24:10.000000000 -0400
28042 +++ linux-2.6.34.1/fs/9p/vfs_inode.c 2010-07-07 09:04:54.000000000 -0400
28043 @@ -1067,7 +1067,7 @@ static void *v9fs_vfs_follow_link(struct
28044 static void
28045 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
28046 {
28047 - char *s = nd_get_link(nd);
28048 + const char *s = nd_get_link(nd);
28049
28050 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
28051 IS_ERR(s) ? "<error>" : s);
28052 diff -urNp linux-2.6.34.1/fs/aio.c linux-2.6.34.1/fs/aio.c
28053 --- linux-2.6.34.1/fs/aio.c 2010-07-05 14:24:10.000000000 -0400
28054 +++ linux-2.6.34.1/fs/aio.c 2010-07-07 09:04:54.000000000 -0400
28055 @@ -130,7 +130,7 @@ static int aio_setup_ring(struct kioctx
28056 size += sizeof(struct io_event) * nr_events;
28057 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
28058
28059 - if (nr_pages < 0)
28060 + if (nr_pages <= 0)
28061 return -EINVAL;
28062
28063 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
28064 diff -urNp linux-2.6.34.1/fs/attr.c linux-2.6.34.1/fs/attr.c
28065 --- linux-2.6.34.1/fs/attr.c 2010-07-05 14:24:10.000000000 -0400
28066 +++ linux-2.6.34.1/fs/attr.c 2010-07-07 09:04:54.000000000 -0400
28067 @@ -82,6 +82,7 @@ int inode_newsize_ok(const struct inode
28068 unsigned long limit;
28069
28070 limit = rlimit(RLIMIT_FSIZE);
28071 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
28072 if (limit != RLIM_INFINITY && offset > limit)
28073 goto out_sig;
28074 if (offset > inode->i_sb->s_maxbytes)
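
The gr_learn_resource() call added above (and again in binfmt_aout.c below) feeds grsecurity's learning mode: just before the rlimit check, it records how much of a resource the task actually asked for, so that sensible limits can be suggested later. The real function is defined elsewhere in the patch; the stub below only mirrors the call shape visible here (task, resource, requested amount, greater-than flag) and is purely illustrative:

#include <stdio.h>
#include <sys/resource.h>

struct demo_task { const char *comm; };

static void gr_learn_resource_stub(const struct demo_task *task, int res,
                                   unsigned long wanted, int gt)
{
    /* a real implementation records a per-task high-water mark */
    printf("learn: %s wants %lu for resource %d (gt=%d)\n",
           task->comm, wanted, res, gt);
}

int main(void)
{
    struct demo_task current_task = { "truncate" };
    unsigned long offset = 1UL << 20;        /* requested file size      */
    unsigned long limit  = 512UL << 10;      /* pretend RLIMIT_FSIZE cap */

    gr_learn_resource_stub(&current_task, RLIMIT_FSIZE, offset, 1);
    if (offset > limit)
        printf("request exceeds the limit and would be refused\n");
    return 0;
}
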
28075 diff -urNp linux-2.6.34.1/fs/autofs/root.c linux-2.6.34.1/fs/autofs/root.c
28076 --- linux-2.6.34.1/fs/autofs/root.c 2010-07-05 14:24:10.000000000 -0400
28077 +++ linux-2.6.34.1/fs/autofs/root.c 2010-07-07 09:04:54.000000000 -0400
28078 @@ -300,7 +300,8 @@ static int autofs_root_symlink(struct in
28079 set_bit(n,sbi->symlink_bitmap);
28080 sl = &sbi->symlink[n];
28081 sl->len = strlen(symname);
28082 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
28083 + slsize = sl->len+1;
28084 + sl->data = kmalloc(slsize, GFP_KERNEL);
28085 if (!sl->data) {
28086 clear_bit(n,sbi->symlink_bitmap);
28087 unlock_kernel();
28088 diff -urNp linux-2.6.34.1/fs/autofs4/symlink.c linux-2.6.34.1/fs/autofs4/symlink.c
28089 --- linux-2.6.34.1/fs/autofs4/symlink.c 2010-07-05 14:24:10.000000000 -0400
28090 +++ linux-2.6.34.1/fs/autofs4/symlink.c 2010-07-07 09:04:54.000000000 -0400
28091 @@ -15,7 +15,7 @@
28092 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
28093 {
28094 struct autofs_info *ino = autofs4_dentry_ino(dentry);
28095 - nd_set_link(nd, (char *)ino->u.symlink);
28096 + nd_set_link(nd, ino->u.symlink);
28097 return NULL;
28098 }
28099
28100 diff -urNp linux-2.6.34.1/fs/befs/linuxvfs.c linux-2.6.34.1/fs/befs/linuxvfs.c
28101 --- linux-2.6.34.1/fs/befs/linuxvfs.c 2010-07-05 14:24:10.000000000 -0400
28102 +++ linux-2.6.34.1/fs/befs/linuxvfs.c 2010-07-07 09:04:54.000000000 -0400
28103 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
28104 {
28105 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
28106 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
28107 - char *link = nd_get_link(nd);
28108 + const char *link = nd_get_link(nd);
28109 if (!IS_ERR(link))
28110 kfree(link);
28111 }
28112 diff -urNp linux-2.6.34.1/fs/binfmt_aout.c linux-2.6.34.1/fs/binfmt_aout.c
28113 --- linux-2.6.34.1/fs/binfmt_aout.c 2010-07-05 14:24:10.000000000 -0400
28114 +++ linux-2.6.34.1/fs/binfmt_aout.c 2010-07-07 09:04:54.000000000 -0400
28115 @@ -16,6 +16,7 @@
28116 #include <linux/string.h>
28117 #include <linux/fs.h>
28118 #include <linux/file.h>
28119 +#include <linux/security.h>
28120 #include <linux/stat.h>
28121 #include <linux/fcntl.h>
28122 #include <linux/ptrace.h>
28123 @@ -97,10 +98,12 @@ static int aout_core_dump(struct coredum
28124
28125 /* If the size of the dump file exceeds the rlimit, then see what would happen
28126 if we wrote the stack, but not the data area. */
28127 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
28128 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
28129 dump.u_dsize = 0;
28130
28131 /* Make sure we have enough room to write the stack and data areas. */
28132 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
28133 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
28134 dump.u_ssize = 0;
28135
28136 @@ -238,6 +241,8 @@ static int load_aout_binary(struct linux
28137 rlim = rlimit(RLIMIT_DATA);
28138 if (rlim >= RLIM_INFINITY)
28139 rlim = ~0;
28140 +
28141 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
28142 if (ex.a_data + ex.a_bss > rlim)
28143 return -ENOMEM;
28144
28145 @@ -266,6 +271,27 @@ static int load_aout_binary(struct linux
28146 install_exec_creds(bprm);
28147 current->flags &= ~PF_FORKNOEXEC;
28148
28149 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
28150 + current->mm->pax_flags = 0UL;
28151 +#endif
28152 +
28153 +#ifdef CONFIG_PAX_PAGEEXEC
28154 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
28155 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
28156 +
28157 +#ifdef CONFIG_PAX_EMUTRAMP
28158 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
28159 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
28160 +#endif
28161 +
28162 +#ifdef CONFIG_PAX_MPROTECT
28163 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
28164 + current->mm->pax_flags |= MF_PAX_MPROTECT;
28165 +#endif
28166 +
28167 + }
28168 +#endif
28169 +
28170 if (N_MAGIC(ex) == OMAGIC) {
28171 unsigned long text_addr, map_size;
28172 loff_t pos;
28173 @@ -338,7 +364,7 @@ static int load_aout_binary(struct linux
28174
28175 down_write(&current->mm->mmap_sem);
28176 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
28177 - PROT_READ | PROT_WRITE | PROT_EXEC,
28178 + PROT_READ | PROT_WRITE,
28179 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
28180 fd_offset + ex.a_text);
28181 up_write(&current->mm->mmap_sem);
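
Two things happen in the binfmt_aout.c hunks above: the data segment loses PROT_EXEC from its mapping, and the loader derives per-process PaX flags from the a.out header, where PAGEEXEC and MPROTECT are enabled unless their opt-out bit is set (EMUTRAMP, by contrast, is opt-in). The sketch below mirrors that inverted-bit logic with invented bit values; the real F_PAX_*/MF_PAX_* constants live in the patch's headers:

#include <stdio.h>

#define F_PAX_PAGEEXEC  0x01U   /* header bit: opt out of PAGEEXEC */
#define F_PAX_MPROTECT  0x04U   /* header bit: opt out of MPROTECT */

#define MF_PAX_PAGEEXEC 0x01UL  /* mm flag: PAGEEXEC enabled */
#define MF_PAX_MPROTECT 0x02UL  /* mm flag: MPROTECT enabled */

static unsigned long pax_flags_from_aout(unsigned long n_flags)
{
    unsigned long pax_flags = 0UL;

    if (!(n_flags & F_PAX_PAGEEXEC))    /* bit clear => feature on */
        pax_flags |= MF_PAX_PAGEEXEC;
    if (!(n_flags & F_PAX_MPROTECT))
        pax_flags |= MF_PAX_MPROTECT;
    return pax_flags;
}

int main(void)
{
    printf("no opt-outs      -> %#lx\n", pax_flags_from_aout(0));
    printf("MPROTECT opt-out -> %#lx\n", pax_flags_from_aout(F_PAX_MPROTECT));
    return 0;
}
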
28182 diff -urNp linux-2.6.34.1/fs/binfmt_elf.c linux-2.6.34.1/fs/binfmt_elf.c
28183 --- linux-2.6.34.1/fs/binfmt_elf.c 2010-07-05 14:24:10.000000000 -0400
28184 +++ linux-2.6.34.1/fs/binfmt_elf.c 2010-07-07 09:04:54.000000000 -0400
28185 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
28186 #define elf_core_dump NULL
28187 #endif
28188
28189 +#ifdef CONFIG_PAX_MPROTECT
28190 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
28191 +#endif
28192 +
28193 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
28194 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
28195 #else
28196 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
28197 .load_binary = load_elf_binary,
28198 .load_shlib = load_elf_library,
28199 .core_dump = elf_core_dump,
28200 +
28201 +#ifdef CONFIG_PAX_MPROTECT
28202 + .handle_mprotect= elf_handle_mprotect,
28203 +#endif
28204 +
28205 .min_coredump = ELF_EXEC_PAGESIZE,
28206 .hasvdso = 1
28207 };
28208 @@ -78,6 +87,8 @@ static struct linux_binfmt elf_format =
28209
28210 static int set_brk(unsigned long start, unsigned long end)
28211 {
28212 + unsigned long e = end;
28213 +
28214 start = ELF_PAGEALIGN(start);
28215 end = ELF_PAGEALIGN(end);
28216 if (end > start) {
28217 @@ -88,7 +99,7 @@ static int set_brk(unsigned long start,
28218 if (BAD_ADDR(addr))
28219 return addr;
28220 }
28221 - current->mm->start_brk = current->mm->brk = end;
28222 + current->mm->start_brk = current->mm->brk = e;
28223 return 0;
28224 }
28225
28226 @@ -149,7 +160,7 @@ create_elf_tables(struct linux_binprm *b
28227 elf_addr_t __user *u_rand_bytes;
28228 const char *k_platform = ELF_PLATFORM;
28229 const char *k_base_platform = ELF_BASE_PLATFORM;
28230 - unsigned char k_rand_bytes[16];
28231 + u32 k_rand_bytes[4];
28232 int items;
28233 elf_addr_t *elf_info;
28234 int ei_index = 0;
28235 @@ -196,8 +207,12 @@ create_elf_tables(struct linux_binprm *b
28236 * Generate 16 random bytes for userspace PRNG seeding.
28237 */
28238 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
28239 - u_rand_bytes = (elf_addr_t __user *)
28240 - STACK_ALLOC(p, sizeof(k_rand_bytes));
28241 + srandom32(k_rand_bytes[0] ^ random32());
28242 + srandom32(k_rand_bytes[1] ^ random32());
28243 + srandom32(k_rand_bytes[2] ^ random32());
28244 + srandom32(k_rand_bytes[3] ^ random32());
28245 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
28246 + u_rand_bytes = (elf_addr_t __user *) p;
28247 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
28248 return -EFAULT;
28249
28250 @@ -386,10 +401,10 @@ static unsigned long load_elf_interp(str
28251 {
28252 struct elf_phdr *elf_phdata;
28253 struct elf_phdr *eppnt;
28254 - unsigned long load_addr = 0;
28255 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
28256 int load_addr_set = 0;
28257 unsigned long last_bss = 0, elf_bss = 0;
28258 - unsigned long error = ~0UL;
28259 + unsigned long error = -EINVAL;
28260 unsigned long total_size;
28261 int retval, i, size;
28262
28263 @@ -435,6 +450,11 @@ static unsigned long load_elf_interp(str
28264 goto out_close;
28265 }
28266
28267 +#ifdef CONFIG_PAX_SEGMEXEC
28268 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
28269 + pax_task_size = SEGMEXEC_TASK_SIZE;
28270 +#endif
28271 +
28272 eppnt = elf_phdata;
28273 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
28274 if (eppnt->p_type == PT_LOAD) {
28275 @@ -478,8 +498,8 @@ static unsigned long load_elf_interp(str
28276 k = load_addr + eppnt->p_vaddr;
28277 if (BAD_ADDR(k) ||
28278 eppnt->p_filesz > eppnt->p_memsz ||
28279 - eppnt->p_memsz > TASK_SIZE ||
28280 - TASK_SIZE - eppnt->p_memsz < k) {
28281 + eppnt->p_memsz > pax_task_size ||
28282 + pax_task_size - eppnt->p_memsz < k) {
28283 error = -ENOMEM;
28284 goto out_close;
28285 }
28286 @@ -533,6 +553,177 @@ out:
28287 return error;
28288 }
28289
28290 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
28291 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
28292 +{
28293 + unsigned long pax_flags = 0UL;
28294 +
28295 +#ifdef CONFIG_PAX_PAGEEXEC
28296 + if (elf_phdata->p_flags & PF_PAGEEXEC)
28297 + pax_flags |= MF_PAX_PAGEEXEC;
28298 +#endif
28299 +
28300 +#ifdef CONFIG_PAX_SEGMEXEC
28301 + if (elf_phdata->p_flags & PF_SEGMEXEC)
28302 + pax_flags |= MF_PAX_SEGMEXEC;
28303 +#endif
28304 +
28305 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
28306 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
28307 + if ((__supported_pte_mask & _PAGE_NX))
28308 + pax_flags &= ~MF_PAX_SEGMEXEC;
28309 + else
28310 + pax_flags &= ~MF_PAX_PAGEEXEC;
28311 + }
28312 +#endif
28313 +
28314 +#ifdef CONFIG_PAX_EMUTRAMP
28315 + if (elf_phdata->p_flags & PF_EMUTRAMP)
28316 + pax_flags |= MF_PAX_EMUTRAMP;
28317 +#endif
28318 +
28319 +#ifdef CONFIG_PAX_MPROTECT
28320 + if (elf_phdata->p_flags & PF_MPROTECT)
28321 + pax_flags |= MF_PAX_MPROTECT;
28322 +#endif
28323 +
28324 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
28325 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
28326 + pax_flags |= MF_PAX_RANDMMAP;
28327 +#endif
28328 +
28329 + return pax_flags;
28330 +}
28331 +#endif
28332 +
28333 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
28334 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
28335 +{
28336 + unsigned long pax_flags = 0UL;
28337 +
28338 +#ifdef CONFIG_PAX_PAGEEXEC
28339 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
28340 + pax_flags |= MF_PAX_PAGEEXEC;
28341 +#endif
28342 +
28343 +#ifdef CONFIG_PAX_SEGMEXEC
28344 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
28345 + pax_flags |= MF_PAX_SEGMEXEC;
28346 +#endif
28347 +
28348 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
28349 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
28350 + if ((__supported_pte_mask & _PAGE_NX))
28351 + pax_flags &= ~MF_PAX_SEGMEXEC;
28352 + else
28353 + pax_flags &= ~MF_PAX_PAGEEXEC;
28354 + }
28355 +#endif
28356 +
28357 +#ifdef CONFIG_PAX_EMUTRAMP
28358 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
28359 + pax_flags |= MF_PAX_EMUTRAMP;
28360 +#endif
28361 +
28362 +#ifdef CONFIG_PAX_MPROTECT
28363 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
28364 + pax_flags |= MF_PAX_MPROTECT;
28365 +#endif
28366 +
28367 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
28368 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
28369 + pax_flags |= MF_PAX_RANDMMAP;
28370 +#endif
28371 +
28372 + return pax_flags;
28373 +}
28374 +#endif
28375 +
28376 +#ifdef CONFIG_PAX_EI_PAX
28377 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
28378 +{
28379 + unsigned long pax_flags = 0UL;
28380 +
28381 +#ifdef CONFIG_PAX_PAGEEXEC
28382 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
28383 + pax_flags |= MF_PAX_PAGEEXEC;
28384 +#endif
28385 +
28386 +#ifdef CONFIG_PAX_SEGMEXEC
28387 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
28388 + pax_flags |= MF_PAX_SEGMEXEC;
28389 +#endif
28390 +
28391 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
28392 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
28393 + if ((__supported_pte_mask & _PAGE_NX))
28394 + pax_flags &= ~MF_PAX_SEGMEXEC;
28395 + else
28396 + pax_flags &= ~MF_PAX_PAGEEXEC;
28397 + }
28398 +#endif
28399 +
28400 +#ifdef CONFIG_PAX_EMUTRAMP
28401 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
28402 + pax_flags |= MF_PAX_EMUTRAMP;
28403 +#endif
28404 +
28405 +#ifdef CONFIG_PAX_MPROTECT
28406 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
28407 + pax_flags |= MF_PAX_MPROTECT;
28408 +#endif
28409 +
28410 +#ifdef CONFIG_PAX_ASLR
28411 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
28412 + pax_flags |= MF_PAX_RANDMMAP;
28413 +#endif
28414 +
28415 + return pax_flags;
28416 +}
28417 +#endif
28418 +
28419 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
28420 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
28421 +{
28422 + unsigned long pax_flags = 0UL;
28423 +
28424 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
28425 + unsigned long i;
28426 +#endif
28427 +
28428 +#ifdef CONFIG_PAX_EI_PAX
28429 + pax_flags = pax_parse_ei_pax(elf_ex);
28430 +#endif
28431 +
28432 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
28433 + for (i = 0UL; i < elf_ex->e_phnum; i++)
28434 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
28435 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
28436 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
28437 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
28438 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
28439 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
28440 + return -EINVAL;
28441 +
28442 +#ifdef CONFIG_PAX_SOFTMODE
28443 + if (pax_softmode)
28444 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
28445 + else
28446 +#endif
28447 +
28448 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
28449 + break;
28450 + }
28451 +#endif
28452 +
28453 + if (0 > pax_check_flags(&pax_flags))
28454 + return -EINVAL;
28455 +
28456 + current->mm->pax_flags = pax_flags;
28457 + return 0;
28458 +}
28459 +#endif
28460 +
28461 /*
28462 * These are the functions used to load ELF style executables and shared
28463 * libraries. There is no binary dependent code anywhere else.
28464 @@ -549,6 +740,11 @@ static unsigned long randomize_stack_top
28465 {
28466 unsigned int random_variable = 0;
28467
28468 +#ifdef CONFIG_PAX_RANDUSTACK
28469 + if (randomize_va_space)
28470 + return stack_top - current->mm->delta_stack;
28471 +#endif
28472 +
28473 if ((current->flags & PF_RANDOMIZE) &&
28474 !(current->personality & ADDR_NO_RANDOMIZE)) {
28475 random_variable = get_random_int() & STACK_RND_MASK;
28476 @@ -567,7 +763,7 @@ static int load_elf_binary(struct linux_
28477 unsigned long load_addr = 0, load_bias = 0;
28478 int load_addr_set = 0;
28479 char * elf_interpreter = NULL;
28480 - unsigned long error;
28481 + unsigned long error = 0;
28482 struct elf_phdr *elf_ppnt, *elf_phdata;
28483 unsigned long elf_bss, elf_brk;
28484 int retval, i;
28485 @@ -577,11 +773,11 @@ static int load_elf_binary(struct linux_
28486 unsigned long start_code, end_code, start_data, end_data;
28487 unsigned long reloc_func_desc = 0;
28488 int executable_stack = EXSTACK_DEFAULT;
28489 - unsigned long def_flags = 0;
28490 struct {
28491 struct elfhdr elf_ex;
28492 struct elfhdr interp_elf_ex;
28493 } *loc;
28494 + unsigned long pax_task_size = TASK_SIZE;
28495
28496 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
28497 if (!loc) {
28498 @@ -719,11 +915,80 @@ static int load_elf_binary(struct linux_
28499
28500 /* OK, This is the point of no return */
28501 current->flags &= ~PF_FORKNOEXEC;
28502 - current->mm->def_flags = def_flags;
28503 +
28504 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
28505 + current->mm->pax_flags = 0UL;
28506 +#endif
28507 +
28508 +#ifdef CONFIG_PAX_DLRESOLVE
28509 + current->mm->call_dl_resolve = 0UL;
28510 +#endif
28511 +
28512 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
28513 + current->mm->call_syscall = 0UL;
28514 +#endif
28515 +
28516 +#ifdef CONFIG_PAX_ASLR
28517 + current->mm->delta_mmap = 0UL;
28518 + current->mm->delta_stack = 0UL;
28519 +#endif
28520 +
28521 + current->mm->def_flags = 0;
28522 +
28523 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
28524 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
28525 + send_sig(SIGKILL, current, 0);
28526 + goto out_free_dentry;
28527 + }
28528 +#endif
28529 +
28530 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
28531 + pax_set_initial_flags(bprm);
28532 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
28533 + if (pax_set_initial_flags_func)
28534 + (pax_set_initial_flags_func)(bprm);
28535 +#endif
28536 +
28537 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
28538 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
28539 + current->mm->context.user_cs_limit = PAGE_SIZE;
28540 + current->mm->def_flags |= VM_PAGEEXEC;
28541 + }
28542 +#endif
28543 +
28544 +#ifdef CONFIG_PAX_SEGMEXEC
28545 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
28546 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
28547 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
28548 + pax_task_size = SEGMEXEC_TASK_SIZE;
28549 + }
28550 +#endif
28551 +
28552 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
28553 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
28554 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
28555 + put_cpu();
28556 + }
28557 +#endif
28558
28559 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
28560 may depend on the personality. */
28561 SET_PERSONALITY(loc->elf_ex);
28562 +
28563 +#ifdef CONFIG_PAX_ASLR
28564 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
28565 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
28566 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
28567 + }
28568 +#endif
28569 +
28570 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28571 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
28572 + executable_stack = EXSTACK_DISABLE_X;
28573 + current->personality &= ~READ_IMPLIES_EXEC;
28574 + } else
28575 +#endif
28576 +
28577 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
28578 current->personality |= READ_IMPLIES_EXEC;
28579
28580 @@ -805,6 +1070,20 @@ static int load_elf_binary(struct linux_
28581 #else
28582 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
28583 #endif
28584 +
28585 +#ifdef CONFIG_PAX_RANDMMAP
28586 + /* PaX: randomize base address at the default exe base if requested */
28587 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
28588 +#ifdef CONFIG_SPARC64
28589 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
28590 +#else
28591 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
28592 +#endif
28593 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
28594 + elf_flags |= MAP_FIXED;
28595 + }
28596 +#endif
28597 +
28598 }
28599
28600 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
28601 @@ -837,9 +1116,9 @@ static int load_elf_binary(struct linux_
28602 * allowed task size. Note that p_filesz must always be
28603 * <= p_memsz so it is only necessary to check p_memsz.
28604 */
28605 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
28606 - elf_ppnt->p_memsz > TASK_SIZE ||
28607 - TASK_SIZE - elf_ppnt->p_memsz < k) {
28608 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
28609 + elf_ppnt->p_memsz > pax_task_size ||
28610 + pax_task_size - elf_ppnt->p_memsz < k) {
28611 /* set_brk can never work. Avoid overflows. */
28612 send_sig(SIGKILL, current, 0);
28613 retval = -EINVAL;
28614 @@ -867,6 +1146,11 @@ static int load_elf_binary(struct linux_
28615 start_data += load_bias;
28616 end_data += load_bias;
28617
28618 +#ifdef CONFIG_PAX_RANDMMAP
28619 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28620 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
28621 +#endif
28622 +
28623 /* Calling set_brk effectively mmaps the pages that we need
28624 * for the bss and break sections. We must do this before
28625 * mapping in the interpreter, to make sure it doesn't wind
28626 @@ -878,9 +1162,11 @@ static int load_elf_binary(struct linux_
28627 goto out_free_dentry;
28628 }
28629 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
28630 - send_sig(SIGSEGV, current, 0);
28631 - retval = -EFAULT; /* Nobody gets to see this, but.. */
28632 - goto out_free_dentry;
28633 + /*
28634 + * This bss-zeroing can fail if the ELF
28635 + * file specifies odd protections. So
28636 + * we don't check the return value
28637 + */
28638 }
28639
28640 if (elf_interpreter) {
28641 @@ -1091,7 +1377,7 @@ out:
28642 * Decide what to dump of a segment, part, all or none.
28643 */
28644 static unsigned long vma_dump_size(struct vm_area_struct *vma,
28645 - unsigned long mm_flags)
28646 + unsigned long mm_flags, long signr)
28647 {
28648 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
28649
28650 @@ -1125,7 +1411,7 @@ static unsigned long vma_dump_size(struc
28651 if (vma->vm_file == NULL)
28652 return 0;
28653
28654 - if (FILTER(MAPPED_PRIVATE))
28655 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
28656 goto whole;
28657
28658 /*
28659 @@ -1347,9 +1633,9 @@ static void fill_auxv_note(struct memelf
28660 {
28661 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
28662 int i = 0;
28663 - do
28664 + do {
28665 i += 2;
28666 - while (auxv[i - 2] != AT_NULL);
28667 + } while (auxv[i - 2] != AT_NULL);
28668 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
28669 }
28670
28671 @@ -1855,14 +2141,14 @@ static void fill_extnum_info(struct elfh
28672 }
28673
28674 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
28675 - unsigned long mm_flags)
28676 + struct coredump_params *cprm)
28677 {
28678 struct vm_area_struct *vma;
28679 size_t size = 0;
28680
28681 for (vma = first_vma(current, gate_vma); vma != NULL;
28682 vma = next_vma(vma, gate_vma))
28683 - size += vma_dump_size(vma, mm_flags);
28684 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
28685 return size;
28686 }
28687
28688 @@ -1956,7 +2242,7 @@ static int elf_core_dump(struct coredump
28689
28690 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
28691
28692 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
28693 + offset += elf_core_vma_data_size(gate_vma, cprm);
28694 offset += elf_core_extra_data_size();
28695 e_shoff = offset;
28696
28697 @@ -1970,10 +2256,12 @@ static int elf_core_dump(struct coredump
28698 offset = dataoff;
28699
28700 size += sizeof(*elf);
28701 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
28702 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
28703 goto end_coredump;
28704
28705 size += sizeof(*phdr4note);
28706 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
28707 if (size > cprm->limit
28708 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
28709 goto end_coredump;
28710 @@ -1987,7 +2275,7 @@ static int elf_core_dump(struct coredump
28711 phdr.p_offset = offset;
28712 phdr.p_vaddr = vma->vm_start;
28713 phdr.p_paddr = 0;
28714 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
28715 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
28716 phdr.p_memsz = vma->vm_end - vma->vm_start;
28717 offset += phdr.p_filesz;
28718 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
28719 @@ -1998,6 +2286,7 @@ static int elf_core_dump(struct coredump
28720 phdr.p_align = ELF_EXEC_PAGESIZE;
28721
28722 size += sizeof(phdr);
28723 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
28724 if (size > cprm->limit
28725 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
28726 goto end_coredump;
28727 @@ -2022,7 +2311,7 @@ static int elf_core_dump(struct coredump
28728 unsigned long addr;
28729 unsigned long end;
28730
28731 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
28732 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
28733
28734 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
28735 struct page *page;
28736 @@ -2031,6 +2320,7 @@ static int elf_core_dump(struct coredump
28737 page = get_dump_page(addr);
28738 if (page) {
28739 void *kaddr = kmap(page);
28740 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
28741 stop = ((size += PAGE_SIZE) > cprm->limit) ||
28742 !dump_write(cprm->file, kaddr,
28743 PAGE_SIZE);
28744 @@ -2048,6 +2338,7 @@ static int elf_core_dump(struct coredump
28745
28746 if (e_phnum == PN_XNUM) {
28747 size += sizeof(*shdr4extnum);
28748 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
28749 if (size > cprm->limit
28750 || !dump_write(cprm->file, shdr4extnum,
28751 sizeof(*shdr4extnum)))
28752 @@ -2068,6 +2359,97 @@ out:
28753
28754 #endif /* CONFIG_ELF_CORE */
28755
28756 +#ifdef CONFIG_PAX_MPROTECT
28757 +/* PaX: non-PIC ELF libraries need relocations on their executable segments,
28758 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
28759 + * we'll remove VM_MAYWRITE for good on RELRO segments.
28760 + *
28761 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
28762 + * basis because we want to allow the common case and not the special ones.
28763 + */
28764 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
28765 +{
28766 + struct elfhdr elf_h;
28767 + struct elf_phdr elf_p;
28768 + unsigned long i;
28769 + unsigned long oldflags;
28770 + bool is_textrel_rw, is_textrel_rx, is_relro;
28771 +
28772 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
28773 + return;
28774 +
28775 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
28776 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
28777 +
28778 +#ifdef CONFIG_PAX_NOELFRELOCS
28779 + is_textrel_rw = false;
28780 + is_textrel_rx = false;
28781 +#else
28782 + /* possible TEXTREL */
28783 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
28784 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
28785 +#endif
28786 +
28787 + /* possible RELRO */
28788 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
28789 +
28790 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
28791 + return;
28792 +
28793 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
28794 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
28795 +
28796 +#ifdef CONFIG_PAX_ETEXECRELOCS
28797 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
28798 +#else
28799 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
28800 +#endif
28801 +
28802 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
28803 + !elf_check_arch(&elf_h) ||
28804 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
28805 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
28806 + return;
28807 +
28808 + for (i = 0UL; i < elf_h.e_phnum; i++) {
28809 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
28810 + return;
28811 + switch (elf_p.p_type) {
28812 + case PT_DYNAMIC:
28813 + if (!is_textrel_rw && !is_textrel_rx)
28814 + continue;
28815 + i = 0UL;
28816 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
28817 + elf_dyn dyn;
28818 +
28819 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
28820 + return;
28821 + if (dyn.d_tag == DT_NULL)
28822 + return;
28823 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
28824 + gr_log_textrel(vma);
28825 + if (is_textrel_rw)
28826 + vma->vm_flags |= VM_MAYWRITE;
28827 + else
28828 + /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
28829 + vma->vm_flags &= ~VM_MAYWRITE;
28830 + return;
28831 + }
28832 + i++;
28833 + }
28834 + return;
28835 +
28836 + case PT_GNU_RELRO:
28837 + if (!is_relro)
28838 + continue;
28839 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
28840 + vma->vm_flags &= ~VM_MAYWRITE;
28841 + return;
28842 + }
28843 + }
28844 +}
28845 +#endif
28846 +
28847 static int __init init_elf_binfmt(void)
28848 {
28849 return register_binfmt(&elf_format);
28850 diff -urNp linux-2.6.34.1/fs/binfmt_flat.c linux-2.6.34.1/fs/binfmt_flat.c
28851 --- linux-2.6.34.1/fs/binfmt_flat.c 2010-07-05 14:24:10.000000000 -0400
28852 +++ linux-2.6.34.1/fs/binfmt_flat.c 2010-07-07 09:04:54.000000000 -0400
28853 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
28854 realdatastart = (unsigned long) -ENOMEM;
28855 printk("Unable to allocate RAM for process data, errno %d\n",
28856 (int)-realdatastart);
28857 + down_write(&current->mm->mmap_sem);
28858 do_munmap(current->mm, textpos, text_len);
28859 + up_write(&current->mm->mmap_sem);
28860 ret = realdatastart;
28861 goto err;
28862 }
28863 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
28864 }
28865 if (IS_ERR_VALUE(result)) {
28866 printk("Unable to read data+bss, errno %d\n", (int)-result);
28867 + down_write(&current->mm->mmap_sem);
28868 do_munmap(current->mm, textpos, text_len);
28869 do_munmap(current->mm, realdatastart, data_len + extra);
28870 + up_write(&current->mm->mmap_sem);
28871 ret = result;
28872 goto err;
28873 }
28874 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
28875 }
28876 if (IS_ERR_VALUE(result)) {
28877 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
28878 + down_write(&current->mm->mmap_sem);
28879 do_munmap(current->mm, textpos, text_len + data_len + extra +
28880 MAX_SHARED_LIBS * sizeof(unsigned long));
28881 + up_write(&current->mm->mmap_sem);
28882 ret = result;
28883 goto err;
28884 }
28885 diff -urNp linux-2.6.34.1/fs/binfmt_misc.c linux-2.6.34.1/fs/binfmt_misc.c
28886 --- linux-2.6.34.1/fs/binfmt_misc.c 2010-07-05 14:24:10.000000000 -0400
28887 +++ linux-2.6.34.1/fs/binfmt_misc.c 2010-07-07 09:04:54.000000000 -0400
28888 @@ -693,7 +693,7 @@ static int bm_fill_super(struct super_bl
28889 static struct tree_descr bm_files[] = {
28890 [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
28891 [3] = {"register", &bm_register_operations, S_IWUSR},
28892 - /* last one */ {""}
28893 + /* last one */ {"", NULL, 0}
28894 };
28895 int err = simple_fill_super(sb, 0x42494e4d, bm_files);
28896 if (!err)
28897 diff -urNp linux-2.6.34.1/fs/bio.c linux-2.6.34.1/fs/bio.c
28898 --- linux-2.6.34.1/fs/bio.c 2010-07-05 14:24:10.000000000 -0400
28899 +++ linux-2.6.34.1/fs/bio.c 2010-07-07 09:04:54.000000000 -0400
28900 @@ -1213,7 +1213,7 @@ static void bio_copy_kern_endio(struct b
28901 const int read = bio_data_dir(bio) == READ;
28902 struct bio_map_data *bmd = bio->bi_private;
28903 int i;
28904 - char *p = bmd->sgvecs[0].iov_base;
28905 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
28906
28907 __bio_for_each_segment(bvec, bio, i, 0) {
28908 char *addr = page_address(bvec->bv_page);
28909 diff -urNp linux-2.6.34.1/fs/block_dev.c linux-2.6.34.1/fs/block_dev.c
28910 --- linux-2.6.34.1/fs/block_dev.c 2010-07-05 14:24:10.000000000 -0400
28911 +++ linux-2.6.34.1/fs/block_dev.c 2010-07-07 09:04:54.000000000 -0400
28912 @@ -681,7 +681,7 @@ int bd_claim(struct block_device *bdev,
28913 else if (bdev->bd_contains == bdev)
28914 res = 0; /* is a whole device which isn't held */
28915
28916 - else if (bdev->bd_contains->bd_holder == bd_claim)
28917 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
28918 res = 0; /* is a partition of a device that is being partitioned */
28919 else if (bdev->bd_contains->bd_holder != NULL)
28920 res = -EBUSY; /* is a partition of a held device */
28921 diff -urNp linux-2.6.34.1/fs/btrfs/ctree.c linux-2.6.34.1/fs/btrfs/ctree.c
28922 --- linux-2.6.34.1/fs/btrfs/ctree.c 2010-07-05 14:24:10.000000000 -0400
28923 +++ linux-2.6.34.1/fs/btrfs/ctree.c 2010-07-07 09:04:54.000000000 -0400
28924 @@ -3650,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
28925
28926 ret = 0;
28927 if (slot == 0) {
28928 - struct btrfs_disk_key disk_key;
28929 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
28930 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
28931 }
28932 diff -urNp linux-2.6.34.1/fs/btrfs/disk-io.c linux-2.6.34.1/fs/btrfs/disk-io.c
28933 --- linux-2.6.34.1/fs/btrfs/disk-io.c 2010-07-05 14:24:10.000000000 -0400
28934 +++ linux-2.6.34.1/fs/btrfs/disk-io.c 2010-07-07 09:04:54.000000000 -0400
28935 @@ -40,7 +40,7 @@
28936 #include "tree-log.h"
28937 #include "free-space-cache.h"
28938
28939 -static struct extent_io_ops btree_extent_io_ops;
28940 +static const struct extent_io_ops btree_extent_io_ops;
28941 static void end_workqueue_fn(struct btrfs_work *work);
28942 static void free_fs_root(struct btrfs_root *root);
28943
28944 @@ -2603,7 +2603,7 @@ out:
28945 return 0;
28946 }
28947
28948 -static struct extent_io_ops btree_extent_io_ops = {
28949 +static const struct extent_io_ops btree_extent_io_ops = {
28950 .write_cache_pages_lock_hook = btree_lock_page_hook,
28951 .readpage_end_io_hook = btree_readpage_end_io_hook,
28952 .submit_bio_hook = btree_submit_bio_hook,
28953 diff -urNp linux-2.6.34.1/fs/btrfs/extent_io.h linux-2.6.34.1/fs/btrfs/extent_io.h
28954 --- linux-2.6.34.1/fs/btrfs/extent_io.h 2010-07-05 14:24:10.000000000 -0400
28955 +++ linux-2.6.34.1/fs/btrfs/extent_io.h 2010-07-07 09:04:55.000000000 -0400
28956 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
28957 struct bio *bio, int mirror_num,
28958 unsigned long bio_flags);
28959 struct extent_io_ops {
28960 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
28961 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
28962 u64 start, u64 end, int *page_started,
28963 unsigned long *nr_written);
28964 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
28965 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
28966 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
28967 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
28968 extent_submit_bio_hook_t *submit_bio_hook;
28969 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
28970 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
28971 size_t size, struct bio *bio,
28972 unsigned long bio_flags);
28973 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
28974 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
28975 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
28976 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
28977 u64 start, u64 end,
28978 struct extent_state *state);
28979 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
28980 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
28981 u64 start, u64 end,
28982 struct extent_state *state);
28983 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
28984 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
28985 struct extent_state *state);
28986 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
28987 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
28988 struct extent_state *state, int uptodate);
28989 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
28990 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
28991 unsigned long old, unsigned long bits);
28992 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
28993 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
28994 unsigned long bits);
28995 - int (*merge_extent_hook)(struct inode *inode,
28996 + int (* const merge_extent_hook)(struct inode *inode,
28997 struct extent_state *new,
28998 struct extent_state *other);
28999 - int (*split_extent_hook)(struct inode *inode,
29000 + int (* const split_extent_hook)(struct inode *inode,
29001 struct extent_state *orig, u64 split);
29002 - int (*write_cache_pages_lock_hook)(struct page *page);
29003 + int (* const write_cache_pages_lock_hook)(struct page *page);
29004 };
29005
29006 struct extent_io_tree {
29007 @@ -88,7 +88,7 @@ struct extent_io_tree {
29008 u64 dirty_bytes;
29009 spinlock_t lock;
29010 spinlock_t buffer_lock;
29011 - struct extent_io_ops *ops;
29012 + const struct extent_io_ops *ops;
29013 };
29014
29015 struct extent_state {
29016 diff -urNp linux-2.6.34.1/fs/btrfs/free-space-cache.c linux-2.6.34.1/fs/btrfs/free-space-cache.c
29017 --- linux-2.6.34.1/fs/btrfs/free-space-cache.c 2010-07-05 14:24:10.000000000 -0400
29018 +++ linux-2.6.34.1/fs/btrfs/free-space-cache.c 2010-07-07 09:04:55.000000000 -0400
29019 @@ -1075,8 +1075,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
29020
29021 while(1) {
29022 if (entry->bytes < bytes || entry->offset < min_start) {
29023 - struct rb_node *node;
29024 -
29025 node = rb_next(&entry->offset_index);
29026 if (!node)
29027 break;
29028 @@ -1227,7 +1225,7 @@ again:
29029 */
29030 while (entry->bitmap || found_bitmap ||
29031 (!entry->bitmap && entry->bytes < min_bytes)) {
29032 - struct rb_node *node = rb_next(&entry->offset_index);
29033 + node = rb_next(&entry->offset_index);
29034
29035 if (entry->bitmap && entry->bytes > bytes + empty_size) {
29036 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
29037 diff -urNp linux-2.6.34.1/fs/btrfs/inode.c linux-2.6.34.1/fs/btrfs/inode.c
29038 --- linux-2.6.34.1/fs/btrfs/inode.c 2010-07-05 14:24:10.000000000 -0400
29039 +++ linux-2.6.34.1/fs/btrfs/inode.c 2010-07-07 09:04:55.000000000 -0400
29040 @@ -64,7 +64,7 @@ static const struct inode_operations btr
29041 static const struct address_space_operations btrfs_aops;
29042 static const struct address_space_operations btrfs_symlink_aops;
29043 static const struct file_operations btrfs_dir_file_operations;
29044 -static struct extent_io_ops btrfs_extent_io_ops;
29045 +static const struct extent_io_ops btrfs_extent_io_ops;
29046
29047 static struct kmem_cache *btrfs_inode_cachep;
29048 struct kmem_cache *btrfs_trans_handle_cachep;
29049 @@ -5956,7 +5956,7 @@ static const struct file_operations btrf
29050 .fsync = btrfs_sync_file,
29051 };
29052
29053 -static struct extent_io_ops btrfs_extent_io_ops = {
29054 +static const struct extent_io_ops btrfs_extent_io_ops = {
29055 .fill_delalloc = run_delalloc_range,
29056 .submit_bio_hook = btrfs_submit_bio_hook,
29057 .merge_bio_hook = btrfs_merge_bio_hook,
29058 diff -urNp linux-2.6.34.1/fs/buffer.c linux-2.6.34.1/fs/buffer.c
29059 --- linux-2.6.34.1/fs/buffer.c 2010-07-05 14:24:10.000000000 -0400
29060 +++ linux-2.6.34.1/fs/buffer.c 2010-07-07 09:04:55.000000000 -0400
29061 @@ -25,6 +25,7 @@
29062 #include <linux/percpu.h>
29063 #include <linux/slab.h>
29064 #include <linux/capability.h>
29065 +#include <linux/security.h>
29066 #include <linux/blkdev.h>
29067 #include <linux/file.h>
29068 #include <linux/quotaops.h>
29069 diff -urNp linux-2.6.34.1/fs/cachefiles/bind.c linux-2.6.34.1/fs/cachefiles/bind.c
29070 --- linux-2.6.34.1/fs/cachefiles/bind.c 2010-07-05 14:24:10.000000000 -0400
29071 +++ linux-2.6.34.1/fs/cachefiles/bind.c 2010-07-07 09:04:55.000000000 -0400
29072 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
29073 args);
29074
29075 /* start by checking things over */
29076 - ASSERT(cache->fstop_percent >= 0 &&
29077 - cache->fstop_percent < cache->fcull_percent &&
29078 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
29079 cache->fcull_percent < cache->frun_percent &&
29080 cache->frun_percent < 100);
29081
29082 - ASSERT(cache->bstop_percent >= 0 &&
29083 - cache->bstop_percent < cache->bcull_percent &&
29084 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
29085 cache->bcull_percent < cache->brun_percent &&
29086 cache->brun_percent < 100);
29087
29088 diff -urNp linux-2.6.34.1/fs/cachefiles/daemon.c linux-2.6.34.1/fs/cachefiles/daemon.c
29089 --- linux-2.6.34.1/fs/cachefiles/daemon.c 2010-07-05 14:24:10.000000000 -0400
29090 +++ linux-2.6.34.1/fs/cachefiles/daemon.c 2010-07-07 09:04:55.000000000 -0400
29091 @@ -195,7 +195,7 @@ static ssize_t cachefiles_daemon_read(st
29092 if (n > buflen)
29093 return -EMSGSIZE;
29094
29095 - if (copy_to_user(_buffer, buffer, n) != 0)
29096 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
29097 return -EFAULT;
29098
29099 return n;
29100 @@ -221,7 +221,7 @@ static ssize_t cachefiles_daemon_write(s
29101 if (test_bit(CACHEFILES_DEAD, &cache->flags))
29102 return -EIO;
29103
29104 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
29105 + if (datalen > PAGE_SIZE - 1)
29106 return -EOPNOTSUPP;
29107
29108 /* drag the command string into the kernel so we can parse it */
29109 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
29110 if (args[0] != '%' || args[1] != '\0')
29111 return -EINVAL;
29112
29113 - if (fstop < 0 || fstop >= cache->fcull_percent)
29114 + if (fstop >= cache->fcull_percent)
29115 return cachefiles_daemon_range_error(cache, args);
29116
29117 cache->fstop_percent = fstop;
29118 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
29119 if (args[0] != '%' || args[1] != '\0')
29120 return -EINVAL;
29121
29122 - if (bstop < 0 || bstop >= cache->bcull_percent)
29123 + if (bstop >= cache->bcull_percent)
29124 return cachefiles_daemon_range_error(cache, args);
29125
29126 cache->bstop_percent = bstop;
29127 diff -urNp linux-2.6.34.1/fs/cachefiles/rdwr.c linux-2.6.34.1/fs/cachefiles/rdwr.c
29128 --- linux-2.6.34.1/fs/cachefiles/rdwr.c 2010-07-05 14:24:10.000000000 -0400
29129 +++ linux-2.6.34.1/fs/cachefiles/rdwr.c 2010-07-07 09:04:55.000000000 -0400
29130 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
29131 old_fs = get_fs();
29132 set_fs(KERNEL_DS);
29133 ret = file->f_op->write(
29134 - file, (const void __user *) data, len, &pos);
29135 + file, (__force const void __user *) data, len, &pos);
29136 set_fs(old_fs);
29137 kunmap(page);
29138 if (ret != len)
29139 diff -urNp linux-2.6.34.1/fs/cifs/cifs_uniupr.h linux-2.6.34.1/fs/cifs/cifs_uniupr.h
29140 --- linux-2.6.34.1/fs/cifs/cifs_uniupr.h 2010-07-05 14:24:10.000000000 -0400
29141 +++ linux-2.6.34.1/fs/cifs/cifs_uniupr.h 2010-07-07 09:04:55.000000000 -0400
29142 @@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa
29143 {0x0490, 0x04cc, UniCaseRangeU0490},
29144 {0x1e00, 0x1ffc, UniCaseRangeU1e00},
29145 {0xff40, 0xff5a, UniCaseRangeUff40},
29146 - {0}
29147 + {0, 0, NULL}
29148 };
29149 #endif
29150
29151 diff -urNp linux-2.6.34.1/fs/cifs/link.c linux-2.6.34.1/fs/cifs/link.c
29152 --- linux-2.6.34.1/fs/cifs/link.c 2010-07-05 14:24:10.000000000 -0400
29153 +++ linux-2.6.34.1/fs/cifs/link.c 2010-07-07 09:04:55.000000000 -0400
29154 @@ -216,7 +216,7 @@ cifs_symlink(struct inode *inode, struct
29155
29156 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
29157 {
29158 - char *p = nd_get_link(nd);
29159 + const char *p = nd_get_link(nd);
29160 if (!IS_ERR(p))
29161 kfree(p);
29162 }
29163 diff -urNp linux-2.6.34.1/fs/compat.c linux-2.6.34.1/fs/compat.c
29164 --- linux-2.6.34.1/fs/compat.c 2010-07-05 14:24:10.000000000 -0400
29165 +++ linux-2.6.34.1/fs/compat.c 2010-07-07 09:04:55.000000000 -0400
29166 @@ -1433,14 +1433,12 @@ static int compat_copy_strings(int argc,
29167 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
29168 struct page *page;
29169
29170 -#ifdef CONFIG_STACK_GROWSUP
29171 ret = expand_stack_downwards(bprm->vma, pos);
29172 if (ret < 0) {
29173 /* We've exceed the stack rlimit. */
29174 ret = -E2BIG;
29175 goto out;
29176 }
29177 -#endif
29178 ret = get_user_pages(current, bprm->mm, pos,
29179 1, 1, 1, &page, NULL);
29180 if (ret <= 0) {
29181 @@ -1486,6 +1484,11 @@ int compat_do_execve(char * filename,
29182 compat_uptr_t __user *envp,
29183 struct pt_regs * regs)
29184 {
29185 +#ifdef CONFIG_GRKERNSEC
29186 + struct file *old_exec_file;
29187 + struct acl_subject_label *old_acl;
29188 + struct rlimit old_rlim[RLIM_NLIMITS];
29189 +#endif
29190 struct linux_binprm *bprm;
29191 struct file *file;
29192 struct files_struct *displaced;
29193 @@ -1522,6 +1525,14 @@ int compat_do_execve(char * filename,
29194 bprm->filename = filename;
29195 bprm->interp = filename;
29196
29197 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
29198 + retval = -EAGAIN;
29199 + if (gr_handle_nproc())
29200 + goto out_file;
29201 + retval = -EACCES;
29202 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
29203 + goto out_file;
29204 +
29205 retval = bprm_mm_init(bprm);
29206 if (retval)
29207 goto out_file;
29208 @@ -1551,9 +1562,40 @@ int compat_do_execve(char * filename,
29209 if (retval < 0)
29210 goto out;
29211
29212 + if (!gr_tpe_allow(file)) {
29213 + retval = -EACCES;
29214 + goto out;
29215 + }
29216 +
29217 + if (gr_check_crash_exec(file)) {
29218 + retval = -EACCES;
29219 + goto out;
29220 + }
29221 +
29222 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
29223 +
29224 + gr_handle_exec_args(bprm, (char __user * __user *)argv);
29225 +
29226 +#ifdef CONFIG_GRKERNSEC
29227 + old_acl = current->acl;
29228 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
29229 + old_exec_file = current->exec_file;
29230 + get_file(file);
29231 + current->exec_file = file;
29232 +#endif
29233 +
29234 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
29235 + bprm->unsafe & LSM_UNSAFE_SHARE);
29236 + if (retval < 0)
29237 + goto out_fail;
29238 +
29239 retval = search_binary_handler(bprm, regs);
29240 if (retval < 0)
29241 - goto out;
29242 + goto out_fail;
29243 +#ifdef CONFIG_GRKERNSEC
29244 + if (old_exec_file)
29245 + fput(old_exec_file);
29246 +#endif
29247
29248 /* execve succeeded */
29249 current->fs->in_exec = 0;
29250 @@ -1564,6 +1606,14 @@ int compat_do_execve(char * filename,
29251 put_files_struct(displaced);
29252 return retval;
29253
29254 +out_fail:
29255 +#ifdef CONFIG_GRKERNSEC
29256 + current->acl = old_acl;
29257 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
29258 + fput(current->exec_file);
29259 + current->exec_file = old_exec_file;
29260 +#endif
29261 +
29262 out:
29263 if (bprm->mm)
29264 mmput(bprm->mm);
29265 diff -urNp linux-2.6.34.1/fs/compat_binfmt_elf.c linux-2.6.34.1/fs/compat_binfmt_elf.c
29266 --- linux-2.6.34.1/fs/compat_binfmt_elf.c 2010-07-05 14:24:10.000000000 -0400
29267 +++ linux-2.6.34.1/fs/compat_binfmt_elf.c 2010-07-07 09:04:55.000000000 -0400
29268 @@ -30,11 +30,13 @@
29269 #undef elf_phdr
29270 #undef elf_shdr
29271 #undef elf_note
29272 +#undef elf_dyn
29273 #undef elf_addr_t
29274 #define elfhdr elf32_hdr
29275 #define elf_phdr elf32_phdr
29276 #define elf_shdr elf32_shdr
29277 #define elf_note elf32_note
29278 +#define elf_dyn Elf32_Dyn
29279 #define elf_addr_t Elf32_Addr
29280
29281 /*
29282 diff -urNp linux-2.6.34.1/fs/debugfs/inode.c linux-2.6.34.1/fs/debugfs/inode.c
29283 --- linux-2.6.34.1/fs/debugfs/inode.c 2010-07-05 14:24:10.000000000 -0400
29284 +++ linux-2.6.34.1/fs/debugfs/inode.c 2010-07-07 09:04:55.000000000 -0400
29285 @@ -129,7 +129,7 @@ static inline int debugfs_positive(struc
29286
29287 static int debug_fill_super(struct super_block *sb, void *data, int silent)
29288 {
29289 - static struct tree_descr debug_files[] = {{""}};
29290 + static struct tree_descr debug_files[] = {{"", NULL, 0}};
29291
29292 return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
29293 }
29294 diff -urNp linux-2.6.34.1/fs/dlm/lockspace.c linux-2.6.34.1/fs/dlm/lockspace.c
29295 --- linux-2.6.34.1/fs/dlm/lockspace.c 2010-07-05 14:24:10.000000000 -0400
29296 +++ linux-2.6.34.1/fs/dlm/lockspace.c 2010-07-07 09:04:55.000000000 -0400
29297 @@ -200,7 +200,7 @@ static int dlm_uevent(struct kset *kset,
29298 return 0;
29299 }
29300
29301 -static struct kset_uevent_ops dlm_uevent_ops = {
29302 +static const struct kset_uevent_ops dlm_uevent_ops = {
29303 .uevent = dlm_uevent,
29304 };
29305
29306 diff -urNp linux-2.6.34.1/fs/ecryptfs/inode.c linux-2.6.34.1/fs/ecryptfs/inode.c
29307 --- linux-2.6.34.1/fs/ecryptfs/inode.c 2010-07-05 14:24:10.000000000 -0400
29308 +++ linux-2.6.34.1/fs/ecryptfs/inode.c 2010-07-07 09:04:55.000000000 -0400
29309 @@ -666,7 +666,7 @@ static int ecryptfs_readlink_lower(struc
29310 old_fs = get_fs();
29311 set_fs(get_ds());
29312 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
29313 - (char __user *)lower_buf,
29314 + (__force char __user *)lower_buf,
29315 lower_bufsiz);
29316 set_fs(old_fs);
29317 if (rc < 0)
29318 @@ -712,7 +712,7 @@ static void *ecryptfs_follow_link(struct
29319 }
29320 old_fs = get_fs();
29321 set_fs(get_ds());
29322 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
29323 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
29324 set_fs(old_fs);
29325 if (rc < 0) {
29326 kfree(buf);
29327 @@ -727,7 +727,7 @@ out:
29328 static void
29329 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
29330 {
29331 - char *buf = nd_get_link(nd);
29332 + const char *buf = nd_get_link(nd);
29333 if (!IS_ERR(buf)) {
29334 /* Free the char* */
29335 kfree(buf);
29336 diff -urNp linux-2.6.34.1/fs/ecryptfs/miscdev.c linux-2.6.34.1/fs/ecryptfs/miscdev.c
29337 --- linux-2.6.34.1/fs/ecryptfs/miscdev.c 2010-07-05 14:24:10.000000000 -0400
29338 +++ linux-2.6.34.1/fs/ecryptfs/miscdev.c 2010-07-07 09:04:55.000000000 -0400
29339 @@ -328,7 +328,7 @@ check_list:
29340 goto out_unlock_msg_ctx;
29341 i = 5;
29342 if (msg_ctx->msg) {
29343 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
29344 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
29345 goto out_unlock_msg_ctx;
29346 i += packet_length_size;
29347 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
29348 diff -urNp linux-2.6.34.1/fs/exec.c linux-2.6.34.1/fs/exec.c
29349 --- linux-2.6.34.1/fs/exec.c 2010-07-05 14:24:10.000000000 -0400
29350 +++ linux-2.6.34.1/fs/exec.c 2010-07-07 09:04:55.000000000 -0400
29351 @@ -55,12 +55,24 @@
29352 #include <linux/fsnotify.h>
29353 #include <linux/fs_struct.h>
29354 #include <linux/pipe_fs_i.h>
29355 +#include <linux/random.h>
29356 +#include <linux/seq_file.h>
29357 +
29358 +#ifdef CONFIG_PAX_REFCOUNT
29359 +#include <linux/kallsyms.h>
29360 +#include <linux/kdebug.h>
29361 +#endif
29362
29363 #include <asm/uaccess.h>
29364 #include <asm/mmu_context.h>
29365 #include <asm/tlb.h>
29366 #include "internal.h"
29367
29368 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
29369 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
29370 +EXPORT_SYMBOL(pax_set_initial_flags_func);
29371 +#endif
29372 +
29373 int core_uses_pid;
29374 char core_pattern[CORENAME_MAX_SIZE] = "core";
29375 unsigned int core_pipe_limit;
29376 @@ -114,7 +126,7 @@ SYSCALL_DEFINE1(uselib, const char __use
29377 goto out;
29378
29379 file = do_filp_open(AT_FDCWD, tmp,
29380 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
29381 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
29382 MAY_READ | MAY_EXEC | MAY_OPEN);
29383 putname(tmp);
29384 error = PTR_ERR(file);
29385 @@ -162,18 +174,10 @@ static struct page *get_arg_page(struct
29386 int write)
29387 {
29388 struct page *page;
29389 - int ret;
29390
29391 -#ifdef CONFIG_STACK_GROWSUP
29392 - if (write) {
29393 - ret = expand_stack_downwards(bprm->vma, pos);
29394 - if (ret < 0)
29395 - return NULL;
29396 - }
29397 -#endif
29398 - ret = get_user_pages(current, bprm->mm, pos,
29399 - 1, write, 1, &page, NULL);
29400 - if (ret <= 0)
29401 + if (0 > expand_stack_downwards(bprm->vma, pos))
29402 + return NULL;
29403 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
29404 return NULL;
29405
29406 if (write) {
29407 @@ -245,6 +249,11 @@ static int __bprm_mm_init(struct linux_b
29408 vma->vm_end = STACK_TOP_MAX;
29409 vma->vm_start = vma->vm_end - PAGE_SIZE;
29410 vma->vm_flags = VM_STACK_FLAGS;
29411 +
29412 +#ifdef CONFIG_PAX_SEGMEXEC
29413 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
29414 +#endif
29415 +
29416 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
29417 INIT_LIST_HEAD(&vma->anon_vma_chain);
29418 err = insert_vm_struct(mm, vma);
29419 @@ -254,6 +263,12 @@ static int __bprm_mm_init(struct linux_b
29420 mm->stack_vm = mm->total_vm = 1;
29421 up_write(&mm->mmap_sem);
29422 bprm->p = vma->vm_end - sizeof(void *);
29423 +
29424 +#ifdef CONFIG_PAX_RANDUSTACK
29425 + if (randomize_va_space)
29426 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
29427 +#endif
29428 +
29429 return 0;
29430 err:
29431 up_write(&mm->mmap_sem);
29432 @@ -475,7 +490,7 @@ int copy_strings_kernel(int argc,char **
29433 int r;
29434 mm_segment_t oldfs = get_fs();
29435 set_fs(KERNEL_DS);
29436 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
29437 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
29438 set_fs(oldfs);
29439 return r;
29440 }
29441 @@ -505,7 +520,8 @@ static int shift_arg_pages(struct vm_are
29442 unsigned long new_end = old_end - shift;
29443 struct mmu_gather *tlb;
29444
29445 - BUG_ON(new_start > new_end);
29446 + if (new_start >= new_end || new_start < mmap_min_addr)
29447 + return -EFAULT;
29448
29449 /*
29450 * ensure there are no vmas between where we want to go
29451 @@ -514,6 +530,10 @@ static int shift_arg_pages(struct vm_are
29452 if (vma != find_vma(mm, new_start))
29453 return -EFAULT;
29454
29455 +#ifdef CONFIG_PAX_SEGMEXEC
29456 + BUG_ON(pax_find_mirror_vma(vma));
29457 +#endif
29458 +
29459 /*
29460 * cover the whole range: [new_start, old_end)
29461 */
29462 @@ -604,8 +624,28 @@ int setup_arg_pages(struct linux_binprm
29463 bprm->exec -= stack_shift;
29464
29465 down_write(&mm->mmap_sem);
29466 +
29467 + /* Move stack pages down in memory. */
29468 + if (stack_shift) {
29469 + ret = shift_arg_pages(vma, stack_shift);
29470 + if (ret)
29471 + goto out_unlock;
29472 + }
29473 +
29474 vm_flags = VM_STACK_FLAGS;
29475
29476 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29477 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29478 + vm_flags &= ~VM_EXEC;
29479 +
29480 +#ifdef CONFIG_PAX_MPROTECT
29481 + if (mm->pax_flags & MF_PAX_MPROTECT)
29482 + vm_flags &= ~VM_MAYEXEC;
29483 +#endif
29484 +
29485 + }
29486 +#endif
29487 +
29488 /*
29489 * Adjust stack execute permissions; explicitly enable for
29490 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
29491 @@ -623,13 +663,6 @@ int setup_arg_pages(struct linux_binprm
29492 goto out_unlock;
29493 BUG_ON(prev != vma);
29494
29495 - /* Move stack pages down in memory. */
29496 - if (stack_shift) {
29497 - ret = shift_arg_pages(vma, stack_shift);
29498 - if (ret)
29499 - goto out_unlock;
29500 - }
29501 -
29502 stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
29503 stack_size = vma->vm_end - vma->vm_start;
29504 /*
29505 @@ -666,7 +699,7 @@ struct file *open_exec(const char *name)
29506 int err;
29507
29508 file = do_filp_open(AT_FDCWD, name,
29509 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
29510 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
29511 MAY_EXEC | MAY_OPEN);
29512 if (IS_ERR(file))
29513 goto out;
29514 @@ -703,7 +736,7 @@ int kernel_read(struct file *file, loff_
29515 old_fs = get_fs();
29516 set_fs(get_ds());
29517 /* The cast to a user pointer is valid due to the set_fs() */
29518 - result = vfs_read(file, (void __user *)addr, count, &pos);
29519 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
29520 set_fs(old_fs);
29521 return result;
29522 }
29523 @@ -1121,7 +1154,7 @@ int check_unsafe_exec(struct linux_binpr
29524 }
29525 rcu_read_unlock();
29526
29527 - if (p->fs->users > n_fs) {
29528 + if (atomic_read(&p->fs->users) > n_fs) {
29529 bprm->unsafe |= LSM_UNSAFE_SHARE;
29530 } else {
29531 res = -EAGAIN;
29532 @@ -1317,6 +1350,11 @@ int do_execve(char * filename,
29533 char __user *__user *envp,
29534 struct pt_regs * regs)
29535 {
29536 +#ifdef CONFIG_GRKERNSEC
29537 + struct file *old_exec_file;
29538 + struct acl_subject_label *old_acl;
29539 + struct rlimit old_rlim[RLIM_NLIMITS];
29540 +#endif
29541 struct linux_binprm *bprm;
29542 struct file *file;
29543 struct files_struct *displaced;
29544 @@ -1353,6 +1391,18 @@ int do_execve(char * filename,
29545 bprm->filename = filename;
29546 bprm->interp = filename;
29547
29548 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
29549 +
29550 + if (gr_handle_nproc()) {
29551 + retval = -EAGAIN;
29552 + goto out_file;
29553 + }
29554 +
29555 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
29556 + retval = -EACCES;
29557 + goto out_file;
29558 + }
29559 +
29560 retval = bprm_mm_init(bprm);
29561 if (retval)
29562 goto out_file;
29563 @@ -1382,10 +1432,41 @@ int do_execve(char * filename,
29564 if (retval < 0)
29565 goto out;
29566
29567 + if (!gr_tpe_allow(file)) {
29568 + retval = -EACCES;
29569 + goto out;
29570 + }
29571 +
29572 + if (gr_check_crash_exec(file)) {
29573 + retval = -EACCES;
29574 + goto out;
29575 + }
29576 +
29577 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
29578 +
29579 + gr_handle_exec_args(bprm, argv);
29580 +
29581 +#ifdef CONFIG_GRKERNSEC
29582 + old_acl = current->acl;
29583 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
29584 + old_exec_file = current->exec_file;
29585 + get_file(file);
29586 + current->exec_file = file;
29587 +#endif
29588 +
29589 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
29590 + bprm->unsafe & LSM_UNSAFE_SHARE);
29591 + if (retval < 0)
29592 + goto out_fail;
29593 +
29594 current->flags &= ~PF_KTHREAD;
29595 retval = search_binary_handler(bprm,regs);
29596 if (retval < 0)
29597 - goto out;
29598 + goto out_fail;
29599 +#ifdef CONFIG_GRKERNSEC
29600 + if (old_exec_file)
29601 + fput(old_exec_file);
29602 +#endif
29603
29604 /* execve succeeded */
29605 current->fs->in_exec = 0;
29606 @@ -1396,6 +1477,14 @@ int do_execve(char * filename,
29607 put_files_struct(displaced);
29608 return retval;
29609
29610 +out_fail:
29611 +#ifdef CONFIG_GRKERNSEC
29612 + current->acl = old_acl;
29613 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
29614 + fput(current->exec_file);
29615 + current->exec_file = old_exec_file;
29616 +#endif
29617 +
29618 out:
29619 if (bprm->mm)
29620 mmput (bprm->mm);
29621 @@ -1559,6 +1648,169 @@ out:
29622 return ispipe;
29623 }
29624
29625 +int pax_check_flags(unsigned long *flags)
29626 +{
29627 + int retval = 0;
29628 +
29629 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
29630 + if (*flags & MF_PAX_SEGMEXEC)
29631 + {
29632 + *flags &= ~MF_PAX_SEGMEXEC;
29633 + retval = -EINVAL;
29634 + }
29635 +#endif
29636 +
29637 + if ((*flags & MF_PAX_PAGEEXEC)
29638 +
29639 +#ifdef CONFIG_PAX_PAGEEXEC
29640 + && (*flags & MF_PAX_SEGMEXEC)
29641 +#endif
29642 +
29643 + )
29644 + {
29645 + *flags &= ~MF_PAX_PAGEEXEC;
29646 + retval = -EINVAL;
29647 + }
29648 +
29649 + if ((*flags & MF_PAX_MPROTECT)
29650 +
29651 +#ifdef CONFIG_PAX_MPROTECT
29652 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
29653 +#endif
29654 +
29655 + )
29656 + {
29657 + *flags &= ~MF_PAX_MPROTECT;
29658 + retval = -EINVAL;
29659 + }
29660 +
29661 + if ((*flags & MF_PAX_EMUTRAMP)
29662 +
29663 +#ifdef CONFIG_PAX_EMUTRAMP
29664 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
29665 +#endif
29666 +
29667 + )
29668 + {
29669 + *flags &= ~MF_PAX_EMUTRAMP;
29670 + retval = -EINVAL;
29671 + }
29672 +
29673 + return retval;
29674 +}
29675 +
29676 +EXPORT_SYMBOL(pax_check_flags);
29677 +
29678 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29679 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
29680 +{
29681 + struct task_struct *tsk = current;
29682 + struct mm_struct *mm = current->mm;
29683 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
29684 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
29685 + char *path_exec = NULL;
29686 + char *path_fault = NULL;
29687 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
29688 +
29689 + if (buffer_exec && buffer_fault) {
29690 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
29691 +
29692 + down_read(&mm->mmap_sem);
29693 + vma = mm->mmap;
29694 + while (vma && (!vma_exec || !vma_fault)) {
29695 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
29696 + vma_exec = vma;
29697 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
29698 + vma_fault = vma;
29699 + vma = vma->vm_next;
29700 + }
29701 + if (vma_exec) {
29702 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
29703 + if (IS_ERR(path_exec))
29704 + path_exec = "<path too long>";
29705 + else {
29706 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
29707 + if (path_exec) {
29708 + *path_exec = 0;
29709 + path_exec = buffer_exec;
29710 + } else
29711 + path_exec = "<path too long>";
29712 + }
29713 + }
29714 + if (vma_fault) {
29715 + start = vma_fault->vm_start;
29716 + end = vma_fault->vm_end;
29717 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
29718 + if (vma_fault->vm_file) {
29719 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
29720 + if (IS_ERR(path_fault))
29721 + path_fault = "<path too long>";
29722 + else {
29723 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
29724 + if (path_fault) {
29725 + *path_fault = 0;
29726 + path_fault = buffer_fault;
29727 + } else
29728 + path_fault = "<path too long>";
29729 + }
29730 + } else
29731 + path_fault = "<anonymous mapping>";
29732 + }
29733 + up_read(&mm->mmap_sem);
29734 + }
29735 + if (tsk->signal->curr_ip)
29736 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
29737 + else
29738 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
29739 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
29740 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
29741 + task_uid(tsk), task_euid(tsk), pc, sp);
29742 + free_page((unsigned long)buffer_exec);
29743 + free_page((unsigned long)buffer_fault);
29744 + pax_report_insns(pc, sp);
29745 + do_coredump(SIGKILL, SIGKILL, regs);
29746 +}
29747 +#endif
29748 +
29749 +#ifdef CONFIG_PAX_REFCOUNT
29750 +void pax_report_refcount_overflow(struct pt_regs *regs)
29751 +{
29752 + if (current->signal->curr_ip)
29753 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
29754 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
29755 + else
29756 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
29757 + current->comm, task_pid_nr(current), current_uid(), current_euid());
29758 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
29759 + show_regs(regs);
29760 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
29761 +}
29762 +#endif
29763 +
29764 +#ifdef CONFIG_PAX_USERCOPY
29765 +void pax_report_leak_to_user(const void *ptr, unsigned long len)
29766 +{
29767 + if (current->signal->curr_ip)
29768 + printk(KERN_ERR "PAX: From %pI4: kernel memory leak attempt detected from %p (%lu bytes)\n",
29769 + &current->signal->curr_ip, ptr, len);
29770 + else
29771 + printk(KERN_ERR "PAX: kernel memory leak attempt detected from %p (%lu bytes)\n", ptr, len);
29772 + dump_stack();
29773 + do_group_exit(SIGKILL);
29774 +}
29775 +
29776 +void pax_report_overflow_from_user(const void *ptr, unsigned long len)
29777 +{
29778 + if (current->signal->curr_ip)
29779 + printk(KERN_ERR "PAX: From %pI4: kernel memory overflow attempt detected to %p (%lu bytes)\n",
29780 + &current->signal->curr_ip, ptr, len);
29781 + else
29782 + printk(KERN_ERR "PAX: kernel memory overflow attempt detected to %p (%lu bytes)\n", ptr, len);
29783 + dump_stack();
29784 + do_group_exit(SIGKILL);
29785 +}
29786 +#endif
29787 +
29788 static int zap_process(struct task_struct *start, int exit_code)
29789 {
29790 struct task_struct *t;
29791 @@ -1766,17 +2018,17 @@ static void wait_for_dump_helpers(struct
29792 pipe = file->f_path.dentry->d_inode->i_pipe;
29793
29794 pipe_lock(pipe);
29795 - pipe->readers++;
29796 - pipe->writers--;
29797 + atomic_inc(&pipe->readers);
29798 + atomic_dec(&pipe->writers);
29799
29800 - while ((pipe->readers > 1) && (!signal_pending(current))) {
29801 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
29802 wake_up_interruptible_sync(&pipe->wait);
29803 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
29804 pipe_wait(pipe);
29805 }
29806
29807 - pipe->readers--;
29808 - pipe->writers++;
29809 + atomic_dec(&pipe->readers);
29810 + atomic_inc(&pipe->writers);
29811 pipe_unlock(pipe);
29812
29813 }
29814 @@ -1857,6 +2109,10 @@ void do_coredump(long signr, int exit_co
29815 */
29816 clear_thread_flag(TIF_SIGPENDING);
29817
29818 + if (signr == SIGKILL || signr == SIGILL)
29819 + gr_handle_brute_attach(current);
29820 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
29821 +
29822 /*
29823 * lock_kernel() because format_corename() is controlled by sysctl, which
29824 * uses lock_kernel()
29825 diff -urNp linux-2.6.34.1/fs/ext2/balloc.c linux-2.6.34.1/fs/ext2/balloc.c
29826 --- linux-2.6.34.1/fs/ext2/balloc.c 2010-07-05 14:24:10.000000000 -0400
29827 +++ linux-2.6.34.1/fs/ext2/balloc.c 2010-07-07 09:04:55.000000000 -0400
29828 @@ -1193,7 +1193,7 @@ static int ext2_has_free_blocks(struct e
29829
29830 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
29831 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
29832 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
29833 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
29834 sbi->s_resuid != current_fsuid() &&
29835 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
29836 return 0;
29837 diff -urNp linux-2.6.34.1/fs/ext2/xattr.c linux-2.6.34.1/fs/ext2/xattr.c
29838 --- linux-2.6.34.1/fs/ext2/xattr.c 2010-07-05 14:24:10.000000000 -0400
29839 +++ linux-2.6.34.1/fs/ext2/xattr.c 2010-07-07 09:04:55.000000000 -0400
29840 @@ -86,8 +86,8 @@
29841 printk("\n"); \
29842 } while (0)
29843 #else
29844 -# define ea_idebug(f...)
29845 -# define ea_bdebug(f...)
29846 +# define ea_idebug(inode, f...) do {} while (0)
29847 +# define ea_bdebug(bh, f...) do {} while (0)
29848 #endif
29849
29850 static int ext2_xattr_set2(struct inode *, struct buffer_head *,
29851 diff -urNp linux-2.6.34.1/fs/ext3/balloc.c linux-2.6.34.1/fs/ext3/balloc.c
29852 --- linux-2.6.34.1/fs/ext3/balloc.c 2010-07-05 14:24:10.000000000 -0400
29853 +++ linux-2.6.34.1/fs/ext3/balloc.c 2010-07-07 09:04:55.000000000 -0400
29854 @@ -1422,7 +1422,7 @@ static int ext3_has_free_blocks(struct e
29855
29856 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
29857 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
29858 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
29859 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
29860 sbi->s_resuid != current_fsuid() &&
29861 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
29862 return 0;
29863 diff -urNp linux-2.6.34.1/fs/ext3/namei.c linux-2.6.34.1/fs/ext3/namei.c
29864 --- linux-2.6.34.1/fs/ext3/namei.c 2010-07-05 14:24:10.000000000 -0400
29865 +++ linux-2.6.34.1/fs/ext3/namei.c 2010-07-07 09:04:55.000000000 -0400
29866 @@ -1168,7 +1168,7 @@ static struct ext3_dir_entry_2 *do_split
29867 char *data1 = (*bh)->b_data, *data2;
29868 unsigned split, move, size;
29869 struct ext3_dir_entry_2 *de = NULL, *de2;
29870 - int err = 0, i;
29871 + int i, err = 0;
29872
29873 bh2 = ext3_append (handle, dir, &newblock, &err);
29874 if (!(bh2)) {
29875 diff -urNp linux-2.6.34.1/fs/ext3/xattr.c linux-2.6.34.1/fs/ext3/xattr.c
29876 --- linux-2.6.34.1/fs/ext3/xattr.c 2010-07-05 14:24:10.000000000 -0400
29877 +++ linux-2.6.34.1/fs/ext3/xattr.c 2010-07-07 09:04:55.000000000 -0400
29878 @@ -89,8 +89,8 @@
29879 printk("\n"); \
29880 } while (0)
29881 #else
29882 -# define ea_idebug(f...)
29883 -# define ea_bdebug(f...)
29884 +# define ea_idebug(f...) do {} while (0)
29885 +# define ea_bdebug(f...) do {} while (0)
29886 #endif
29887
29888 static void ext3_xattr_cache_insert(struct buffer_head *);
29889 diff -urNp linux-2.6.34.1/fs/ext4/balloc.c linux-2.6.34.1/fs/ext4/balloc.c
29890 --- linux-2.6.34.1/fs/ext4/balloc.c 2010-07-05 14:24:10.000000000 -0400
29891 +++ linux-2.6.34.1/fs/ext4/balloc.c 2010-07-07 09:04:55.000000000 -0400
29892 @@ -522,7 +522,7 @@ int ext4_has_free_blocks(struct ext4_sb_
29893 /* Hm, nope. Are (enough) root reserved blocks available? */
29894 if (sbi->s_resuid == current_fsuid() ||
29895 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
29896 - capable(CAP_SYS_RESOURCE)) {
29897 + capable_nolog(CAP_SYS_RESOURCE)) {
29898 if (free_blocks >= (nblocks + dirty_blocks))
29899 return 1;
29900 }
29901 diff -urNp linux-2.6.34.1/fs/ext4/ioctl.c linux-2.6.34.1/fs/ext4/ioctl.c
29902 --- linux-2.6.34.1/fs/ext4/ioctl.c 2010-07-05 14:24:10.000000000 -0400
29903 +++ linux-2.6.34.1/fs/ext4/ioctl.c 2010-07-07 09:04:55.000000000 -0400
29904 @@ -230,6 +230,9 @@ setversion_out:
29905 struct file *donor_filp;
29906 int err;
29907
29908 + /* temporary workaround for bugs in here */
29909 + return -EOPNOTSUPP;
29910 +
29911 if (!(filp->f_mode & FMODE_READ) ||
29912 !(filp->f_mode & FMODE_WRITE))
29913 return -EBADF;
29914 diff -urNp linux-2.6.34.1/fs/ext4/namei.c linux-2.6.34.1/fs/ext4/namei.c
29915 --- linux-2.6.34.1/fs/ext4/namei.c 2010-07-05 14:24:10.000000000 -0400
29916 +++ linux-2.6.34.1/fs/ext4/namei.c 2010-07-07 09:04:55.000000000 -0400
29917 @@ -1197,7 +1197,7 @@ static struct ext4_dir_entry_2 *do_split
29918 char *data1 = (*bh)->b_data, *data2;
29919 unsigned split, move, size;
29920 struct ext4_dir_entry_2 *de = NULL, *de2;
29921 - int err = 0, i;
29922 + int i, err = 0;
29923
29924 bh2 = ext4_append (handle, dir, &newblock, &err);
29925 if (!(bh2)) {
29926 diff -urNp linux-2.6.34.1/fs/ext4/xattr.c linux-2.6.34.1/fs/ext4/xattr.c
29927 --- linux-2.6.34.1/fs/ext4/xattr.c 2010-07-05 14:24:10.000000000 -0400
29928 +++ linux-2.6.34.1/fs/ext4/xattr.c 2010-07-07 09:04:55.000000000 -0400
29929 @@ -82,8 +82,8 @@
29930 printk("\n"); \
29931 } while (0)
29932 #else
29933 -# define ea_idebug(f...)
29934 -# define ea_bdebug(f...)
29935 +# define ea_idebug(inode, f...) do {} while (0)
29936 +# define ea_bdebug(bh, f...) do {} while (0)
29937 #endif
29938
29939 static void ext4_xattr_cache_insert(struct buffer_head *);
29940 diff -urNp linux-2.6.34.1/fs/fcntl.c linux-2.6.34.1/fs/fcntl.c
29941 --- linux-2.6.34.1/fs/fcntl.c 2010-07-05 14:24:10.000000000 -0400
29942 +++ linux-2.6.34.1/fs/fcntl.c 2010-07-07 09:04:55.000000000 -0400
29943 @@ -344,6 +344,7 @@ static long do_fcntl(int fd, unsigned in
29944 switch (cmd) {
29945 case F_DUPFD:
29946 case F_DUPFD_CLOEXEC:
29947 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
29948 if (arg >= rlimit(RLIMIT_NOFILE))
29949 break;
29950 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
29951 @@ -500,7 +501,8 @@ static inline int sigio_perm(struct task
29952 ret = ((fown->euid == 0 ||
29953 fown->euid == cred->suid || fown->euid == cred->uid ||
29954 fown->uid == cred->suid || fown->uid == cred->uid) &&
29955 - !security_file_send_sigiotask(p, fown, sig));
29956 + !security_file_send_sigiotask(p, fown, sig) &&
29957 + !gr_check_protected_task(p) && !gr_pid_is_chrooted(p));
29958 rcu_read_unlock();
29959 return ret;
29960 }
29961 diff -urNp linux-2.6.34.1/fs/fifo.c linux-2.6.34.1/fs/fifo.c
29962 --- linux-2.6.34.1/fs/fifo.c 2010-07-05 14:24:10.000000000 -0400
29963 +++ linux-2.6.34.1/fs/fifo.c 2010-07-07 09:04:55.000000000 -0400
29964 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
29965 */
29966 filp->f_op = &read_pipefifo_fops;
29967 pipe->r_counter++;
29968 - if (pipe->readers++ == 0)
29969 + if (atomic_inc_return(&pipe->readers) == 1)
29970 wake_up_partner(inode);
29971
29972 - if (!pipe->writers) {
29973 + if (!atomic_read(&pipe->writers)) {
29974 if ((filp->f_flags & O_NONBLOCK)) {
29975 /* suppress POLLHUP until we have
29976 * seen a writer */
29977 @@ -82,15 +82,15 @@ static int fifo_open(struct inode *inode
29978 * errno=ENXIO when there is no process reading the FIFO.
29979 */
29980 ret = -ENXIO;
29981 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
29982 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
29983 goto err;
29984
29985 filp->f_op = &write_pipefifo_fops;
29986 pipe->w_counter++;
29987 - if (!pipe->writers++)
29988 + if (atomic_inc_return(&pipe->writers) == 1)
29989 wake_up_partner(inode);
29990
29991 - if (!pipe->readers) {
29992 + if (!atomic_read(&pipe->readers)) {
29993 wait_for_partner(inode, &pipe->r_counter);
29994 if (signal_pending(current))
29995 goto err_wr;
29996 @@ -106,11 +106,11 @@ static int fifo_open(struct inode *inode
29997 */
29998 filp->f_op = &rdwr_pipefifo_fops;
29999
30000 - pipe->readers++;
30001 - pipe->writers++;
30002 + atomic_inc(&pipe->readers);
30003 + atomic_inc(&pipe->writers);
30004 pipe->r_counter++;
30005 pipe->w_counter++;
30006 - if (pipe->readers == 1 || pipe->writers == 1)
30007 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
30008 wake_up_partner(inode);
30009 break;
30010
30011 @@ -124,19 +124,19 @@ static int fifo_open(struct inode *inode
30012 return 0;
30013
30014 err_rd:
30015 - if (!--pipe->readers)
30016 + if (atomic_dec_and_test(&pipe->readers))
30017 wake_up_interruptible(&pipe->wait);
30018 ret = -ERESTARTSYS;
30019 goto err;
30020
30021 err_wr:
30022 - if (!--pipe->writers)
30023 + if (atomic_dec_and_test(&pipe->writers))
30024 wake_up_interruptible(&pipe->wait);
30025 ret = -ERESTARTSYS;
30026 goto err;
30027
30028 err:
30029 - if (!pipe->readers && !pipe->writers)
30030 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
30031 free_pipe_info(inode);
30032
30033 err_nocleanup:
30034 diff -urNp linux-2.6.34.1/fs/file.c linux-2.6.34.1/fs/file.c
30035 --- linux-2.6.34.1/fs/file.c 2010-07-05 14:24:10.000000000 -0400
30036 +++ linux-2.6.34.1/fs/file.c 2010-07-07 09:04:55.000000000 -0400
30037 @@ -14,6 +14,7 @@
30038 #include <linux/slab.h>
30039 #include <linux/vmalloc.h>
30040 #include <linux/file.h>
30041 +#include <linux/security.h>
30042 #include <linux/fdtable.h>
30043 #include <linux/bitops.h>
30044 #include <linux/interrupt.h>
30045 @@ -257,6 +258,7 @@ int expand_files(struct files_struct *fi
30046 * N.B. For clone tasks sharing a files structure, this test
30047 * will limit the total number of files that can be opened.
30048 */
30049 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
30050 if (nr >= rlimit(RLIMIT_NOFILE))
30051 return -EMFILE;
30052
30053 diff -urNp linux-2.6.34.1/fs/fs_struct.c linux-2.6.34.1/fs/fs_struct.c
30054 --- linux-2.6.34.1/fs/fs_struct.c 2010-07-05 14:24:10.000000000 -0400
30055 +++ linux-2.6.34.1/fs/fs_struct.c 2010-07-07 09:04:55.000000000 -0400
30056 @@ -4,6 +4,7 @@
30057 #include <linux/path.h>
30058 #include <linux/slab.h>
30059 #include <linux/fs_struct.h>
30060 +#include <linux/grsecurity.h>
30061
30062 /*
30063 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
30064 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
30065 old_root = fs->root;
30066 fs->root = *path;
30067 path_get(path);
30068 + gr_set_chroot_entries(current, path);
30069 write_unlock(&fs->lock);
30070 if (old_root.dentry)
30071 path_put(&old_root);
30072 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
30073 && fs->root.mnt == old_root->mnt) {
30074 path_get(new_root);
30075 fs->root = *new_root;
30076 + gr_set_chroot_entries(p, new_root);
30077 count++;
30078 }
30079 if (fs->pwd.dentry == old_root->dentry
30080 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
30081 task_lock(tsk);
30082 write_lock(&fs->lock);
30083 tsk->fs = NULL;
30084 - kill = !--fs->users;
30085 + gr_clear_chroot_entries(tsk);
30086 + kill = !atomic_dec_return(&fs->users);
30087 write_unlock(&fs->lock);
30088 task_unlock(tsk);
30089 if (kill)
30090 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
30091 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
30092 /* We don't need to lock fs - think why ;-) */
30093 if (fs) {
30094 - fs->users = 1;
30095 + atomic_set(&fs->users, 1);
30096 fs->in_exec = 0;
30097 rwlock_init(&fs->lock);
30098 fs->umask = old->umask;
30099 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
30100
30101 task_lock(current);
30102 write_lock(&fs->lock);
30103 - kill = !--fs->users;
30104 + kill = !atomic_dec_return(&fs->users);
30105 current->fs = new_fs;
30106 + gr_set_chroot_entries(current, &new_fs->root);
30107 write_unlock(&fs->lock);
30108 task_unlock(current);
30109
30110 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
30111
30112 /* to be mentioned only in INIT_TASK */
30113 struct fs_struct init_fs = {
30114 - .users = 1,
30115 + .users = ATOMIC_INIT(1),
30116 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
30117 .umask = 0022,
30118 };
30119 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
30120 task_lock(current);
30121
30122 write_lock(&init_fs.lock);
30123 - init_fs.users++;
30124 + atomic_inc(&init_fs.users);
30125 write_unlock(&init_fs.lock);
30126
30127 write_lock(&fs->lock);
30128 current->fs = &init_fs;
30129 - kill = !--fs->users;
30130 + gr_set_chroot_entries(current, &current->fs->root);
30131 + kill = !atomic_dec_return(&fs->users);
30132 write_unlock(&fs->lock);
30133
30134 task_unlock(current);
30135 diff -urNp linux-2.6.34.1/fs/fuse/control.c linux-2.6.34.1/fs/fuse/control.c
30136 --- linux-2.6.34.1/fs/fuse/control.c 2010-07-05 14:24:10.000000000 -0400
30137 +++ linux-2.6.34.1/fs/fuse/control.c 2010-07-07 09:04:55.000000000 -0400
30138 @@ -293,7 +293,7 @@ void fuse_ctl_remove_conn(struct fuse_co
30139
30140 static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
30141 {
30142 - struct tree_descr empty_descr = {""};
30143 + struct tree_descr empty_descr = {"", NULL, 0};
30144 struct fuse_conn *fc;
30145 int err;
30146
30147 diff -urNp linux-2.6.34.1/fs/fuse/cuse.c linux-2.6.34.1/fs/fuse/cuse.c
30148 --- linux-2.6.34.1/fs/fuse/cuse.c 2010-07-05 14:24:10.000000000 -0400
30149 +++ linux-2.6.34.1/fs/fuse/cuse.c 2010-07-07 09:04:55.000000000 -0400
30150 @@ -529,8 +529,18 @@ static int cuse_channel_release(struct i
30151 return rc;
30152 }
30153
30154 -static struct file_operations cuse_channel_fops; /* initialized during init */
30155 -
30156 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
30157 + .owner = THIS_MODULE,
30158 + .llseek = no_llseek,
30159 + .read = do_sync_read,
30160 + .aio_read = fuse_dev_read,
30161 + .write = do_sync_write,
30162 + .aio_write = fuse_dev_write,
30163 + .poll = fuse_dev_poll,
30164 + .open = cuse_channel_open,
30165 + .release = cuse_channel_release,
30166 + .fasync = fuse_dev_fasync,
30167 +};
30168
30169 /**************************************************************************
30170 * Misc stuff and module initializatiion
30171 @@ -576,12 +586,6 @@ static int __init cuse_init(void)
30172 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
30173 INIT_LIST_HEAD(&cuse_conntbl[i]);
30174
30175 - /* inherit and extend fuse_dev_operations */
30176 - cuse_channel_fops = fuse_dev_operations;
30177 - cuse_channel_fops.owner = THIS_MODULE;
30178 - cuse_channel_fops.open = cuse_channel_open;
30179 - cuse_channel_fops.release = cuse_channel_release;
30180 -
30181 cuse_class = class_create(THIS_MODULE, "cuse");
30182 if (IS_ERR(cuse_class))
30183 return PTR_ERR(cuse_class);
30184 diff -urNp linux-2.6.34.1/fs/fuse/dev.c linux-2.6.34.1/fs/fuse/dev.c
30185 --- linux-2.6.34.1/fs/fuse/dev.c 2010-07-05 14:24:10.000000000 -0400
30186 +++ linux-2.6.34.1/fs/fuse/dev.c 2010-07-07 09:04:55.000000000 -0400
30187 @@ -745,7 +745,7 @@ __releases(&fc->lock)
30188 * request_end(). Otherwise add it to the processing list, and set
30189 * the 'sent' flag.
30190 */
30191 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
30192 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
30193 unsigned long nr_segs, loff_t pos)
30194 {
30195 int err;
30196 @@ -828,6 +828,8 @@ static ssize_t fuse_dev_read(struct kioc
30197 return err;
30198 }
30199
30200 +EXPORT_SYMBOL_GPL(fuse_dev_read);
30201 +
30202 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
30203 struct fuse_copy_state *cs)
30204 {
30205 @@ -987,7 +989,7 @@ static int copy_out_args(struct fuse_cop
30206 * it from the list and copy the rest of the buffer to the request.
30207 * The request is finished by calling request_end()
30208 */
30209 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
30210 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
30211 unsigned long nr_segs, loff_t pos)
30212 {
30213 int err;
30214 @@ -1084,7 +1086,9 @@ static ssize_t fuse_dev_write(struct kio
30215 return err;
30216 }
30217
30218 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
30219 +EXPORT_SYMBOL_GPL(fuse_dev_write);
30220 +
30221 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
30222 {
30223 unsigned mask = POLLOUT | POLLWRNORM;
30224 struct fuse_conn *fc = fuse_get_conn(file);
30225 @@ -1103,6 +1107,8 @@ static unsigned fuse_dev_poll(struct fil
30226 return mask;
30227 }
30228
30229 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
30230 +
30231 /*
30232 * Abort all requests on the given list (pending or processing)
30233 *
30234 @@ -1210,7 +1216,7 @@ int fuse_dev_release(struct inode *inode
30235 }
30236 EXPORT_SYMBOL_GPL(fuse_dev_release);
30237
30238 -static int fuse_dev_fasync(int fd, struct file *file, int on)
30239 +int fuse_dev_fasync(int fd, struct file *file, int on)
30240 {
30241 struct fuse_conn *fc = fuse_get_conn(file);
30242 if (!fc)
30243 @@ -1220,6 +1226,8 @@ static int fuse_dev_fasync(int fd, struc
30244 return fasync_helper(fd, file, on, &fc->fasync);
30245 }
30246
30247 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
30248 +
30249 const struct file_operations fuse_dev_operations = {
30250 .owner = THIS_MODULE,
30251 .llseek = no_llseek,
30252 diff -urNp linux-2.6.34.1/fs/fuse/dir.c linux-2.6.34.1/fs/fuse/dir.c
30253 --- linux-2.6.34.1/fs/fuse/dir.c 2010-07-05 14:24:10.000000000 -0400
30254 +++ linux-2.6.34.1/fs/fuse/dir.c 2010-07-07 09:04:55.000000000 -0400
30255 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
30256 return link;
30257 }
30258
30259 -static void free_link(char *link)
30260 +static void free_link(const char *link)
30261 {
30262 if (!IS_ERR(link))
30263 free_page((unsigned long) link);
30264 diff -urNp linux-2.6.34.1/fs/fuse/fuse_i.h linux-2.6.34.1/fs/fuse/fuse_i.h
30265 --- linux-2.6.34.1/fs/fuse/fuse_i.h 2010-07-05 14:24:10.000000000 -0400
30266 +++ linux-2.6.34.1/fs/fuse/fuse_i.h 2010-07-07 09:04:55.000000000 -0400
30267 @@ -521,6 +521,16 @@ extern const struct file_operations fuse
30268
30269 extern const struct dentry_operations fuse_dentry_operations;
30270
30271 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
30272 + unsigned long nr_segs, loff_t pos);
30273 +
30274 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
30275 + unsigned long nr_segs, loff_t pos);
30276 +
30277 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
30278 +
30279 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
30280 +
30281 /**
30282 * Inode to nodeid comparison.
30283 */
30284 diff -urNp linux-2.6.34.1/fs/hfs/inode.c linux-2.6.34.1/fs/hfs/inode.c
30285 --- linux-2.6.34.1/fs/hfs/inode.c 2010-07-05 14:24:10.000000000 -0400
30286 +++ linux-2.6.34.1/fs/hfs/inode.c 2010-07-07 09:04:55.000000000 -0400
30287 @@ -423,7 +423,7 @@ int hfs_write_inode(struct inode *inode,
30288
30289 if (S_ISDIR(main_inode->i_mode)) {
30290 if (fd.entrylength < sizeof(struct hfs_cat_dir))
30291 - /* panic? */;
30292 + {/* panic? */}
30293 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
30294 sizeof(struct hfs_cat_dir));
30295 if (rec.type != HFS_CDR_DIR ||
30296 @@ -444,7 +444,7 @@ int hfs_write_inode(struct inode *inode,
30297 sizeof(struct hfs_cat_file));
30298 } else {
30299 if (fd.entrylength < sizeof(struct hfs_cat_file))
30300 - /* panic? */;
30301 + {/* panic? */}
30302 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
30303 sizeof(struct hfs_cat_file));
30304 if (rec.type != HFS_CDR_FIL ||
30305 diff -urNp linux-2.6.34.1/fs/hfsplus/inode.c linux-2.6.34.1/fs/hfsplus/inode.c
30306 --- linux-2.6.34.1/fs/hfsplus/inode.c 2010-07-05 14:24:10.000000000 -0400
30307 +++ linux-2.6.34.1/fs/hfsplus/inode.c 2010-07-07 09:04:55.000000000 -0400
30308 @@ -406,7 +406,7 @@ int hfsplus_cat_read_inode(struct inode
30309 struct hfsplus_cat_folder *folder = &entry.folder;
30310
30311 if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
30312 - /* panic? */;
30313 + {/* panic? */}
30314 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
30315 sizeof(struct hfsplus_cat_folder));
30316 hfsplus_get_perms(inode, &folder->permissions, 1);
30317 @@ -423,7 +423,7 @@ int hfsplus_cat_read_inode(struct inode
30318 struct hfsplus_cat_file *file = &entry.file;
30319
30320 if (fd->entrylength < sizeof(struct hfsplus_cat_file))
30321 - /* panic? */;
30322 + {/* panic? */}
30323 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
30324 sizeof(struct hfsplus_cat_file));
30325
30326 @@ -479,7 +479,7 @@ int hfsplus_cat_write_inode(struct inode
30327 struct hfsplus_cat_folder *folder = &entry.folder;
30328
30329 if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
30330 - /* panic? */;
30331 + {/* panic? */}
30332 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
30333 sizeof(struct hfsplus_cat_folder));
30334 /* simple node checks? */
30335 @@ -501,7 +501,7 @@ int hfsplus_cat_write_inode(struct inode
30336 struct hfsplus_cat_file *file = &entry.file;
30337
30338 if (fd.entrylength < sizeof(struct hfsplus_cat_file))
30339 - /* panic? */;
30340 + {/* panic? */}
30341 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
30342 sizeof(struct hfsplus_cat_file));
30343 hfsplus_inode_write_fork(inode, &file->data_fork);
30344 diff -urNp linux-2.6.34.1/fs/hugetlbfs/inode.c linux-2.6.34.1/fs/hugetlbfs/inode.c
30345 --- linux-2.6.34.1/fs/hugetlbfs/inode.c 2010-07-05 14:24:10.000000000 -0400
30346 +++ linux-2.6.34.1/fs/hugetlbfs/inode.c 2010-07-07 09:04:55.000000000 -0400
30347 @@ -908,7 +908,7 @@ static struct file_system_type hugetlbfs
30348 .kill_sb = kill_litter_super,
30349 };
30350
30351 -static struct vfsmount *hugetlbfs_vfsmount;
30352 +struct vfsmount *hugetlbfs_vfsmount;
30353
30354 static int can_do_hugetlb_shm(void)
30355 {
30356 diff -urNp linux-2.6.34.1/fs/ioctl.c linux-2.6.34.1/fs/ioctl.c
30357 --- linux-2.6.34.1/fs/ioctl.c 2010-07-05 14:24:10.000000000 -0400
30358 +++ linux-2.6.34.1/fs/ioctl.c 2010-07-07 09:04:55.000000000 -0400
30359 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
30360 u64 phys, u64 len, u32 flags)
30361 {
30362 struct fiemap_extent extent;
30363 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
30364 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
30365
30366 /* only count the extents */
30367 if (fieinfo->fi_extents_max == 0) {
30368 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
30369
30370 fieinfo.fi_flags = fiemap.fm_flags;
30371 fieinfo.fi_extents_max = fiemap.fm_extent_count;
30372 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
30373 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
30374
30375 if (fiemap.fm_extent_count != 0 &&
30376 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
30377 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
30378 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
30379 fiemap.fm_flags = fieinfo.fi_flags;
30380 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
30381 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
30382 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
30383 error = -EFAULT;
30384
30385 return error;
30386 diff -urNp linux-2.6.34.1/fs/jffs2/debug.h linux-2.6.34.1/fs/jffs2/debug.h
30387 --- linux-2.6.34.1/fs/jffs2/debug.h 2010-07-05 14:24:10.000000000 -0400
30388 +++ linux-2.6.34.1/fs/jffs2/debug.h 2010-07-07 09:04:55.000000000 -0400
30389 @@ -52,13 +52,13 @@
30390 #if CONFIG_JFFS2_FS_DEBUG > 0
30391 #define D1(x) x
30392 #else
30393 -#define D1(x)
30394 +#define D1(x) do {} while (0);
30395 #endif
30396
30397 #if CONFIG_JFFS2_FS_DEBUG > 1
30398 #define D2(x) x
30399 #else
30400 -#define D2(x)
30401 +#define D2(x) do {} while (0);
30402 #endif
30403
30404 /* The prefixes of JFFS2 messages */
30405 @@ -114,73 +114,73 @@
30406 #ifdef JFFS2_DBG_READINODE_MESSAGES
30407 #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30408 #else
30409 -#define dbg_readinode(fmt, ...)
30410 +#define dbg_readinode(fmt, ...) do {} while (0)
30411 #endif
30412 #ifdef JFFS2_DBG_READINODE2_MESSAGES
30413 #define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30414 #else
30415 -#define dbg_readinode2(fmt, ...)
30416 +#define dbg_readinode2(fmt, ...) do {} while (0)
30417 #endif
30418
30419 /* Fragtree build debugging messages */
30420 #ifdef JFFS2_DBG_FRAGTREE_MESSAGES
30421 #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30422 #else
30423 -#define dbg_fragtree(fmt, ...)
30424 +#define dbg_fragtree(fmt, ...) do {} while (0)
30425 #endif
30426 #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
30427 #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30428 #else
30429 -#define dbg_fragtree2(fmt, ...)
30430 +#define dbg_fragtree2(fmt, ...) do {} while (0)
30431 #endif
30432
30433 /* Directory entry list manilulation debugging messages */
30434 #ifdef JFFS2_DBG_DENTLIST_MESSAGES
30435 #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30436 #else
30437 -#define dbg_dentlist(fmt, ...)
30438 +#define dbg_dentlist(fmt, ...) do {} while (0)
30439 #endif
30440
30441 /* Print the messages about manipulating node_refs */
30442 #ifdef JFFS2_DBG_NODEREF_MESSAGES
30443 #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30444 #else
30445 -#define dbg_noderef(fmt, ...)
30446 +#define dbg_noderef(fmt, ...) do {} while (0)
30447 #endif
30448
30449 /* Manipulations with the list of inodes (JFFS2 inocache) */
30450 #ifdef JFFS2_DBG_INOCACHE_MESSAGES
30451 #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30452 #else
30453 -#define dbg_inocache(fmt, ...)
30454 +#define dbg_inocache(fmt, ...) do {} while (0)
30455 #endif
30456
30457 /* Summary debugging messages */
30458 #ifdef JFFS2_DBG_SUMMARY_MESSAGES
30459 #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30460 #else
30461 -#define dbg_summary(fmt, ...)
30462 +#define dbg_summary(fmt, ...) do {} while (0)
30463 #endif
30464
30465 /* File system build messages */
30466 #ifdef JFFS2_DBG_FSBUILD_MESSAGES
30467 #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30468 #else
30469 -#define dbg_fsbuild(fmt, ...)
30470 +#define dbg_fsbuild(fmt, ...) do {} while (0)
30471 #endif
30472
30473 /* Watch the object allocations */
30474 #ifdef JFFS2_DBG_MEMALLOC_MESSAGES
30475 #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30476 #else
30477 -#define dbg_memalloc(fmt, ...)
30478 +#define dbg_memalloc(fmt, ...) do {} while (0)
30479 #endif
30480
30481 /* Watch the XATTR subsystem */
30482 #ifdef JFFS2_DBG_XATTR_MESSAGES
30483 #define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
30484 #else
30485 -#define dbg_xattr(fmt, ...)
30486 +#define dbg_xattr(fmt, ...) do {} while (0)
30487 #endif
30488
30489 /* "Sanity" checks */
30490 diff -urNp linux-2.6.34.1/fs/jffs2/erase.c linux-2.6.34.1/fs/jffs2/erase.c
30491 --- linux-2.6.34.1/fs/jffs2/erase.c 2010-07-05 14:24:10.000000000 -0400
30492 +++ linux-2.6.34.1/fs/jffs2/erase.c 2010-07-07 09:04:55.000000000 -0400
30493 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
30494 struct jffs2_unknown_node marker = {
30495 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
30496 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
30497 - .totlen = cpu_to_je32(c->cleanmarker_size)
30498 + .totlen = cpu_to_je32(c->cleanmarker_size),
30499 + .hdr_crc = cpu_to_je32(0)
30500 };
30501
30502 jffs2_prealloc_raw_node_refs(c, jeb, 1);
30503 diff -urNp linux-2.6.34.1/fs/jffs2/summary.h linux-2.6.34.1/fs/jffs2/summary.h
30504 --- linux-2.6.34.1/fs/jffs2/summary.h 2010-07-05 14:24:10.000000000 -0400
30505 +++ linux-2.6.34.1/fs/jffs2/summary.h 2010-07-07 09:04:55.000000000 -0400
30506 @@ -194,18 +194,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_
30507
30508 #define jffs2_sum_active() (0)
30509 #define jffs2_sum_init(a) (0)
30510 -#define jffs2_sum_exit(a)
30511 -#define jffs2_sum_disable_collecting(a)
30512 +#define jffs2_sum_exit(a) do {} while (0)
30513 +#define jffs2_sum_disable_collecting(a) do {} while (0)
30514 #define jffs2_sum_is_disabled(a) (0)
30515 -#define jffs2_sum_reset_collected(a)
30516 +#define jffs2_sum_reset_collected(a) do {} while (0)
30517 #define jffs2_sum_add_kvec(a,b,c,d) (0)
30518 -#define jffs2_sum_move_collected(a,b)
30519 +#define jffs2_sum_move_collected(a,b) do {} while (0)
30520 #define jffs2_sum_write_sumnode(a) (0)
30521 -#define jffs2_sum_add_padding_mem(a,b)
30522 -#define jffs2_sum_add_inode_mem(a,b,c)
30523 -#define jffs2_sum_add_dirent_mem(a,b,c)
30524 -#define jffs2_sum_add_xattr_mem(a,b,c)
30525 -#define jffs2_sum_add_xref_mem(a,b,c)
30526 +#define jffs2_sum_add_padding_mem(a,b) do {} while (0)
30527 +#define jffs2_sum_add_inode_mem(a,b,c) do {} while (0)
30528 +#define jffs2_sum_add_dirent_mem(a,b,c) do {} while (0)
30529 +#define jffs2_sum_add_xattr_mem(a,b,c) do {} while (0)
30530 +#define jffs2_sum_add_xref_mem(a,b,c) do {} while (0)
30531 #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
30532
30533 #endif /* CONFIG_JFFS2_SUMMARY */
30534 diff -urNp linux-2.6.34.1/fs/jffs2/wbuf.c linux-2.6.34.1/fs/jffs2/wbuf.c
30535 --- linux-2.6.34.1/fs/jffs2/wbuf.c 2010-07-05 14:24:10.000000000 -0400
30536 +++ linux-2.6.34.1/fs/jffs2/wbuf.c 2010-07-07 09:04:55.000000000 -0400
30537 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
30538 {
30539 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
30540 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
30541 - .totlen = constant_cpu_to_je32(8)
30542 + .totlen = constant_cpu_to_je32(8),
30543 + .hdr_crc = constant_cpu_to_je32(0)
30544 };
30545
30546 /*
30547 diff -urNp linux-2.6.34.1/fs/lockd/svc.c linux-2.6.34.1/fs/lockd/svc.c
30548 --- linux-2.6.34.1/fs/lockd/svc.c 2010-07-05 14:24:10.000000000 -0400
30549 +++ linux-2.6.34.1/fs/lockd/svc.c 2010-07-07 09:04:55.000000000 -0400
30550 @@ -42,7 +42,7 @@
30551
30552 static struct svc_program nlmsvc_program;
30553
30554 -struct nlmsvc_binding * nlmsvc_ops;
30555 +const struct nlmsvc_binding * nlmsvc_ops;
30556 EXPORT_SYMBOL_GPL(nlmsvc_ops);
30557
30558 static DEFINE_MUTEX(nlmsvc_mutex);
30559 diff -urNp linux-2.6.34.1/fs/locks.c linux-2.6.34.1/fs/locks.c
30560 --- linux-2.6.34.1/fs/locks.c 2010-07-05 14:24:10.000000000 -0400
30561 +++ linux-2.6.34.1/fs/locks.c 2010-07-07 09:04:55.000000000 -0400
30562 @@ -2008,16 +2008,16 @@ void locks_remove_flock(struct file *fil
30563 return;
30564
30565 if (filp->f_op && filp->f_op->flock) {
30566 - struct file_lock fl = {
30567 + struct file_lock flock = {
30568 .fl_pid = current->tgid,
30569 .fl_file = filp,
30570 .fl_flags = FL_FLOCK,
30571 .fl_type = F_UNLCK,
30572 .fl_end = OFFSET_MAX,
30573 };
30574 - filp->f_op->flock(filp, F_SETLKW, &fl);
30575 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
30576 - fl.fl_ops->fl_release_private(&fl);
30577 + filp->f_op->flock(filp, F_SETLKW, &flock);
30578 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
30579 + flock.fl_ops->fl_release_private(&flock);
30580 }
30581
30582 lock_kernel();
30583 diff -urNp linux-2.6.34.1/fs/namei.c linux-2.6.34.1/fs/namei.c
30584 --- linux-2.6.34.1/fs/namei.c 2010-07-05 14:24:10.000000000 -0400
30585 +++ linux-2.6.34.1/fs/namei.c 2010-07-07 09:04:55.000000000 -0400
30586 @@ -547,7 +547,7 @@ __do_follow_link(struct path *path, stru
30587 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
30588 error = PTR_ERR(*p);
30589 if (!IS_ERR(*p)) {
30590 - char *s = nd_get_link(nd);
30591 + const char *s = nd_get_link(nd);
30592 error = 0;
30593 if (s)
30594 error = __vfs_follow_link(nd, s);
30595 @@ -580,6 +580,13 @@ static inline int do_follow_link(struct
30596 err = security_inode_follow_link(path->dentry, nd);
30597 if (err)
30598 goto loop;
30599 +
30600 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
30601 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
30602 + err = -EACCES;
30603 + goto loop;
30604 + }
30605 +
30606 current->link_count++;
30607 current->total_link_count++;
30608 nd->depth++;
30609 @@ -964,11 +971,18 @@ return_reval:
30610 break;
30611 }
30612 return_base:
30613 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
30614 + path_put(&nd->path);
30615 + return -ENOENT;
30616 + }
30617 return 0;
30618 out_dput:
30619 path_put_conditional(&next, nd);
30620 break;
30621 }
30622 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
30623 + err = -ENOENT;
30624 +
30625 path_put(&nd->path);
30626 return_err:
30627 return err;
30628 @@ -1505,12 +1519,19 @@ static int __open_namei_create(struct na
30629 int error;
30630 struct dentry *dir = nd->path.dentry;
30631
30632 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, open_flag, mode)) {
30633 + error = -EACCES;
30634 + goto out_unlock;
30635 + }
30636 +
30637 if (!IS_POSIXACL(dir->d_inode))
30638 mode &= ~current_umask();
30639 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
30640 if (error)
30641 goto out_unlock;
30642 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
30643 + if (!error)
30644 + gr_handle_create(path->dentry, nd->path.mnt);
30645 out_unlock:
30646 mutex_unlock(&dir->d_inode->i_mutex);
30647 dput(nd->path.dentry);
30648 @@ -1613,6 +1634,7 @@ static struct file *do_last(struct namei
30649 int mode, const char *pathname)
30650 {
30651 struct dentry *dir = nd->path.dentry;
30652 + int flag = open_to_namei_flags(open_flag);
30653 struct file *filp;
30654 int error = -EISDIR;
30655
30656 @@ -1661,6 +1683,22 @@ static struct file *do_last(struct namei
30657 }
30658 path_to_nameidata(path, nd);
30659 audit_inode(pathname, nd->path.dentry);
30660 +
30661 + if (gr_handle_rofs_blockwrite(nd->path.dentry, nd->path.mnt, acc_mode)) {
30662 + error = -EPERM;
30663 + goto exit;
30664 + }
30665 +
30666 + if (gr_handle_rawio(nd->path.dentry->d_inode)) {
30667 + error = -EPERM;
30668 + goto exit;
30669 + }
30670 +
30671 + if (!gr_acl_handle_open(nd->path.dentry, nd->path.mnt, flag)) {
30672 + error = -EACCES;
30673 + goto exit;
30674 + }
30675 +
30676 goto ok;
30677 }
30678
30679 @@ -1713,6 +1751,24 @@ static struct file *do_last(struct namei
30680 /*
30681 * It already exists.
30682 */
30683 +
30684 + if (gr_handle_rofs_blockwrite(path->dentry, nd->path.mnt, acc_mode)) {
30685 + error = -EPERM;
30686 + goto exit_mutex_unlock;
30687 + }
30688 + if (gr_handle_rawio(path->dentry->d_inode)) {
30689 + error = -EPERM;
30690 + goto exit_mutex_unlock;
30691 + }
30692 + if (!gr_acl_handle_open(path->dentry, nd->path.mnt, flag)) {
30693 + error = -EACCES;
30694 + goto exit_mutex_unlock;
30695 + }
30696 + if (gr_handle_fifo(path->dentry, nd->path.mnt, dir, flag, acc_mode)) {
30697 + error = -EACCES;
30698 + goto exit_mutex_unlock;
30699 + }
30700 +
30701 mutex_unlock(&dir->d_inode->i_mutex);
30702 audit_inode(pathname, path->dentry);
30703
30704 @@ -2033,6 +2089,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
30705 error = may_mknod(mode);
30706 if (error)
30707 goto out_dput;
30708 +
30709 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
30710 + error = -EPERM;
30711 + goto out_dput;
30712 + }
30713 +
30714 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
30715 + error = -EACCES;
30716 + goto out_dput;
30717 + }
30718 +
30719 error = mnt_want_write(nd.path.mnt);
30720 if (error)
30721 goto out_dput;
30722 @@ -2053,6 +2120,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
30723 }
30724 out_drop_write:
30725 mnt_drop_write(nd.path.mnt);
30726 +
30727 + if (!error)
30728 + gr_handle_create(dentry, nd.path.mnt);
30729 out_dput:
30730 dput(dentry);
30731 out_unlock:
30732 @@ -2105,6 +2175,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
30733 if (IS_ERR(dentry))
30734 goto out_unlock;
30735
30736 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
30737 + error = -EACCES;
30738 + goto out_dput;
30739 + }
30740 +
30741 if (!IS_POSIXACL(nd.path.dentry->d_inode))
30742 mode &= ~current_umask();
30743 error = mnt_want_write(nd.path.mnt);
30744 @@ -2116,6 +2191,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
30745 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
30746 out_drop_write:
30747 mnt_drop_write(nd.path.mnt);
30748 +
30749 + if (!error)
30750 + gr_handle_create(dentry, nd.path.mnt);
30751 +
30752 out_dput:
30753 dput(dentry);
30754 out_unlock:
30755 @@ -2197,6 +2276,8 @@ static long do_rmdir(int dfd, const char
30756 char * name;
30757 struct dentry *dentry;
30758 struct nameidata nd;
30759 + ino_t saved_ino = 0;
30760 + dev_t saved_dev = 0;
30761
30762 error = user_path_parent(dfd, pathname, &nd, &name);
30763 if (error)
30764 @@ -2221,6 +2302,19 @@ static long do_rmdir(int dfd, const char
30765 error = PTR_ERR(dentry);
30766 if (IS_ERR(dentry))
30767 goto exit2;
30768 +
30769 + if (dentry->d_inode != NULL) {
30770 + if (dentry->d_inode->i_nlink <= 1) {
30771 + saved_ino = dentry->d_inode->i_ino;
30772 + saved_dev = dentry->d_inode->i_sb->s_dev;
30773 + }
30774 +
30775 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
30776 + error = -EACCES;
30777 + goto exit3;
30778 + }
30779 + }
30780 +
30781 error = mnt_want_write(nd.path.mnt);
30782 if (error)
30783 goto exit3;
30784 @@ -2228,6 +2322,8 @@ static long do_rmdir(int dfd, const char
30785 if (error)
30786 goto exit4;
30787 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
30788 + if (!error && (saved_dev || saved_ino))
30789 + gr_handle_delete(saved_ino, saved_dev);
30790 exit4:
30791 mnt_drop_write(nd.path.mnt);
30792 exit3:
30793 @@ -2290,6 +2386,8 @@ static long do_unlinkat(int dfd, const c
30794 struct dentry *dentry;
30795 struct nameidata nd;
30796 struct inode *inode = NULL;
30797 + ino_t saved_ino = 0;
30798 + dev_t saved_dev = 0;
30799
30800 error = user_path_parent(dfd, pathname, &nd, &name);
30801 if (error)
30802 @@ -2309,8 +2407,19 @@ static long do_unlinkat(int dfd, const c
30803 if (nd.last.name[nd.last.len])
30804 goto slashes;
30805 inode = dentry->d_inode;
30806 - if (inode)
30807 + if (inode) {
30808 + if (inode->i_nlink <= 1) {
30809 + saved_ino = inode->i_ino;
30810 + saved_dev = inode->i_sb->s_dev;
30811 + }
30812 +
30813 atomic_inc(&inode->i_count);
30814 +
30815 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
30816 + error = -EACCES;
30817 + goto exit2;
30818 + }
30819 + }
30820 error = mnt_want_write(nd.path.mnt);
30821 if (error)
30822 goto exit2;
30823 @@ -2318,6 +2427,8 @@ static long do_unlinkat(int dfd, const c
30824 if (error)
30825 goto exit3;
30826 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
30827 + if (!error && (saved_ino || saved_dev))
30828 + gr_handle_delete(saved_ino, saved_dev);
30829 exit3:
30830 mnt_drop_write(nd.path.mnt);
30831 exit2:
30832 @@ -2395,6 +2506,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
30833 if (IS_ERR(dentry))
30834 goto out_unlock;
30835
30836 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
30837 + error = -EACCES;
30838 + goto out_dput;
30839 + }
30840 +
30841 error = mnt_want_write(nd.path.mnt);
30842 if (error)
30843 goto out_dput;
30844 @@ -2402,6 +2518,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
30845 if (error)
30846 goto out_drop_write;
30847 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
30848 + if (!error)
30849 + gr_handle_create(dentry, nd.path.mnt);
30850 out_drop_write:
30851 mnt_drop_write(nd.path.mnt);
30852 out_dput:
30853 @@ -2494,6 +2612,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
30854 error = PTR_ERR(new_dentry);
30855 if (IS_ERR(new_dentry))
30856 goto out_unlock;
30857 +
30858 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
30859 + old_path.dentry->d_inode,
30860 + old_path.dentry->d_inode->i_mode, to)) {
30861 + error = -EACCES;
30862 + goto out_dput;
30863 + }
30864 +
30865 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
30866 + old_path.dentry, old_path.mnt, to)) {
30867 + error = -EACCES;
30868 + goto out_dput;
30869 + }
30870 +
30871 error = mnt_want_write(nd.path.mnt);
30872 if (error)
30873 goto out_dput;
30874 @@ -2501,6 +2633,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
30875 if (error)
30876 goto out_drop_write;
30877 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
30878 + if (!error)
30879 + gr_handle_create(new_dentry, nd.path.mnt);
30880 out_drop_write:
30881 mnt_drop_write(nd.path.mnt);
30882 out_dput:
30883 @@ -2734,6 +2868,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
30884 if (new_dentry == trap)
30885 goto exit5;
30886
30887 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
30888 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
30889 + to);
30890 + if (error)
30891 + goto exit5;
30892 +
30893 error = mnt_want_write(oldnd.path.mnt);
30894 if (error)
30895 goto exit5;
30896 @@ -2743,6 +2883,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
30897 goto exit6;
30898 error = vfs_rename(old_dir->d_inode, old_dentry,
30899 new_dir->d_inode, new_dentry);
30900 + if (!error)
30901 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
30902 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
30903 exit6:
30904 mnt_drop_write(oldnd.path.mnt);
30905 exit5:
30906 diff -urNp linux-2.6.34.1/fs/namespace.c linux-2.6.34.1/fs/namespace.c
30907 --- linux-2.6.34.1/fs/namespace.c 2010-07-05 14:24:10.000000000 -0400
30908 +++ linux-2.6.34.1/fs/namespace.c 2010-07-07 09:04:55.000000000 -0400
30909 @@ -1100,6 +1100,9 @@ static int do_umount(struct vfsmount *mn
30910 if (!(sb->s_flags & MS_RDONLY))
30911 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
30912 up_write(&sb->s_umount);
30913 +
30914 + gr_log_remount(mnt->mnt_devname, retval);
30915 +
30916 return retval;
30917 }
30918
30919 @@ -1121,6 +1124,9 @@ static int do_umount(struct vfsmount *mn
30920 security_sb_umount_busy(mnt);
30921 up_write(&namespace_sem);
30922 release_mounts(&umount_list);
30923 +
30924 + gr_log_unmount(mnt->mnt_devname, retval);
30925 +
30926 return retval;
30927 }
30928
30929 @@ -2000,6 +2006,16 @@ long do_mount(char *dev_name, char *dir_
30930 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
30931 MS_STRICTATIME);
30932
30933 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
30934 + retval = -EPERM;
30935 + goto dput_out;
30936 + }
30937 +
30938 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
30939 + retval = -EPERM;
30940 + goto dput_out;
30941 + }
30942 +
30943 if (flags & MS_REMOUNT)
30944 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
30945 data_page);
30946 @@ -2014,6 +2030,9 @@ long do_mount(char *dev_name, char *dir_
30947 dev_name, data_page);
30948 dput_out:
30949 path_put(&path);
30950 +
30951 + gr_log_mount(dev_name, dir_name, retval);
30952 +
30953 return retval;
30954 }
30955
30956 @@ -2220,6 +2239,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
30957 goto out1;
30958 }
30959
30960 + if (gr_handle_chroot_pivot()) {
30961 + error = -EPERM;
30962 + path_put(&old);
30963 + goto out1;
30964 + }
30965 +
30966 read_lock(&current->fs->lock);
30967 root = current->fs->root;
30968 path_get(&current->fs->root);
30969 diff -urNp linux-2.6.34.1/fs/nfs/inode.c linux-2.6.34.1/fs/nfs/inode.c
30970 --- linux-2.6.34.1/fs/nfs/inode.c 2010-07-05 14:24:10.000000000 -0400
30971 +++ linux-2.6.34.1/fs/nfs/inode.c 2010-07-07 09:04:55.000000000 -0400
30972 @@ -897,16 +897,16 @@ static int nfs_size_need_update(const st
30973 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
30974 }
30975
30976 -static atomic_long_t nfs_attr_generation_counter;
30977 +static atomic_long_unchecked_t nfs_attr_generation_counter;
30978
30979 static unsigned long nfs_read_attr_generation_counter(void)
30980 {
30981 - return atomic_long_read(&nfs_attr_generation_counter);
30982 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
30983 }
30984
30985 unsigned long nfs_inc_attr_generation_counter(void)
30986 {
30987 - return atomic_long_inc_return(&nfs_attr_generation_counter);
30988 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
30989 }
30990
30991 void nfs_fattr_init(struct nfs_fattr *fattr)
30992 diff -urNp linux-2.6.34.1/fs/nfs/nfs4proc.c linux-2.6.34.1/fs/nfs/nfs4proc.c
30993 --- linux-2.6.34.1/fs/nfs/nfs4proc.c 2010-07-05 14:24:10.000000000 -0400
30994 +++ linux-2.6.34.1/fs/nfs/nfs4proc.c 2010-07-07 09:04:55.000000000 -0400
30995 @@ -1162,7 +1162,7 @@ static int _nfs4_do_open_reclaim(struct
30996 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
30997 {
30998 struct nfs_server *server = NFS_SERVER(state->inode);
30999 - struct nfs4_exception exception = { };
31000 + struct nfs4_exception exception = {0, 0};
31001 int err;
31002 do {
31003 err = _nfs4_do_open_reclaim(ctx, state);
31004 @@ -1204,7 +1204,7 @@ static int _nfs4_open_delegation_recall(
31005
31006 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
31007 {
31008 - struct nfs4_exception exception = { };
31009 + struct nfs4_exception exception = {0, 0};
31010 struct nfs_server *server = NFS_SERVER(state->inode);
31011 int err;
31012 do {
31013 @@ -1577,7 +1577,7 @@ static int _nfs4_open_expired(struct nfs
31014 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
31015 {
31016 struct nfs_server *server = NFS_SERVER(state->inode);
31017 - struct nfs4_exception exception = { };
31018 + struct nfs4_exception exception = {0, 0};
31019 int err;
31020
31021 do {
31022 @@ -1684,7 +1684,7 @@ out_err:
31023
31024 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
31025 {
31026 - struct nfs4_exception exception = { };
31027 + struct nfs4_exception exception = {0, 0};
31028 struct nfs4_state *res;
31029 int status;
31030
31031 @@ -1775,7 +1775,7 @@ static int nfs4_do_setattr(struct inode
31032 struct nfs4_state *state)
31033 {
31034 struct nfs_server *server = NFS_SERVER(inode);
31035 - struct nfs4_exception exception = { };
31036 + struct nfs4_exception exception = {0, 0};
31037 int err;
31038 do {
31039 err = nfs4_handle_exception(server,
31040 @@ -2151,7 +2151,7 @@ static int _nfs4_server_capabilities(str
31041
31042 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
31043 {
31044 - struct nfs4_exception exception = { };
31045 + struct nfs4_exception exception = {0, 0};
31046 int err;
31047 do {
31048 err = nfs4_handle_exception(server,
31049 @@ -2185,7 +2185,7 @@ static int _nfs4_lookup_root(struct nfs_
31050 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
31051 struct nfs_fsinfo *info)
31052 {
31053 - struct nfs4_exception exception = { };
31054 + struct nfs4_exception exception = {0, 0};
31055 int err;
31056 do {
31057 err = nfs4_handle_exception(server,
31058 @@ -2274,7 +2274,7 @@ static int _nfs4_proc_getattr(struct nfs
31059
31060 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
31061 {
31062 - struct nfs4_exception exception = { };
31063 + struct nfs4_exception exception = {0, 0};
31064 int err;
31065 do {
31066 err = nfs4_handle_exception(server,
31067 @@ -2362,7 +2362,7 @@ static int nfs4_proc_lookupfh(struct nfs
31068 struct qstr *name, struct nfs_fh *fhandle,
31069 struct nfs_fattr *fattr)
31070 {
31071 - struct nfs4_exception exception = { };
31072 + struct nfs4_exception exception = {0, 0};
31073 int err;
31074 do {
31075 err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
31076 @@ -2391,7 +2391,7 @@ static int _nfs4_proc_lookup(struct inod
31077
31078 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
31079 {
31080 - struct nfs4_exception exception = { };
31081 + struct nfs4_exception exception = {0, 0};
31082 int err;
31083 do {
31084 err = nfs4_handle_exception(NFS_SERVER(dir),
31085 @@ -2455,7 +2455,7 @@ static int _nfs4_proc_access(struct inod
31086
31087 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
31088 {
31089 - struct nfs4_exception exception = { };
31090 + struct nfs4_exception exception = {0, 0};
31091 int err;
31092 do {
31093 err = nfs4_handle_exception(NFS_SERVER(inode),
31094 @@ -2511,7 +2511,7 @@ static int _nfs4_proc_readlink(struct in
31095 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
31096 unsigned int pgbase, unsigned int pglen)
31097 {
31098 - struct nfs4_exception exception = { };
31099 + struct nfs4_exception exception = {0, 0};
31100 int err;
31101 do {
31102 err = nfs4_handle_exception(NFS_SERVER(inode),
31103 @@ -2609,7 +2609,7 @@ static int _nfs4_proc_remove(struct inod
31104
31105 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
31106 {
31107 - struct nfs4_exception exception = { };
31108 + struct nfs4_exception exception = {0, 0};
31109 int err;
31110 do {
31111 err = nfs4_handle_exception(NFS_SERVER(dir),
31112 @@ -2682,7 +2682,7 @@ static int _nfs4_proc_rename(struct inod
31113 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
31114 struct inode *new_dir, struct qstr *new_name)
31115 {
31116 - struct nfs4_exception exception = { };
31117 + struct nfs4_exception exception = {0, 0};
31118 int err;
31119 do {
31120 err = nfs4_handle_exception(NFS_SERVER(old_dir),
31121 @@ -2729,7 +2729,7 @@ static int _nfs4_proc_link(struct inode
31122
31123 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
31124 {
31125 - struct nfs4_exception exception = { };
31126 + struct nfs4_exception exception = {0, 0};
31127 int err;
31128 do {
31129 err = nfs4_handle_exception(NFS_SERVER(inode),
31130 @@ -2821,7 +2821,7 @@ out:
31131 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
31132 struct page *page, unsigned int len, struct iattr *sattr)
31133 {
31134 - struct nfs4_exception exception = { };
31135 + struct nfs4_exception exception = {0, 0};
31136 int err;
31137 do {
31138 err = nfs4_handle_exception(NFS_SERVER(dir),
31139 @@ -2852,7 +2852,7 @@ out:
31140 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
31141 struct iattr *sattr)
31142 {
31143 - struct nfs4_exception exception = { };
31144 + struct nfs4_exception exception = {0, 0};
31145 int err;
31146 do {
31147 err = nfs4_handle_exception(NFS_SERVER(dir),
31148 @@ -2901,7 +2901,7 @@ static int _nfs4_proc_readdir(struct den
31149 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
31150 u64 cookie, struct page *page, unsigned int count, int plus)
31151 {
31152 - struct nfs4_exception exception = { };
31153 + struct nfs4_exception exception = {0, 0};
31154 int err;
31155 do {
31156 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
31157 @@ -2949,7 +2949,7 @@ out:
31158 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
31159 struct iattr *sattr, dev_t rdev)
31160 {
31161 - struct nfs4_exception exception = { };
31162 + struct nfs4_exception exception = {0, 0};
31163 int err;
31164 do {
31165 err = nfs4_handle_exception(NFS_SERVER(dir),
31166 @@ -2981,7 +2981,7 @@ static int _nfs4_proc_statfs(struct nfs_
31167
31168 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
31169 {
31170 - struct nfs4_exception exception = { };
31171 + struct nfs4_exception exception = {0, 0};
31172 int err;
31173 do {
31174 err = nfs4_handle_exception(server,
31175 @@ -3012,7 +3012,7 @@ static int _nfs4_do_fsinfo(struct nfs_se
31176
31177 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
31178 {
31179 - struct nfs4_exception exception = { };
31180 + struct nfs4_exception exception = {0, 0};
31181 int err;
31182
31183 do {
31184 @@ -3058,7 +3058,7 @@ static int _nfs4_proc_pathconf(struct nf
31185 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
31186 struct nfs_pathconf *pathconf)
31187 {
31188 - struct nfs4_exception exception = { };
31189 + struct nfs4_exception exception = {0, 0};
31190 int err;
31191
31192 do {
31193 @@ -3365,7 +3365,7 @@ out_free:
31194
31195 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
31196 {
31197 - struct nfs4_exception exception = { };
31198 + struct nfs4_exception exception = {0, 0};
31199 ssize_t ret;
31200 do {
31201 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
31202 @@ -3421,7 +3421,7 @@ static int __nfs4_proc_set_acl(struct in
31203
31204 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
31205 {
31206 - struct nfs4_exception exception = { };
31207 + struct nfs4_exception exception = {0, 0};
31208 int err;
31209 do {
31210 err = nfs4_handle_exception(NFS_SERVER(inode),
31211 @@ -3705,7 +3705,7 @@ out:
31212 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
31213 {
31214 struct nfs_server *server = NFS_SERVER(inode);
31215 - struct nfs4_exception exception = { };
31216 + struct nfs4_exception exception = {0, 0};
31217 int err;
31218 do {
31219 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
31220 @@ -3778,7 +3778,7 @@ out:
31221
31222 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
31223 {
31224 - struct nfs4_exception exception = { };
31225 + struct nfs4_exception exception = {0, 0};
31226 int err;
31227
31228 do {
31229 @@ -4190,7 +4190,7 @@ static int _nfs4_do_setlk(struct nfs4_st
31230 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
31231 {
31232 struct nfs_server *server = NFS_SERVER(state->inode);
31233 - struct nfs4_exception exception = { };
31234 + struct nfs4_exception exception = {0, 0};
31235 int err;
31236
31237 do {
31238 @@ -4208,7 +4208,7 @@ static int nfs4_lock_reclaim(struct nfs4
31239 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
31240 {
31241 struct nfs_server *server = NFS_SERVER(state->inode);
31242 - struct nfs4_exception exception = { };
31243 + struct nfs4_exception exception = {0, 0};
31244 int err;
31245
31246 err = nfs4_set_lock_state(state, request);
31247 @@ -4273,7 +4273,7 @@ out:
31248
31249 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
31250 {
31251 - struct nfs4_exception exception = { };
31252 + struct nfs4_exception exception = {0, 0};
31253 int err;
31254
31255 do {
31256 @@ -4333,7 +4333,7 @@ nfs4_proc_lock(struct file *filp, int cm
31257 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
31258 {
31259 struct nfs_server *server = NFS_SERVER(state->inode);
31260 - struct nfs4_exception exception = { };
31261 + struct nfs4_exception exception = {0, 0};
31262 int err;
31263
31264 err = nfs4_set_lock_state(state, fl);
31265 diff -urNp linux-2.6.34.1/fs/nfsd/lockd.c linux-2.6.34.1/fs/nfsd/lockd.c
31266 --- linux-2.6.34.1/fs/nfsd/lockd.c 2010-07-05 14:24:10.000000000 -0400
31267 +++ linux-2.6.34.1/fs/nfsd/lockd.c 2010-07-07 09:04:55.000000000 -0400
31268 @@ -61,7 +61,7 @@ nlm_fclose(struct file *filp)
31269 fput(filp);
31270 }
31271
31272 -static struct nlmsvc_binding nfsd_nlm_ops = {
31273 +static const struct nlmsvc_binding nfsd_nlm_ops = {
31274 .fopen = nlm_fopen, /* open file for locking */
31275 .fclose = nlm_fclose, /* close file */
31276 };
31277 diff -urNp linux-2.6.34.1/fs/nfsd/nfsctl.c linux-2.6.34.1/fs/nfsd/nfsctl.c
31278 --- linux-2.6.34.1/fs/nfsd/nfsctl.c 2010-07-05 14:24:10.000000000 -0400
31279 +++ linux-2.6.34.1/fs/nfsd/nfsctl.c 2010-07-07 09:04:55.000000000 -0400
31280 @@ -160,7 +160,7 @@ static int export_features_open(struct i
31281 return single_open(file, export_features_show, NULL);
31282 }
31283
31284 -static struct file_operations export_features_operations = {
31285 +static const struct file_operations export_features_operations = {
31286 .open = export_features_open,
31287 .read = seq_read,
31288 .llseek = seq_lseek,
31289 diff -urNp linux-2.6.34.1/fs/nfsd/vfs.c linux-2.6.34.1/fs/nfsd/vfs.c
31290 --- linux-2.6.34.1/fs/nfsd/vfs.c 2010-07-05 14:24:10.000000000 -0400
31291 +++ linux-2.6.34.1/fs/nfsd/vfs.c 2010-07-07 09:04:55.000000000 -0400
31292 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
31293 } else {
31294 oldfs = get_fs();
31295 set_fs(KERNEL_DS);
31296 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
31297 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
31298 set_fs(oldfs);
31299 }
31300
31301 @@ -1056,7 +1056,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
31302
31303 /* Write the data. */
31304 oldfs = get_fs(); set_fs(KERNEL_DS);
31305 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
31306 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
31307 set_fs(oldfs);
31308 if (host_err < 0)
31309 goto out_nfserr;
31310 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
31311 */
31312
31313 oldfs = get_fs(); set_fs(KERNEL_DS);
31314 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
31315 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
31316 set_fs(oldfs);
31317
31318 if (host_err < 0)
31319 diff -urNp linux-2.6.34.1/fs/nls/nls_base.c linux-2.6.34.1/fs/nls/nls_base.c
31320 --- linux-2.6.34.1/fs/nls/nls_base.c 2010-07-05 14:24:10.000000000 -0400
31321 +++ linux-2.6.34.1/fs/nls/nls_base.c 2010-07-07 09:04:55.000000000 -0400
31322 @@ -41,7 +41,7 @@ static const struct utf8_table utf8_tabl
31323 {0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */},
31324 {0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */},
31325 {0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */},
31326 - {0, /* end of table */}
31327 + {0, 0, 0, 0, 0, /* end of table */}
31328 };
31329
31330 #define UNICODE_MAX 0x0010ffff
31331 diff -urNp linux-2.6.34.1/fs/ntfs/file.c linux-2.6.34.1/fs/ntfs/file.c
31332 --- linux-2.6.34.1/fs/ntfs/file.c 2010-07-05 14:24:10.000000000 -0400
31333 +++ linux-2.6.34.1/fs/ntfs/file.c 2010-07-07 09:04:55.000000000 -0400
31334 @@ -2244,6 +2244,6 @@ const struct inode_operations ntfs_file_
31335 #endif /* NTFS_RW */
31336 };
31337
31338 -const struct file_operations ntfs_empty_file_ops = {};
31339 +const struct file_operations ntfs_empty_file_ops __read_only;
31340
31341 -const struct inode_operations ntfs_empty_inode_ops = {};
31342 +const struct inode_operations ntfs_empty_inode_ops __read_only;
31343 diff -urNp linux-2.6.34.1/fs/ocfs2/localalloc.c linux-2.6.34.1/fs/ocfs2/localalloc.c
31344 --- linux-2.6.34.1/fs/ocfs2/localalloc.c 2010-07-05 14:24:10.000000000 -0400
31345 +++ linux-2.6.34.1/fs/ocfs2/localalloc.c 2010-07-07 09:04:55.000000000 -0400
31346 @@ -1190,7 +1190,7 @@ static int ocfs2_local_alloc_slide_windo
31347 goto bail;
31348 }
31349
31350 - atomic_inc(&osb->alloc_stats.moves);
31351 + atomic_inc_unchecked(&osb->alloc_stats.moves);
31352
31353 status = 0;
31354 bail:
31355 diff -urNp linux-2.6.34.1/fs/ocfs2/ocfs2.h linux-2.6.34.1/fs/ocfs2/ocfs2.h
31356 --- linux-2.6.34.1/fs/ocfs2/ocfs2.h 2010-07-05 14:24:10.000000000 -0400
31357 +++ linux-2.6.34.1/fs/ocfs2/ocfs2.h 2010-07-07 09:04:55.000000000 -0400
31358 @@ -222,11 +222,11 @@ enum ocfs2_vol_state
31359
31360 struct ocfs2_alloc_stats
31361 {
31362 - atomic_t moves;
31363 - atomic_t local_data;
31364 - atomic_t bitmap_data;
31365 - atomic_t bg_allocs;
31366 - atomic_t bg_extends;
31367 + atomic_unchecked_t moves;
31368 + atomic_unchecked_t local_data;
31369 + atomic_unchecked_t bitmap_data;
31370 + atomic_unchecked_t bg_allocs;
31371 + atomic_unchecked_t bg_extends;
31372 };
31373
31374 enum ocfs2_local_alloc_state
31375 diff -urNp linux-2.6.34.1/fs/ocfs2/suballoc.c linux-2.6.34.1/fs/ocfs2/suballoc.c
31376 --- linux-2.6.34.1/fs/ocfs2/suballoc.c 2010-07-05 14:24:10.000000000 -0400
31377 +++ linux-2.6.34.1/fs/ocfs2/suballoc.c 2010-07-07 09:04:55.000000000 -0400
31378 @@ -616,7 +616,7 @@ static int ocfs2_reserve_suballoc_bits(s
31379 mlog_errno(status);
31380 goto bail;
31381 }
31382 - atomic_inc(&osb->alloc_stats.bg_extends);
31383 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
31384
31385 /* You should never ask for this much metadata */
31386 BUG_ON(bits_wanted >
31387 @@ -1738,7 +1738,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
31388 mlog_errno(status);
31389 goto bail;
31390 }
31391 - atomic_inc(&osb->alloc_stats.bg_allocs);
31392 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
31393
31394 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
31395 ac->ac_bits_given += (*num_bits);
31396 @@ -1812,7 +1812,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
31397 mlog_errno(status);
31398 goto bail;
31399 }
31400 - atomic_inc(&osb->alloc_stats.bg_allocs);
31401 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
31402
31403 BUG_ON(num_bits != 1);
31404
31405 @@ -1914,7 +1914,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
31406 cluster_start,
31407 num_clusters);
31408 if (!status)
31409 - atomic_inc(&osb->alloc_stats.local_data);
31410 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
31411 } else {
31412 if (min_clusters > (osb->bitmap_cpg - 1)) {
31413 /* The only paths asking for contiguousness
31414 @@ -1942,7 +1942,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
31415 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
31416 bg_blkno,
31417 bg_bit_off);
31418 - atomic_inc(&osb->alloc_stats.bitmap_data);
31419 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
31420 }
31421 }
31422 if (status < 0) {
31423 diff -urNp linux-2.6.34.1/fs/ocfs2/super.c linux-2.6.34.1/fs/ocfs2/super.c
31424 --- linux-2.6.34.1/fs/ocfs2/super.c 2010-07-05 14:24:10.000000000 -0400
31425 +++ linux-2.6.34.1/fs/ocfs2/super.c 2010-07-07 09:04:55.000000000 -0400
31426 @@ -287,11 +287,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
31427 "%10s => GlobalAllocs: %d LocalAllocs: %d "
31428 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
31429 "Stats",
31430 - atomic_read(&osb->alloc_stats.bitmap_data),
31431 - atomic_read(&osb->alloc_stats.local_data),
31432 - atomic_read(&osb->alloc_stats.bg_allocs),
31433 - atomic_read(&osb->alloc_stats.moves),
31434 - atomic_read(&osb->alloc_stats.bg_extends));
31435 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
31436 + atomic_read_unchecked(&osb->alloc_stats.local_data),
31437 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
31438 + atomic_read_unchecked(&osb->alloc_stats.moves),
31439 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
31440
31441 out += snprintf(buf + out, len - out,
31442 "%10s => State: %u Descriptor: %llu Size: %u bits "
31443 @@ -2003,11 +2003,11 @@ static int ocfs2_initialize_super(struct
31444 spin_lock_init(&osb->osb_xattr_lock);
31445 ocfs2_init_steal_slots(osb);
31446
31447 - atomic_set(&osb->alloc_stats.moves, 0);
31448 - atomic_set(&osb->alloc_stats.local_data, 0);
31449 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
31450 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
31451 - atomic_set(&osb->alloc_stats.bg_extends, 0);
31452 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
31453 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
31454 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
31455 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
31456 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
31457
31458 /* Copy the blockcheck stats from the superblock probe */
31459 osb->osb_ecc_stats = *stats;
31460 diff -urNp linux-2.6.34.1/fs/ocfs2/symlink.c linux-2.6.34.1/fs/ocfs2/symlink.c
31461 --- linux-2.6.34.1/fs/ocfs2/symlink.c 2010-07-05 14:24:10.000000000 -0400
31462 +++ linux-2.6.34.1/fs/ocfs2/symlink.c 2010-07-07 09:04:55.000000000 -0400
31463 @@ -148,7 +148,7 @@ bail:
31464
31465 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
31466 {
31467 - char *link = nd_get_link(nd);
31468 + const char *link = nd_get_link(nd);
31469 if (!IS_ERR(link))
31470 kfree(link);
31471 }
31472 diff -urNp linux-2.6.34.1/fs/open.c linux-2.6.34.1/fs/open.c
31473 --- linux-2.6.34.1/fs/open.c 2010-07-05 14:24:10.000000000 -0400
31474 +++ linux-2.6.34.1/fs/open.c 2010-07-07 09:04:55.000000000 -0400
31475 @@ -208,6 +208,9 @@ int do_truncate(struct dentry *dentry, l
31476 if (length < 0)
31477 return -EINVAL;
31478
31479 + if (filp && !gr_acl_handle_truncate(dentry, filp->f_path.mnt))
31480 + return -EACCES;
31481 +
31482 newattrs.ia_size = length;
31483 newattrs.ia_valid = ATTR_SIZE | time_attrs;
31484 if (filp) {
31485 @@ -511,6 +514,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
31486 if (__mnt_is_readonly(path.mnt))
31487 res = -EROFS;
31488
31489 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
31490 + res = -EACCES;
31491 +
31492 out_path_release:
31493 path_put(&path);
31494 out:
31495 @@ -537,6 +543,8 @@ SYSCALL_DEFINE1(chdir, const char __user
31496 if (error)
31497 goto dput_and_out;
31498
31499 + gr_log_chdir(path.dentry, path.mnt);
31500 +
31501 set_fs_pwd(current->fs, &path);
31502
31503 dput_and_out:
31504 @@ -563,6 +571,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
31505 goto out_putf;
31506
31507 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
31508 +
31509 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
31510 + error = -EPERM;
31511 +
31512 + if (!error)
31513 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
31514 +
31515 if (!error)
31516 set_fs_pwd(current->fs, &file->f_path);
31517 out_putf:
31518 @@ -591,7 +606,18 @@ SYSCALL_DEFINE1(chroot, const char __use
31519 if (error)
31520 goto dput_and_out;
31521
31522 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
31523 + goto dput_and_out;
31524 +
31525 + if (gr_handle_chroot_caps(&path)) {
31526 + error = -ENOMEM;
31527 + goto dput_and_out;
31528 + }
31529 +
31530 set_fs_root(current->fs, &path);
31531 +
31532 + gr_handle_chroot_chdir(&path);
31533 +
31534 error = 0;
31535 dput_and_out:
31536 path_put(&path);
31537 @@ -619,6 +645,12 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
31538 err = mnt_want_write_file(file);
31539 if (err)
31540 goto out_putf;
31541 +
31542 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
31543 + err = -EACCES;
31544 + goto out_drop_write;
31545 + }
31546 +
31547 mutex_lock(&inode->i_mutex);
31548 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
31549 if (err)
31550 @@ -630,6 +662,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
31551 err = notify_change(dentry, &newattrs);
31552 out_unlock:
31553 mutex_unlock(&inode->i_mutex);
31554 +out_drop_write:
31555 mnt_drop_write(file->f_path.mnt);
31556 out_putf:
31557 fput(file);
31558 @@ -652,17 +685,30 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
31559 error = mnt_want_write(path.mnt);
31560 if (error)
31561 goto dput_and_out;
31562 +
31563 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
31564 + error = -EACCES;
31565 + goto out_drop_write;
31566 + }
31567 +
31568 mutex_lock(&inode->i_mutex);
31569 error = security_path_chmod(path.dentry, path.mnt, mode);
31570 if (error)
31571 goto out_unlock;
31572 if (mode == (mode_t) -1)
31573 mode = inode->i_mode;
31574 +
31575 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
31576 + error = -EACCES;
31577 + goto out_unlock;
31578 + }
31579 +
31580 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
31581 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
31582 error = notify_change(path.dentry, &newattrs);
31583 out_unlock:
31584 mutex_unlock(&inode->i_mutex);
31585 +out_drop_write:
31586 mnt_drop_write(path.mnt);
31587 dput_and_out:
31588 path_put(&path);
31589 @@ -681,6 +727,9 @@ static int chown_common(struct path *pat
31590 int error;
31591 struct iattr newattrs;
31592
31593 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
31594 + return -EACCES;
31595 +
31596 newattrs.ia_valid = ATTR_CTIME;
31597 if (user != (uid_t) -1) {
31598 newattrs.ia_valid |= ATTR_UID;
31599 diff -urNp linux-2.6.34.1/fs/pipe.c linux-2.6.34.1/fs/pipe.c
31600 --- linux-2.6.34.1/fs/pipe.c 2010-07-05 14:24:10.000000000 -0400
31601 +++ linux-2.6.34.1/fs/pipe.c 2010-07-07 09:04:56.000000000 -0400
31602 @@ -401,9 +401,9 @@ redo:
31603 }
31604 if (bufs) /* More to do? */
31605 continue;
31606 - if (!pipe->writers)
31607 + if (!atomic_read(&pipe->writers))
31608 break;
31609 - if (!pipe->waiting_writers) {
31610 + if (!atomic_read(&pipe->waiting_writers)) {
31611 /* syscall merging: Usually we must not sleep
31612 * if O_NONBLOCK is set, or if we got some data.
31613 * But if a writer sleeps in kernel space, then
31614 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
31615 mutex_lock(&inode->i_mutex);
31616 pipe = inode->i_pipe;
31617
31618 - if (!pipe->readers) {
31619 + if (!atomic_read(&pipe->readers)) {
31620 send_sig(SIGPIPE, current, 0);
31621 ret = -EPIPE;
31622 goto out;
31623 @@ -511,7 +511,7 @@ redo1:
31624 for (;;) {
31625 int bufs;
31626
31627 - if (!pipe->readers) {
31628 + if (!atomic_read(&pipe->readers)) {
31629 send_sig(SIGPIPE, current, 0);
31630 if (!ret)
31631 ret = -EPIPE;
31632 @@ -597,9 +597,9 @@ redo2:
31633 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
31634 do_wakeup = 0;
31635 }
31636 - pipe->waiting_writers++;
31637 + atomic_inc(&pipe->waiting_writers);
31638 pipe_wait(pipe);
31639 - pipe->waiting_writers--;
31640 + atomic_dec(&pipe->waiting_writers);
31641 }
31642 out:
31643 mutex_unlock(&inode->i_mutex);
31644 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
31645 mask = 0;
31646 if (filp->f_mode & FMODE_READ) {
31647 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
31648 - if (!pipe->writers && filp->f_version != pipe->w_counter)
31649 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
31650 mask |= POLLHUP;
31651 }
31652
31653 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
31654 * Most Unices do not set POLLERR for FIFOs but on Linux they
31655 * behave exactly like pipes for poll().
31656 */
31657 - if (!pipe->readers)
31658 + if (!atomic_read(&pipe->readers))
31659 mask |= POLLERR;
31660 }
31661
31662 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
31663
31664 mutex_lock(&inode->i_mutex);
31665 pipe = inode->i_pipe;
31666 - pipe->readers -= decr;
31667 - pipe->writers -= decw;
31668 + atomic_sub(decr, &pipe->readers);
31669 + atomic_sub(decw, &pipe->writers);
31670
31671 - if (!pipe->readers && !pipe->writers) {
31672 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
31673 free_pipe_info(inode);
31674 } else {
31675 wake_up_interruptible_sync(&pipe->wait);
31676 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
31677
31678 if (inode->i_pipe) {
31679 ret = 0;
31680 - inode->i_pipe->readers++;
31681 + atomic_inc(&inode->i_pipe->readers);
31682 }
31683
31684 mutex_unlock(&inode->i_mutex);
31685 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
31686
31687 if (inode->i_pipe) {
31688 ret = 0;
31689 - inode->i_pipe->writers++;
31690 + atomic_inc(&inode->i_pipe->writers);
31691 }
31692
31693 mutex_unlock(&inode->i_mutex);
31694 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
31695 if (inode->i_pipe) {
31696 ret = 0;
31697 if (filp->f_mode & FMODE_READ)
31698 - inode->i_pipe->readers++;
31699 + atomic_inc(&inode->i_pipe->readers);
31700 if (filp->f_mode & FMODE_WRITE)
31701 - inode->i_pipe->writers++;
31702 + atomic_inc(&inode->i_pipe->writers);
31703 }
31704
31705 mutex_unlock(&inode->i_mutex);
31706 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
31707 inode->i_pipe = NULL;
31708 }
31709
31710 -static struct vfsmount *pipe_mnt __read_mostly;
31711 +struct vfsmount *pipe_mnt __read_mostly;
31712
31713 /*
31714 * pipefs_dname() is called from d_path().
31715 @@ -933,7 +933,8 @@ static struct inode * get_pipe_inode(voi
31716 goto fail_iput;
31717 inode->i_pipe = pipe;
31718
31719 - pipe->readers = pipe->writers = 1;
31720 + atomic_set(&pipe->readers, 1);
31721 + atomic_set(&pipe->writers, 1);
31722 inode->i_fop = &rdwr_pipefifo_fops;
31723
31724 /*
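
The fs/pipe.c hunks above (and the fs/splice.c hunks later in this patch) apply one pattern: the plain int fields readers, writers and waiting_writers of struct pipe_inode_info become atomic_t, and every bare read, increment and decrement is routed through atomic_read(), atomic_inc(), atomic_dec(), atomic_sub() and atomic_set(), so the counters are updated through the atomic API and, with PAX_REFCOUNT enabled, get its overflow checking; pure statistics such as the ocfs2 alloc_stats earlier are instead switched to the *_unchecked variants, which opt out of that checking. Below is a minimal userspace C11 sketch of the conversion, not part of the patch; struct toy_pipe and its helpers are invented for illustration, and stdatomic.h merely stands in for the kernel's atomic_t API.

/* Userspace sketch of the counter conversion applied in the pipe hunks above.
 * C11 atomic_int/atomic_load/atomic_fetch_* stand in for the kernel's
 * atomic_t/atomic_read/atomic_inc/atomic_dec/atomic_sub. */
#include <stdatomic.h>
#include <stdio.h>

struct toy_pipe {
	atomic_int readers;		/* was: int readers; */
	atomic_int writers;		/* was: int writers; */
	atomic_int waiting_writers;	/* was: int waiting_writers; */
};

static void toy_pipe_init(struct toy_pipe *p)
{
	/* was: p->readers = p->writers = 1; */
	atomic_store(&p->readers, 1);
	atomic_store(&p->writers, 1);
	atomic_store(&p->waiting_writers, 0);
}

static void toy_pipe_release(struct toy_pipe *p, int decr, int decw)
{
	/* was: p->readers -= decr; p->writers -= decw; */
	atomic_fetch_sub(&p->readers, decr);
	atomic_fetch_sub(&p->writers, decw);

	/* was: if (!p->readers && !p->writers) free_pipe_info(inode); */
	if (!atomic_load(&p->readers) && !atomic_load(&p->writers))
		printf("last user gone, pipe info would be freed here\n");
}

int main(void)
{
	struct toy_pipe p;

	toy_pipe_init(&p);
	toy_pipe_release(&p, 1, 1);	/* both counters drop to zero */
	return 0;
}
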
31725 diff -urNp linux-2.6.34.1/fs/proc/Kconfig linux-2.6.34.1/fs/proc/Kconfig
31726 --- linux-2.6.34.1/fs/proc/Kconfig 2010-07-05 14:24:10.000000000 -0400
31727 +++ linux-2.6.34.1/fs/proc/Kconfig 2010-07-07 09:04:56.000000000 -0400
31728 @@ -30,12 +30,12 @@ config PROC_FS
31729
31730 config PROC_KCORE
31731 bool "/proc/kcore support" if !ARM
31732 - depends on PROC_FS && MMU
31733 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
31734
31735 config PROC_VMCORE
31736 bool "/proc/vmcore support (EXPERIMENTAL)"
31737 - depends on PROC_FS && CRASH_DUMP
31738 - default y
31739 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
31740 + default n
31741 help
31742 Exports the dump image of crashed kernel in ELF format.
31743
31744 @@ -59,8 +59,8 @@ config PROC_SYSCTL
31745 limited in memory.
31746
31747 config PROC_PAGE_MONITOR
31748 - default y
31749 - depends on PROC_FS && MMU
31750 + default n
31751 + depends on PROC_FS && MMU && !GRKERNSEC
31752 bool "Enable /proc page monitoring" if EMBEDDED
31753 help
31754 Various /proc files exist to monitor process memory utilization:
31755 diff -urNp linux-2.6.34.1/fs/proc/array.c linux-2.6.34.1/fs/proc/array.c
31756 --- linux-2.6.34.1/fs/proc/array.c 2010-07-05 14:24:10.000000000 -0400
31757 +++ linux-2.6.34.1/fs/proc/array.c 2010-07-07 09:04:56.000000000 -0400
31758 @@ -337,6 +337,21 @@ static void task_cpus_allowed(struct seq
31759 seq_printf(m, "\n");
31760 }
31761
31762 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
31763 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
31764 +{
31765 + if (p->mm)
31766 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
31767 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
31768 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
31769 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
31770 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
31771 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
31772 + else
31773 + seq_printf(m, "PaX:\t-----\n");
31774 +}
31775 +#endif
31776 +
31777 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
31778 struct pid *pid, struct task_struct *task)
31779 {
31780 @@ -357,9 +372,20 @@ int proc_pid_status(struct seq_file *m,
31781 task_show_regs(m, task);
31782 #endif
31783 task_context_switch_counts(m, task);
31784 +
31785 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
31786 + task_pax(m, task);
31787 +#endif
31788 +
31789 return 0;
31790 }
31791
31792 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
31793 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
31794 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
31795 + _mm->pax_flags & MF_PAX_SEGMEXEC))
31796 +#endif
31797 +
31798 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
31799 struct pid *pid, struct task_struct *task, int whole)
31800 {
31801 @@ -452,6 +478,19 @@ static int do_task_stat(struct seq_file
31802 gtime = task->gtime;
31803 }
31804
31805 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
31806 + if (PAX_RAND_FLAGS(mm)) {
31807 + eip = 0;
31808 + esp = 0;
31809 + wchan = 0;
31810 + }
31811 +#endif
31812 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31813 + wchan = 0;
31814 + eip = 0;
31815 + esp = 0;
31816 +#endif
31817 +
31818 /* scale priority and nice values from timeslices to -20..20 */
31819 /* to make it look like a "normal" Unix priority/nice value */
31820 priority = task_prio(task);
31821 @@ -492,9 +531,15 @@ static int do_task_stat(struct seq_file
31822 vsize,
31823 mm ? get_mm_rss(mm) : 0,
31824 rsslim,
31825 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
31826 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0),
31827 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0),
31828 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
31829 +#else
31830 mm ? mm->start_code : 0,
31831 mm ? mm->end_code : 0,
31832 (permitted && mm) ? mm->start_stack : 0,
31833 +#endif
31834 esp,
31835 eip,
31836 /* The signal information here is obsolete.
31837 @@ -547,3 +592,10 @@ int proc_pid_statm(struct seq_file *m, s
31838
31839 return 0;
31840 }
31841 +
31842 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
31843 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
31844 +{
31845 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
31846 +}
31847 +#endif
31848 diff -urNp linux-2.6.34.1/fs/proc/base.c linux-2.6.34.1/fs/proc/base.c
31849 --- linux-2.6.34.1/fs/proc/base.c 2010-07-05 14:24:10.000000000 -0400
31850 +++ linux-2.6.34.1/fs/proc/base.c 2010-07-07 09:04:56.000000000 -0400
31851 @@ -103,6 +103,22 @@ struct pid_entry {
31852 union proc_op op;
31853 };
31854
31855 +struct getdents_callback {
31856 + struct linux_dirent __user * current_dir;
31857 + struct linux_dirent __user * previous;
31858 + struct file * file;
31859 + int count;
31860 + int error;
31861 +};
31862 +
31863 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
31864 + loff_t offset, u64 ino, unsigned int d_type)
31865 +{
31866 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
31867 + buf->error = -EINVAL;
31868 + return 0;
31869 +}
31870 +
31871 #define NOD(NAME, MODE, IOP, FOP, OP) { \
31872 .name = (NAME), \
31873 .len = sizeof(NAME) - 1, \
31874 @@ -214,6 +230,9 @@ static int check_mem_permission(struct t
31875 if (task == current)
31876 return 0;
31877
31878 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
31879 + return -EPERM;
31880 +
31881 /*
31882 * If current is actively ptrace'ing, and would also be
31883 * permitted to freshly attach with ptrace now, permit it.
31884 @@ -261,6 +280,9 @@ static int proc_pid_cmdline(struct task_
31885 if (!mm->arg_end)
31886 goto out_mm; /* Shh! No looking before we're done */
31887
31888 + if (gr_acl_handle_procpidmem(task))
31889 + goto out_mm;
31890 +
31891 len = mm->arg_end - mm->arg_start;
31892
31893 if (len > PAGE_SIZE)
31894 @@ -288,12 +310,26 @@ out:
31895 return res;
31896 }
31897
31898 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
31899 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
31900 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
31901 + _mm->pax_flags & MF_PAX_SEGMEXEC))
31902 +#endif
31903 +
31904 static int proc_pid_auxv(struct task_struct *task, char *buffer)
31905 {
31906 int res = 0;
31907 struct mm_struct *mm = get_task_mm(task);
31908 if (mm) {
31909 unsigned int nwords = 0;
31910 +
31911 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
31912 + if (PAX_RAND_FLAGS(mm)) {
31913 + mmput(mm);
31914 + return res;
31915 + }
31916 +#endif
31917 +
31918 do {
31919 nwords += 2;
31920 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
31921 @@ -329,7 +365,7 @@ static int proc_pid_wchan(struct task_st
31922 }
31923 #endif /* CONFIG_KALLSYMS */
31924
31925 -#ifdef CONFIG_STACKTRACE
31926 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
31927
31928 #define MAX_STACK_TRACE_DEPTH 64
31929
31930 @@ -523,7 +559,7 @@ static int proc_pid_limits(struct task_s
31931 return count;
31932 }
31933
31934 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
31935 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31936 static int proc_pid_syscall(struct task_struct *task, char *buffer)
31937 {
31938 long nr;
31939 @@ -931,6 +967,9 @@ static ssize_t environ_read(struct file
31940 if (!task)
31941 goto out_no_task;
31942
31943 + if (gr_acl_handle_procpidmem(task))
31944 + goto out;
31945 +
31946 if (!ptrace_may_access(task, PTRACE_MODE_READ))
31947 goto out;
31948
31949 @@ -1520,7 +1559,11 @@ static struct inode *proc_pid_make_inode
31950 rcu_read_lock();
31951 cred = __task_cred(task);
31952 inode->i_uid = cred->euid;
31953 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
31954 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
31955 +#else
31956 inode->i_gid = cred->egid;
31957 +#endif
31958 rcu_read_unlock();
31959 }
31960 security_task_to_inode(task, inode);
31961 @@ -1538,6 +1581,9 @@ static int pid_getattr(struct vfsmount *
31962 struct inode *inode = dentry->d_inode;
31963 struct task_struct *task;
31964 const struct cred *cred;
31965 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31966 + const struct cred *tmpcred = current_cred();
31967 +#endif
31968
31969 generic_fillattr(inode, stat);
31970
31971 @@ -1545,12 +1591,34 @@ static int pid_getattr(struct vfsmount *
31972 stat->uid = 0;
31973 stat->gid = 0;
31974 task = pid_task(proc_pid(inode), PIDTYPE_PID);
31975 +
31976 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
31977 + rcu_read_unlock();
31978 + return -ENOENT;
31979 + }
31980 +
31981 if (task) {
31982 + cred = __task_cred(task);
31983 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31984 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
31985 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
31986 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
31987 +#endif
31988 + )
31989 +#endif
31990 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
31991 +#ifdef CONFIG_GRKERNSEC_PROC_USER
31992 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
31993 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31994 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
31995 +#endif
31996 task_dumpable(task)) {
31997 - cred = __task_cred(task);
31998 stat->uid = cred->euid;
31999 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32000 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
32001 +#else
32002 stat->gid = cred->egid;
32003 +#endif
32004 }
32005 }
32006 rcu_read_unlock();
32007 @@ -1582,11 +1650,20 @@ static int pid_revalidate(struct dentry
32008
32009 if (task) {
32010 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
32011 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32012 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
32013 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32014 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
32015 +#endif
32016 task_dumpable(task)) {
32017 rcu_read_lock();
32018 cred = __task_cred(task);
32019 inode->i_uid = cred->euid;
32020 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32021 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
32022 +#else
32023 inode->i_gid = cred->egid;
32024 +#endif
32025 rcu_read_unlock();
32026 } else {
32027 inode->i_uid = 0;
32028 @@ -1707,7 +1784,8 @@ static int proc_fd_info(struct inode *in
32029 int fd = proc_fd(inode);
32030
32031 if (task) {
32032 - files = get_files_struct(task);
32033 + if (!gr_acl_handle_procpidmem(task))
32034 + files = get_files_struct(task);
32035 put_task_struct(task);
32036 }
32037 if (files) {
32038 @@ -1959,12 +2037,22 @@ static const struct file_operations proc
32039 static int proc_fd_permission(struct inode *inode, int mask)
32040 {
32041 int rv;
32042 + struct task_struct *task;
32043
32044 rv = generic_permission(inode, mask, NULL);
32045 - if (rv == 0)
32046 - return 0;
32047 +
32048 if (task_pid(current) == proc_pid(inode))
32049 rv = 0;
32050 +
32051 + task = get_proc_task(inode);
32052 + if (task == NULL)
32053 + return rv;
32054 +
32055 + if (gr_acl_handle_procpidmem(task))
32056 + rv = -EACCES;
32057 +
32058 + put_task_struct(task);
32059 +
32060 return rv;
32061 }
32062
32063 @@ -2073,6 +2161,9 @@ static struct dentry *proc_pident_lookup
32064 if (!task)
32065 goto out_no_task;
32066
32067 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
32068 + goto out;
32069 +
32070 /*
32071 * Yes, it does not scale. And it should not. Don't add
32072 * new entries into /proc/<tgid>/ without very good reasons.
32073 @@ -2117,6 +2208,9 @@ static int proc_pident_readdir(struct fi
32074 if (!task)
32075 goto out_no_task;
32076
32077 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
32078 + goto out;
32079 +
32080 ret = 0;
32081 i = filp->f_pos;
32082 switch (i) {
32083 @@ -2384,7 +2478,7 @@ static void *proc_self_follow_link(struc
32084 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
32085 void *cookie)
32086 {
32087 - char *s = nd_get_link(nd);
32088 + const char *s = nd_get_link(nd);
32089 if (!IS_ERR(s))
32090 __putname(s);
32091 }
32092 @@ -2497,6 +2591,9 @@ static struct dentry *proc_base_lookup(s
32093 if (p > last)
32094 goto out;
32095
32096 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
32097 + goto out;
32098 +
32099 error = proc_base_instantiate(dir, dentry, task, p);
32100
32101 out:
32102 @@ -2584,7 +2681,7 @@ static const struct pid_entry tgid_base_
32103 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
32104 #endif
32105 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
32106 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
32107 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32108 INF("syscall", S_IRUSR, proc_pid_syscall),
32109 #endif
32110 INF("cmdline", S_IRUGO, proc_pid_cmdline),
32111 @@ -2612,7 +2709,7 @@ static const struct pid_entry tgid_base_
32112 #ifdef CONFIG_KALLSYMS
32113 INF("wchan", S_IRUGO, proc_pid_wchan),
32114 #endif
32115 -#ifdef CONFIG_STACKTRACE
32116 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
32117 ONE("stack", S_IRUSR, proc_pid_stack),
32118 #endif
32119 #ifdef CONFIG_SCHEDSTATS
32120 @@ -2642,6 +2739,9 @@ static const struct pid_entry tgid_base_
32121 #ifdef CONFIG_TASK_IO_ACCOUNTING
32122 INF("io", S_IRUGO, proc_tgid_io_accounting),
32123 #endif
32124 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
32125 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
32126 +#endif
32127 };
32128
32129 static int proc_tgid_base_readdir(struct file * filp,
32130 @@ -2766,7 +2866,14 @@ static struct dentry *proc_pid_instantia
32131 if (!inode)
32132 goto out;
32133
32134 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32135 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
32136 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32137 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
32138 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
32139 +#else
32140 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
32141 +#endif
32142 inode->i_op = &proc_tgid_base_inode_operations;
32143 inode->i_fop = &proc_tgid_base_operations;
32144 inode->i_flags|=S_IMMUTABLE;
32145 @@ -2808,7 +2915,11 @@ struct dentry *proc_pid_lookup(struct in
32146 if (!task)
32147 goto out;
32148
32149 + if (gr_check_hidden_task(task))
32150 + goto out_put_task;
32151 +
32152 result = proc_pid_instantiate(dir, dentry, task, NULL);
32153 +out_put_task:
32154 put_task_struct(task);
32155 out:
32156 return result;
32157 @@ -2873,6 +2984,11 @@ int proc_pid_readdir(struct file * filp,
32158 {
32159 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
32160 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
32161 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32162 + const struct cred *tmpcred = current_cred();
32163 + const struct cred *itercred;
32164 +#endif
32165 + filldir_t __filldir = filldir;
32166 struct tgid_iter iter;
32167 struct pid_namespace *ns;
32168
32169 @@ -2891,8 +3007,27 @@ int proc_pid_readdir(struct file * filp,
32170 for (iter = next_tgid(ns, iter);
32171 iter.task;
32172 iter.tgid += 1, iter = next_tgid(ns, iter)) {
32173 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32174 + rcu_read_lock();
32175 + itercred = __task_cred(iter.task);
32176 +#endif
32177 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
32178 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32179 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
32180 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32181 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
32182 +#endif
32183 + )
32184 +#endif
32185 + )
32186 + __filldir = &gr_fake_filldir;
32187 + else
32188 + __filldir = filldir;
32189 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32190 + rcu_read_unlock();
32191 +#endif
32192 filp->f_pos = iter.tgid + TGID_OFFSET;
32193 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
32194 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
32195 put_task_struct(iter.task);
32196 goto out;
32197 }
32198 @@ -2919,7 +3054,7 @@ static const struct pid_entry tid_base_s
32199 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
32200 #endif
32201 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
32202 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
32203 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32204 INF("syscall", S_IRUSR, proc_pid_syscall),
32205 #endif
32206 INF("cmdline", S_IRUGO, proc_pid_cmdline),
32207 @@ -2946,7 +3081,7 @@ static const struct pid_entry tid_base_s
32208 #ifdef CONFIG_KALLSYMS
32209 INF("wchan", S_IRUGO, proc_pid_wchan),
32210 #endif
32211 -#ifdef CONFIG_STACKTRACE
32212 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
32213 ONE("stack", S_IRUSR, proc_pid_stack),
32214 #endif
32215 #ifdef CONFIG_SCHEDSTATS
32216 diff -urNp linux-2.6.34.1/fs/proc/cmdline.c linux-2.6.34.1/fs/proc/cmdline.c
32217 --- linux-2.6.34.1/fs/proc/cmdline.c 2010-07-05 14:24:10.000000000 -0400
32218 +++ linux-2.6.34.1/fs/proc/cmdline.c 2010-07-07 09:04:56.000000000 -0400
32219 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
32220
32221 static int __init proc_cmdline_init(void)
32222 {
32223 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32224 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
32225 +#else
32226 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
32227 +#endif
32228 return 0;
32229 }
32230 module_init(proc_cmdline_init);
32231 diff -urNp linux-2.6.34.1/fs/proc/devices.c linux-2.6.34.1/fs/proc/devices.c
32232 --- linux-2.6.34.1/fs/proc/devices.c 2010-07-05 14:24:10.000000000 -0400
32233 +++ linux-2.6.34.1/fs/proc/devices.c 2010-07-07 09:04:56.000000000 -0400
32234 @@ -64,7 +64,11 @@ static const struct file_operations proc
32235
32236 static int __init proc_devices_init(void)
32237 {
32238 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32239 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
32240 +#else
32241 proc_create("devices", 0, NULL, &proc_devinfo_operations);
32242 +#endif
32243 return 0;
32244 }
32245 module_init(proc_devices_init);
32246 diff -urNp linux-2.6.34.1/fs/proc/inode.c linux-2.6.34.1/fs/proc/inode.c
32247 --- linux-2.6.34.1/fs/proc/inode.c 2010-07-05 14:24:10.000000000 -0400
32248 +++ linux-2.6.34.1/fs/proc/inode.c 2010-07-07 09:04:56.000000000 -0400
32249 @@ -435,7 +435,11 @@ struct inode *proc_get_inode(struct supe
32250 if (de->mode) {
32251 inode->i_mode = de->mode;
32252 inode->i_uid = de->uid;
32253 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32254 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
32255 +#else
32256 inode->i_gid = de->gid;
32257 +#endif
32258 }
32259 if (de->size)
32260 inode->i_size = de->size;
32261 diff -urNp linux-2.6.34.1/fs/proc/internal.h linux-2.6.34.1/fs/proc/internal.h
32262 --- linux-2.6.34.1/fs/proc/internal.h 2010-07-05 14:24:10.000000000 -0400
32263 +++ linux-2.6.34.1/fs/proc/internal.h 2010-07-07 09:04:56.000000000 -0400
32264 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
32265 struct pid *pid, struct task_struct *task);
32266 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
32267 struct pid *pid, struct task_struct *task);
32268 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
32269 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
32270 +#endif
32271 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
32272
32273 extern const struct file_operations proc_maps_operations;
32274 diff -urNp linux-2.6.34.1/fs/proc/kcore.c linux-2.6.34.1/fs/proc/kcore.c
32275 --- linux-2.6.34.1/fs/proc/kcore.c 2010-07-05 14:24:10.000000000 -0400
32276 +++ linux-2.6.34.1/fs/proc/kcore.c 2010-07-07 09:04:56.000000000 -0400
32277 @@ -542,6 +542,9 @@ read_kcore(struct file *file, char __use
32278
32279 static int open_kcore(struct inode *inode, struct file *filp)
32280 {
32281 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
32282 + return -EPERM;
32283 +#endif
32284 if (!capable(CAP_SYS_RAWIO))
32285 return -EPERM;
32286 if (kcore_need_update)
32287 diff -urNp linux-2.6.34.1/fs/proc/meminfo.c linux-2.6.34.1/fs/proc/meminfo.c
32288 --- linux-2.6.34.1/fs/proc/meminfo.c 2010-07-05 14:24:10.000000000 -0400
32289 +++ linux-2.6.34.1/fs/proc/meminfo.c 2010-07-07 09:04:56.000000000 -0400
32290 @@ -149,7 +149,7 @@ static int meminfo_proc_show(struct seq_
32291 vmi.used >> 10,
32292 vmi.largest_chunk >> 10
32293 #ifdef CONFIG_MEMORY_FAILURE
32294 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
32295 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
32296 #endif
32297 );
32298
32299 diff -urNp linux-2.6.34.1/fs/proc/nommu.c linux-2.6.34.1/fs/proc/nommu.c
32300 --- linux-2.6.34.1/fs/proc/nommu.c 2010-07-05 14:24:10.000000000 -0400
32301 +++ linux-2.6.34.1/fs/proc/nommu.c 2010-07-07 09:04:56.000000000 -0400
32302 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
32303 if (len < 1)
32304 len = 1;
32305 seq_printf(m, "%*c", len, ' ');
32306 - seq_path(m, &file->f_path, "");
32307 + seq_path(m, &file->f_path, "\n\\");
32308 }
32309
32310 seq_putc(m, '\n');
32311 diff -urNp linux-2.6.34.1/fs/proc/proc_net.c linux-2.6.34.1/fs/proc/proc_net.c
32312 --- linux-2.6.34.1/fs/proc/proc_net.c 2010-07-05 14:24:10.000000000 -0400
32313 +++ linux-2.6.34.1/fs/proc/proc_net.c 2010-07-07 09:04:56.000000000 -0400
32314 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
32315 struct task_struct *task;
32316 struct nsproxy *ns;
32317 struct net *net = NULL;
32318 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32319 + const struct cred *cred = current_cred();
32320 +#endif
32321 +
32322 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32323 + if (cred->fsuid)
32324 + return net;
32325 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32326 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
32327 + return net;
32328 +#endif
32329
32330 rcu_read_lock();
32331 task = pid_task(proc_pid(dir), PIDTYPE_PID);
32332 diff -urNp linux-2.6.34.1/fs/proc/proc_sysctl.c linux-2.6.34.1/fs/proc/proc_sysctl.c
32333 --- linux-2.6.34.1/fs/proc/proc_sysctl.c 2010-07-05 14:24:10.000000000 -0400
32334 +++ linux-2.6.34.1/fs/proc/proc_sysctl.c 2010-07-07 09:04:56.000000000 -0400
32335 @@ -7,6 +7,8 @@
32336 #include <linux/security.h>
32337 #include "internal.h"
32338
32339 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
32340 +
32341 static const struct dentry_operations proc_sys_dentry_operations;
32342 static const struct file_operations proc_sys_file_operations;
32343 static const struct inode_operations proc_sys_inode_operations;
32344 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
32345 if (!p)
32346 goto out;
32347
32348 + if (gr_handle_sysctl(p, MAY_EXEC))
32349 + goto out;
32350 +
32351 err = ERR_PTR(-ENOMEM);
32352 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
32353 if (h)
32354 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
32355 if (*pos < file->f_pos)
32356 continue;
32357
32358 + if (gr_handle_sysctl(table, 0))
32359 + continue;
32360 +
32361 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
32362 if (res)
32363 return res;
32364 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
32365 if (IS_ERR(head))
32366 return PTR_ERR(head);
32367
32368 + if (table && gr_handle_sysctl(table, MAY_EXEC))
32369 + return -ENOENT;
32370 +
32371 generic_fillattr(inode, stat);
32372 if (table)
32373 stat->mode = (stat->mode & S_IFMT) | table->mode;
32374 diff -urNp linux-2.6.34.1/fs/proc/root.c linux-2.6.34.1/fs/proc/root.c
32375 --- linux-2.6.34.1/fs/proc/root.c 2010-07-05 14:24:10.000000000 -0400
32376 +++ linux-2.6.34.1/fs/proc/root.c 2010-07-07 09:04:56.000000000 -0400
32377 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
32378 #ifdef CONFIG_PROC_DEVICETREE
32379 proc_device_tree_init();
32380 #endif
32381 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32382 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32383 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
32384 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32385 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32386 +#endif
32387 +#else
32388 proc_mkdir("bus", NULL);
32389 +#endif
32390 proc_sys_init();
32391 }
32392
32393 diff -urNp linux-2.6.34.1/fs/proc/task_mmu.c linux-2.6.34.1/fs/proc/task_mmu.c
32394 --- linux-2.6.34.1/fs/proc/task_mmu.c 2010-07-05 14:24:10.000000000 -0400
32395 +++ linux-2.6.34.1/fs/proc/task_mmu.c 2010-07-07 09:04:56.000000000 -0400
32396 @@ -49,8 +49,13 @@ void task_mem(struct seq_file *m, struct
32397 "VmExe:\t%8lu kB\n"
32398 "VmLib:\t%8lu kB\n"
32399 "VmPTE:\t%8lu kB\n"
32400 - "VmSwap:\t%8lu kB\n",
32401 - hiwater_vm << (PAGE_SHIFT-10),
32402 + "VmSwap:\t%8lu kB\n"
32403 +
32404 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
32405 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
32406 +#endif
32407 +
32408 + ,hiwater_vm << (PAGE_SHIFT-10),
32409 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
32410 mm->locked_vm << (PAGE_SHIFT-10),
32411 hiwater_rss << (PAGE_SHIFT-10),
32412 @@ -58,7 +63,13 @@ void task_mem(struct seq_file *m, struct
32413 data << (PAGE_SHIFT-10),
32414 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
32415 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
32416 - swap << (PAGE_SHIFT-10));
32417 + swap << (PAGE_SHIFT-10)
32418 +
32419 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
32420 + , mm->context.user_cs_base, mm->context.user_cs_limit
32421 +#endif
32422 +
32423 + );
32424 }
32425
32426 unsigned long task_vsize(struct mm_struct *mm)
32427 @@ -203,6 +214,12 @@ static int do_maps_open(struct inode *in
32428 return ret;
32429 }
32430
32431 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32432 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
32433 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
32434 + _mm->pax_flags & MF_PAX_SEGMEXEC))
32435 +#endif
32436 +
32437 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
32438 {
32439 struct mm_struct *mm = vma->vm_mm;
32440 @@ -221,13 +238,22 @@ static void show_map_vma(struct seq_file
32441 }
32442
32443 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
32444 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32445 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
32446 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
32447 +#else
32448 vma->vm_start,
32449 vma->vm_end,
32450 +#endif
32451 flags & VM_READ ? 'r' : '-',
32452 flags & VM_WRITE ? 'w' : '-',
32453 flags & VM_EXEC ? 'x' : '-',
32454 flags & VM_MAYSHARE ? 's' : 'p',
32455 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32456 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
32457 +#else
32458 pgoff,
32459 +#endif
32460 MAJOR(dev), MINOR(dev), ino, &len);
32461
32462 /*
32463 @@ -236,16 +262,16 @@ static void show_map_vma(struct seq_file
32464 */
32465 if (file) {
32466 pad_len_spaces(m, len);
32467 - seq_path(m, &file->f_path, "\n");
32468 + seq_path(m, &file->f_path, "\n\\");
32469 } else {
32470 const char *name = arch_vma_name(vma);
32471 if (!name) {
32472 if (mm) {
32473 - if (vma->vm_start <= mm->start_brk &&
32474 - vma->vm_end >= mm->brk) {
32475 + if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
32476 name = "[heap]";
32477 - } else if (vma->vm_start <= mm->start_stack &&
32478 - vma->vm_end >= mm->start_stack) {
32479 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
32480 + (vma->vm_start <= mm->start_stack &&
32481 + vma->vm_end >= mm->start_stack)) {
32482 name = "[stack]";
32483 }
32484 } else {
32485 @@ -387,11 +413,16 @@ static int show_smap(struct seq_file *m,
32486 };
32487
32488 memset(&mss, 0, sizeof mss);
32489 - mss.vma = vma;
32490 - /* mmap_sem is held in m_start */
32491 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
32492 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
32493 -
32494 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32495 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
32496 +#endif
32497 + mss.vma = vma;
32498 + /* mmap_sem is held in m_start */
32499 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
32500 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
32501 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32502 + }
32503 +#endif
32504 show_map_vma(m, vma);
32505
32506 seq_printf(m,
32507 @@ -406,7 +437,11 @@ static int show_smap(struct seq_file *m,
32508 "Swap: %8lu kB\n"
32509 "KernelPageSize: %8lu kB\n"
32510 "MMUPageSize: %8lu kB\n",
32511 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32512 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
32513 +#else
32514 (vma->vm_end - vma->vm_start) >> 10,
32515 +#endif
32516 mss.resident >> 10,
32517 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
32518 mss.shared_clean >> 10,
32519 diff -urNp linux-2.6.34.1/fs/proc/task_nommu.c linux-2.6.34.1/fs/proc/task_nommu.c
32520 --- linux-2.6.34.1/fs/proc/task_nommu.c 2010-07-05 14:24:10.000000000 -0400
32521 +++ linux-2.6.34.1/fs/proc/task_nommu.c 2010-07-07 09:04:56.000000000 -0400
32522 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
32523 else
32524 bytes += kobjsize(mm);
32525
32526 - if (current->fs && current->fs->users > 1)
32527 + if (current->fs && atomic_read(&current->fs->users) > 1)
32528 sbytes += kobjsize(current->fs);
32529 else
32530 bytes += kobjsize(current->fs);
32531 @@ -159,7 +159,7 @@ static int nommu_vma_show(struct seq_fil
32532 if (len < 1)
32533 len = 1;
32534 seq_printf(m, "%*c", len, ' ');
32535 - seq_path(m, &file->f_path, "");
32536 + seq_path(m, &file->f_path, "\n\\");
32537 }
32538
32539 seq_putc(m, '\n');
32540 diff -urNp linux-2.6.34.1/fs/readdir.c linux-2.6.34.1/fs/readdir.c
32541 --- linux-2.6.34.1/fs/readdir.c 2010-07-05 14:24:10.000000000 -0400
32542 +++ linux-2.6.34.1/fs/readdir.c 2010-07-07 09:04:56.000000000 -0400
32543 @@ -16,6 +16,7 @@
32544 #include <linux/security.h>
32545 #include <linux/syscalls.h>
32546 #include <linux/unistd.h>
32547 +#include <linux/namei.h>
32548
32549 #include <asm/uaccess.h>
32550
32551 @@ -67,6 +68,7 @@ struct old_linux_dirent {
32552
32553 struct readdir_callback {
32554 struct old_linux_dirent __user * dirent;
32555 + struct file * file;
32556 int result;
32557 };
32558
32559 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
32560 buf->result = -EOVERFLOW;
32561 return -EOVERFLOW;
32562 }
32563 +
32564 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
32565 + return 0;
32566 +
32567 buf->result++;
32568 dirent = buf->dirent;
32569 if (!access_ok(VERIFY_WRITE, dirent,
32570 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
32571
32572 buf.result = 0;
32573 buf.dirent = dirent;
32574 + buf.file = file;
32575
32576 error = vfs_readdir(file, fillonedir, &buf);
32577 if (buf.result)
32578 @@ -142,6 +149,7 @@ struct linux_dirent {
32579 struct getdents_callback {
32580 struct linux_dirent __user * current_dir;
32581 struct linux_dirent __user * previous;
32582 + struct file * file;
32583 int count;
32584 int error;
32585 };
32586 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
32587 buf->error = -EOVERFLOW;
32588 return -EOVERFLOW;
32589 }
32590 +
32591 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
32592 + return 0;
32593 +
32594 dirent = buf->previous;
32595 if (dirent) {
32596 if (__put_user(offset, &dirent->d_off))
32597 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
32598 buf.previous = NULL;
32599 buf.count = count;
32600 buf.error = 0;
32601 + buf.file = file;
32602
32603 error = vfs_readdir(file, filldir, &buf);
32604 if (error >= 0)
32605 @@ -228,6 +241,7 @@ out:
32606 struct getdents_callback64 {
32607 struct linux_dirent64 __user * current_dir;
32608 struct linux_dirent64 __user * previous;
32609 + struct file *file;
32610 int count;
32611 int error;
32612 };
32613 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
32614 buf->error = -EINVAL; /* only used if we fail.. */
32615 if (reclen > buf->count)
32616 return -EINVAL;
32617 +
32618 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
32619 + return 0;
32620 +
32621 dirent = buf->previous;
32622 if (dirent) {
32623 if (__put_user(offset, &dirent->d_off))
32624 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
32625
32626 buf.current_dir = dirent;
32627 buf.previous = NULL;
32628 + buf.file = file;
32629 buf.count = count;
32630 buf.error = 0;
32631
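
The fs/readdir.c hunks above give each getdents callback structure a struct file * member, have the syscalls fill it in, and make the filldir callbacks return 0 early when gr_acl_handle_filldir() denies an entry, so denied names are silently skipped instead of being copied to userspace (the gr_fake_filldir helper added to fs/proc/base.c earlier uses the same trick for whole /proc/<pid> entries). A small userspace sketch of that filter-inside-the-callback idea follows; the names and the dot-file policy are invented for illustration only.

/* Sketch of the filldir-style filtering added above: the directory iterator
 * calls a callback per entry, and the callback skips entries the policy hook
 * rejects by returning 0 ("handled") without emitting anything. */
#include <stdio.h>

struct list_ctx {
	int shown;
	int hidden;
};

/* Stand-in for gr_acl_handle_filldir(): here, hide names starting with '.' */
static int policy_allows(const char *name)
{
	return name[0] != '.';
}

static int fill_one(struct list_ctx *ctx, const char *name)
{
	if (!policy_allows(name)) {	/* denied: pretend the entry was consumed */
		ctx->hidden++;
		return 0;
	}
	ctx->shown++;
	printf("%s\n", name);
	return 0;
}

int main(void)
{
	static const char *entries[] = { ".", "..", ".secret", "README", "src" };
	struct list_ctx ctx = { 0, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		fill_one(&ctx, entries[i]);

	fprintf(stderr, "shown=%d hidden=%d\n", ctx.shown, ctx.hidden);
	return 0;
}
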
32632 diff -urNp linux-2.6.34.1/fs/reiserfs/do_balan.c linux-2.6.34.1/fs/reiserfs/do_balan.c
32633 --- linux-2.6.34.1/fs/reiserfs/do_balan.c 2010-07-05 14:24:10.000000000 -0400
32634 +++ linux-2.6.34.1/fs/reiserfs/do_balan.c 2010-07-07 09:04:56.000000000 -0400
32635 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
32636 return;
32637 }
32638
32639 - atomic_inc(&(fs_generation(tb->tb_sb)));
32640 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
32641 do_balance_starts(tb);
32642
32643 /* balance leaf returns 0 except if combining L R and S into
32644 diff -urNp linux-2.6.34.1/fs/reiserfs/item_ops.c linux-2.6.34.1/fs/reiserfs/item_ops.c
32645 --- linux-2.6.34.1/fs/reiserfs/item_ops.c 2010-07-05 14:24:10.000000000 -0400
32646 +++ linux-2.6.34.1/fs/reiserfs/item_ops.c 2010-07-07 09:04:56.000000000 -0400
32647 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
32648 vi->vi_index, vi->vi_type, vi->vi_ih);
32649 }
32650
32651 -static struct item_operations stat_data_ops = {
32652 +static const struct item_operations stat_data_ops = {
32653 .bytes_number = sd_bytes_number,
32654 .decrement_key = sd_decrement_key,
32655 .is_left_mergeable = sd_is_left_mergeable,
32656 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
32657 vi->vi_index, vi->vi_type, vi->vi_ih);
32658 }
32659
32660 -static struct item_operations direct_ops = {
32661 +static const struct item_operations direct_ops = {
32662 .bytes_number = direct_bytes_number,
32663 .decrement_key = direct_decrement_key,
32664 .is_left_mergeable = direct_is_left_mergeable,
32665 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
32666 vi->vi_index, vi->vi_type, vi->vi_ih);
32667 }
32668
32669 -static struct item_operations indirect_ops = {
32670 +static const struct item_operations indirect_ops = {
32671 .bytes_number = indirect_bytes_number,
32672 .decrement_key = indirect_decrement_key,
32673 .is_left_mergeable = indirect_is_left_mergeable,
32674 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
32675 printk("\n");
32676 }
32677
32678 -static struct item_operations direntry_ops = {
32679 +static const struct item_operations direntry_ops = {
32680 .bytes_number = direntry_bytes_number,
32681 .decrement_key = direntry_decrement_key,
32682 .is_left_mergeable = direntry_is_left_mergeable,
32683 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
32684 "Invalid item type observed, run fsck ASAP");
32685 }
32686
32687 -static struct item_operations errcatch_ops = {
32688 +static const struct item_operations errcatch_ops = {
32689 errcatch_bytes_number,
32690 errcatch_decrement_key,
32691 errcatch_is_left_mergeable,
32692 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
32693 #error Item types must use disk-format assigned values.
32694 #endif
32695
32696 -struct item_operations *item_ops[TYPE_ANY + 1] = {
32697 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
32698 &stat_data_ops,
32699 &indirect_ops,
32700 &direct_ops,
32701 diff -urNp linux-2.6.34.1/fs/reiserfs/procfs.c linux-2.6.34.1/fs/reiserfs/procfs.c
32702 --- linux-2.6.34.1/fs/reiserfs/procfs.c 2010-07-05 14:24:10.000000000 -0400
32703 +++ linux-2.6.34.1/fs/reiserfs/procfs.c 2010-07-07 09:04:56.000000000 -0400
32704 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
32705 "SMALL_TAILS " : "NO_TAILS ",
32706 replay_only(sb) ? "REPLAY_ONLY " : "",
32707 convert_reiserfs(sb) ? "CONV " : "",
32708 - atomic_read(&r->s_generation_counter),
32709 + atomic_read_unchecked(&r->s_generation_counter),
32710 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
32711 SF(s_do_balance), SF(s_unneeded_left_neighbor),
32712 SF(s_good_search_by_key_reada), SF(s_bmaps),
32713 diff -urNp linux-2.6.34.1/fs/select.c linux-2.6.34.1/fs/select.c
32714 --- linux-2.6.34.1/fs/select.c 2010-07-05 14:24:10.000000000 -0400
32715 +++ linux-2.6.34.1/fs/select.c 2010-07-07 09:04:56.000000000 -0400
32716 @@ -20,6 +20,7 @@
32717 #include <linux/module.h>
32718 #include <linux/slab.h>
32719 #include <linux/poll.h>
32720 +#include <linux/security.h>
32721 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
32722 #include <linux/file.h>
32723 #include <linux/fdtable.h>
32724 @@ -838,6 +839,7 @@ int do_sys_poll(struct pollfd __user *uf
32725 struct poll_list *walk = head;
32726 unsigned long todo = nfds;
32727
32728 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
32729 if (nfds > rlimit(RLIMIT_NOFILE))
32730 return -EINVAL;
32731
32732 diff -urNp linux-2.6.34.1/fs/seq_file.c linux-2.6.34.1/fs/seq_file.c
32733 --- linux-2.6.34.1/fs/seq_file.c 2010-07-05 14:24:10.000000000 -0400
32734 +++ linux-2.6.34.1/fs/seq_file.c 2010-07-07 09:04:56.000000000 -0400
32735 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
32736 return 0;
32737 }
32738 if (!m->buf) {
32739 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
32740 + m->size = PAGE_SIZE;
32741 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
32742 if (!m->buf)
32743 return -ENOMEM;
32744 }
32745 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
32746 Eoverflow:
32747 m->op->stop(m, p);
32748 kfree(m->buf);
32749 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
32750 + m->size <<= 1;
32751 + m->buf = kmalloc(m->size, GFP_KERNEL);
32752 return !m->buf ? -ENOMEM : -EAGAIN;
32753 }
32754
32755 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
32756 m->version = file->f_version;
32757 /* grab buffer if we didn't have one */
32758 if (!m->buf) {
32759 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
32760 + m->size = PAGE_SIZE;
32761 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
32762 if (!m->buf)
32763 goto Enomem;
32764 }
32765 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
32766 goto Fill;
32767 m->op->stop(m, p);
32768 kfree(m->buf);
32769 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
32770 + m->size <<= 1;
32771 + m->buf = kmalloc(m->size, GFP_KERNEL);
32772 if (!m->buf)
32773 goto Enomem;
32774 m->count = 0;
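
The fs/seq_file.c hunks above change no behaviour: they only pull the size bookkeeping (m->size = PAGE_SIZE, m->size <<= 1) out of the kmalloc() argument list into its own statement, which reads more clearly and keeps side effects out of function arguments. A userspace before/after sketch of the same refactor, with invented names:

/* Before/after sketch of the seq_file buffer-growth cleanup above; both
 * versions double the buffer, only the expression layout differs. */
#include <stdlib.h>

struct toy_seq {
	char   *buf;
	size_t  size;
};

static int grow_buf_old_style(struct toy_seq *m)
{
	free(m->buf);
	m->buf = malloc(m->size <<= 1);	/* size update hidden in the call */
	return m->buf ? 0 : -1;
}

static int grow_buf_new_style(struct toy_seq *m)
{
	free(m->buf);
	m->size <<= 1;			/* size update is its own statement */
	m->buf = malloc(m->size);
	return m->buf ? 0 : -1;
}

int main(void)
{
	struct toy_seq m = { malloc(4096), 4096 };

	if (grow_buf_new_style(&m) == 0)
		free(m.buf);
	(void)grow_buf_old_style;	/* kept only for comparison */
	return 0;
}
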
32775 diff -urNp linux-2.6.34.1/fs/smbfs/symlink.c linux-2.6.34.1/fs/smbfs/symlink.c
32776 --- linux-2.6.34.1/fs/smbfs/symlink.c 2010-07-05 14:24:10.000000000 -0400
32777 +++ linux-2.6.34.1/fs/smbfs/symlink.c 2010-07-07 09:04:56.000000000 -0400
32778 @@ -56,7 +56,7 @@ static void *smb_follow_link(struct dent
32779
32780 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
32781 {
32782 - char *s = nd_get_link(nd);
32783 + const char *s = nd_get_link(nd);
32784 if (!IS_ERR(s))
32785 __putname(s);
32786 }
32787 diff -urNp linux-2.6.34.1/fs/splice.c linux-2.6.34.1/fs/splice.c
32788 --- linux-2.6.34.1/fs/splice.c 2010-07-05 14:24:10.000000000 -0400
32789 +++ linux-2.6.34.1/fs/splice.c 2010-07-07 09:04:56.000000000 -0400
32790 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
32791 pipe_lock(pipe);
32792
32793 for (;;) {
32794 - if (!pipe->readers) {
32795 + if (!atomic_read(&pipe->readers)) {
32796 send_sig(SIGPIPE, current, 0);
32797 if (!ret)
32798 ret = -EPIPE;
32799 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
32800 do_wakeup = 0;
32801 }
32802
32803 - pipe->waiting_writers++;
32804 + atomic_inc(&pipe->waiting_writers);
32805 pipe_wait(pipe);
32806 - pipe->waiting_writers--;
32807 + atomic_dec(&pipe->waiting_writers);
32808 }
32809
32810 pipe_unlock(pipe);
32811 @@ -532,7 +532,7 @@ static ssize_t kernel_readv(struct file
32812 old_fs = get_fs();
32813 set_fs(get_ds());
32814 /* The cast to a user pointer is valid due to the set_fs() */
32815 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
32816 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
32817 set_fs(old_fs);
32818
32819 return res;
32820 @@ -547,7 +547,7 @@ static ssize_t kernel_write(struct file
32821 old_fs = get_fs();
32822 set_fs(get_ds());
32823 /* The cast to a user pointer is valid due to the set_fs() */
32824 - res = vfs_write(file, (const char __user *)buf, count, &pos);
32825 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
32826 set_fs(old_fs);
32827
32828 return res;
32829 @@ -589,7 +589,7 @@ ssize_t default_file_splice_read(struct
32830 goto err;
32831
32832 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
32833 - vec[i].iov_base = (void __user *) page_address(page);
32834 + vec[i].iov_base = (__force void __user *) page_address(page);
32835 vec[i].iov_len = this_len;
32836 pages[i] = page;
32837 spd.nr_pages++;
32838 @@ -811,10 +811,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
32839 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
32840 {
32841 while (!pipe->nrbufs) {
32842 - if (!pipe->writers)
32843 + if (!atomic_read(&pipe->writers))
32844 return 0;
32845
32846 - if (!pipe->waiting_writers && sd->num_spliced)
32847 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
32848 return 0;
32849
32850 if (sd->flags & SPLICE_F_NONBLOCK)
32851 @@ -1151,7 +1151,7 @@ ssize_t splice_direct_to_actor(struct fi
32852 * out of the pipe right after the splice_to_pipe(). So set
32853 * PIPE_READERS appropriately.
32854 */
32855 - pipe->readers = 1;
32856 + atomic_set(&pipe->readers, 1);
32857
32858 current->splice_pipe = pipe;
32859 }
32860 @@ -1711,9 +1711,9 @@ static int ipipe_prep(struct pipe_inode_
32861 ret = -ERESTARTSYS;
32862 break;
32863 }
32864 - if (!pipe->writers)
32865 + if (!atomic_read(&pipe->writers))
32866 break;
32867 - if (!pipe->waiting_writers) {
32868 + if (!atomic_read(&pipe->waiting_writers)) {
32869 if (flags & SPLICE_F_NONBLOCK) {
32870 ret = -EAGAIN;
32871 break;
32872 @@ -1745,7 +1745,7 @@ static int opipe_prep(struct pipe_inode_
32873 pipe_lock(pipe);
32874
32875 while (pipe->nrbufs >= PIPE_BUFFERS) {
32876 - if (!pipe->readers) {
32877 + if (!atomic_read(&pipe->readers)) {
32878 send_sig(SIGPIPE, current, 0);
32879 ret = -EPIPE;
32880 break;
32881 @@ -1758,9 +1758,9 @@ static int opipe_prep(struct pipe_inode_
32882 ret = -ERESTARTSYS;
32883 break;
32884 }
32885 - pipe->waiting_writers++;
32886 + atomic_inc(&pipe->waiting_writers);
32887 pipe_wait(pipe);
32888 - pipe->waiting_writers--;
32889 + atomic_dec(&pipe->waiting_writers);
32890 }
32891
32892 pipe_unlock(pipe);
32893 @@ -1796,14 +1796,14 @@ retry:
32894 pipe_double_lock(ipipe, opipe);
32895
32896 do {
32897 - if (!opipe->readers) {
32898 + if (!atomic_read(&opipe->readers)) {
32899 send_sig(SIGPIPE, current, 0);
32900 if (!ret)
32901 ret = -EPIPE;
32902 break;
32903 }
32904
32905 - if (!ipipe->nrbufs && !ipipe->writers)
32906 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
32907 break;
32908
32909 /*
32910 @@ -1903,7 +1903,7 @@ static int link_pipe(struct pipe_inode_i
32911 pipe_double_lock(ipipe, opipe);
32912
32913 do {
32914 - if (!opipe->readers) {
32915 + if (!atomic_read(&opipe->readers)) {
32916 send_sig(SIGPIPE, current, 0);
32917 if (!ret)
32918 ret = -EPIPE;
32919 @@ -1948,7 +1948,7 @@ static int link_pipe(struct pipe_inode_i
32920 * return EAGAIN if we have the potential of some data in the
32921 * future, otherwise just return 0
32922 */
32923 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
32924 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
32925 ret = -EAGAIN;
32926
32927 pipe_unlock(ipipe);
32928 diff -urNp linux-2.6.34.1/fs/sysfs/symlink.c linux-2.6.34.1/fs/sysfs/symlink.c
32929 --- linux-2.6.34.1/fs/sysfs/symlink.c 2010-07-05 14:24:10.000000000 -0400
32930 +++ linux-2.6.34.1/fs/sysfs/symlink.c 2010-07-07 09:04:56.000000000 -0400
32931 @@ -243,7 +243,7 @@ static void *sysfs_follow_link(struct de
32932
32933 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
32934 {
32935 - char *page = nd_get_link(nd);
32936 + const char *page = nd_get_link(nd);
32937 if (!IS_ERR(page))
32938 free_page((unsigned long)page);
32939 }
32940 diff -urNp linux-2.6.34.1/fs/udf/misc.c linux-2.6.34.1/fs/udf/misc.c
32941 --- linux-2.6.34.1/fs/udf/misc.c 2010-07-05 14:24:10.000000000 -0400
32942 +++ linux-2.6.34.1/fs/udf/misc.c 2010-07-07 09:04:56.000000000 -0400
32943 @@ -142,8 +142,8 @@ struct genericFormat *udf_add_extendedat
32944 iinfo->i_lenEAttr += size;
32945 return (struct genericFormat *)&ea[offset];
32946 }
32947 - if (loc & 0x02)
32948 - ;
32949 + if (loc & 0x02) {
32950 + }
32951
32952 return NULL;
32953 }
32954 diff -urNp linux-2.6.34.1/fs/udf/udfdecl.h linux-2.6.34.1/fs/udf/udfdecl.h
32955 --- linux-2.6.34.1/fs/udf/udfdecl.h 2010-07-05 14:24:10.000000000 -0400
32956 +++ linux-2.6.34.1/fs/udf/udfdecl.h 2010-07-07 09:04:56.000000000 -0400
32957 @@ -26,7 +26,7 @@ do { \
32958 printk(f, ##a); \
32959 } while (0)
32960 #else
32961 -#define udf_debug(f, a...) /**/
32962 +#define udf_debug(f, a...) do {} while (0)
32963 #endif
32964
32965 #define udf_info(f, a...) \
32966 diff -urNp linux-2.6.34.1/fs/utimes.c linux-2.6.34.1/fs/utimes.c
32967 --- linux-2.6.34.1/fs/utimes.c 2010-07-05 14:24:10.000000000 -0400
32968 +++ linux-2.6.34.1/fs/utimes.c 2010-07-07 09:04:56.000000000 -0400
32969 @@ -1,6 +1,7 @@
32970 #include <linux/compiler.h>
32971 #include <linux/file.h>
32972 #include <linux/fs.h>
32973 +#include <linux/security.h>
32974 #include <linux/linkage.h>
32975 #include <linux/mount.h>
32976 #include <linux/namei.h>
32977 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
32978 goto mnt_drop_write_and_out;
32979 }
32980 }
32981 +
32982 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
32983 + error = -EACCES;
32984 + goto mnt_drop_write_and_out;
32985 + }
32986 +
32987 mutex_lock(&inode->i_mutex);
32988 error = notify_change(path->dentry, &newattrs);
32989 mutex_unlock(&inode->i_mutex);
32990 diff -urNp linux-2.6.34.1/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.34.1/fs/xfs/linux-2.6/xfs_ioctl.c
32991 --- linux-2.6.34.1/fs/xfs/linux-2.6/xfs_ioctl.c 2010-07-05 14:24:10.000000000 -0400
32992 +++ linux-2.6.34.1/fs/xfs/linux-2.6/xfs_ioctl.c 2010-07-07 09:04:56.000000000 -0400
32993 @@ -136,7 +136,7 @@ xfs_find_handle(
32994 }
32995
32996 error = -EFAULT;
32997 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
32998 + if (hsize > sizeof(handle) || copy_to_user(hreq->ohandle, &handle, hsize) ||
32999 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
33000 goto out_put;
33001
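
The xfs_ioctl.c change above adds hsize > sizeof(handle) in front of the copy_to_user(), so a caller-supplied length can never copy more bytes to userspace than the on-stack handle actually holds. A userspace sketch of that clamp-before-copy guard, with invented names (the kernel path fails the request rather than returning -1):

/* Sketch of the bounds check added above: never copy more bytes out to the
 * caller than the source object actually holds. */
#include <stdio.h>
#include <string.h>

struct toy_handle {
	unsigned int fsid;
	unsigned int fid;
};

/* Stand-in for the copy_to_user() call site; requested_len is caller-controlled. */
static int export_handle(void *dst, size_t requested_len)
{
	struct toy_handle handle = { 1, 42 };

	if (requested_len > sizeof(handle))	/* the guard the patch adds */
		return -1;			/* the kernel code bails out here */

	memcpy(dst, &handle, requested_len);
	return 0;
}

int main(void)
{
	unsigned char buf[64];

	printf("exact-size copy:   %d\n", export_handle(buf, sizeof(struct toy_handle)));
	printf("oversized request: %d\n", export_handle(buf, sizeof(buf)));
	return 0;
}
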
33002 diff -urNp linux-2.6.34.1/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.34.1/fs/xfs/linux-2.6/xfs_iops.c
33003 --- linux-2.6.34.1/fs/xfs/linux-2.6/xfs_iops.c 2010-07-05 14:24:10.000000000 -0400
33004 +++ linux-2.6.34.1/fs/xfs/linux-2.6/xfs_iops.c 2010-07-07 09:04:56.000000000 -0400
33005 @@ -480,7 +480,7 @@ xfs_vn_put_link(
33006 struct nameidata *nd,
33007 void *p)
33008 {
33009 - char *s = nd_get_link(nd);
33010 + const char *s = nd_get_link(nd);
33011
33012 if (!IS_ERR(s))
33013 kfree(s);
33014 diff -urNp linux-2.6.34.1/fs/xfs/xfs_bmap.c linux-2.6.34.1/fs/xfs/xfs_bmap.c
33015 --- linux-2.6.34.1/fs/xfs/xfs_bmap.c 2010-07-05 14:24:10.000000000 -0400
33016 +++ linux-2.6.34.1/fs/xfs/xfs_bmap.c 2010-07-07 09:04:56.000000000 -0400
33017 @@ -296,7 +296,7 @@ xfs_bmap_validate_ret(
33018 int nmap,
33019 int ret_nmap);
33020 #else
33021 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
33022 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
33023 #endif /* DEBUG */
33024
33025 STATIC int
33026 diff -urNp linux-2.6.34.1/grsecurity/Kconfig linux-2.6.34.1/grsecurity/Kconfig
33027 --- linux-2.6.34.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
33028 +++ linux-2.6.34.1/grsecurity/Kconfig 2010-07-07 09:04:56.000000000 -0400
33029 @@ -0,0 +1,981 @@
33030 +#
33031 +# grsecurity configuration
33032 +#
33033 +
33034 +menu "Grsecurity"
33035 +
33036 +config GRKERNSEC
33037 + bool "Grsecurity"
33038 + select CRYPTO
33039 + select CRYPTO_SHA256
33040 + help
33041 + If you say Y here, you will be able to configure many features
33042 + that will enhance the security of your system. It is highly
33043 + recommended that you say Y here and read through the help
33044 + for each option so that you fully understand the features and
33045 + can evaluate their usefulness for your machine.
33046 +
33047 +choice
33048 + prompt "Security Level"
33049 + depends on GRKERNSEC
33050 + default GRKERNSEC_CUSTOM
33051 +
33052 +config GRKERNSEC_LOW
33053 + bool "Low"
33054 + select GRKERNSEC_LINK
33055 + select GRKERNSEC_FIFO
33056 + select GRKERNSEC_EXECVE
33057 + select GRKERNSEC_RANDNET
33058 + select GRKERNSEC_DMESG
33059 + select GRKERNSEC_CHROOT
33060 + select GRKERNSEC_CHROOT_CHDIR
33061 +
33062 + help
33063 + If you choose this option, several of the grsecurity options will
33064 + be enabled that will give you greater protection against a number
33065 +	  of attacks, while ensuring that none of your software will have any
33066 + conflicts with the additional security measures. If you run a lot
33067 + of unusual software, or you are having problems with the higher
33068 + security levels, you should say Y here. With this option, the
33069 + following features are enabled:
33070 +
33071 + - Linking restrictions
33072 + - FIFO restrictions
33073 + - Enforcing RLIMIT_NPROC on execve
33074 + - Restricted dmesg
33075 + - Enforced chdir("/") on chroot
33076 + - Runtime module disabling
33077 +
33078 +config GRKERNSEC_MEDIUM
33079 + bool "Medium"
33080 + select PAX
33081 + select PAX_EI_PAX
33082 + select PAX_PT_PAX_FLAGS
33083 + select PAX_HAVE_ACL_FLAGS
33084 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
33085 + select GRKERNSEC_CHROOT
33086 + select GRKERNSEC_CHROOT_SYSCTL
33087 + select GRKERNSEC_LINK
33088 + select GRKERNSEC_FIFO
33089 + select GRKERNSEC_EXECVE
33090 + select GRKERNSEC_DMESG
33091 + select GRKERNSEC_RANDNET
33092 + select GRKERNSEC_FORKFAIL
33093 + select GRKERNSEC_TIME
33094 + select GRKERNSEC_SIGNAL
33095 + select GRKERNSEC_CHROOT
33096 + select GRKERNSEC_CHROOT_UNIX
33097 + select GRKERNSEC_CHROOT_MOUNT
33098 + select GRKERNSEC_CHROOT_PIVOT
33099 + select GRKERNSEC_CHROOT_DOUBLE
33100 + select GRKERNSEC_CHROOT_CHDIR
33101 + select GRKERNSEC_CHROOT_MKNOD
33102 + select GRKERNSEC_PROC
33103 + select GRKERNSEC_PROC_USERGROUP
33104 + select PAX_RANDUSTACK
33105 + select PAX_ASLR
33106 + select PAX_RANDMMAP
33107 + select PAX_REFCOUNT if (X86 || SPARC64)
33108 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC32 || PPC64) && (SLAB || SLUB || SLOB))
33109 +
33110 + help
33111 + If you say Y here, several features in addition to those included
33112 + in the low additional security level will be enabled. These
33113 + features provide even more security to your system, though in rare
33114 + cases they may be incompatible with very old or poorly written
33115 + software. If you enable this option, make sure that your auth
33116 + service (identd) is running as gid 1001. With this option,
33117 + the following features (in addition to those provided in the
33118 + low additional security level) will be enabled:
33119 +
33120 + - Failed fork logging
33121 + - Time change logging
33122 + - Signal logging
33123 + - Deny mounts in chroot
33124 + - Deny double chrooting
33125 + - Deny sysctl writes in chroot
33126 + - Deny mknod in chroot
33127 + - Deny access to abstract AF_UNIX sockets out of chroot
33128 + - Deny pivot_root in chroot
33129 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
33130 + - /proc restrictions with special GID set to 10 (usually wheel)
33131 + - Address Space Layout Randomization (ASLR)
33132 + - Prevent exploitation of most refcount overflows
33133 + - Bounds checking of copying between the kernel and userland
33134 +
33135 +config GRKERNSEC_HIGH
33136 + bool "High"
33137 + select GRKERNSEC_LINK
33138 + select GRKERNSEC_FIFO
33139 + select GRKERNSEC_EXECVE
33140 + select GRKERNSEC_DMESG
33141 + select GRKERNSEC_FORKFAIL
33142 + select GRKERNSEC_TIME
33143 + select GRKERNSEC_SIGNAL
33144 + select GRKERNSEC_CHROOT
33145 + select GRKERNSEC_CHROOT_SHMAT
33146 + select GRKERNSEC_CHROOT_UNIX
33147 + select GRKERNSEC_CHROOT_MOUNT
33148 + select GRKERNSEC_CHROOT_FCHDIR
33149 + select GRKERNSEC_CHROOT_PIVOT
33150 + select GRKERNSEC_CHROOT_DOUBLE
33151 + select GRKERNSEC_CHROOT_CHDIR
33152 + select GRKERNSEC_CHROOT_MKNOD
33153 + select GRKERNSEC_CHROOT_CAPS
33154 + select GRKERNSEC_CHROOT_SYSCTL
33155 + select GRKERNSEC_CHROOT_FINDTASK
33156 + select GRKERNSEC_PROC
33157 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
33158 + select GRKERNSEC_HIDESYM
33159 + select GRKERNSEC_BRUTE
33160 + select GRKERNSEC_PROC_USERGROUP
33161 + select GRKERNSEC_KMEM
33162 + select GRKERNSEC_RESLOG
33163 + select GRKERNSEC_RANDNET
33164 + select GRKERNSEC_PROC_ADD
33165 + select GRKERNSEC_CHROOT_CHMOD
33166 + select GRKERNSEC_CHROOT_NICE
33167 + select GRKERNSEC_AUDIT_MOUNT
33168 + select GRKERNSEC_MODHARDEN if (MODULES)
33169 + select GRKERNSEC_HARDEN_PTRACE
33170 + select GRKERNSEC_VM86 if (X86_32)
33171 + select PAX
33172 + select PAX_RANDUSTACK
33173 + select PAX_ASLR
33174 + select PAX_RANDMMAP
33175 + select PAX_NOEXEC
33176 + select PAX_MPROTECT
33177 + select PAX_EI_PAX
33178 + select PAX_PT_PAX_FLAGS
33179 + select PAX_HAVE_ACL_FLAGS
33180 + select PAX_KERNEXEC if ((PPC32 || PPC64 || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
33181 + select PAX_MEMORY_UDEREF if (X86_32 && !XEN)
33182 + select PAX_RANDKSTACK if (X86_TSC && !X86_64)
33183 + select PAX_SEGMEXEC if (X86_32)
33184 + select PAX_PAGEEXEC
33185 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
33186 + select PAX_EMUTRAMP if (PARISC)
33187 + select PAX_EMUSIGRT if (PARISC)
33188 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
33189 + select PAX_REFCOUNT if (X86 || SPARC64)
33190 + select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
33191 + help
33192 + If you say Y here, many of the features of grsecurity will be
33193 + enabled, which will protect you against many kinds of attacks
33194 + against your system. The heightened security comes at a cost
33195 + of an increased chance of incompatibilities with rare software
33196 + on your machine. Since this security level enables PaX, you should
33197 + view <http://pax.grsecurity.net> and read about the PaX
33198 + project. While you are there, download chpax and run it on
33199 + binaries that cause problems with PaX. Also remember that
33200 + since the /proc restrictions are enabled, you must run your
33201 + identd as gid 1001. This security level enables the following
33202 + features in addition to those listed in the low and medium
33203 + security levels:
33204 +
33205 + - Additional /proc restrictions
33206 + - Chmod restrictions in chroot
33207 + - No signals, ptrace, or viewing of processes outside of chroot
33208 + - Capability restrictions in chroot
33209 + - Deny fchdir out of chroot
33210 + - Priority restrictions in chroot
33211 + - Segmentation-based implementation of PaX
33212 + - Mprotect restrictions
33213 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
33214 + - Kernel stack randomization
33215 + - Mount/unmount/remount logging
33216 + - Kernel symbol hiding
33217 + - Prevention of memory exhaustion-based exploits
33218 + - Hardening of module auto-loading
33219 + - Ptrace restrictions
33220 + - Restricted vm86 mode
33221 +
33222 +config GRKERNSEC_CUSTOM
33223 + bool "Custom"
33224 + help
33225 + If you say Y here, you will be able to configure every grsecurity
33226 + option, which allows you to enable many more features that aren't
33227 + covered in the basic security levels. These additional features
33228 + include TPE, socket restrictions, and the sysctl system for
33229 + grsecurity. It is advised that you read through the help for
33230 + each option to determine its usefulness in your situation.
33231 +
33232 +endchoice
33233 +
33234 +menu "Address Space Protection"
33235 +depends on GRKERNSEC
33236 +
33237 +config GRKERNSEC_KMEM
33238 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
33239 + help
33240 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
33241 + be written to via mmap or otherwise to modify the running kernel.
33242 + /dev/port will also not be allowed to be opened. If you have module
33243 + support disabled, enabling this will close up four ways that are
33244 + currently used to insert malicious code into the running kernel.
33245 + Even with all these features enabled, we still highly recommend that
33246 + you use the RBAC system, as it is still possible for an attacker to
33247 + modify the running kernel through privileged I/O granted by ioperm/iopl.
33248 + If you are not using XFree86, you may be able to stop this additional
33249 + case by enabling the 'Disable privileged I/O' option. Though nothing
33250 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
33251 + but only to video memory, which is the only writing we allow in this
33252 +	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
33253 +	  cannot later be changed to PROT_WRITE with mprotect.
33254 + It is highly recommended that you say Y here if you meet all the
33255 + conditions above.
33256 +
33257 +config GRKERNSEC_VM86
33258 + bool "Restrict VM86 mode"
33259 + depends on X86_32
33260 +
33261 + help
33262 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
33263 + make use of a special execution mode on 32bit x86 processors called
33264 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
33265 + video cards and will still work with this option enabled. The purpose
33266 + of the option is to prevent exploitation of emulation errors in
33267 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
33268 + Nearly all users should be able to enable this option.
33269 +
33270 +config GRKERNSEC_IO
33271 + bool "Disable privileged I/O"
33272 + depends on X86
33273 + select RTC_CLASS
33274 + select RTC_INTF_DEV
33275 + select RTC_DRV_CMOS
33276 +
33277 + help
33278 + If you say Y here, all ioperm and iopl calls will return an error.
33279 + Ioperm and iopl can be used to modify the running kernel.
33280 + Unfortunately, some programs need this access to operate properly,
33281 +	  the most notable of which are XFree86 and hwclock. The hwclock issue
33282 +	  can be remedied by having RTC support in the kernel, so real-time
33283 + clock support is enabled if this option is enabled, to ensure
33284 + that hwclock operates correctly. XFree86 still will not
33285 + operate correctly with this option enabled, so DO NOT CHOOSE Y
33286 + IF YOU USE XFree86. If you use XFree86 and you still want to
33287 + protect your kernel against modification, use the RBAC system.
33288 +
33289 +config GRKERNSEC_PROC_MEMMAP
33290 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
33291 + default y if (PAX_NOEXEC || PAX_ASLR)
33292 + depends on PAX_NOEXEC || PAX_ASLR
33293 + help
33294 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
33295 +	  give no information about the addresses of a task's mappings if
33296 + PaX features that rely on random addresses are enabled on the task.
33297 +	  If you use PaX it is strongly recommended that you say Y here as it
33298 + closes up a hole that makes the full ASLR useless for suid
33299 + binaries.
33300 +
33301 +config GRKERNSEC_BRUTE
33302 + bool "Deter exploit bruteforcing"
33303 + help
33304 + If you say Y here, attempts to bruteforce exploits against forking
33305 + daemons such as apache or sshd will be deterred. When a child of a
33306 + forking daemon is killed by PaX or crashes due to an illegal
33307 + instruction, the parent process will be delayed 30 seconds upon every
33308 + subsequent fork until the administrator is able to assess the
33309 + situation and restart the daemon. It is recommended that you also
33310 + enable signal logging in the auditing section so that logs are
33311 + generated when a process performs an illegal instruction.
33312 +
33313 +config GRKERNSEC_MODHARDEN
33314 + bool "Harden module auto-loading"
33315 + depends on MODULES
33316 + help
33317 + If you say Y here, module auto-loading in response to use of some
33318 + feature implemented by an unloaded module will be restricted to
33319 + root users. Enabling this option helps defend against attacks
33320 + by unprivileged users who abuse the auto-loading behavior to
33321 + cause a vulnerable module to load that is then exploited.
33322 +
33323 + If this option prevents a legitimate use of auto-loading for a
33324 + non-root user, the administrator can execute modprobe manually
33325 + with the exact name of the module mentioned in the alert log.
33326 + Alternatively, the administrator can add the module to the list
33327 + of modules loaded at boot by modifying init scripts.
33328 +
33329 + Modification of init scripts will most likely be needed on
33330 + Ubuntu servers with encrypted home directory support enabled,
33331 + as the first non-root user logging in will cause the ecb(aes),
33332 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
33333 +
33334 +config GRKERNSEC_HIDESYM
33335 + bool "Hide kernel symbols"
33336 + help
33337 +	  If you say Y here, getting information on loaded modules and
33338 + displaying all kernel symbols through a syscall will be restricted
33339 + to users with CAP_SYS_MODULE. For software compatibility reasons,
33340 + /proc/kallsyms will be restricted to the root user. The RBAC
33341 + system can hide that entry even from root. Note that this option
33342 + is only effective provided the following conditions are met:
33343 + 1) The kernel using grsecurity is not precompiled by some distribution
33344 + 2) You are using the RBAC system and hiding other files such as your
33345 + kernel image and System.map. Alternatively, enabling this option
33346 + causes the permissions on /boot, /lib/modules, and the kernel
33347 + source directory to change at compile time to prevent
33348 + reading by non-root users.
33349 + If the above conditions are met, this option will aid in providing a
33350 + useful protection against local kernel exploitation of overflows
33351 + and arbitrary read/write vulnerabilities.
33352 +
33353 +endmenu
33354 +menu "Role Based Access Control Options"
33355 +depends on GRKERNSEC
33356 +
33357 +config GRKERNSEC_NO_RBAC
33358 + bool "Disable RBAC system"
33359 + help
33360 + If you say Y here, the /dev/grsec device will be removed from the kernel,
33361 + preventing the RBAC system from being enabled. You should only say Y
33362 + here if you have no intention of using the RBAC system, so as to prevent
33363 + an attacker with root access from misusing the RBAC system to hide files
33364 + and processes when loadable module support and /dev/[k]mem have been
33365 + locked down.
33366 +
33367 +config GRKERNSEC_ACL_HIDEKERN
33368 + bool "Hide kernel processes"
33369 + help
33370 + If you say Y here, all kernel threads will be hidden to all
33371 + processes but those whose subject has the "view hidden processes"
33372 + flag.
33373 +
33374 +config GRKERNSEC_ACL_MAXTRIES
33375 + int "Maximum tries before password lockout"
33376 + default 3
33377 + help
33378 + This option enforces the maximum number of times a user can attempt
33379 + to authorize themselves with the grsecurity RBAC system before being
33380 + denied the ability to attempt authorization again for a specified time.
33381 + The lower the number, the harder it will be to brute-force a password.
33382 +
33383 +config GRKERNSEC_ACL_TIMEOUT
33384 + int "Time to wait after max password tries, in seconds"
33385 + default 30
33386 + help
33387 + This option specifies the time the user must wait after attempting to
33388 + authorize to the RBAC system with the maximum number of invalid
33389 + passwords. The higher the number, the harder it will be to brute-force
33390 + a password.
33391 +
33392 +endmenu
33393 +menu "Filesystem Protections"
33394 +depends on GRKERNSEC
33395 +
33396 +config GRKERNSEC_PROC
33397 + bool "Proc restrictions"
33398 + help
33399 + If you say Y here, the permissions of the /proc filesystem
33400 + will be altered to enhance system security and privacy. You MUST
33401 + choose either a user only restriction or a user and group restriction.
33402 +	  Depending upon the option you choose, you can either restrict all
33403 +	  non-root users to seeing only the processes they themselves run, or
33404 +	  designate a group whose members may view all processes and files
33405 +	  normally restricted to root. NOTE: If you're running identd as
33406 + a non-root user, you will have to run it as the group you specify here.
33407 +
33408 +config GRKERNSEC_PROC_USER
33409 + bool "Restrict /proc to user only"
33410 + depends on GRKERNSEC_PROC
33411 + help
33412 + If you say Y here, non-root users will only be able to view their own
33413 +	  processes, and will be restricted from viewing network-related
33414 +	  information as well as kernel symbol and module information.
33415 +
33416 +config GRKERNSEC_PROC_USERGROUP
33417 + bool "Allow special group"
33418 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
33419 + help
33420 + If you say Y here, you will be able to select a group that will be
33421 + able to view all processes, network-related information, and
33422 + kernel and symbol information. This option is useful if you want
33423 + to run identd as a non-root user.
33424 +
33425 +config GRKERNSEC_PROC_GID
33426 + int "GID for special group"
33427 + depends on GRKERNSEC_PROC_USERGROUP
33428 + default 1001
33429 +
33430 +config GRKERNSEC_PROC_ADD
33431 + bool "Additional restrictions"
33432 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
33433 + help
33434 + If you say Y here, additional restrictions will be placed on
33435 + /proc that keep normal users from viewing device information and
33436 + slabinfo information that could be useful for exploits.
33437 +
33438 +config GRKERNSEC_LINK
33439 + bool "Linking restrictions"
33440 + help
33441 + If you say Y here, /tmp race exploits will be prevented, since users
33442 + will no longer be able to follow symlinks owned by other users in
33443 + world-writable +t directories (i.e. /tmp), unless the owner of the
33444 +	  symlink is the owner of the directory. Users will also not be
33445 + able to hardlink to files they do not own. If the sysctl option is
33446 + enabled, a sysctl option with name "linking_restrictions" is created.
33447 +
33448 +config GRKERNSEC_FIFO
33449 + bool "FIFO restrictions"
33450 + help
33451 + If you say Y here, users will not be able to write to FIFOs they don't
33452 + own in world-writable +t directories (i.e. /tmp), unless the owner of
33453 +	  the FIFO is also the owner of the directory it resides in. If the sysctl
33454 + option is enabled, a sysctl option with name "fifo_restrictions" is
33455 + created.
33456 +
33457 +config GRKERNSEC_ROFS
33458 + bool "Runtime read-only mount protection"
33459 + help
33460 + If you say Y here, a sysctl option with name "romount_protect" will
33461 + be created. By setting this option to 1 at runtime, filesystems
33462 + will be protected in the following ways:
33463 + * No new writable mounts will be allowed
33464 + * Existing read-only mounts won't be able to be remounted read/write
33465 + * Write operations will be denied on all block devices
33466 + This option acts independently of grsec_lock: once it is set to 1,
33467 + it cannot be turned off. Therefore, please be mindful of the resulting
33468 + behavior if this option is enabled in an init script on a read-only
33469 + filesystem. This feature is mainly intended for secure embedded systems.
33470 +
33471 +config GRKERNSEC_CHROOT
33472 + bool "Chroot jail restrictions"
33473 + help
33474 + If you say Y here, you will be able to choose several options that will
33475 + make breaking out of a chrooted jail much more difficult. If you
33476 + encounter no software incompatibilities with the following options, it
33477 + is recommended that you enable each one.
33478 +
33479 +config GRKERNSEC_CHROOT_MOUNT
33480 + bool "Deny mounts"
33481 + depends on GRKERNSEC_CHROOT
33482 + help
33483 + If you say Y here, processes inside a chroot will not be able to
33484 + mount or remount filesystems. If the sysctl option is enabled, a
33485 + sysctl option with name "chroot_deny_mount" is created.
33486 +
33487 +config GRKERNSEC_CHROOT_DOUBLE
33488 + bool "Deny double-chroots"
33489 + depends on GRKERNSEC_CHROOT
33490 + help
33491 + If you say Y here, processes inside a chroot will not be able to chroot
33492 + again outside the chroot. This is a widely used method of breaking
33493 + out of a chroot jail and should not be allowed. If the sysctl
33494 + option is enabled, a sysctl option with name
33495 + "chroot_deny_chroot" is created.
33496 +
33497 +config GRKERNSEC_CHROOT_PIVOT
33498 + bool "Deny pivot_root in chroot"
33499 + depends on GRKERNSEC_CHROOT
33500 + help
33501 + If you say Y here, processes inside a chroot will not be able to use
33502 + a function called pivot_root() that was introduced in Linux 2.3.41. It
33503 +	  works similarly to chroot in that it changes the root filesystem. This
33504 + function could be misused in a chrooted process to attempt to break out
33505 + of the chroot, and therefore should not be allowed. If the sysctl
33506 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
33507 + created.
33508 +
33509 +config GRKERNSEC_CHROOT_CHDIR
33510 + bool "Enforce chdir(\"/\") on all chroots"
33511 + depends on GRKERNSEC_CHROOT
33512 + help
33513 + If you say Y here, the current working directory of all newly-chrooted
33514 +	  applications will be set to the root directory of the chroot.
33515 + The man page on chroot(2) states:
33516 + Note that this call does not change the current working
33517 + directory, so that `.' can be outside the tree rooted at
33518 + `/'. In particular, the super-user can escape from a
33519 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
33520 +
33521 + It is recommended that you say Y here, since it's not known to break
33522 + any software. If the sysctl option is enabled, a sysctl option with
33523 + name "chroot_enforce_chdir" is created.
33524 +
33525 +config GRKERNSEC_CHROOT_CHMOD
33526 + bool "Deny (f)chmod +s"
33527 + depends on GRKERNSEC_CHROOT
33528 + help
33529 + If you say Y here, processes inside a chroot will not be able to chmod
33530 + or fchmod files to make them have suid or sgid bits. This protects
33531 + against another published method of breaking a chroot. If the sysctl
33532 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
33533 + created.
33534 +
33535 +config GRKERNSEC_CHROOT_FCHDIR
33536 + bool "Deny fchdir out of chroot"
33537 + depends on GRKERNSEC_CHROOT
33538 + help
33539 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
33540 + to a file descriptor of the chrooting process that points to a directory
33541 + outside the filesystem will be stopped. If the sysctl option
33542 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
33543 +
33544 +config GRKERNSEC_CHROOT_MKNOD
33545 + bool "Deny mknod"
33546 + depends on GRKERNSEC_CHROOT
33547 + help
33548 + If you say Y here, processes inside a chroot will not be allowed to
33549 + mknod. The problem with using mknod inside a chroot is that it
33550 + would allow an attacker to create a device entry that is the same
33551 +	  as one on the physical root of your system, which could be anything
33552 +	  from the console device to a device for your hard drive (which
33553 + they could then use to wipe the drive or steal data). It is recommended
33554 + that you say Y here, unless you run into software incompatibilities.
33555 + If the sysctl option is enabled, a sysctl option with name
33556 + "chroot_deny_mknod" is created.
33557 +
33558 +config GRKERNSEC_CHROOT_SHMAT
33559 + bool "Deny shmat() out of chroot"
33560 + depends on GRKERNSEC_CHROOT
33561 + help
33562 + If you say Y here, processes inside a chroot will not be able to attach
33563 + to shared memory segments that were created outside of the chroot jail.
33564 + It is recommended that you say Y here. If the sysctl option is enabled,
33565 + a sysctl option with name "chroot_deny_shmat" is created.
33566 +
33567 +config GRKERNSEC_CHROOT_UNIX
33568 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
33569 + depends on GRKERNSEC_CHROOT
33570 + help
33571 + If you say Y here, processes inside a chroot will not be able to
33572 + connect to abstract (meaning not belonging to a filesystem) Unix
33573 + domain sockets that were bound outside of a chroot. It is recommended
33574 + that you say Y here. If the sysctl option is enabled, a sysctl option
33575 + with name "chroot_deny_unix" is created.
33576 +
33577 +config GRKERNSEC_CHROOT_FINDTASK
33578 + bool "Protect outside processes"
33579 + depends on GRKERNSEC_CHROOT
33580 + help
33581 + If you say Y here, processes inside a chroot will not be able to
33582 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
33583 + getsid, or view any process outside of the chroot. If the sysctl
33584 + option is enabled, a sysctl option with name "chroot_findtask" is
33585 + created.
33586 +
33587 +config GRKERNSEC_CHROOT_NICE
33588 + bool "Restrict priority changes"
33589 + depends on GRKERNSEC_CHROOT
33590 + help
33591 + If you say Y here, processes inside a chroot will not be able to raise
33592 + the priority of processes in the chroot, or alter the priority of
33593 + processes outside the chroot. This provides more security than simply
33594 + removing CAP_SYS_NICE from the process' capability set. If the
33595 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
33596 + is created.
33597 +
33598 +config GRKERNSEC_CHROOT_SYSCTL
33599 + bool "Deny sysctl writes"
33600 + depends on GRKERNSEC_CHROOT
33601 + help
33602 + If you say Y here, an attacker in a chroot will not be able to
33603 + write to sysctl entries, either by sysctl(2) or through a /proc
33604 + interface. It is strongly recommended that you say Y here. If the
33605 + sysctl option is enabled, a sysctl option with name
33606 + "chroot_deny_sysctl" is created.
33607 +
33608 +config GRKERNSEC_CHROOT_CAPS
33609 + bool "Capability restrictions"
33610 + depends on GRKERNSEC_CHROOT
33611 + help
33612 + If you say Y here, the capabilities on all root processes within a
33613 + chroot jail will be lowered to stop module insertion, raw i/o,
33614 + system and net admin tasks, rebooting the system, modifying immutable
33615 + files, modifying IPC owned by another, and changing the system time.
33616 +	  This is left as an option because it can break some apps. Disable this
33617 + if your chrooted apps are having problems performing those kinds of
33618 + tasks. If the sysctl option is enabled, a sysctl option with
33619 + name "chroot_caps" is created.
33620 +
33621 +endmenu
33622 +menu "Kernel Auditing"
33623 +depends on GRKERNSEC
33624 +
33625 +config GRKERNSEC_AUDIT_GROUP
33626 + bool "Single group for auditing"
33627 + help
33628 + If you say Y here, the exec, chdir, and (un)mount logging features
33629 + will only operate on a group you specify. This option is recommended
33630 + if you only want to watch certain users instead of having a large
33631 + amount of logs from the entire system. If the sysctl option is enabled,
33632 + a sysctl option with name "audit_group" is created.
33633 +
33634 +config GRKERNSEC_AUDIT_GID
33635 + int "GID for auditing"
33636 + depends on GRKERNSEC_AUDIT_GROUP
33637 + default 1007
33638 +
33639 +config GRKERNSEC_EXECLOG
33640 + bool "Exec logging"
33641 + help
33642 + If you say Y here, all execve() calls will be logged (since the
33643 + other exec*() calls are frontends to execve(), all execution
33644 + will be logged). Useful for shell-servers that like to keep track
33645 + of their users. If the sysctl option is enabled, a sysctl option with
33646 + name "exec_logging" is created.
33647 + WARNING: This option when enabled will produce a LOT of logs, especially
33648 + on an active system.
33649 +
33650 +config GRKERNSEC_RESLOG
33651 + bool "Resource logging"
33652 + help
33653 + If you say Y here, all attempts to overstep resource limits will
33654 + be logged with the resource name, the requested size, and the current
33655 + limit. It is highly recommended that you say Y here. If the sysctl
33656 + option is enabled, a sysctl option with name "resource_logging" is
33657 + created. If the RBAC system is enabled, the sysctl value is ignored.
33658 +
33659 +config GRKERNSEC_CHROOT_EXECLOG
33660 + bool "Log execs within chroot"
33661 + help
33662 + If you say Y here, all executions inside a chroot jail will be logged
33663 + to syslog. This can cause a large amount of logs if certain
33664 +	  applications (e.g. djb's daemontools) are installed on the system, and
33665 + is therefore left as an option. If the sysctl option is enabled, a
33666 + sysctl option with name "chroot_execlog" is created.
33667 +
33668 +config GRKERNSEC_AUDIT_PTRACE
33669 + bool "Ptrace logging"
33670 + help
33671 + If you say Y here, all attempts to attach to a process via ptrace
33672 + will be logged. If the sysctl option is enabled, a sysctl option
33673 + with name "audit_ptrace" is created.
33674 +
33675 +config GRKERNSEC_AUDIT_CHDIR
33676 + bool "Chdir logging"
33677 + help
33678 + If you say Y here, all chdir() calls will be logged. If the sysctl
33679 + option is enabled, a sysctl option with name "audit_chdir" is created.
33680 +
33681 +config GRKERNSEC_AUDIT_MOUNT
33682 + bool "(Un)Mount logging"
33683 + help
33684 + If you say Y here, all mounts and unmounts will be logged. If the
33685 + sysctl option is enabled, a sysctl option with name "audit_mount" is
33686 + created.
33687 +
33688 +config GRKERNSEC_SIGNAL
33689 + bool "Signal logging"
33690 + help
33691 + If you say Y here, certain important signals will be logged, such as
33692 +	  SIGSEGV, which will as a result inform you when an error in a program
33693 +	  occurred, which in some cases could indicate a possible exploit attempt.
33694 + If the sysctl option is enabled, a sysctl option with name
33695 + "signal_logging" is created.
33696 +
33697 +config GRKERNSEC_FORKFAIL
33698 + bool "Fork failure logging"
33699 + help
33700 + If you say Y here, all failed fork() attempts will be logged.
33701 + This could suggest a fork bomb, or someone attempting to overstep
33702 + their process limit. If the sysctl option is enabled, a sysctl option
33703 + with name "forkfail_logging" is created.
33704 +
33705 +config GRKERNSEC_TIME
33706 + bool "Time change logging"
33707 + help
33708 + If you say Y here, any changes of the system clock will be logged.
33709 + If the sysctl option is enabled, a sysctl option with name
33710 + "timechange_logging" is created.
33711 +
33712 +config GRKERNSEC_PROC_IPADDR
33713 + bool "/proc/<pid>/ipaddr support"
33714 + help
33715 + If you say Y here, a new entry will be added to each /proc/<pid>
33716 + directory that contains the IP address of the person using the task.
33717 + The IP is carried across local TCP and AF_UNIX stream sockets.
33718 + This information can be useful for IDS/IPSes to perform remote response
33719 + to a local attack. The entry is readable by only the owner of the
33720 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
33721 + the RBAC system), and thus does not create privacy concerns.
33722 +
33723 +config GRKERNSEC_AUDIT_TEXTREL
33724 + bool 'ELF text relocations logging (READ HELP)'
33725 + depends on PAX_MPROTECT
33726 + help
33727 + If you say Y here, text relocations will be logged with the filename
33728 + of the offending library or binary. The purpose of the feature is
33729 + to help Linux distribution developers get rid of libraries and
33730 + binaries that need text relocations which hinder the future progress
33731 + of PaX. Only Linux distribution developers should say Y here, and
33732 + never on a production machine, as this option creates an information
33733 + leak that could aid an attacker in defeating the randomization of
33734 + a single memory region. If the sysctl option is enabled, a sysctl
33735 + option with name "audit_textrel" is created.
33736 +
33737 +endmenu
33738 +
33739 +menu "Executable Protections"
33740 +depends on GRKERNSEC
33741 +
33742 +config GRKERNSEC_EXECVE
33743 + bool "Enforce RLIMIT_NPROC on execs"
33744 + help
33745 + If you say Y here, users with a resource limit on processes will
33746 + have the value checked during execve() calls. The current system
33747 + only checks the system limit during fork() calls. If the sysctl option
33748 + is enabled, a sysctl option with name "execve_limiting" is created.
33749 +
33750 +config GRKERNSEC_DMESG
33751 + bool "Dmesg(8) restriction"
33752 + help
33753 + If you say Y here, non-root users will not be able to use dmesg(8)
33754 + to view up to the last 4kb of messages in the kernel's log buffer.
33755 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
33756 + created.
33757 +
33758 +config GRKERNSEC_HARDEN_PTRACE
33759 + bool "Deter ptrace-based process snooping"
33760 + help
33761 + If you say Y here, TTY sniffers and other malicious monitoring
33762 + programs implemented through ptrace will be defeated. If you
33763 + have been using the RBAC system, this option has already been
33764 + enabled for several years for all users, with the ability to make
33765 + fine-grained exceptions.
33766 +
33767 + This option only affects the ability of non-root users to ptrace
33768 +	  processes that are not a descendant of the ptracing process.
33769 + This means that strace ./binary and gdb ./binary will still work,
33770 + but attaching to arbitrary processes will not. If the sysctl
33771 + option is enabled, a sysctl option with name "harden_ptrace" is
33772 + created.
33773 +
33774 +config GRKERNSEC_TPE
33775 + bool "Trusted Path Execution (TPE)"
33776 + help
33777 + If you say Y here, you will be able to choose a gid to add to the
33778 + supplementary groups of users you want to mark as "untrusted."
33779 + These users will not be able to execute any files that are not in
33780 + root-owned directories writable only by root. If the sysctl option
33781 + is enabled, a sysctl option with name "tpe" is created.
33782 +
33783 +config GRKERNSEC_TPE_ALL
33784 + bool "Partially restrict non-root users"
33785 + depends on GRKERNSEC_TPE
33786 + help
33787 +	  If you say Y here, all non-root users other than the ones in the
33788 + group specified in the main TPE option will only be allowed to
33789 + execute files in directories they own that are not group or
33790 + world-writable, or in directories owned by root and writable only by
33791 + root. If the sysctl option is enabled, a sysctl option with name
33792 + "tpe_restrict_all" is created.
33793 +
33794 +config GRKERNSEC_TPE_INVERT
33795 + bool "Invert GID option"
33796 + depends on GRKERNSEC_TPE
33797 + help
33798 + If you say Y here, the group you specify in the TPE configuration will
33799 + decide what group TPE restrictions will be *disabled* for. This
33800 + option is useful if you want TPE restrictions to be applied to most
33801 + users on the system.
33802 +
33803 +config GRKERNSEC_TPE_GID
33804 + int "GID for untrusted users"
33805 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
33806 + default 1005
33807 + help
33808 + If you have selected the "Invert GID option" above, setting this
33809 + GID determines what group TPE restrictions will be *disabled* for.
33810 + If you have not selected the "Invert GID option" above, setting this
33811 + GID determines what group TPE restrictions will be *enabled* for.
33812 + If the sysctl option is enabled, a sysctl option with name "tpe_gid"
33813 + is created.
33814 +
33815 +config GRKERNSEC_TPE_GID
33816 + int "GID for trusted users"
33817 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
33818 + default 1005
33819 + help
33820 + If you have selected the "Invert GID option" above, setting this
33821 + GID determines what group TPE restrictions will be *disabled* for.
33822 + If you have not selected the "Invert GID option" above, setting this
33823 + GID determines what group TPE restrictions will be *enabled* for.
33824 + If the sysctl option is enabled, a sysctl option with name "tpe_gid"
33825 + is created.
33826 +
33827 +endmenu
33828 +menu "Network Protections"
33829 +depends on GRKERNSEC
33830 +
33831 +config GRKERNSEC_RANDNET
33832 + bool "Larger entropy pools"
33833 + help
33834 + If you say Y here, the entropy pools used for many features of Linux
33835 + and grsecurity will be doubled in size. Since several grsecurity
33836 + features use additional randomness, it is recommended that you say Y
33837 + here. Saying Y here has a similar effect as modifying
33838 + /proc/sys/kernel/random/poolsize.
33839 +
33840 +config GRKERNSEC_BLACKHOLE
33841 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
33842 + help
33843 + If you say Y here, neither TCP resets nor ICMP
33844 + destination-unreachable packets will be sent in response to packets
33845 + sent to ports for which no associated listening process exists.
33846 +	  This feature supports both IPv4 and IPv6 and exempts the
33847 + loopback interface from blackholing. Enabling this feature
33848 + makes a host more resilient to DoS attacks and reduces network
33849 + visibility against scanners.
33850 +
33851 + The blackhole feature as-implemented is equivalent to the FreeBSD
33852 + blackhole feature, as it prevents RST responses to all packets, not
33853 + just SYNs. Under most application behavior this causes no
33854 + problems, but applications (like haproxy) may not close certain
33855 + connections in a way that cleanly terminates them on the remote
33856 + end, leaving the remote host in LAST_ACK state. Because of this
33857 + side-effect and to prevent intentional LAST_ACK DoSes, this
33858 + feature also adds automatic mitigation against such attacks.
33859 + The mitigation drastically reduces the amount of time a socket
33860 + can spend in LAST_ACK state. If you're using haproxy and not
33861 + all servers it connects to have this option enabled, consider
33862 + disabling this feature on the haproxy host.
33863 +
33864 + If the sysctl option is enabled, two sysctl options with names
33865 + "ip_blackhole" and "lastack_retries" will be created.
33866 + While "ip_blackhole" takes the standard zero/non-zero on/off
33867 + toggle, "lastack_retries" uses the same kinds of values as
33868 + "tcp_retries1" and "tcp_retries2". The default value of 4
33869 + prevents a socket from lasting more than 45 seconds in LAST_ACK
33870 + state.
33871 +
33872 +config GRKERNSEC_SOCKET
33873 + bool "Socket restrictions"
33874 + help
33875 + If you say Y here, you will be able to choose from several options.
33876 + If you assign a GID on your system and add it to the supplementary
33877 + groups of users you want to restrict socket access to, this patch
33878 + will perform up to three things, based on the option(s) you choose.
33879 +
33880 +config GRKERNSEC_SOCKET_ALL
33881 + bool "Deny any sockets to group"
33882 + depends on GRKERNSEC_SOCKET
33883 + help
33884 +	  If you say Y here, you will be able to choose a GID whose users will
33885 + be unable to connect to other hosts from your machine or run server
33886 + applications from your machine. If the sysctl option is enabled, a
33887 + sysctl option with name "socket_all" is created.
33888 +
33889 +config GRKERNSEC_SOCKET_ALL_GID
33890 + int "GID to deny all sockets for"
33891 + depends on GRKERNSEC_SOCKET_ALL
33892 + default 1004
33893 + help
33894 + Here you can choose the GID to disable socket access for. Remember to
33895 + add the users you want socket access disabled for to the GID
33896 + specified here. If the sysctl option is enabled, a sysctl option
33897 + with name "socket_all_gid" is created.
33898 +
33899 +config GRKERNSEC_SOCKET_CLIENT
33900 + bool "Deny client sockets to group"
33901 + depends on GRKERNSEC_SOCKET
33902 + help
33903 +	  If you say Y here, you will be able to choose a GID whose users will
33904 + be unable to connect to other hosts from your machine, but will be
33905 + able to run servers. If this option is enabled, all users in the group
33906 + you specify will have to use passive mode when initiating ftp transfers
33907 + from the shell on your machine. If the sysctl option is enabled, a
33908 + sysctl option with name "socket_client" is created.
33909 +
33910 +config GRKERNSEC_SOCKET_CLIENT_GID
33911 + int "GID to deny client sockets for"
33912 + depends on GRKERNSEC_SOCKET_CLIENT
33913 + default 1003
33914 + help
33915 + Here you can choose the GID to disable client socket access for.
33916 + Remember to add the users you want client socket access disabled for to
33917 + the GID specified here. If the sysctl option is enabled, a sysctl
33918 + option with name "socket_client_gid" is created.
33919 +
33920 +config GRKERNSEC_SOCKET_SERVER
33921 + bool "Deny server sockets to group"
33922 + depends on GRKERNSEC_SOCKET
33923 + help
33924 +	  If you say Y here, you will be able to choose a GID whose users will
33925 + be unable to run server applications from your machine. If the sysctl
33926 + option is enabled, a sysctl option with name "socket_server" is created.
33927 +
33928 +config GRKERNSEC_SOCKET_SERVER_GID
33929 + int "GID to deny server sockets for"
33930 + depends on GRKERNSEC_SOCKET_SERVER
33931 + default 1002
33932 + help
33933 + Here you can choose the GID to disable server socket access for.
33934 + Remember to add the users you want server socket access disabled for to
33935 + the GID specified here. If the sysctl option is enabled, a sysctl
33936 + option with name "socket_server_gid" is created.
33937 +
33938 +endmenu
33939 +menu "Sysctl support"
33940 +depends on GRKERNSEC && SYSCTL
33941 +
33942 +config GRKERNSEC_SYSCTL
33943 + bool "Sysctl support"
33944 + help
33945 + If you say Y here, you will be able to change the options that
33946 + grsecurity runs with at bootup, without having to recompile your
33947 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
33948 + to enable (1) or disable (0) various features. All the sysctl entries
33949 + are mutable until the "grsec_lock" entry is set to a non-zero value.
33950 + All features enabled in the kernel configuration are disabled at boot
33951 + if you do not say Y to the "Turn on features by default" option.
33952 + All options should be set at startup, and the grsec_lock entry should
33953 + be set to a non-zero value after all the options are set.
33954 + *THIS IS EXTREMELY IMPORTANT*
33955 +
33956 +config GRKERNSEC_SYSCTL_DISTRO
33957 + bool "Extra sysctl support for distro makers (READ HELP)"
33958 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
33959 + help
33960 + If you say Y here, additional sysctl options will be created
33961 + for features that affect processes running as root. Therefore,
33962 + it is critical when using this option that the grsec_lock entry be
33963 +	  enabled after boot. Only distros that ship prebuilt kernel packages
33964 +	  with this option enabled and that can ensure grsec_lock is enabled
33965 +	  after boot should use this option.
33966 + *Failure to set grsec_lock after boot makes all grsec features
33967 + this option covers useless*
33968 +
33969 + Currently this option creates the following sysctl entries:
33970 + "Disable Privileged I/O": "disable_priv_io"
33971 +
33972 +config GRKERNSEC_SYSCTL_ON
33973 + bool "Turn on features by default"
33974 + depends on GRKERNSEC_SYSCTL
33975 + help
33976 + If you say Y here, instead of having all features enabled in the
33977 + kernel configuration disabled at boot time, the features will be
33978 + enabled at boot time. It is recommended you say Y here unless
33979 + there is some reason you would want all sysctl-tunable features to
33980 + be disabled by default. As mentioned elsewhere, it is important
33981 + to enable the grsec_lock entry once you have finished modifying
33982 + the sysctl entries.
33983 +
33984 +endmenu
33985 +menu "Logging Options"
33986 +depends on GRKERNSEC
33987 +
33988 +config GRKERNSEC_FLOODTIME
33989 + int "Seconds in between log messages (minimum)"
33990 + default 10
33991 + help
33992 + This option allows you to enforce the number of seconds between
33993 + grsecurity log messages. The default should be suitable for most
33994 +	  people; however, if you choose to change it, choose a value small enough
33995 + to allow informative logs to be produced, but large enough to
33996 + prevent flooding.
33997 +
33998 +config GRKERNSEC_FLOODBURST
33999 + int "Number of messages in a burst (maximum)"
34000 + default 4
34001 + help
34002 + This option allows you to choose the maximum number of messages allowed
34003 + within the flood time interval you chose in a separate option. The
34004 +	  default should be suitable for most people; however, if you find that
34005 + many of your logs are being interpreted as flooding, you may want to
34006 + raise this value.
34007 +
34008 +endmenu
34009 +
34010 +endmenu
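The "Sysctl support" help above describes the intended runtime flow: echo 0 or 1 to the entries under /proc/sys/kernel/grsecurity, then set grsec_lock to a non-zero value so the configuration can no longer be changed. A minimal sketch of that sequence follows; the entry names come from the help texts in this Kconfig ("chroot_deny_mount", "grsec_lock"), but the exact files present on a given kernel depend on which options were compiled in, so treat the paths as assumptions.

/* Sketch: toggle a grsecurity sysctl, then lock the interface (order matters). */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;      /* entry absent or insufficient privilege */
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* assumed paths, derived from the sysctl names given in the help above */
        write_sysctl("/proc/sys/kernel/grsecurity/chroot_deny_mount", "1");
        write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
        return 0;
}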
34011 diff -urNp linux-2.6.34.1/grsecurity/Makefile linux-2.6.34.1/grsecurity/Makefile
34012 --- linux-2.6.34.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
34013 +++ linux-2.6.34.1/grsecurity/Makefile 2010-07-07 09:04:56.000000000 -0400
34014 @@ -0,0 +1,29 @@
34015 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
34016 +# during 2001-2009 it was completely redesigned by Brad Spengler
34017 +# into an RBAC system
34018 +#
34019 +# All code in this directory and various hooks inserted throughout the kernel
34020 +# are copyright Brad Spengler - Open Source Security, Inc., and released
34021 +# under the GPL v2 or higher
34022 +
34023 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
34024 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
34025 + grsec_time.o grsec_tpe.o grsec_link.o grsec_textrel.o grsec_ptrace.o
34026 +
34027 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \
34028 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
34029 + gracl_learn.o grsec_log.o
34030 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
34031 +
34032 +ifndef CONFIG_GRKERNSEC
34033 +obj-y += grsec_disabled.o
34034 +endif
34035 +
34036 +ifdef CONFIG_GRKERNSEC_HIDESYM
34037 +extra-y := grsec_hidesym.o
34038 +$(obj)/grsec_hidesym.o:
34039 + @-chmod -f 500 /boot
34040 + @-chmod -f 500 /lib/modules
34041 + @-chmod -f 700 .
34042 + @echo ' grsec: protected kernel image paths'
34043 +endif
34044 diff -urNp linux-2.6.34.1/grsecurity/gracl.c linux-2.6.34.1/grsecurity/gracl.c
34045 --- linux-2.6.34.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
34046 +++ linux-2.6.34.1/grsecurity/gracl.c 2010-07-07 09:04:56.000000000 -0400
34047 @@ -0,0 +1,3899 @@
34048 +#include <linux/kernel.h>
34049 +#include <linux/module.h>
34050 +#include <linux/sched.h>
34051 +#include <linux/mm.h>
34052 +#include <linux/file.h>
34053 +#include <linux/fs.h>
34054 +#include <linux/namei.h>
34055 +#include <linux/mount.h>
34056 +#include <linux/tty.h>
34057 +#include <linux/proc_fs.h>
34058 +#include <linux/smp_lock.h>
34059 +#include <linux/slab.h>
34060 +#include <linux/vmalloc.h>
34061 +#include <linux/types.h>
34062 +#include <linux/sysctl.h>
34063 +#include <linux/netdevice.h>
34064 +#include <linux/ptrace.h>
34065 +#include <linux/gracl.h>
34066 +#include <linux/gralloc.h>
34067 +#include <linux/grsecurity.h>
34068 +#include <linux/grinternal.h>
34069 +#include <linux/pid_namespace.h>
34070 +#include <linux/fdtable.h>
34071 +#include <linux/percpu.h>
34072 +
34073 +#include <asm/uaccess.h>
34074 +#include <asm/errno.h>
34075 +#include <asm/mman.h>
34076 +
34077 +static struct acl_role_db acl_role_set;
34078 +static struct name_db name_set;
34079 +static struct inodev_db inodev_set;
34080 +
34081 +/* for keeping track of userspace pointers used for subjects, so we
34082 + can share references in the kernel as well
34083 +*/
34084 +
34085 +static struct dentry *real_root;
34086 +static struct vfsmount *real_root_mnt;
34087 +
34088 +static struct acl_subj_map_db subj_map_set;
34089 +
34090 +static struct acl_role_label *default_role;
34091 +
34092 +static struct acl_role_label *role_list;
34093 +
34094 +static u16 acl_sp_role_value;
34095 +
34096 +extern char *gr_shared_page[4];
34097 +static DECLARE_MUTEX(gr_dev_sem);
34098 +DEFINE_RWLOCK(gr_inode_lock);
34099 +
34100 +struct gr_arg *gr_usermode;
34101 +
34102 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
34103 +
34104 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
34105 +extern void gr_clear_learn_entries(void);
34106 +
34107 +#ifdef CONFIG_GRKERNSEC_RESLOG
34108 +extern void gr_log_resource(const struct task_struct *task,
34109 + const int res, const unsigned long wanted, const int gt);
34110 +#endif
34111 +
34112 +unsigned char *gr_system_salt;
34113 +unsigned char *gr_system_sum;
34114 +
34115 +static struct sprole_pw **acl_special_roles = NULL;
34116 +static __u16 num_sprole_pws = 0;
34117 +
34118 +static struct acl_role_label *kernel_role = NULL;
34119 +
34120 +static unsigned int gr_auth_attempts = 0;
34121 +static unsigned long gr_auth_expires = 0UL;
34122 +
34123 +extern struct vfsmount *sock_mnt;
34124 +extern struct vfsmount *pipe_mnt;
34125 +extern struct vfsmount *shm_mnt;
34126 +#ifdef CONFIG_HUGETLBFS
34127 +extern struct vfsmount *hugetlbfs_vfsmount;
34128 +#endif
34129 +
34130 +static struct acl_object_label *fakefs_obj;
34131 +
34132 +extern int gr_init_uidset(void);
34133 +extern void gr_free_uidset(void);
34134 +extern void gr_remove_uid(uid_t uid);
34135 +extern int gr_find_uid(uid_t uid);
34136 +
34137 +extern spinlock_t vfsmount_lock;
34138 +
34139 +__inline__ int
34140 +gr_acl_is_enabled(void)
34141 +{
34142 + return (gr_status & GR_READY);
34143 +}
34144 +
34145 +char gr_roletype_to_char(void)
34146 +{
34147 + switch (current->role->roletype &
34148 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
34149 + GR_ROLE_SPECIAL)) {
34150 + case GR_ROLE_DEFAULT:
34151 + return 'D';
34152 + case GR_ROLE_USER:
34153 + return 'U';
34154 + case GR_ROLE_GROUP:
34155 + return 'G';
34156 + case GR_ROLE_SPECIAL:
34157 + return 'S';
34158 + }
34159 +
34160 + return 'X';
34161 +}
34162 +
34163 +__inline__ int
34164 +gr_acl_tpe_check(void)
34165 +{
34166 + if (unlikely(!(gr_status & GR_READY)))
34167 + return 0;
34168 + if (current->role->roletype & GR_ROLE_TPE)
34169 + return 1;
34170 + else
34171 + return 0;
34172 +}
34173 +
34174 +int
34175 +gr_handle_rawio(const struct inode *inode)
34176 +{
34177 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
34178 + if (inode && S_ISBLK(inode->i_mode) &&
34179 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
34180 + !capable(CAP_SYS_RAWIO))
34181 + return 1;
34182 +#endif
34183 + return 0;
34184 +}
34185 +
34186 +static int
34187 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
34188 +{
34189 + if (likely(lena != lenb))
34190 + return 0;
34191 +
34192 + return !memcmp(a, b, lena);
34193 +}
34194 +
34195 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
34196 + struct dentry *root, struct vfsmount *rootmnt,
34197 + char *buffer, int buflen)
34198 +{
34199 + char * end = buffer+buflen;
34200 + char * retval;
34201 + int namelen;
34202 +
34203 + spin_lock(&vfsmount_lock);
34204 + *--end = '\0';
34205 + buflen--;
34206 +
34207 + if (buflen < 1)
34208 + goto Elong;
34209 + /* Get '/' right */
34210 + retval = end-1;
34211 + *retval = '/';
34212 +
34213 + for (;;) {
34214 + struct dentry * parent;
34215 +
34216 + if (dentry == root && vfsmnt == rootmnt)
34217 + break;
34218 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
34219 + /* Global root? */
34220 + if (vfsmnt->mnt_parent == vfsmnt) {
34221 + goto global_root;
34222 + }
34223 + dentry = vfsmnt->mnt_mountpoint;
34224 + vfsmnt = vfsmnt->mnt_parent;
34225 + continue;
34226 + }
34227 + parent = dentry->d_parent;
34228 + prefetch(parent);
34229 + namelen = dentry->d_name.len;
34230 + buflen -= namelen + 1;
34231 + if (buflen < 0)
34232 + goto Elong;
34233 + end -= namelen;
34234 + memcpy(end, dentry->d_name.name, namelen);
34235 + *--end = '/';
34236 + retval = end;
34237 + dentry = parent;
34238 + }
34239 +
34240 +out:
34241 + spin_unlock(&vfsmount_lock);
34242 + return retval;
34243 +
34244 +global_root:
34245 + namelen = dentry->d_name.len;
34246 + buflen -= namelen;
34247 + if (buflen < 0)
34248 + goto Elong;
34249 + retval -= namelen-1; /* hit the slash */
34250 + memcpy(retval, dentry->d_name.name, namelen);
34251 + goto out;
34252 +Elong:
34253 + retval = ERR_PTR(-ENAMETOOLONG);
34254 + goto out;
34255 +}
34256 +
34257 +static char *
34258 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
34259 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
34260 +{
34261 + char *retval;
34262 +
34263 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
34264 + if (unlikely(IS_ERR(retval)))
34265 + retval = strcpy(buf, "<path too long>");
34266 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
34267 + retval[1] = '\0';
34268 +
34269 + return retval;
34270 +}
34271 +
34272 +static char *
34273 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
34274 + char *buf, int buflen)
34275 +{
34276 + char *res;
34277 +
34278 + /* we can use real_root, real_root_mnt, because this is only called
34279 + by the RBAC system */
34280 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
34281 +
34282 + return res;
34283 +}
34284 +
34285 +static char *
34286 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
34287 + char *buf, int buflen)
34288 +{
34289 + char *res;
34290 + struct dentry *root;
34291 + struct vfsmount *rootmnt;
34292 + struct task_struct *reaper = &init_task;
34293 +
34294 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
34295 + read_lock(&reaper->fs->lock);
34296 + root = dget(reaper->fs->root.dentry);
34297 + rootmnt = mntget(reaper->fs->root.mnt);
34298 + read_unlock(&reaper->fs->lock);
34299 +
34300 + spin_lock(&dcache_lock);
34301 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
34302 + spin_unlock(&dcache_lock);
34303 +
34304 + dput(root);
34305 + mntput(rootmnt);
34306 + return res;
34307 +}
34308 +
34309 +static char *
34310 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
34311 +{
34312 + char *ret;
34313 + spin_lock(&dcache_lock);
34314 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
34315 + PAGE_SIZE);
34316 + spin_unlock(&dcache_lock);
34317 + return ret;
34318 +}
34319 +
34320 +char *
34321 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
34322 +{
34323 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
34324 + PAGE_SIZE);
34325 +}
34326 +
34327 +char *
34328 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
34329 +{
34330 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
34331 + PAGE_SIZE);
34332 +}
34333 +
34334 +char *
34335 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
34336 +{
34337 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
34338 + PAGE_SIZE);
34339 +}
34340 +
34341 +char *
34342 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
34343 +{
34344 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
34345 + PAGE_SIZE);
34346 +}
34347 +
34348 +char *
34349 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
34350 +{
34351 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
34352 + PAGE_SIZE);
34353 +}
34354 +
34355 +__inline__ __u32
34356 +to_gr_audit(const __u32 reqmode)
34357 +{
34358 + /* masks off auditable permission flags, then shifts them to create
34359 + auditing flags, and adds the special case of append auditing if
34360 + we're requesting write */
34361 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
34362 +}
34363 +
34364 +struct acl_subject_label *
34365 +lookup_subject_map(const struct acl_subject_label *userp)
34366 +{
34367 + unsigned int index = shash(userp, subj_map_set.s_size);
34368 + struct subject_map *match;
34369 +
34370 + match = subj_map_set.s_hash[index];
34371 +
34372 + while (match && match->user != userp)
34373 + match = match->next;
34374 +
34375 + if (match != NULL)
34376 + return match->kernel;
34377 + else
34378 + return NULL;
34379 +}
34380 +
34381 +static void
34382 +insert_subj_map_entry(struct subject_map *subjmap)
34383 +{
34384 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
34385 + struct subject_map **curr;
34386 +
34387 + subjmap->prev = NULL;
34388 +
34389 + curr = &subj_map_set.s_hash[index];
34390 + if (*curr != NULL)
34391 + (*curr)->prev = subjmap;
34392 +
34393 + subjmap->next = *curr;
34394 + *curr = subjmap;
34395 +
34396 + return;
34397 +}
34398 +
34399 +static struct acl_role_label *
34400 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
34401 + const gid_t gid)
34402 +{
34403 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
34404 + struct acl_role_label *match;
34405 + struct role_allowed_ip *ipp;
34406 + unsigned int x;
34407 +
34408 + match = acl_role_set.r_hash[index];
34409 +
34410 + while (match) {
34411 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
34412 + for (x = 0; x < match->domain_child_num; x++) {
34413 + if (match->domain_children[x] == uid)
34414 + goto found;
34415 + }
34416 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
34417 + break;
34418 + match = match->next;
34419 + }
34420 +found:
34421 + if (match == NULL) {
34422 + try_group:
34423 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
34424 + match = acl_role_set.r_hash[index];
34425 +
34426 + while (match) {
34427 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
34428 + for (x = 0; x < match->domain_child_num; x++) {
34429 + if (match->domain_children[x] == gid)
34430 + goto found2;
34431 + }
34432 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
34433 + break;
34434 + match = match->next;
34435 + }
34436 +found2:
34437 + if (match == NULL)
34438 + match = default_role;
34439 + if (match->allowed_ips == NULL)
34440 + return match;
34441 + else {
34442 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
34443 + if (likely
34444 + ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
34445 + (ntohl(ipp->addr) & ipp->netmask)))
34446 + return match;
34447 + }
34448 + match = default_role;
34449 + }
34450 + } else if (match->allowed_ips == NULL) {
34451 + return match;
34452 + } else {
34453 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
34454 + if (likely
34455 + ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
34456 + (ntohl(ipp->addr) & ipp->netmask)))
34457 + return match;
34458 + }
34459 + goto try_group;
34460 + }
34461 +
34462 + return match;
34463 +}
34464 +
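
The allowed_ips loops above grant a role only when the caller's source address falls inside one of the role's address/netmask pairs; otherwise the lookup falls through to the group role or the default role. A stand-alone approximation of that comparison with made-up addresses (the real role_allowed_ip layout and its byte-order conventions are defined elsewhere in the patch):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    /* hypothetical policy: role reachable only from 192.168.1.0/24 */
    uint32_t allowed = ntohl(inet_addr("192.168.1.0"));
    uint32_t netmask = 0xffffff00u;                       /* /24 */
    uint32_t curr_ip = ntohl(inet_addr("192.168.1.42"));

    if ((curr_ip & netmask) == (allowed & netmask))
        puts("role granted");
    else
        puts("fall back to the default role");
    return 0;
}
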
34465 +struct acl_subject_label *
34466 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
34467 + const struct acl_role_label *role)
34468 +{
34469 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
34470 + struct acl_subject_label *match;
34471 +
34472 + match = role->subj_hash[index];
34473 +
34474 + while (match && (match->inode != ino || match->device != dev ||
34475 + (match->mode & GR_DELETED))) {
34476 + match = match->next;
34477 + }
34478 +
34479 + if (match && !(match->mode & GR_DELETED))
34480 + return match;
34481 + else
34482 + return NULL;
34483 +}
34484 +
34485 +struct acl_subject_label *
34486 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
34487 + const struct acl_role_label *role)
34488 +{
34489 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
34490 + struct acl_subject_label *match;
34491 +
34492 + match = role->subj_hash[index];
34493 +
34494 + while (match && (match->inode != ino || match->device != dev ||
34495 + !(match->mode & GR_DELETED))) {
34496 + match = match->next;
34497 + }
34498 +
34499 + if (match && (match->mode & GR_DELETED))
34500 + return match;
34501 + else
34502 + return NULL;
34503 +}
34504 +
34505 +static struct acl_object_label *
34506 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
34507 + const struct acl_subject_label *subj)
34508 +{
34509 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
34510 + struct acl_object_label *match;
34511 +
34512 + match = subj->obj_hash[index];
34513 +
34514 + while (match && (match->inode != ino || match->device != dev ||
34515 + (match->mode & GR_DELETED))) {
34516 + match = match->next;
34517 + }
34518 +
34519 + if (match && !(match->mode & GR_DELETED))
34520 + return match;
34521 + else
34522 + return NULL;
34523 +}
34524 +
34525 +static struct acl_object_label *
34526 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
34527 + const struct acl_subject_label *subj)
34528 +{
34529 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
34530 + struct acl_object_label *match;
34531 +
34532 + match = subj->obj_hash[index];
34533 +
34534 + while (match && (match->inode != ino || match->device != dev ||
34535 + !(match->mode & GR_DELETED))) {
34536 + match = match->next;
34537 + }
34538 +
34539 + if (match && (match->mode & GR_DELETED))
34540 + return match;
34541 +
34542 + match = subj->obj_hash[index];
34543 +
34544 + while (match && (match->inode != ino || match->device != dev ||
34545 + (match->mode & GR_DELETED))) {
34546 + match = match->next;
34547 + }
34548 +
34549 + if (match && !(match->mode & GR_DELETED))
34550 + return match;
34551 + else
34552 + return NULL;
34553 +}
34554 +
34555 +static struct name_entry *
34556 +lookup_name_entry(const char *name)
34557 +{
34558 + unsigned int len = strlen(name);
34559 + unsigned int key = full_name_hash(name, len);
34560 + unsigned int index = key % name_set.n_size;
34561 + struct name_entry *match;
34562 +
34563 + match = name_set.n_hash[index];
34564 +
34565 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
34566 + match = match->next;
34567 +
34568 + return match;
34569 +}
34570 +
34571 +static struct name_entry *
34572 +lookup_name_entry_create(const char *name)
34573 +{
34574 + unsigned int len = strlen(name);
34575 + unsigned int key = full_name_hash(name, len);
34576 + unsigned int index = key % name_set.n_size;
34577 + struct name_entry *match;
34578 +
34579 + match = name_set.n_hash[index];
34580 +
34581 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
34582 + !match->deleted))
34583 + match = match->next;
34584 +
34585 + if (match && match->deleted)
34586 + return match;
34587 +
34588 + match = name_set.n_hash[index];
34589 +
34590 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
34591 + match->deleted))
34592 + match = match->next;
34593 +
34594 + if (match && !match->deleted)
34595 + return match;
34596 + else
34597 + return NULL;
34598 +}
34599 +
34600 +static struct inodev_entry *
34601 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
34602 +{
34603 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
34604 + struct inodev_entry *match;
34605 +
34606 + match = inodev_set.i_hash[index];
34607 +
34608 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
34609 + match = match->next;
34610 +
34611 + return match;
34612 +}
34613 +
34614 +static void
34615 +insert_inodev_entry(struct inodev_entry *entry)
34616 +{
34617 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
34618 + inodev_set.i_size);
34619 + struct inodev_entry **curr;
34620 +
34621 + entry->prev = NULL;
34622 +
34623 + curr = &inodev_set.i_hash[index];
34624 + if (*curr != NULL)
34625 + (*curr)->prev = entry;
34626 +
34627 + entry->next = *curr;
34628 + *curr = entry;
34629 +
34630 + return;
34631 +}
34632 +
34633 +static void
34634 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
34635 +{
34636 + unsigned int index =
34637 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
34638 + struct acl_role_label **curr;
34639 + struct acl_role_label *tmp;
34640 +
34641 + curr = &acl_role_set.r_hash[index];
34642 +
34643 + /* if the role was already inserted due to domains and already has
34644 + another role attached in the same bucket, then we need to
34645 + combine these two buckets
34646 + */
34647 + if (role->next) {
34648 + tmp = role->next;
34649 + while (tmp->next)
34650 + tmp = tmp->next;
34651 + tmp->next = *curr;
34652 + } else
34653 + role->next = *curr;
34654 + *curr = role;
34655 +
34656 + return;
34657 +}
34658 +
34659 +static void
34660 +insert_acl_role_label(struct acl_role_label *role)
34661 +{
34662 + int i;
34663 +
34664 + if (role_list == NULL) {
34665 + role_list = role;
34666 + role->prev = NULL;
34667 + } else {
34668 + role->prev = role_list;
34669 + role_list = role;
34670 + }
34671 +
34672 + /* used for hash chains */
34673 + role->next = NULL;
34674 +
34675 + if (role->roletype & GR_ROLE_DOMAIN) {
34676 + for (i = 0; i < role->domain_child_num; i++)
34677 + __insert_acl_role_label(role, role->domain_children[i]);
34678 + } else
34679 + __insert_acl_role_label(role, role->uidgid);
34680 +}
34681 +
34682 +static int
34683 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
34684 +{
34685 + struct name_entry **curr, *nentry;
34686 + struct inodev_entry *ientry;
34687 + unsigned int len = strlen(name);
34688 + unsigned int key = full_name_hash(name, len);
34689 + unsigned int index = key % name_set.n_size;
34690 +
34691 + curr = &name_set.n_hash[index];
34692 +
34693 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
34694 + curr = &((*curr)->next);
34695 +
34696 + if (*curr != NULL)
34697 + return 1;
34698 +
34699 + nentry = acl_alloc(sizeof (struct name_entry));
34700 + if (nentry == NULL)
34701 + return 0;
34702 + ientry = acl_alloc(sizeof (struct inodev_entry));
34703 + if (ientry == NULL)
34704 + return 0;
34705 + ientry->nentry = nentry;
34706 +
34707 + nentry->key = key;
34708 + nentry->name = name;
34709 + nentry->inode = inode;
34710 + nentry->device = device;
34711 + nentry->len = len;
34712 + nentry->deleted = deleted;
34713 +
34714 + nentry->prev = NULL;
34715 + curr = &name_set.n_hash[index];
34716 + if (*curr != NULL)
34717 + (*curr)->prev = nentry;
34718 + nentry->next = *curr;
34719 + *curr = nentry;
34720 +
34721 + /* insert us into the table searchable by inode/dev */
34722 + insert_inodev_entry(ientry);
34723 +
34724 + return 1;
34725 +}
34726 +
34727 +static void
34728 +insert_acl_obj_label(struct acl_object_label *obj,
34729 + struct acl_subject_label *subj)
34730 +{
34731 + unsigned int index =
34732 + fhash(obj->inode, obj->device, subj->obj_hash_size);
34733 + struct acl_object_label **curr;
34734 +
34735 +
34736 + obj->prev = NULL;
34737 +
34738 + curr = &subj->obj_hash[index];
34739 + if (*curr != NULL)
34740 + (*curr)->prev = obj;
34741 +
34742 + obj->next = *curr;
34743 + *curr = obj;
34744 +
34745 + return;
34746 +}
34747 +
34748 +static void
34749 +insert_acl_subj_label(struct acl_subject_label *obj,
34750 + struct acl_role_label *role)
34751 +{
34752 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
34753 + struct acl_subject_label **curr;
34754 +
34755 + obj->prev = NULL;
34756 +
34757 + curr = &role->subj_hash[index];
34758 + if (*curr != NULL)
34759 + (*curr)->prev = obj;
34760 +
34761 + obj->next = *curr;
34762 + *curr = obj;
34763 +
34764 + return;
34765 +}
34766 +
34767 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
34768 +
34769 +static void *
34770 +create_table(__u32 * len, int elementsize)
34771 +{
34772 + unsigned int table_sizes[] = {
34773 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
34774 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
34775 + 4194301, 8388593, 16777213, 33554393, 67108859
34776 + };
34777 + void *newtable = NULL;
34778 + unsigned int pwr = 0;
34779 +
34780 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
34781 + table_sizes[pwr] <= *len)
34782 + pwr++;
34783 +
34784 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
34785 + return newtable;
34786 +
34787 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
34788 + newtable =
34789 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
34790 + else
34791 + newtable = vmalloc(table_sizes[pwr] * elementsize);
34792 +
34793 + *len = table_sizes[pwr];
34794 +
34795 + return newtable;
34796 +}
34797 +
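
The lambda ~ 1 remark refers to the load factor: create_table() rounds the requested element count up to the next prime in table_sizes, giving roughly one bucket per expected element so the hash chains stay short. A small user-space sketch of just that size selection, leaving out the kmalloc/vmalloc split:

#include <stdio.h>

static const unsigned int table_sizes[] = {
    7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
    32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
    4194301, 8388593, 16777213, 33554393, 67108859
};

/* returns the chosen bucket count, or 0 if the request is too large */
static unsigned int pick_table_size(unsigned int len)
{
    unsigned int pwr = 0;

    while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
           table_sizes[pwr] <= len)
        pwr++;

    return table_sizes[pwr] > len ? table_sizes[pwr] : 0;
}

int main(void)
{
    printf("%u\n", pick_table_size(1000));  /* 1021 */
    printf("%u\n", pick_table_size(4093));  /* 8191 */
    return 0;
}
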
34798 +static int
34799 +init_variables(const struct gr_arg *arg)
34800 +{
34801 + struct task_struct *reaper = &init_task;
34802 + unsigned int stacksize;
34803 +
34804 + subj_map_set.s_size = arg->role_db.num_subjects;
34805 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
34806 + name_set.n_size = arg->role_db.num_objects;
34807 + inodev_set.i_size = arg->role_db.num_objects;
34808 +
34809 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
34810 + !name_set.n_size || !inodev_set.i_size)
34811 + return 1;
34812 +
34813 + if (!gr_init_uidset())
34814 + return 1;
34815 +
34816 + /* set up the stack that holds allocation info */
34817 +
34818 + stacksize = arg->role_db.num_pointers + 5;
34819 +
34820 + if (!acl_alloc_stack_init(stacksize))
34821 + return 1;
34822 +
34823 + /* grab reference for the real root dentry and vfsmount */
34824 + read_lock(&reaper->fs->lock);
34825 + real_root_mnt = mntget(reaper->fs->root.mnt);
34826 + real_root = dget(reaper->fs->root.dentry);
34827 + read_unlock(&reaper->fs->lock);
34828 +
34829 + fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
34830 + if (fakefs_obj == NULL)
34831 + return 1;
34832 + fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
34833 +
34834 + subj_map_set.s_hash =
34835 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
34836 + acl_role_set.r_hash =
34837 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
34838 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
34839 + inodev_set.i_hash =
34840 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
34841 +
34842 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
34843 + !name_set.n_hash || !inodev_set.i_hash)
34844 + return 1;
34845 +
34846 + memset(subj_map_set.s_hash, 0,
34847 + sizeof(struct subject_map *) * subj_map_set.s_size);
34848 + memset(acl_role_set.r_hash, 0,
34849 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
34850 + memset(name_set.n_hash, 0,
34851 + sizeof (struct name_entry *) * name_set.n_size);
34852 + memset(inodev_set.i_hash, 0,
34853 + sizeof (struct inodev_entry *) * inodev_set.i_size);
34854 +
34855 + return 0;
34856 +}
34857 +
34858 +/* free information not needed after startup
34859 + currently contains user->kernel pointer mappings for subjects
34860 +*/
34861 +
34862 +static void
34863 +free_init_variables(void)
34864 +{
34865 + __u32 i;
34866 +
34867 + if (subj_map_set.s_hash) {
34868 + for (i = 0; i < subj_map_set.s_size; i++) {
34869 + if (subj_map_set.s_hash[i]) {
34870 + kfree(subj_map_set.s_hash[i]);
34871 + subj_map_set.s_hash[i] = NULL;
34872 + }
34873 + }
34874 +
34875 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
34876 + PAGE_SIZE)
34877 + kfree(subj_map_set.s_hash);
34878 + else
34879 + vfree(subj_map_set.s_hash);
34880 + }
34881 +
34882 + return;
34883 +}
34884 +
34885 +static void
34886 +free_variables(void)
34887 +{
34888 + struct acl_subject_label *s;
34889 + struct acl_role_label *r;
34890 + struct task_struct *task, *task2;
34891 + unsigned int x;
34892 +
34893 + gr_clear_learn_entries();
34894 +
34895 + read_lock(&tasklist_lock);
34896 + do_each_thread(task2, task) {
34897 + task->acl_sp_role = 0;
34898 + task->acl_role_id = 0;
34899 + task->acl = NULL;
34900 + task->role = NULL;
34901 + } while_each_thread(task2, task);
34902 + read_unlock(&tasklist_lock);
34903 +
34904 + /* release the reference to the real root dentry and vfsmount */
34905 + if (real_root)
34906 + dput(real_root);
34907 + real_root = NULL;
34908 + if (real_root_mnt)
34909 + mntput(real_root_mnt);
34910 + real_root_mnt = NULL;
34911 +
34912 + /* free all object hash tables */
34913 +
34914 + FOR_EACH_ROLE_START(r)
34915 + if (r->subj_hash == NULL)
34916 + goto next_role;
34917 + FOR_EACH_SUBJECT_START(r, s, x)
34918 + if (s->obj_hash == NULL)
34919 + break;
34920 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
34921 + kfree(s->obj_hash);
34922 + else
34923 + vfree(s->obj_hash);
34924 + FOR_EACH_SUBJECT_END(s, x)
34925 + FOR_EACH_NESTED_SUBJECT_START(r, s)
34926 + if (s->obj_hash == NULL)
34927 + break;
34928 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
34929 + kfree(s->obj_hash);
34930 + else
34931 + vfree(s->obj_hash);
34932 + FOR_EACH_NESTED_SUBJECT_END(s)
34933 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
34934 + kfree(r->subj_hash);
34935 + else
34936 + vfree(r->subj_hash);
34937 + r->subj_hash = NULL;
34938 +next_role:
34939 + FOR_EACH_ROLE_END(r)
34940 +
34941 + acl_free_all();
34942 +
34943 + if (acl_role_set.r_hash) {
34944 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
34945 + PAGE_SIZE)
34946 + kfree(acl_role_set.r_hash);
34947 + else
34948 + vfree(acl_role_set.r_hash);
34949 + }
34950 + if (name_set.n_hash) {
34951 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
34952 + PAGE_SIZE)
34953 + kfree(name_set.n_hash);
34954 + else
34955 + vfree(name_set.n_hash);
34956 + }
34957 +
34958 + if (inodev_set.i_hash) {
34959 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
34960 + PAGE_SIZE)
34961 + kfree(inodev_set.i_hash);
34962 + else
34963 + vfree(inodev_set.i_hash);
34964 + }
34965 +
34966 + gr_free_uidset();
34967 +
34968 + memset(&name_set, 0, sizeof (struct name_db));
34969 + memset(&inodev_set, 0, sizeof (struct inodev_db));
34970 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
34971 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
34972 +
34973 + default_role = NULL;
34974 + role_list = NULL;
34975 +
34976 + return;
34977 +}
34978 +
34979 +static __u32
34980 +count_user_objs(struct acl_object_label *userp)
34981 +{
34982 + struct acl_object_label o_tmp;
34983 + __u32 num = 0;
34984 +
34985 + while (userp) {
34986 + if (copy_from_user(&o_tmp, userp,
34987 + sizeof (struct acl_object_label)))
34988 + break;
34989 +
34990 + userp = o_tmp.prev;
34991 + num++;
34992 + }
34993 +
34994 + return num;
34995 +}
34996 +
34997 +static struct acl_subject_label *
34998 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
34999 +
35000 +static int
35001 +copy_user_glob(struct acl_object_label *obj)
35002 +{
35003 + struct acl_object_label *g_tmp, **guser;
35004 + unsigned int len;
35005 + char *tmp;
35006 +
35007 + if (obj->globbed == NULL)
35008 + return 0;
35009 +
35010 + guser = &obj->globbed;
35011 + while (*guser) {
35012 + g_tmp = (struct acl_object_label *)
35013 + acl_alloc(sizeof (struct acl_object_label));
35014 + if (g_tmp == NULL)
35015 + return -ENOMEM;
35016 +
35017 + if (copy_from_user(g_tmp, *guser,
35018 + sizeof (struct acl_object_label)))
35019 + return -EFAULT;
35020 +
35021 + len = strnlen_user(g_tmp->filename, PATH_MAX);
35022 +
35023 + if (!len || len >= PATH_MAX)
35024 + return -EINVAL;
35025 +
35026 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35027 + return -ENOMEM;
35028 +
35029 + if (copy_from_user(tmp, g_tmp->filename, len))
35030 + return -EFAULT;
35031 + tmp[len-1] = '\0';
35032 + g_tmp->filename = tmp;
35033 +
35034 + *guser = g_tmp;
35035 + guser = &(g_tmp->next);
35036 + }
35037 +
35038 + return 0;
35039 +}
35040 +
35041 +static int
35042 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
35043 + struct acl_role_label *role)
35044 +{
35045 + struct acl_object_label *o_tmp;
35046 + unsigned int len;
35047 + int ret;
35048 + char *tmp;
35049 +
35050 + while (userp) {
35051 + if ((o_tmp = (struct acl_object_label *)
35052 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
35053 + return -ENOMEM;
35054 +
35055 + if (copy_from_user(o_tmp, userp,
35056 + sizeof (struct acl_object_label)))
35057 + return -EFAULT;
35058 +
35059 + userp = o_tmp->prev;
35060 +
35061 + len = strnlen_user(o_tmp->filename, PATH_MAX);
35062 +
35063 + if (!len || len >= PATH_MAX)
35064 + return -EINVAL;
35065 +
35066 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35067 + return -ENOMEM;
35068 +
35069 + if (copy_from_user(tmp, o_tmp->filename, len))
35070 + return -EFAULT;
35071 + tmp[len-1] = '\0';
35072 + o_tmp->filename = tmp;
35073 +
35074 + insert_acl_obj_label(o_tmp, subj);
35075 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
35076 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
35077 + return -ENOMEM;
35078 +
35079 + ret = copy_user_glob(o_tmp);
35080 + if (ret)
35081 + return ret;
35082 +
35083 + if (o_tmp->nested) {
35084 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
35085 + if (IS_ERR(o_tmp->nested))
35086 + return PTR_ERR(o_tmp->nested);
35087 +
35088 + /* insert into nested subject list */
35089 + o_tmp->nested->next = role->hash->first;
35090 + role->hash->first = o_tmp->nested;
35091 + }
35092 + }
35093 +
35094 + return 0;
35095 +}
35096 +
35097 +static __u32
35098 +count_user_subjs(struct acl_subject_label *userp)
35099 +{
35100 + struct acl_subject_label s_tmp;
35101 + __u32 num = 0;
35102 +
35103 + while (userp) {
35104 + if (copy_from_user(&s_tmp, userp,
35105 + sizeof (struct acl_subject_label)))
35106 + break;
35107 +
35108 + userp = s_tmp.prev;
35109 + /* do not count nested subjects against this count, since
35110 + they are not included in the hash table, but are
35111 + attached to objects. We have already counted
35112 + the subjects in userspace for the allocation
35113 + stack
35114 + */
35115 + if (!(s_tmp.mode & GR_NESTED))
35116 + num++;
35117 + }
35118 +
35119 + return num;
35120 +}
35121 +
35122 +static int
35123 +copy_user_allowedips(struct acl_role_label *rolep)
35124 +{
35125 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
35126 +
35127 + ruserip = rolep->allowed_ips;
35128 +
35129 + while (ruserip) {
35130 + rlast = rtmp;
35131 +
35132 + if ((rtmp = (struct role_allowed_ip *)
35133 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
35134 + return -ENOMEM;
35135 +
35136 + if (copy_from_user(rtmp, ruserip,
35137 + sizeof (struct role_allowed_ip)))
35138 + return -EFAULT;
35139 +
35140 + ruserip = rtmp->prev;
35141 +
35142 + if (!rlast) {
35143 + rtmp->prev = NULL;
35144 + rolep->allowed_ips = rtmp;
35145 + } else {
35146 + rlast->next = rtmp;
35147 + rtmp->prev = rlast;
35148 + }
35149 +
35150 + if (!ruserip)
35151 + rtmp->next = NULL;
35152 + }
35153 +
35154 + return 0;
35155 +}
35156 +
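
copy_user_allowedips() above, like copy_user_transitions() right after it, copies a userspace singly-linked list one node at a time: each copy_from_user() pulls a node across, and the walk then follows the prev pointer that was just copied in while the nodes are relinked on the kernel side. A rough user-space model of that pattern, with memcpy() standing in for copy_from_user() and error handling and cleanup trimmed down:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
    int value;
    struct node *prev;   /* in the "user" copy this chains to the next node to fetch */
    struct node *next;
};

static struct node *copy_chain(const struct node *user_head)
{
    struct node *head = NULL, *last = NULL;

    while (user_head) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return NULL;
        memcpy(n, user_head, sizeof(*n));  /* stands in for copy_from_user() */
        user_head = n->prev;               /* follow the pointer we just copied */

        if (!last)
            head = n;
        else
            last->next = n;
        n->prev = last;
        last = n;
        if (!user_head)
            n->next = NULL;
    }
    return head;
}

int main(void)
{
    struct node c = { 3, NULL, NULL };
    struct node b = { 2, &c,  NULL };
    struct node a = { 1, &b,  NULL };  /* "user" head; the chain runs through ->prev */
    const struct node *n;

    for (n = copy_chain(&a); n; n = n->next)
        printf("%d\n", n->value);      /* prints 1 2 3 */
    return 0;
}
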
35157 +static int
35158 +copy_user_transitions(struct acl_role_label *rolep)
35159 +{
35160 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
35161 +
35162 + unsigned int len;
35163 + char *tmp;
35164 +
35165 + rusertp = rolep->transitions;
35166 +
35167 + while (rusertp) {
35168 + rlast = rtmp;
35169 +
35170 + if ((rtmp = (struct role_transition *)
35171 + acl_alloc(sizeof (struct role_transition))) == NULL)
35172 + return -ENOMEM;
35173 +
35174 + if (copy_from_user(rtmp, rusertp,
35175 + sizeof (struct role_transition)))
35176 + return -EFAULT;
35177 +
35178 + rusertp = rtmp->prev;
35179 +
35180 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
35181 +
35182 + if (!len || len >= GR_SPROLE_LEN)
35183 + return -EINVAL;
35184 +
35185 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35186 + return -ENOMEM;
35187 +
35188 + if (copy_from_user(tmp, rtmp->rolename, len))
35189 + return -EFAULT;
35190 + tmp[len-1] = '\0';
35191 + rtmp->rolename = tmp;
35192 +
35193 + if (!rlast) {
35194 + rtmp->prev = NULL;
35195 + rolep->transitions = rtmp;
35196 + } else {
35197 + rlast->next = rtmp;
35198 + rtmp->prev = rlast;
35199 + }
35200 +
35201 + if (!rusertp)
35202 + rtmp->next = NULL;
35203 + }
35204 +
35205 + return 0;
35206 +}
35207 +
35208 +static struct acl_subject_label *
35209 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
35210 +{
35211 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
35212 + unsigned int len;
35213 + char *tmp;
35214 + __u32 num_objs;
35215 + struct acl_ip_label **i_tmp, *i_utmp2;
35216 + struct gr_hash_struct ghash;
35217 + struct subject_map *subjmap;
35218 + unsigned int i_num;
35219 + int err;
35220 +
35221 + s_tmp = lookup_subject_map(userp);
35222 +
35223 + /* we've already copied this subject into the kernel, just return
35224 + the reference to it, and don't copy it over again
35225 + */
35226 + if (s_tmp)
35227 + return(s_tmp);
35228 +
35229 + if ((s_tmp = (struct acl_subject_label *)
35230 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
35231 + return ERR_PTR(-ENOMEM);
35232 +
35233 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
35234 + if (subjmap == NULL)
35235 + return ERR_PTR(-ENOMEM);
35236 +
35237 + subjmap->user = userp;
35238 + subjmap->kernel = s_tmp;
35239 + insert_subj_map_entry(subjmap);
35240 +
35241 + if (copy_from_user(s_tmp, userp,
35242 + sizeof (struct acl_subject_label)))
35243 + return ERR_PTR(-EFAULT);
35244 +
35245 + len = strnlen_user(s_tmp->filename, PATH_MAX);
35246 +
35247 + if (!len || len >= PATH_MAX)
35248 + return ERR_PTR(-EINVAL);
35249 +
35250 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35251 + return ERR_PTR(-ENOMEM);
35252 +
35253 + if (copy_from_user(tmp, s_tmp->filename, len))
35254 + return ERR_PTR(-EFAULT);
35255 + tmp[len-1] = '\0';
35256 + s_tmp->filename = tmp;
35257 +
35258 + if (!strcmp(s_tmp->filename, "/"))
35259 + role->root_label = s_tmp;
35260 +
35261 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
35262 + return ERR_PTR(-EFAULT);
35263 +
35264 + /* copy user and group transition tables */
35265 +
35266 + if (s_tmp->user_trans_num) {
35267 + uid_t *uidlist;
35268 +
35269 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
35270 + if (uidlist == NULL)
35271 + return ERR_PTR(-ENOMEM);
35272 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
35273 + return ERR_PTR(-EFAULT);
35274 +
35275 + s_tmp->user_transitions = uidlist;
35276 + }
35277 +
35278 + if (s_tmp->group_trans_num) {
35279 + gid_t *gidlist;
35280 +
35281 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
35282 + if (gidlist == NULL)
35283 + return ERR_PTR(-ENOMEM);
35284 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
35285 + return ERR_PTR(-EFAULT);
35286 +
35287 + s_tmp->group_transitions = gidlist;
35288 + }
35289 +
35290 + /* set up object hash table */
35291 + num_objs = count_user_objs(ghash.first);
35292 +
35293 + s_tmp->obj_hash_size = num_objs;
35294 + s_tmp->obj_hash =
35295 + (struct acl_object_label **)
35296 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
35297 +
35298 + if (!s_tmp->obj_hash)
35299 + return ERR_PTR(-ENOMEM);
35300 +
35301 + memset(s_tmp->obj_hash, 0,
35302 + s_tmp->obj_hash_size *
35303 + sizeof (struct acl_object_label *));
35304 +
35305 + /* add in objects */
35306 + err = copy_user_objs(ghash.first, s_tmp, role);
35307 +
35308 + if (err)
35309 + return ERR_PTR(err);
35310 +
35311 + /* set pointer for parent subject */
35312 + if (s_tmp->parent_subject) {
35313 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
35314 +
35315 + if (IS_ERR(s_tmp2))
35316 + return s_tmp2;
35317 +
35318 + s_tmp->parent_subject = s_tmp2;
35319 + }
35320 +
35321 + /* add in ip acls */
35322 +
35323 + if (!s_tmp->ip_num) {
35324 + s_tmp->ips = NULL;
35325 + goto insert;
35326 + }
35327 +
35328 + i_tmp =
35329 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
35330 + sizeof (struct acl_ip_label *));
35331 +
35332 + if (!i_tmp)
35333 + return ERR_PTR(-ENOMEM);
35334 +
35335 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
35336 + *(i_tmp + i_num) =
35337 + (struct acl_ip_label *)
35338 + acl_alloc(sizeof (struct acl_ip_label));
35339 + if (!*(i_tmp + i_num))
35340 + return ERR_PTR(-ENOMEM);
35341 +
35342 + if (copy_from_user
35343 + (&i_utmp2, s_tmp->ips + i_num,
35344 + sizeof (struct acl_ip_label *)))
35345 + return ERR_PTR(-EFAULT);
35346 +
35347 + if (copy_from_user
35348 + (*(i_tmp + i_num), i_utmp2,
35349 + sizeof (struct acl_ip_label)))
35350 + return ERR_PTR(-EFAULT);
35351 +
35352 + if ((*(i_tmp + i_num))->iface == NULL)
35353 + continue;
35354 +
35355 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
35356 + if (!len || len >= IFNAMSIZ)
35357 + return ERR_PTR(-EINVAL);
35358 + tmp = acl_alloc(len);
35359 + if (tmp == NULL)
35360 + return ERR_PTR(-ENOMEM);
35361 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
35362 + return ERR_PTR(-EFAULT);
35363 + (*(i_tmp + i_num))->iface = tmp;
35364 + }
35365 +
35366 + s_tmp->ips = i_tmp;
35367 +
35368 +insert:
35369 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
35370 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
35371 + return ERR_PTR(-ENOMEM);
35372 +
35373 + return s_tmp;
35374 +}
35375 +
35376 +static int
35377 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
35378 +{
35379 + struct acl_subject_label s_pre;
35380 + struct acl_subject_label * ret;
35381 + int err;
35382 +
35383 + while (userp) {
35384 + if (copy_from_user(&s_pre, userp,
35385 + sizeof (struct acl_subject_label)))
35386 + return -EFAULT;
35387 +
35388 + /* do not add nested subjects here, add
35389 + while parsing objects
35390 + */
35391 +
35392 + if (s_pre.mode & GR_NESTED) {
35393 + userp = s_pre.prev;
35394 + continue;
35395 + }
35396 +
35397 + ret = do_copy_user_subj(userp, role);
35398 +
35399 + err = PTR_ERR(ret);
35400 + if (IS_ERR(ret))
35401 + return err;
35402 +
35403 + insert_acl_subj_label(ret, role);
35404 +
35405 + userp = s_pre.prev;
35406 + }
35407 +
35408 + return 0;
35409 +}
35410 +
35411 +static int
35412 +copy_user_acl(struct gr_arg *arg)
35413 +{
35414 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
35415 + struct sprole_pw *sptmp;
35416 + struct gr_hash_struct *ghash;
35417 + uid_t *domainlist;
35418 + unsigned int r_num;
35419 + unsigned int len;
35420 + char *tmp;
35421 + int err = 0;
35422 + __u16 i;
35423 + __u32 num_subjs;
35424 +
35425 + /* we need a default and kernel role */
35426 + if (arg->role_db.num_roles < 2)
35427 + return -EINVAL;
35428 +
35429 + /* copy special role authentication info from userspace */
35430 +
35431 + num_sprole_pws = arg->num_sprole_pws;
35432 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
35433 +
35434 + if (!acl_special_roles) {
35435 + err = -ENOMEM;
35436 + goto cleanup;
35437 + }
35438 +
35439 + for (i = 0; i < num_sprole_pws; i++) {
35440 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
35441 + if (!sptmp) {
35442 + err = -ENOMEM;
35443 + goto cleanup;
35444 + }
35445 + if (copy_from_user(sptmp, arg->sprole_pws + i,
35446 + sizeof (struct sprole_pw))) {
35447 + err = -EFAULT;
35448 + goto cleanup;
35449 + }
35450 +
35451 + len =
35452 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
35453 +
35454 + if (!len || len >= GR_SPROLE_LEN) {
35455 + err = -EINVAL;
35456 + goto cleanup;
35457 + }
35458 +
35459 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
35460 + err = -ENOMEM;
35461 + goto cleanup;
35462 + }
35463 +
35464 + if (copy_from_user(tmp, sptmp->rolename, len)) {
35465 + err = -EFAULT;
35466 + goto cleanup;
35467 + }
35468 + tmp[len-1] = '\0';
35469 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
35470 + printk(KERN_ALERT "Copying special role %s\n", tmp);
35471 +#endif
35472 + sptmp->rolename = tmp;
35473 + acl_special_roles[i] = sptmp;
35474 + }
35475 +
35476 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
35477 +
35478 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
35479 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
35480 +
35481 + if (!r_tmp) {
35482 + err = -ENOMEM;
35483 + goto cleanup;
35484 + }
35485 +
35486 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
35487 + sizeof (struct acl_role_label *))) {
35488 + err = -EFAULT;
35489 + goto cleanup;
35490 + }
35491 +
35492 + if (copy_from_user(r_tmp, r_utmp2,
35493 + sizeof (struct acl_role_label))) {
35494 + err = -EFAULT;
35495 + goto cleanup;
35496 + }
35497 +
35498 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
35499 +
35500 + if (!len || len >= GR_SPROLE_LEN) {
35501 + err = -EINVAL;
35502 + goto cleanup;
35503 + }
35504 +
35505 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
35506 + err = -ENOMEM;
35507 + goto cleanup;
35508 + }
35509 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
35510 + err = -EFAULT;
35511 + goto cleanup;
35512 + }
35513 + tmp[len-1] = '\0';
35514 + r_tmp->rolename = tmp;
35515 +
35516 + if (!strcmp(r_tmp->rolename, "default")
35517 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
35518 + default_role = r_tmp;
35519 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
35520 + kernel_role = r_tmp;
35521 + }
35522 +
35523 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
35524 + err = -ENOMEM;
35525 + goto cleanup;
35526 + }
35527 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
35528 + err = -EFAULT;
35529 + goto cleanup;
35530 + }
35531 +
35532 + r_tmp->hash = ghash;
35533 +
35534 + num_subjs = count_user_subjs(r_tmp->hash->first);
35535 +
35536 + r_tmp->subj_hash_size = num_subjs;
35537 + r_tmp->subj_hash =
35538 + (struct acl_subject_label **)
35539 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
35540 +
35541 + if (!r_tmp->subj_hash) {
35542 + err = -ENOMEM;
35543 + goto cleanup;
35544 + }
35545 +
35546 + err = copy_user_allowedips(r_tmp);
35547 + if (err)
35548 + goto cleanup;
35549 +
35550 + /* copy domain info */
35551 + if (r_tmp->domain_children != NULL) {
35552 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
35553 + if (domainlist == NULL) {
35554 + err = -ENOMEM;
35555 + goto cleanup;
35556 + }
35557 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
35558 + err = -EFAULT;
35559 + goto cleanup;
35560 + }
35561 + r_tmp->domain_children = domainlist;
35562 + }
35563 +
35564 + err = copy_user_transitions(r_tmp);
35565 + if (err)
35566 + goto cleanup;
35567 +
35568 + memset(r_tmp->subj_hash, 0,
35569 + r_tmp->subj_hash_size *
35570 + sizeof (struct acl_subject_label *));
35571 +
35572 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
35573 +
35574 + if (err)
35575 + goto cleanup;
35576 +
35577 + /* set nested subject list to null */
35578 + r_tmp->hash->first = NULL;
35579 +
35580 + insert_acl_role_label(r_tmp);
35581 + }
35582 +
35583 + goto return_err;
35584 + cleanup:
35585 + free_variables();
35586 + return_err:
35587 + return err;
35588 +
35589 +}
35590 +
35591 +static int
35592 +gracl_init(struct gr_arg *args)
35593 +{
35594 + int error = 0;
35595 +
35596 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
35597 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
35598 +
35599 + if (init_variables(args)) {
35600 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
35601 + error = -ENOMEM;
35602 + free_variables();
35603 + goto out;
35604 + }
35605 +
35606 + error = copy_user_acl(args);
35607 + free_init_variables();
35608 + if (error) {
35609 + free_variables();
35610 + goto out;
35611 + }
35612 +
35613 + if ((error = gr_set_acls(0))) {
35614 + free_variables();
35615 + goto out;
35616 + }
35617 +
35618 + pax_open_kernel();
35619 + gr_status |= GR_READY;
35620 + pax_close_kernel();
35621 +
35622 + out:
35623 + return error;
35624 +}
35625 +
35626 +/* derived from glibc fnmatch(); 0: match, 1: no match */
35627 +
35628 +static int
35629 +glob_match(const char *p, const char *n)
35630 +{
35631 + char c;
35632 +
35633 + while ((c = *p++) != '\0') {
35634 + switch (c) {
35635 + case '?':
35636 + if (*n == '\0')
35637 + return 1;
35638 + else if (*n == '/')
35639 + return 1;
35640 + break;
35641 + case '\\':
35642 + if (*n != c)
35643 + return 1;
35644 + break;
35645 + case '*':
35646 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
35647 + if (*n == '/')
35648 + return 1;
35649 + else if (c == '?') {
35650 + if (*n == '\0')
35651 + return 1;
35652 + else
35653 + ++n;
35654 + }
35655 + }
35656 + if (c == '\0') {
35657 + return 0;
35658 + } else {
35659 + const char *endp;
35660 +
35661 + if ((endp = strchr(n, '/')) == NULL)
35662 + endp = n + strlen(n);
35663 +
35664 + if (c == '[') {
35665 + for (--p; n < endp; ++n)
35666 + if (!glob_match(p, n))
35667 + return 0;
35668 + } else if (c == '/') {
35669 + while (*n != '\0' && *n != '/')
35670 + ++n;
35671 + if (*n == '/' && !glob_match(p, n + 1))
35672 + return 0;
35673 + } else {
35674 + for (--p; n < endp; ++n)
35675 + if (*n == c && !glob_match(p, n))
35676 + return 0;
35677 + }
35678 +
35679 + return 1;
35680 + }
35681 + case '[':
35682 + {
35683 + int not;
35684 + char cold;
35685 +
35686 + if (*n == '\0' || *n == '/')
35687 + return 1;
35688 +
35689 + not = (*p == '!' || *p == '^');
35690 + if (not)
35691 + ++p;
35692 +
35693 + c = *p++;
35694 + for (;;) {
35695 + unsigned char fn = (unsigned char)*n;
35696 +
35697 + if (c == '\0')
35698 + return 1;
35699 + else {
35700 + if (c == fn)
35701 + goto matched;
35702 + cold = c;
35703 + c = *p++;
35704 +
35705 + if (c == '-' && *p != ']') {
35706 + unsigned char cend = *p++;
35707 +
35708 + if (cend == '\0')
35709 + return 1;
35710 +
35711 + if (cold <= fn && fn <= cend)
35712 + goto matched;
35713 +
35714 + c = *p++;
35715 + }
35716 + }
35717 +
35718 + if (c == ']')
35719 + break;
35720 + }
35721 + if (!not)
35722 + return 1;
35723 + break;
35724 + matched:
35725 + while (c != ']') {
35726 + if (c == '\0')
35727 + return 1;
35728 +
35729 + c = *p++;
35730 + }
35731 + if (not)
35732 + return 1;
35733 + }
35734 + break;
35735 + default:
35736 + if (c != *n)
35737 + return 1;
35738 + }
35739 +
35740 + ++n;
35741 + }
35742 +
35743 + if (*n == '\0')
35744 + return 0;
35745 +
35746 + if (*n == '/')
35747 + return 0;
35748 +
35749 + return 1;
35750 +}
35751 +
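
Because the matcher above is derived from glibc's fnmatch(), its path-aware behaviour (0 on a match, wildcards and bracket classes not crossing '/') can be previewed from user space with fnmatch(3) and FNM_PATHNAME; the edge cases are not guaranteed to be identical to glob_match(), so treat this only as an approximation:

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
    /* 0 means match, non-zero means no match - the same convention as glob_match() */
    printf("%d\n", fnmatch("/home/*/.ssh", "/home/alice/.ssh", FNM_PATHNAME));  /* 0 */
    printf("%d\n", fnmatch("/tmp/?.log", "/tmp/a/b.log", FNM_PATHNAME));        /* non-zero */
    printf("%d\n", fnmatch("/dev/tty[0-9]", "/dev/tty7", FNM_PATHNAME));        /* 0 */
    return 0;
}
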
35752 +static struct acl_object_label *
35753 +chk_glob_label(struct acl_object_label *globbed,
35754 + struct dentry *dentry, struct vfsmount *mnt, char **path)
35755 +{
35756 + struct acl_object_label *tmp;
35757 +
35758 + if (*path == NULL)
35759 + *path = gr_to_filename_nolock(dentry, mnt);
35760 +
35761 + tmp = globbed;
35762 +
35763 + while (tmp) {
35764 + if (!glob_match(tmp->filename, *path))
35765 + return tmp;
35766 + tmp = tmp->next;
35767 + }
35768 +
35769 + return NULL;
35770 +}
35771 +
35772 +static struct acl_object_label *
35773 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
35774 + const ino_t curr_ino, const dev_t curr_dev,
35775 + const struct acl_subject_label *subj, char **path, const int checkglob)
35776 +{
35777 + struct acl_subject_label *tmpsubj;
35778 + struct acl_object_label *retval;
35779 + struct acl_object_label *retval2;
35780 +
35781 + tmpsubj = (struct acl_subject_label *) subj;
35782 + read_lock(&gr_inode_lock);
35783 + do {
35784 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
35785 + if (retval) {
35786 + if (checkglob && retval->globbed) {
35787 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
35788 + (struct vfsmount *)orig_mnt, path);
35789 + if (retval2)
35790 + retval = retval2;
35791 + }
35792 + break;
35793 + }
35794 + } while ((tmpsubj = tmpsubj->parent_subject));
35795 + read_unlock(&gr_inode_lock);
35796 +
35797 + return retval;
35798 +}
35799 +
35800 +static __inline__ struct acl_object_label *
35801 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
35802 + const struct dentry *curr_dentry,
35803 + const struct acl_subject_label *subj, char **path, const int checkglob)
35804 +{
35805 + return __full_lookup(orig_dentry, orig_mnt,
35806 + curr_dentry->d_inode->i_ino,
35807 + curr_dentry->d_inode->i_sb->s_dev, subj, path, checkglob);
35808 +}
35809 +
35810 +static struct acl_object_label *
35811 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35812 + const struct acl_subject_label *subj, char *path, const int checkglob)
35813 +{
35814 + struct dentry *dentry = (struct dentry *) l_dentry;
35815 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
35816 + struct acl_object_label *retval;
35817 +
35818 + spin_lock(&dcache_lock);
35819 +
35820 + if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt ||
35821 +#ifdef CONFIG_HUGETLBFS
35822 + mnt == hugetlbfs_vfsmount ||
35823 +#endif
35824 + /* ignore Eric Biederman */
35825 + IS_PRIVATE(l_dentry->d_inode))) {
35826 + retval = fakefs_obj;
35827 + goto out;
35828 + }
35829 +
35830 + for (;;) {
35831 + if (dentry == real_root && mnt == real_root_mnt)
35832 + break;
35833 +
35834 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
35835 + if (mnt->mnt_parent == mnt)
35836 + break;
35837 +
35838 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35839 + if (retval != NULL)
35840 + goto out;
35841 +
35842 + dentry = mnt->mnt_mountpoint;
35843 + mnt = mnt->mnt_parent;
35844 + continue;
35845 + }
35846 +
35847 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35848 + if (retval != NULL)
35849 + goto out;
35850 +
35851 + dentry = dentry->d_parent;
35852 + }
35853 +
35854 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35855 +
35856 + if (retval == NULL)
35857 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
35858 +out:
35859 + spin_unlock(&dcache_lock);
35860 + return retval;
35861 +}
35862 +
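
__chk_obj_label() essentially resolves an object to its nearest labelled ancestor: it checks the exact dentry first, then walks up parents and across mountpoints until something in the subject's object hash matches, falling back to the real root's label. The real walk also consults globbed entries via chk_glob_label(); the string-based user-space sketch below keeps only the nearest-ancestor idea, with a made-up two-entry policy:

#include <stdio.h>
#include <string.h>

struct label {
    const char *path;
    const char *mode;
};

static const struct label policy[] = {   /* hypothetical policy */
    { "/var/log", "ra" },
    { "/",        "h"  },
};

static const char *resolve(const char *path)
{
    char buf[256];
    size_t i;
    char *slash;

    snprintf(buf, sizeof(buf), "%s", path);
    for (;;) {
        for (i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
            if (!strcmp(buf, policy[i].path))
                return policy[i].mode;
        if (!strcmp(buf, "/"))
            return "";                   /* nothing matched, not even "/" */
        slash = strrchr(buf, '/');
        *slash = '\0';                   /* drop the last path component */
        if (buf[0] == '\0')
            strcpy(buf, "/");
    }
}

int main(void)
{
    printf("%s\n", resolve("/var/log/nginx/access.log"));  /* "ra", inherited from /var/log */
    printf("%s\n", resolve("/etc/passwd"));                /* "h",  inherited from /        */
    return 0;
}
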
35863 +static __inline__ struct acl_object_label *
35864 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35865 + const struct acl_subject_label *subj)
35866 +{
35867 + char *path = NULL;
35868 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
35869 +}
35870 +
35871 +static __inline__ struct acl_object_label *
35872 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35873 + const struct acl_subject_label *subj)
35874 +{
35875 + char *path = NULL;
35876 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 0);
35877 +}
35878 +
35879 +static __inline__ struct acl_object_label *
35880 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35881 + const struct acl_subject_label *subj, char *path)
35882 +{
35883 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
35884 +}
35885 +
35886 +static struct acl_subject_label *
35887 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35888 + const struct acl_role_label *role)
35889 +{
35890 + struct dentry *dentry = (struct dentry *) l_dentry;
35891 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
35892 + struct acl_subject_label *retval;
35893 +
35894 + spin_lock(&dcache_lock);
35895 +
35896 + for (;;) {
35897 + if (dentry == real_root && mnt == real_root_mnt)
35898 + break;
35899 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
35900 + if (mnt->mnt_parent == mnt)
35901 + break;
35902 +
35903 + read_lock(&gr_inode_lock);
35904 + retval =
35905 + lookup_acl_subj_label(dentry->d_inode->i_ino,
35906 + dentry->d_inode->i_sb->s_dev, role);
35907 + read_unlock(&gr_inode_lock);
35908 + if (retval != NULL)
35909 + goto out;
35910 +
35911 + dentry = mnt->mnt_mountpoint;
35912 + mnt = mnt->mnt_parent;
35913 + continue;
35914 + }
35915 +
35916 + read_lock(&gr_inode_lock);
35917 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
35918 + dentry->d_inode->i_sb->s_dev, role);
35919 + read_unlock(&gr_inode_lock);
35920 + if (retval != NULL)
35921 + goto out;
35922 +
35923 + dentry = dentry->d_parent;
35924 + }
35925 +
35926 + read_lock(&gr_inode_lock);
35927 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
35928 + dentry->d_inode->i_sb->s_dev, role);
35929 + read_unlock(&gr_inode_lock);
35930 +
35931 + if (unlikely(retval == NULL)) {
35932 + read_lock(&gr_inode_lock);
35933 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
35934 + real_root->d_inode->i_sb->s_dev, role);
35935 + read_unlock(&gr_inode_lock);
35936 + }
35937 +out:
35938 + spin_unlock(&dcache_lock);
35939 +
35940 + return retval;
35941 +}
35942 +
35943 +static void
35944 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
35945 +{
35946 + struct task_struct *task = current;
35947 + const struct cred *cred = current_cred();
35948 +
35949 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
35950 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
35951 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
35952 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->curr_ip);
35953 +
35954 + return;
35955 +}
35956 +
35957 +static void
35958 +gr_log_learn_sysctl(const char *path, const __u32 mode)
35959 +{
35960 + struct task_struct *task = current;
35961 + const struct cred *cred = current_cred();
35962 +
35963 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
35964 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
35965 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
35966 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->curr_ip);
35967 +
35968 + return;
35969 +}
35970 +
35971 +static void
35972 +gr_log_learn_id_change(const char type, const unsigned int real,
35973 + const unsigned int effective, const unsigned int fs)
35974 +{
35975 + struct task_struct *task = current;
35976 + const struct cred *cred = current_cred();
35977 +
35978 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
35979 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
35980 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
35981 + type, real, effective, fs, &task->signal->curr_ip);
35982 +
35983 + return;
35984 +}
35985 +
35986 +__u32
35987 +gr_check_link(const struct dentry * new_dentry,
35988 + const struct dentry * parent_dentry,
35989 + const struct vfsmount * parent_mnt,
35990 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
35991 +{
35992 + struct acl_object_label *obj;
35993 + __u32 oldmode, newmode;
35994 + __u32 needmode;
35995 +
35996 + if (unlikely(!(gr_status & GR_READY)))
35997 + return (GR_CREATE | GR_LINK);
35998 +
35999 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
36000 + oldmode = obj->mode;
36001 +
36002 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36003 + oldmode |= (GR_CREATE | GR_LINK);
36004 +
36005 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
36006 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
36007 + needmode |= GR_SETID | GR_AUDIT_SETID;
36008 +
36009 + newmode =
36010 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
36011 + oldmode | needmode);
36012 +
36013 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
36014 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
36015 + GR_INHERIT | GR_AUDIT_INHERIT);
36016 +
36017 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
36018 + goto bad;
36019 +
36020 + if ((oldmode & needmode) != needmode)
36021 + goto bad;
36022 +
36023 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
36024 + if ((newmode & needmode) != needmode)
36025 + goto bad;
36026 +
36027 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
36028 + return newmode;
36029 +bad:
36030 + needmode = oldmode;
36031 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
36032 + needmode |= GR_SETID;
36033 +
36034 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
36035 + gr_log_learn(old_dentry, old_mnt, needmode);
36036 + return (GR_CREATE | GR_LINK);
36037 + } else if (newmode & GR_SUPPRESS)
36038 + return GR_SUPPRESS;
36039 + else
36040 + return 0;
36041 +}
36042 +
36043 +__u32
36044 +gr_search_file(const struct dentry * dentry, const __u32 mode,
36045 + const struct vfsmount * mnt)
36046 +{
36047 + __u32 retval = mode;
36048 + struct acl_subject_label *curracl;
36049 + struct acl_object_label *currobj;
36050 +
36051 + if (unlikely(!(gr_status & GR_READY)))
36052 + return (mode & ~GR_AUDITS);
36053 +
36054 + curracl = current->acl;
36055 +
36056 + currobj = chk_obj_label(dentry, mnt, curracl);
36057 + retval = currobj->mode & mode;
36058 +
36059 + if (unlikely
36060 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
36061 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
36062 + __u32 new_mode = mode;
36063 +
36064 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36065 +
36066 + retval = new_mode;
36067 +
36068 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
36069 + new_mode |= GR_INHERIT;
36070 +
36071 + if (!(mode & GR_NOLEARN))
36072 + gr_log_learn(dentry, mnt, new_mode);
36073 + }
36074 +
36075 + return retval;
36076 +}
36077 +
36078 +__u32
36079 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
36080 + const struct vfsmount * mnt, const __u32 mode)
36081 +{
36082 + struct name_entry *match;
36083 + struct acl_object_label *matchpo;
36084 + struct acl_subject_label *curracl;
36085 + char *path;
36086 + __u32 retval;
36087 +
36088 + if (unlikely(!(gr_status & GR_READY)))
36089 + return (mode & ~GR_AUDITS);
36090 +
36091 + preempt_disable();
36092 + path = gr_to_filename_rbac(new_dentry, mnt);
36093 + match = lookup_name_entry_create(path);
36094 +
36095 + if (!match)
36096 + goto check_parent;
36097 +
36098 + curracl = current->acl;
36099 +
36100 + read_lock(&gr_inode_lock);
36101 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
36102 + read_unlock(&gr_inode_lock);
36103 +
36104 + if (matchpo) {
36105 + if ((matchpo->mode & mode) !=
36106 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
36107 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
36108 + __u32 new_mode = mode;
36109 +
36110 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36111 +
36112 + gr_log_learn(new_dentry, mnt, new_mode);
36113 +
36114 + preempt_enable();
36115 + return new_mode;
36116 + }
36117 + preempt_enable();
36118 + return (matchpo->mode & mode);
36119 + }
36120 +
36121 + check_parent:
36122 + curracl = current->acl;
36123 +
36124 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
36125 + retval = matchpo->mode & mode;
36126 +
36127 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
36128 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
36129 + __u32 new_mode = mode;
36130 +
36131 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36132 +
36133 + gr_log_learn(new_dentry, mnt, new_mode);
36134 + preempt_enable();
36135 + return new_mode;
36136 + }
36137 +
36138 + preempt_enable();
36139 + return retval;
36140 +}
36141 +
36142 +int
36143 +gr_check_hidden_task(const struct task_struct *task)
36144 +{
36145 + if (unlikely(!(gr_status & GR_READY)))
36146 + return 0;
36147 +
36148 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
36149 + return 1;
36150 +
36151 + return 0;
36152 +}
36153 +
36154 +int
36155 +gr_check_protected_task(const struct task_struct *task)
36156 +{
36157 + if (unlikely(!(gr_status & GR_READY) || !task))
36158 + return 0;
36159 +
36160 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
36161 + task->acl != current->acl)
36162 + return 1;
36163 +
36164 + return 0;
36165 +}
36166 +
36167 +void
36168 +gr_copy_label(struct task_struct *tsk)
36169 +{
36170 + tsk->signal->used_accept = 0;
36171 + tsk->acl_sp_role = 0;
36172 + tsk->acl_role_id = current->acl_role_id;
36173 + tsk->acl = current->acl;
36174 + tsk->role = current->role;
36175 + tsk->signal->curr_ip = current->signal->curr_ip;
36176 + if (current->exec_file)
36177 + get_file(current->exec_file);
36178 + tsk->exec_file = current->exec_file;
36179 + tsk->is_writable = current->is_writable;
36180 + if (unlikely(current->signal->used_accept))
36181 + current->signal->curr_ip = 0;
36182 +
36183 + return;
36184 +}
36185 +
36186 +static void
36187 +gr_set_proc_res(struct task_struct *task)
36188 +{
36189 + struct acl_subject_label *proc;
36190 + unsigned short i;
36191 +
36192 + proc = task->acl;
36193 +
36194 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
36195 + return;
36196 +
36197 + for (i = 0; i < RLIM_NLIMITS; i++) {
36198 + if (!(proc->resmask & (1 << i)))
36199 + continue;
36200 +
36201 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
36202 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
36203 + }
36204 +
36205 + return;
36206 +}
36207 +
36208 +int
36209 +gr_check_user_change(int real, int effective, int fs)
36210 +{
36211 + unsigned int i;
36212 + __u16 num;
36213 + uid_t *uidlist;
36214 + int curuid;
36215 + int realok = 0;
36216 + int effectiveok = 0;
36217 + int fsok = 0;
36218 +
36219 + if (unlikely(!(gr_status & GR_READY)))
36220 + return 0;
36221 +
36222 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36223 + gr_log_learn_id_change('u', real, effective, fs);
36224 +
36225 + num = current->acl->user_trans_num;
36226 + uidlist = current->acl->user_transitions;
36227 +
36228 + if (uidlist == NULL)
36229 + return 0;
36230 +
36231 + if (real == -1)
36232 + realok = 1;
36233 + if (effective == -1)
36234 + effectiveok = 1;
36235 + if (fs == -1)
36236 + fsok = 1;
36237 +
36238 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
36239 + for (i = 0; i < num; i++) {
36240 + curuid = (int)uidlist[i];
36241 + if (real == curuid)
36242 + realok = 1;
36243 + if (effective == curuid)
36244 + effectiveok = 1;
36245 + if (fs == curuid)
36246 + fsok = 1;
36247 + }
36248 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
36249 + for (i = 0; i < num; i++) {
36250 + curuid = (int)uidlist[i];
36251 + if (real == curuid)
36252 + break;
36253 + if (effective == curuid)
36254 + break;
36255 + if (fs == curuid)
36256 + break;
36257 + }
36258 + /* not in deny list */
36259 + if (i == num) {
36260 + realok = 1;
36261 + effectiveok = 1;
36262 + fsok = 1;
36263 + }
36264 + }
36265 +
36266 + if (realok && effectiveok && fsok)
36267 + return 0;
36268 + else {
36269 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
36270 + return 1;
36271 + }
36272 +}
36273 +
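
gr_check_user_change() above and gr_check_group_change() below apply the same allow-list/deny-list rule to each of the real, effective, and fs ids. Collapsed to a single id, the decision reduces to the sketch below; the ID_ALLOW/ID_DENY names and the uid values are invented for the illustration (the kernel code uses its own GR_ID_ALLOW/GR_ID_DENY flags):

#include <stdio.h>

#define ID_ALLOW 1
#define ID_DENY  2

/* returns 1 if the transition to 'target' is permitted, 0 if it must be refused */
static int id_change_permitted(int list_type, const int *list, int num, int target)
{
    int i;

    if (target == -1)                    /* -1 means this id is not being changed */
        return 1;

    for (i = 0; i < num; i++)
        if (list[i] == target)
            return list_type == ID_ALLOW;   /* on the allow list: ok; on the deny list: refused */

    return list_type == ID_DENY;            /* absent from a deny list: ok; absent from an allow list: refused */
}

int main(void)
{
    int allow[] = { 0, 33 };   /* hypothetical policy: may become root or uid 33 only */

    printf("%d\n", id_change_permitted(ID_ALLOW, allow, 2, 33));    /* 1 */
    printf("%d\n", id_change_permitted(ID_ALLOW, allow, 2, 1000));  /* 0 */
    return 0;
}
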
36274 +int
36275 +gr_check_group_change(int real, int effective, int fs)
36276 +{
36277 + unsigned int i;
36278 + __u16 num;
36279 + gid_t *gidlist;
36280 + int curgid;
36281 + int realok = 0;
36282 + int effectiveok = 0;
36283 + int fsok = 0;
36284 +
36285 + if (unlikely(!(gr_status & GR_READY)))
36286 + return 0;
36287 +
36288 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36289 + gr_log_learn_id_change('g', real, effective, fs);
36290 +
36291 + num = current->acl->group_trans_num;
36292 + gidlist = current->acl->group_transitions;
36293 +
36294 + if (gidlist == NULL)
36295 + return 0;
36296 +
36297 + if (real == -1)
36298 + realok = 1;
36299 + if (effective == -1)
36300 + effectiveok = 1;
36301 + if (fs == -1)
36302 + fsok = 1;
36303 +
36304 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
36305 + for (i = 0; i < num; i++) {
36306 + curgid = (int)gidlist[i];
36307 + if (real == curgid)
36308 + realok = 1;
36309 + if (effective == curgid)
36310 + effectiveok = 1;
36311 + if (fs == curgid)
36312 + fsok = 1;
36313 + }
36314 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
36315 + for (i = 0; i < num; i++) {
36316 + curgid = (int)gidlist[i];
36317 + if (real == curgid)
36318 + break;
36319 + if (effective == curgid)
36320 + break;
36321 + if (fs == curgid)
36322 + break;
36323 + }
36324 + /* not in deny list */
36325 + if (i == num) {
36326 + realok = 1;
36327 + effectiveok = 1;
36328 + fsok = 1;
36329 + }
36330 + }
36331 +
36332 + if (realok && effectiveok && fsok)
36333 + return 0;
36334 + else {
36335 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
36336 + return 1;
36337 + }
36338 +}
36339 +
36340 +void
36341 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
36342 +{
36343 + struct acl_role_label *role = task->role;
36344 + struct acl_subject_label *subj = NULL;
36345 + struct acl_object_label *obj;
36346 + struct file *filp;
36347 +
36348 + if (unlikely(!(gr_status & GR_READY)))
36349 + return;
36350 +
36351 + filp = task->exec_file;
36352 +
36353 + /* kernel process, we'll give them the kernel role */
36354 + if (unlikely(!filp)) {
36355 + task->role = kernel_role;
36356 + task->acl = kernel_role->root_label;
36357 + return;
36358 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
36359 + role = lookup_acl_role_label(task, uid, gid);
36360 +
36361 + /* perform subject lookup in possibly new role;
36362 + we can use this result below in the case where role == task->role
36363 + */
36364 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
36365 +
36366 + /* if we changed uid/gid but ended up with the same role
36367 + and are using inheritance, don't lose the inherited subject:
36368 + if the current subject differs from what a normal lookup
36369 + would return, we arrived at it via inheritance, so keep
36370 + that subject
36371 + */
36372 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
36373 + (subj == task->acl)))
36374 + task->acl = subj;
36375 +
36376 + task->role = role;
36377 +
36378 + task->is_writable = 0;
36379 +
36380 + /* ignore additional mmap checks for processes that are writable
36381 + by the default ACL */
36382 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
36383 + if (unlikely(obj->mode & GR_WRITE))
36384 + task->is_writable = 1;
36385 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
36386 + if (unlikely(obj->mode & GR_WRITE))
36387 + task->is_writable = 1;
36388 +
36389 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36390 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
36391 +#endif
36392 +
36393 + gr_set_proc_res(task);
36394 +
36395 + return;
36396 +}
36397 +
36398 +int
36399 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
36400 + const int unsafe_share)
36401 +{
36402 + struct task_struct *task = current;
36403 + struct acl_subject_label *newacl;
36404 + struct acl_object_label *obj;
36405 + __u32 retmode;
36406 +
36407 + if (unlikely(!(gr_status & GR_READY)))
36408 + return 0;
36409 +
36410 + newacl = chk_subj_label(dentry, mnt, task->role);
36411 +
36412 + task_lock(task);
36413 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
36414 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
36415 + !(task->role->roletype & GR_ROLE_GOD) &&
36416 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
36417 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
36418 + task_unlock(task);
36419 + if (unsafe_share)
36420 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
36421 + else
36422 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
36423 + return -EACCES;
36424 + }
36425 + task_unlock(task);
36426 +
36427 + obj = chk_obj_label(dentry, mnt, task->acl);
36428 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
36429 +
36430 + if (!(task->acl->mode & GR_INHERITLEARN) &&
36431 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
36432 + if (obj->nested)
36433 + task->acl = obj->nested;
36434 + else
36435 + task->acl = newacl;
36436 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
36437 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
36438 +
36439 + task->is_writable = 0;
36440 +
36441 + /* ignore additional mmap checks for processes that are writable
36442 + by the default ACL */
36443 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
36444 + if (unlikely(obj->mode & GR_WRITE))
36445 + task->is_writable = 1;
36446 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
36447 + if (unlikely(obj->mode & GR_WRITE))
36448 + task->is_writable = 1;
36449 +
36450 + gr_set_proc_res(task);
36451 +
36452 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36453 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
36454 +#endif
36455 + return 0;
36456 +}
36457 +
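/*
 * Illustrative sketch (editor-added, not part of the patch): the subject
 * transition decision in gr_set_proc_label() above, as a pure function over
 * flag bits. The F_* values are stand-ins, not the real GR_* constants.
 */
#include <stdio.h>

#define F_INHERITLEARN (1u << 0)  /* current subject: inheritance-learning */
#define F_LEARN        (1u << 1)  /* new subject: learning */
#define F_INHERIT      (1u << 2)  /* object: binary may inherit caller's subject */

enum next_subject { KEEP_CURRENT, USE_NESTED, USE_NEW };

static enum next_subject pick_subject(unsigned cur_mode, unsigned new_mode,
                                      unsigned obj_mode, int has_nested)
{
	if (!(cur_mode & F_INHERITLEARN) &&
	    ((new_mode & F_LEARN) || !(obj_mode & F_INHERIT)))
		return has_nested ? USE_NESTED : USE_NEW;
	return KEEP_CURRENT; /* inherited: the caller's subject stays in force */
}

int main(void)
{
	/* plain exec of a labelled binary: switch to its subject */
	printf("%d\n", pick_subject(0, 0, 0, 0));          /* 2 = USE_NEW */
	/* object marked inheritable: keep the current subject */
	printf("%d\n", pick_subject(0, 0, F_INHERIT, 0));  /* 0 = KEEP_CURRENT */
	return 0;
}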
36458 +/* always called with valid inodev ptr */
36459 +static void
36460 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
36461 +{
36462 + struct acl_object_label *matchpo;
36463 + struct acl_subject_label *matchps;
36464 + struct acl_subject_label *subj;
36465 + struct acl_role_label *role;
36466 + unsigned int x;
36467 +
36468 + FOR_EACH_ROLE_START(role)
36469 + FOR_EACH_SUBJECT_START(role, subj, x)
36470 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
36471 + matchpo->mode |= GR_DELETED;
36472 + FOR_EACH_SUBJECT_END(subj,x)
36473 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
36474 + if (subj->inode == ino && subj->device == dev)
36475 + subj->mode |= GR_DELETED;
36476 + FOR_EACH_NESTED_SUBJECT_END(subj)
36477 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
36478 + matchps->mode |= GR_DELETED;
36479 + FOR_EACH_ROLE_END(role)
36480 +
36481 + inodev->nentry->deleted = 1;
36482 +
36483 + return;
36484 +}
36485 +
36486 +void
36487 +gr_handle_delete(const ino_t ino, const dev_t dev)
36488 +{
36489 + struct inodev_entry *inodev;
36490 +
36491 + if (unlikely(!(gr_status & GR_READY)))
36492 + return;
36493 +
36494 + write_lock(&gr_inode_lock);
36495 + inodev = lookup_inodev_entry(ino, dev);
36496 + if (inodev != NULL)
36497 + do_handle_delete(inodev, ino, dev);
36498 + write_unlock(&gr_inode_lock);
36499 +
36500 + return;
36501 +}
36502 +
36503 +static void
36504 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
36505 + const ino_t newinode, const dev_t newdevice,
36506 + struct acl_subject_label *subj)
36507 +{
36508 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
36509 + struct acl_object_label *match;
36510 +
36511 + match = subj->obj_hash[index];
36512 +
36513 + while (match && (match->inode != oldinode ||
36514 + match->device != olddevice ||
36515 + !(match->mode & GR_DELETED)))
36516 + match = match->next;
36517 +
36518 + if (match && (match->inode == oldinode)
36519 + && (match->device == olddevice)
36520 + && (match->mode & GR_DELETED)) {
36521 + if (match->prev == NULL) {
36522 + subj->obj_hash[index] = match->next;
36523 + if (match->next != NULL)
36524 + match->next->prev = NULL;
36525 + } else {
36526 + match->prev->next = match->next;
36527 + if (match->next != NULL)
36528 + match->next->prev = match->prev;
36529 + }
36530 + match->prev = NULL;
36531 + match->next = NULL;
36532 + match->inode = newinode;
36533 + match->device = newdevice;
36534 + match->mode &= ~GR_DELETED;
36535 +
36536 + insert_acl_obj_label(match, subj);
36537 + }
36538 +
36539 + return;
36540 +}
36541 +
36542 +static void
36543 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
36544 + const ino_t newinode, const dev_t newdevice,
36545 + struct acl_role_label *role)
36546 +{
36547 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
36548 + struct acl_subject_label *match;
36549 +
36550 + match = role->subj_hash[index];
36551 +
36552 + while (match && (match->inode != oldinode ||
36553 + match->device != olddevice ||
36554 + !(match->mode & GR_DELETED)))
36555 + match = match->next;
36556 +
36557 + if (match && (match->inode == oldinode)
36558 + && (match->device == olddevice)
36559 + && (match->mode & GR_DELETED)) {
36560 + if (match->prev == NULL) {
36561 + role->subj_hash[index] = match->next;
36562 + if (match->next != NULL)
36563 + match->next->prev = NULL;
36564 + } else {
36565 + match->prev->next = match->next;
36566 + if (match->next != NULL)
36567 + match->next->prev = match->prev;
36568 + }
36569 + match->prev = NULL;
36570 + match->next = NULL;
36571 + match->inode = newinode;
36572 + match->device = newdevice;
36573 + match->mode &= ~GR_DELETED;
36574 +
36575 + insert_acl_subj_label(match, role);
36576 + }
36577 +
36578 + return;
36579 +}
36580 +
36581 +static void
36582 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
36583 + const ino_t newinode, const dev_t newdevice)
36584 +{
36585 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
36586 + struct inodev_entry *match;
36587 +
36588 + match = inodev_set.i_hash[index];
36589 +
36590 + while (match && (match->nentry->inode != oldinode ||
36591 + match->nentry->device != olddevice || !match->nentry->deleted))
36592 + match = match->next;
36593 +
36594 + if (match && (match->nentry->inode == oldinode)
36595 + && (match->nentry->device == olddevice) &&
36596 + match->nentry->deleted) {
36597 + if (match->prev == NULL) {
36598 + inodev_set.i_hash[index] = match->next;
36599 + if (match->next != NULL)
36600 + match->next->prev = NULL;
36601 + } else {
36602 + match->prev->next = match->next;
36603 + if (match->next != NULL)
36604 + match->next->prev = match->prev;
36605 + }
36606 + match->prev = NULL;
36607 + match->next = NULL;
36608 + match->nentry->inode = newinode;
36609 + match->nentry->device = newdevice;
36610 + match->nentry->deleted = 0;
36611 +
36612 + insert_inodev_entry(match);
36613 + }
36614 +
36615 + return;
36616 +}
36617 +
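/*
 * Illustrative sketch (editor-added, not part of the patch): the unlink /
 * relabel / reinsert pattern shared by update_acl_obj_label(),
 * update_acl_subj_label() and update_inodev_entry() above, shown on a toy
 * doubly-linked hash chain. The types and hash function are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUCKETS 8

struct entry {
	unsigned long inode, device;
	int deleted;
	struct entry *prev, *next;
};

static struct entry *table[BUCKETS];

static unsigned hash(unsigned long inode, unsigned long device)
{
	return (unsigned)((inode ^ device) % BUCKETS);
}

static void insert(struct entry *e)
{
	unsigned i = hash(e->inode, e->device);
	e->prev = NULL;
	e->next = table[i];
	if (table[i])
		table[i]->prev = e;
	table[i] = e;
}

/* find a deleted entry under (oldino, olddev) and re-home it to (newino, newdev) */
static void relabel(unsigned long oldino, unsigned long olddev,
                    unsigned long newino, unsigned long newdev)
{
	unsigned i = hash(oldino, olddev);
	struct entry *m = table[i];

	while (m && (m->inode != oldino || m->device != olddev || !m->deleted))
		m = m->next;
	if (!m)
		return;
	/* unlink from the old bucket */
	if (m->prev)
		m->prev->next = m->next;
	else
		table[i] = m->next;
	if (m->next)
		m->next->prev = m->prev;
	/* relabel and reinsert under the new key */
	m->inode = newino;
	m->device = newdev;
	m->deleted = 0;
	insert(m);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	if (!e)
		return 1;
	e->inode = 42; e->device = 8; e->deleted = 1;
	insert(e);
	relabel(42, 8, 99, 8);
	printf("now at %lu, deleted=%d\n", e->inode, e->deleted); /* 99, 0 */
	free(e);
	return 0;
}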
36618 +static void
36619 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
36620 + const struct vfsmount *mnt)
36621 +{
36622 + struct acl_subject_label *subj;
36623 + struct acl_role_label *role;
36624 + unsigned int x;
36625 +
36626 + FOR_EACH_ROLE_START(role)
36627 + update_acl_subj_label(matchn->inode, matchn->device,
36628 + dentry->d_inode->i_ino,
36629 + dentry->d_inode->i_sb->s_dev, role);
36630 +
36631 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
36632 + if ((subj->inode == dentry->d_inode->i_ino) &&
36633 + (subj->device == dentry->d_inode->i_sb->s_dev)) {
36634 + subj->inode = dentry->d_inode->i_ino;
36635 + subj->device = dentry->d_inode->i_sb->s_dev;
36636 + }
36637 + FOR_EACH_NESTED_SUBJECT_END(subj)
36638 + FOR_EACH_SUBJECT_START(role, subj, x)
36639 + update_acl_obj_label(matchn->inode, matchn->device,
36640 + dentry->d_inode->i_ino,
36641 + dentry->d_inode->i_sb->s_dev, subj);
36642 + FOR_EACH_SUBJECT_END(subj,x)
36643 + FOR_EACH_ROLE_END(role)
36644 +
36645 + update_inodev_entry(matchn->inode, matchn->device,
36646 + dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
36647 +
36648 + return;
36649 +}
36650 +
36651 +void
36652 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
36653 +{
36654 + struct name_entry *matchn;
36655 +
36656 + if (unlikely(!(gr_status & GR_READY)))
36657 + return;
36658 +
36659 + preempt_disable();
36660 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
36661 +
36662 + if (unlikely((unsigned long)matchn)) {
36663 + write_lock(&gr_inode_lock);
36664 + do_handle_create(matchn, dentry, mnt);
36665 + write_unlock(&gr_inode_lock);
36666 + }
36667 + preempt_enable();
36668 +
36669 + return;
36670 +}
36671 +
36672 +void
36673 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
36674 + struct dentry *old_dentry,
36675 + struct dentry *new_dentry,
36676 + struct vfsmount *mnt, const __u8 replace)
36677 +{
36678 + struct name_entry *matchn;
36679 + struct inodev_entry *inodev;
36680 +
36681 + /* vfs_rename swaps the name and parent link for old_dentry and
36682 + new_dentry
36683 + at this point, old_dentry has the new name, parent link, and inode
36684 + for the renamed file
36685 + if a file is being replaced by a rename, new_dentry has the inode
36686 + and name for the replaced file
36687 + */
36688 +
36689 + if (unlikely(!(gr_status & GR_READY)))
36690 + return;
36691 +
36692 + preempt_disable();
36693 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
36694 +
36695 + /* we wouldn't have to check d_inode if it weren't for
36696 + NFS silly-renaming
36697 + */
36698 +
36699 + write_lock(&gr_inode_lock);
36700 + if (unlikely(replace && new_dentry->d_inode)) {
36701 + inodev = lookup_inodev_entry(new_dentry->d_inode->i_ino,
36702 + new_dentry->d_inode->i_sb->s_dev);
36703 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
36704 + do_handle_delete(inodev, new_dentry->d_inode->i_ino,
36705 + new_dentry->d_inode->i_sb->s_dev);
36706 + }
36707 +
36708 + inodev = lookup_inodev_entry(old_dentry->d_inode->i_ino,
36709 + old_dentry->d_inode->i_sb->s_dev);
36710 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
36711 + do_handle_delete(inodev, old_dentry->d_inode->i_ino,
36712 + old_dentry->d_inode->i_sb->s_dev);
36713 +
36714 + if (unlikely((unsigned long)matchn))
36715 + do_handle_create(matchn, old_dentry, mnt);
36716 +
36717 + write_unlock(&gr_inode_lock);
36718 + preempt_enable();
36719 +
36720 + return;
36721 +}
36722 +
36723 +static int
36724 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
36725 + unsigned char **sum)
36726 +{
36727 + struct acl_role_label *r;
36728 + struct role_allowed_ip *ipp;
36729 + struct role_transition *trans;
36730 + unsigned int i;
36731 + int found = 0;
36732 +
36733 + /* check transition table */
36734 +
36735 + for (trans = current->role->transitions; trans; trans = trans->next) {
36736 + if (!strcmp(rolename, trans->rolename)) {
36737 + found = 1;
36738 + break;
36739 + }
36740 + }
36741 +
36742 + if (!found)
36743 + return 0;
36744 +
36745 + /* handle special roles that do not require authentication
36746 + and check ip */
36747 +
36748 + FOR_EACH_ROLE_START(r)
36749 + if (!strcmp(rolename, r->rolename) &&
36750 + (r->roletype & GR_ROLE_SPECIAL)) {
36751 + found = 0;
36752 + if (r->allowed_ips != NULL) {
36753 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
36754 + if ((ntohl(current->signal->curr_ip) & ipp->netmask) ==
36755 + (ntohl(ipp->addr) & ipp->netmask))
36756 + found = 1;
36757 + }
36758 + } else
36759 + found = 2;
36760 + if (!found)
36761 + return 0;
36762 +
36763 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
36764 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
36765 + *salt = NULL;
36766 + *sum = NULL;
36767 + return 1;
36768 + }
36769 + }
36770 + FOR_EACH_ROLE_END(r)
36771 +
36772 + for (i = 0; i < num_sprole_pws; i++) {
36773 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
36774 + *salt = acl_special_roles[i]->salt;
36775 + *sum = acl_special_roles[i]->sum;
36776 + return 1;
36777 + }
36778 + }
36779 +
36780 + return 0;
36781 +}
36782 +
36783 +static void
36784 +assign_special_role(char *rolename)
36785 +{
36786 + struct acl_object_label *obj;
36787 + struct acl_role_label *r;
36788 + struct acl_role_label *assigned = NULL;
36789 + struct task_struct *tsk;
36790 + struct file *filp;
36791 +
36792 + FOR_EACH_ROLE_START(r)
36793 + if (!strcmp(rolename, r->rolename) &&
36794 + (r->roletype & GR_ROLE_SPECIAL)) {
36795 + assigned = r;
36796 + break;
36797 + }
36798 + FOR_EACH_ROLE_END(r)
36799 +
36800 + if (!assigned)
36801 + return;
36802 +
36803 + read_lock(&tasklist_lock);
36804 + read_lock(&grsec_exec_file_lock);
36805 +
36806 + tsk = current->parent;
36807 + if (tsk == NULL)
36808 + goto out_unlock;
36809 +
36810 + filp = tsk->exec_file;
36811 + if (filp == NULL)
36812 + goto out_unlock;
36813 +
36814 + tsk->is_writable = 0;
36815 +
36816 + tsk->acl_sp_role = 1;
36817 + tsk->acl_role_id = ++acl_sp_role_value;
36818 + tsk->role = assigned;
36819 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
36820 +
36821 + /* ignore additional mmap checks for processes that are writable
36822 + by the default ACL */
36823 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
36824 + if (unlikely(obj->mode & GR_WRITE))
36825 + tsk->is_writable = 1;
36826 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
36827 + if (unlikely(obj->mode & GR_WRITE))
36828 + tsk->is_writable = 1;
36829 +
36830 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36831 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
36832 +#endif
36833 +
36834 +out_unlock:
36835 + read_unlock(&grsec_exec_file_lock);
36836 + read_unlock(&tasklist_lock);
36837 + return;
36838 +}
36839 +
36840 +int gr_check_secure_terminal(struct task_struct *task)
36841 +{
36842 + struct task_struct *p, *p2, *p3;
36843 + struct files_struct *files;
36844 + struct fdtable *fdt;
36845 + struct file *our_file = NULL, *file;
36846 + int i;
36847 +
36848 + if (task->signal->tty == NULL)
36849 + return 1;
36850 +
36851 + files = get_files_struct(task);
36852 + if (files != NULL) {
36853 + rcu_read_lock();
36854 + fdt = files_fdtable(files);
36855 + for (i=0; i < fdt->max_fds; i++) {
36856 + file = fcheck_files(files, i);
36857 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
36858 + get_file(file);
36859 + our_file = file;
36860 + }
36861 + }
36862 + rcu_read_unlock();
36863 + put_files_struct(files);
36864 + }
36865 +
36866 + if (our_file == NULL)
36867 + return 1;
36868 +
36869 + read_lock(&tasklist_lock);
36870 + do_each_thread(p2, p) {
36871 + files = get_files_struct(p);
36872 + if (files == NULL ||
36873 + (p->signal && p->signal->tty == task->signal->tty)) {
36874 + if (files != NULL)
36875 + put_files_struct(files);
36876 + continue;
36877 + }
36878 + rcu_read_lock();
36879 + fdt = files_fdtable(files);
36880 + for (i=0; i < fdt->max_fds; i++) {
36881 + file = fcheck_files(files, i);
36882 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
36883 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
36884 + p3 = task;
36885 + while (p3->pid > 0) {
36886 + if (p3 == p)
36887 + break;
36888 + p3 = p3->parent;
36889 + }
36890 + if (p3 == p)
36891 + break;
36892 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
36893 + gr_handle_alertkill(p);
36894 + rcu_read_unlock();
36895 + put_files_struct(files);
36896 + read_unlock(&tasklist_lock);
36897 + fput(our_file);
36898 + return 0;
36899 + }
36900 + }
36901 + rcu_read_unlock();
36902 + put_files_struct(files);
36903 + } while_each_thread(p2, p);
36904 + read_unlock(&tasklist_lock);
36905 +
36906 + fput(our_file);
36907 + return 1;
36908 +}
36909 +
36910 +ssize_t
36911 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
36912 +{
36913 + struct gr_arg_wrapper uwrap;
36914 + unsigned char *sprole_salt = NULL;
36915 + unsigned char *sprole_sum = NULL;
36916 + int error = sizeof (struct gr_arg_wrapper);
36917 + int error2 = 0;
36918 +
36919 + down(&gr_dev_sem);
36920 +
36921 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
36922 + error = -EPERM;
36923 + goto out;
36924 + }
36925 +
36926 + if (count != sizeof (struct gr_arg_wrapper)) {
36927 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
36928 + error = -EINVAL;
36929 + goto out;
36930 + }
36931 +
36932 +
36933 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
36934 + gr_auth_expires = 0;
36935 + gr_auth_attempts = 0;
36936 + }
36937 +
36938 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
36939 + error = -EFAULT;
36940 + goto out;
36941 + }
36942 +
36943 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
36944 + error = -EINVAL;
36945 + goto out;
36946 + }
36947 +
36948 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
36949 + error = -EFAULT;
36950 + goto out;
36951 + }
36952 +
36953 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
36954 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
36955 + time_after(gr_auth_expires, get_seconds())) {
36956 + error = -EBUSY;
36957 + goto out;
36958 + }
36959 +
36960 + /* if non-root trying to do anything other than use a special role,
36961 + do not attempt authentication, do not count towards authentication
36962 + locking
36963 + */
36964 +
36965 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
36966 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
36967 + current_uid()) {
36968 + error = -EPERM;
36969 + goto out;
36970 + }
36971 +
36972 + /* ensure pw and special role name are null terminated */
36973 +
36974 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
36975 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
36976 +
36977 + /* Okay.
36978 + * We have enough of the argument structure (we have yet
36979 + * to copy_from_user the tables themselves). Copy the tables
36980 + * only if we need them, i.e. for loading operations. */
36981 +
36982 + switch (gr_usermode->mode) {
36983 + case GR_STATUS:
36984 + if (gr_status & GR_READY) {
36985 + error = 1;
36986 + if (!gr_check_secure_terminal(current))
36987 + error = 3;
36988 + } else
36989 + error = 2;
36990 + goto out;
36991 + case GR_SHUTDOWN:
36992 + if ((gr_status & GR_READY)
36993 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
36994 + pax_open_kernel();
36995 + gr_status &= ~GR_READY;
36996 + pax_close_kernel();
36997 +
36998 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
36999 + free_variables();
37000 + memset(gr_usermode, 0, sizeof (struct gr_arg));
37001 + memset(gr_system_salt, 0, GR_SALT_LEN);
37002 + memset(gr_system_sum, 0, GR_SHA_LEN);
37003 + } else if (gr_status & GR_READY) {
37004 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
37005 + error = -EPERM;
37006 + } else {
37007 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
37008 + error = -EAGAIN;
37009 + }
37010 + break;
37011 + case GR_ENABLE:
37012 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
37013 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
37014 + else {
37015 + if (gr_status & GR_READY)
37016 + error = -EAGAIN;
37017 + else
37018 + error = error2;
37019 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
37020 + }
37021 + break;
37022 + case GR_RELOAD:
37023 + if (!(gr_status & GR_READY)) {
37024 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
37025 + error = -EAGAIN;
37026 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
37027 + lock_kernel();
37028 +
37029 + pax_open_kernel();
37030 + gr_status &= ~GR_READY;
37031 + pax_close_kernel();
37032 +
37033 + free_variables();
37034 + if (!(error2 = gracl_init(gr_usermode))) {
37035 + unlock_kernel();
37036 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
37037 + } else {
37038 + unlock_kernel();
37039 + error = error2;
37040 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
37041 + }
37042 + } else {
37043 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
37044 + error = -EPERM;
37045 + }
37046 + break;
37047 + case GR_SEGVMOD:
37048 + if (unlikely(!(gr_status & GR_READY))) {
37049 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
37050 + error = -EAGAIN;
37051 + break;
37052 + }
37053 +
37054 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
37055 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
37056 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
37057 + struct acl_subject_label *segvacl;
37058 + segvacl =
37059 + lookup_acl_subj_label(gr_usermode->segv_inode,
37060 + gr_usermode->segv_device,
37061 + current->role);
37062 + if (segvacl) {
37063 + segvacl->crashes = 0;
37064 + segvacl->expires = 0;
37065 + }
37066 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
37067 + gr_remove_uid(gr_usermode->segv_uid);
37068 + }
37069 + } else {
37070 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
37071 + error = -EPERM;
37072 + }
37073 + break;
37074 + case GR_SPROLE:
37075 + case GR_SPROLEPAM:
37076 + if (unlikely(!(gr_status & GR_READY))) {
37077 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
37078 + error = -EAGAIN;
37079 + break;
37080 + }
37081 +
37082 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
37083 + current->role->expires = 0;
37084 + current->role->auth_attempts = 0;
37085 + }
37086 +
37087 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
37088 + time_after(current->role->expires, get_seconds())) {
37089 + error = -EBUSY;
37090 + goto out;
37091 + }
37092 +
37093 + if (lookup_special_role_auth
37094 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
37095 + && ((!sprole_salt && !sprole_sum)
37096 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
37097 + char *p = "";
37098 + assign_special_role(gr_usermode->sp_role);
37099 + read_lock(&tasklist_lock);
37100 + if (current->parent)
37101 + p = current->parent->role->rolename;
37102 + read_unlock(&tasklist_lock);
37103 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
37104 + p, acl_sp_role_value);
37105 + } else {
37106 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
37107 + error = -EPERM;
37108 + if(!(current->role->auth_attempts++))
37109 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
37110 +
37111 + goto out;
37112 + }
37113 + break;
37114 + case GR_UNSPROLE:
37115 + if (unlikely(!(gr_status & GR_READY))) {
37116 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
37117 + error = -EAGAIN;
37118 + break;
37119 + }
37120 +
37121 + if (current->role->roletype & GR_ROLE_SPECIAL) {
37122 + char *p = "";
37123 + int i = 0;
37124 +
37125 + read_lock(&tasklist_lock);
37126 + if (current->parent) {
37127 + p = current->parent->role->rolename;
37128 + i = current->parent->acl_role_id;
37129 + }
37130 + read_unlock(&tasklist_lock);
37131 +
37132 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
37133 + gr_set_acls(1);
37134 + } else {
37135 + error = -EPERM;
37136 + goto out;
37137 + }
37138 + break;
37139 + default:
37140 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
37141 + error = -EINVAL;
37142 + break;
37143 + }
37144 +
37145 + if (error != -EPERM)
37146 + goto out;
37147 +
37148 + if(!(gr_auth_attempts++))
37149 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
37150 +
37151 + out:
37152 + up(&gr_dev_sem);
37153 + return error;
37154 +}
37155 +
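/*
 * Illustrative sketch (editor-added, not part of the patch): the lockout
 * bookkeeping used by write_grsec_handler() above (gr_auth_attempts /
 * gr_auth_expires), as a standalone helper. MAXTRIES and TIMEOUT are
 * placeholders for the CONFIG_GRKERNSEC_ACL_* options.
 */
#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30 /* seconds */

static unsigned auth_attempts;
static time_t auth_expires;

/* call before attempting authentication; nonzero means "locked out" */
static int auth_locked(time_t now)
{
	if (auth_expires && now >= auth_expires) {
		auth_expires = 0;             /* lockout window elapsed: reset */
		auth_attempts = 0;
	}
	return auth_attempts >= MAXTRIES && now < auth_expires;
}

/* call after a failed attempt */
static void auth_failed(time_t now)
{
	if (!auth_attempts++)
		auth_expires = now + TIMEOUT; /* window starts at the first failure */
}

int main(void)
{
	time_t now = time(NULL);
	int i;
	for (i = 0; i < 4; i++) {
		printf("locked=%d\n", auth_locked(now)); /* 0 0 0 1 */
		auth_failed(now);
	}
	return 0;
}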
37156 +int
37157 +gr_set_acls(const int type)
37158 +{
37159 + struct acl_object_label *obj;
37160 + struct task_struct *task, *task2;
37161 + struct file *filp;
37162 + struct acl_role_label *role = current->role;
37163 + __u16 acl_role_id = current->acl_role_id;
37164 + const struct cred *cred;
37165 + char *tmpname;
37166 + struct name_entry *nmatch;
37167 + struct acl_subject_label *tmpsubj;
37168 +
37169 + rcu_read_lock();
37170 + read_lock(&tasklist_lock);
37171 + read_lock(&grsec_exec_file_lock);
37172 + do_each_thread(task2, task) {
37173 + /* check to see if we're called from the exit handler,
37174 + if so, only replace ACLs that have inherited the admin
37175 + ACL */
37176 +
37177 + if (type && (task->role != role ||
37178 + task->acl_role_id != acl_role_id))
37179 + continue;
37180 +
37181 + task->acl_role_id = 0;
37182 + task->acl_sp_role = 0;
37183 +
37184 + if ((filp = task->exec_file)) {
37185 + cred = __task_cred(task);
37186 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
37187 +
37188 + /* the following is to apply the correct subject
37189 + on binaries running when the RBAC system
37190 + is enabled, when the binaries have been
37191 + replaced or deleted since their execution
37192 + -----
37193 + when the RBAC system starts, the inode/dev
37194 + from exec_file will be one the RBAC system
37195 + is unaware of. It only knows the inode/dev
37196 + of the present file on disk, or the absence
37197 + of it.
37198 + */
37199 + preempt_disable();
37200 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
37201 +
37202 + nmatch = lookup_name_entry(tmpname);
37203 + preempt_enable();
37204 + tmpsubj = NULL;
37205 + if (nmatch) {
37206 + if (nmatch->deleted)
37207 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
37208 + else
37209 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
37210 + if (tmpsubj != NULL)
37211 + task->acl = tmpsubj;
37212 + }
37213 + if (tmpsubj == NULL)
37214 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
37215 + task->role);
37216 + if (task->acl) {
37217 + struct acl_subject_label *curr;
37218 + curr = task->acl;
37219 +
37220 + task->is_writable = 0;
37221 + /* ignore additional mmap checks for processes that are writable
37222 + by the default ACL */
37223 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
37224 + if (unlikely(obj->mode & GR_WRITE))
37225 + task->is_writable = 1;
37226 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
37227 + if (unlikely(obj->mode & GR_WRITE))
37228 + task->is_writable = 1;
37229 +
37230 + gr_set_proc_res(task);
37231 +
37232 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
37233 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
37234 +#endif
37235 + } else {
37236 + read_unlock(&grsec_exec_file_lock);
37237 + read_unlock(&tasklist_lock);
37238 + rcu_read_unlock();
37239 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
37240 + return 1;
37241 + }
37242 + } else {
37243 + // it's a kernel process
37244 + task->role = kernel_role;
37245 + task->acl = kernel_role->root_label;
37246 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
37247 + task->acl->mode &= ~GR_PROCFIND;
37248 +#endif
37249 + }
37250 + } while_each_thread(task2, task);
37251 + read_unlock(&grsec_exec_file_lock);
37252 + read_unlock(&tasklist_lock);
37253 + rcu_read_unlock();
37254 +
37255 + return 0;
37256 +}
37257 +
37258 +void
37259 +gr_learn_resource(const struct task_struct *task,
37260 + const int res, const unsigned long wanted, const int gt)
37261 +{
37262 + struct acl_subject_label *acl;
37263 + const struct cred *cred;
37264 +
37265 + if (unlikely((gr_status & GR_READY) &&
37266 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
37267 + goto skip_reslog;
37268 +
37269 +#ifdef CONFIG_GRKERNSEC_RESLOG
37270 + gr_log_resource(task, res, wanted, gt);
37271 +#endif
37272 + skip_reslog:
37273 +
37274 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
37275 + return;
37276 +
37277 + acl = task->acl;
37278 +
37279 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
37280 + !(acl->resmask & (1 << (unsigned short) res))))
37281 + return;
37282 +
37283 + if (wanted >= acl->res[res].rlim_cur) {
37284 + unsigned long res_add;
37285 +
37286 + res_add = wanted;
37287 + switch (res) {
37288 + case RLIMIT_CPU:
37289 + res_add += GR_RLIM_CPU_BUMP;
37290 + break;
37291 + case RLIMIT_FSIZE:
37292 + res_add += GR_RLIM_FSIZE_BUMP;
37293 + break;
37294 + case RLIMIT_DATA:
37295 + res_add += GR_RLIM_DATA_BUMP;
37296 + break;
37297 + case RLIMIT_STACK:
37298 + res_add += GR_RLIM_STACK_BUMP;
37299 + break;
37300 + case RLIMIT_CORE:
37301 + res_add += GR_RLIM_CORE_BUMP;
37302 + break;
37303 + case RLIMIT_RSS:
37304 + res_add += GR_RLIM_RSS_BUMP;
37305 + break;
37306 + case RLIMIT_NPROC:
37307 + res_add += GR_RLIM_NPROC_BUMP;
37308 + break;
37309 + case RLIMIT_NOFILE:
37310 + res_add += GR_RLIM_NOFILE_BUMP;
37311 + break;
37312 + case RLIMIT_MEMLOCK:
37313 + res_add += GR_RLIM_MEMLOCK_BUMP;
37314 + break;
37315 + case RLIMIT_AS:
37316 + res_add += GR_RLIM_AS_BUMP;
37317 + break;
37318 + case RLIMIT_LOCKS:
37319 + res_add += GR_RLIM_LOCKS_BUMP;
37320 + break;
37321 + case RLIMIT_SIGPENDING:
37322 + res_add += GR_RLIM_SIGPENDING_BUMP;
37323 + break;
37324 + case RLIMIT_MSGQUEUE:
37325 + res_add += GR_RLIM_MSGQUEUE_BUMP;
37326 + break;
37327 + case RLIMIT_NICE:
37328 + res_add += GR_RLIM_NICE_BUMP;
37329 + break;
37330 + case RLIMIT_RTPRIO:
37331 + res_add += GR_RLIM_RTPRIO_BUMP;
37332 + break;
37333 + case RLIMIT_RTTIME:
37334 + res_add += GR_RLIM_RTTIME_BUMP;
37335 + break;
37336 + }
37337 +
37338 + acl->res[res].rlim_cur = res_add;
37339 +
37340 + if (wanted > acl->res[res].rlim_max)
37341 + acl->res[res].rlim_max = res_add;
37342 +
37343 + /* only log the subject filename, since resource logging is supported for
37344 + single-subject learning only */
37345 + rcu_read_lock();
37346 + cred = __task_cred(task);
37347 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
37348 + task->role->roletype, cred->uid, cred->gid, acl->filename,
37349 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
37350 + "", (unsigned long) res, &task->signal->curr_ip);
37351 + rcu_read_unlock();
37352 + }
37353 +
37354 + return;
37355 +}
37356 +
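/*
 * Illustrative sketch (editor-added, not part of the patch): the rlimit
 * "bump" performed by gr_learn_resource() above. The bump table here is a
 * made-up stand-in for the GR_RLIM_*_BUMP constants.
 */
#include <stdio.h>

struct rlim { unsigned long cur, max; };

static const unsigned long bump[] = {
	[0] = 60,        /* e.g. CPU time: grow by a minute */
	[1] = 1 << 20,   /* e.g. file size: grow by 1 MiB */
};

/* widen the learned limit so small overshoots don't generate endless entries */
static void learn_limit(struct rlim *r, int res, unsigned long wanted)
{
	unsigned long grown;

	if (wanted < r->cur)
		return;
	grown = wanted + bump[res];
	r->cur = grown;
	if (wanted > r->max)
		r->max = grown;
}

int main(void)
{
	struct rlim r = { .cur = 100, .max = 100 };
	learn_limit(&r, 0, 150);
	printf("cur=%lu max=%lu\n", r.cur, r.max); /* 210 210 */
	return 0;
}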
37357 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
37358 +void
37359 +pax_set_initial_flags(struct linux_binprm *bprm)
37360 +{
37361 + struct task_struct *task = current;
37362 + struct acl_subject_label *proc;
37363 + unsigned long flags;
37364 +
37365 + if (unlikely(!(gr_status & GR_READY)))
37366 + return;
37367 +
37368 + flags = pax_get_flags(task);
37369 +
37370 + proc = task->acl;
37371 +
37372 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
37373 + flags &= ~MF_PAX_PAGEEXEC;
37374 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
37375 + flags &= ~MF_PAX_SEGMEXEC;
37376 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
37377 + flags &= ~MF_PAX_RANDMMAP;
37378 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
37379 + flags &= ~MF_PAX_EMUTRAMP;
37380 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
37381 + flags &= ~MF_PAX_MPROTECT;
37382 +
37383 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
37384 + flags |= MF_PAX_PAGEEXEC;
37385 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
37386 + flags |= MF_PAX_SEGMEXEC;
37387 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
37388 + flags |= MF_PAX_RANDMMAP;
37389 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
37390 + flags |= MF_PAX_EMUTRAMP;
37391 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
37392 + flags |= MF_PAX_MPROTECT;
37393 +
37394 + pax_set_flags(task, flags);
37395 +
37396 + return;
37397 +}
37398 +#endif
37399 +
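/*
 * Illustrative sketch (editor-added, not part of the patch): how per-subject
 * enable/disable pairs override an inherited flag word, as in
 * pax_set_initial_flags() above. The bit values are stand-ins for the real
 * GR_PAX_ and MF_PAX_ constants, shown for a single flag.
 */
#include <stdio.h>

#define DIS_PAGEEXEC (1u << 0)
#define EN_PAGEEXEC  (1u << 1)
#define MF_PAGEEXEC  (1u << 0)

static unsigned apply_subject_flags(unsigned flags, unsigned subj)
{
	if (subj & DIS_PAGEEXEC)
		flags &= ~MF_PAGEEXEC;   /* explicit "disable" clears the bit */
	if (subj & EN_PAGEEXEC)
		flags |= MF_PAGEEXEC;    /* explicit "enable" sets it */
	return flags;                    /* neither set: inherited value kept */
}

int main(void)
{
	printf("%u\n", apply_subject_flags(MF_PAGEEXEC, DIS_PAGEEXEC)); /* 0 */
	printf("%u\n", apply_subject_flags(0, EN_PAGEEXEC));            /* 1 */
	printf("%u\n", apply_subject_flags(MF_PAGEEXEC, 0));            /* 1 */
	return 0;
}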
37400 +#ifdef CONFIG_SYSCTL
37401 +/* Eric Biederman likes breaking userland ABI and every inode-based security
37402 + system to save 35kb of memory */
37403 +
37404 +/* we modify the passed in filename, but adjust it back before returning */
37405 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
37406 +{
37407 + struct name_entry *nmatch;
37408 + char *p, *lastp = NULL;
37409 + struct acl_object_label *obj = NULL, *tmp;
37410 + struct acl_subject_label *tmpsubj;
37411 + char c = '\0';
37412 +
37413 + read_lock(&gr_inode_lock);
37414 +
37415 + p = name + len - 1;
37416 + do {
37417 + nmatch = lookup_name_entry(name);
37418 + if (lastp != NULL)
37419 + *lastp = c;
37420 +
37421 + if (nmatch == NULL)
37422 + goto next_component;
37423 + tmpsubj = current->acl;
37424 + do {
37425 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
37426 + if (obj != NULL) {
37427 + tmp = obj->globbed;
37428 + while (tmp) {
37429 + if (!glob_match(tmp->filename, name)) {
37430 + obj = tmp;
37431 + goto found_obj;
37432 + }
37433 + tmp = tmp->next;
37434 + }
37435 + goto found_obj;
37436 + }
37437 + } while ((tmpsubj = tmpsubj->parent_subject));
37438 +next_component:
37439 + /* end case */
37440 + if (p == name)
37441 + break;
37442 +
37443 + while (*p != '/')
37444 + p--;
37445 + if (p == name)
37446 + lastp = p + 1;
37447 + else {
37448 + lastp = p;
37449 + p--;
37450 + }
37451 + c = *lastp;
37452 + *lastp = '\0';
37453 + } while (1);
37454 +found_obj:
37455 + read_unlock(&gr_inode_lock);
37456 + /* obj returned will always be non-null */
37457 + return obj;
37458 +}
37459 +
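/*
 * Illustrative sketch (editor-added, not part of the patch): the
 * longest-prefix walk used by gr_lookup_by_name() above, shown against a
 * plain string set instead of the in-kernel name hash. The table contents
 * are made up for the example, and the root-only "/" case is simplified.
 */
#include <stdio.h>
#include <string.h>

static const char *labelled[] = { "/proc/sys", "/proc/sys/kernel/modprobe" };

static int is_labelled(const char *path)
{
	size_t i;
	for (i = 0; i < sizeof(labelled) / sizeof(labelled[0]); i++)
		if (!strcmp(path, labelled[i]))
			return 1;
	return 0;
}

/* try the full path, then keep stripping the last component */
static void lookup(char *path)
{
	char *slash;

	for (;;) {
		if (is_labelled(path)) {
			printf("matched %s\n", path);
			return;
		}
		slash = strrchr(path, '/');
		if (slash == path) {          /* only the root prefix is left */
			printf("no match\n");
			return;
		}
		*slash = '\0';                /* drop the last component, retry */
	}
}

int main(void)
{
	char p[] = "/proc/sys/kernel/ostype";
	lookup(p); /* falls back to the /proc/sys entry */
	return 0;
}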
37460 +/* returns 0 when allowing, non-zero on error
37461 + op of 0 is used for readdir, so we don't log the names of hidden files
37462 +*/
37463 +__u32
37464 +gr_handle_sysctl(const struct ctl_table *table, const int op)
37465 +{
37466 + ctl_table *tmp;
37467 + const char *proc_sys = "/proc/sys";
37468 + char *path;
37469 + struct acl_object_label *obj;
37470 + unsigned short len = 0, pos = 0, depth = 0, i;
37471 + __u32 err = 0;
37472 + __u32 mode = 0;
37473 +
37474 + if (unlikely(!(gr_status & GR_READY)))
37475 + return 0;
37476 +
37477 + /* for now, ignore operations on non-sysctl entries if it's not a
37478 + readdir*/
37479 + if (table->child != NULL && op != 0)
37480 + return 0;
37481 +
37482 + mode |= GR_FIND;
37483 + /* it's only a read if it's an entry, read on dirs is for readdir */
37484 + if (op & MAY_READ)
37485 + mode |= GR_READ;
37486 + if (op & MAY_WRITE)
37487 + mode |= GR_WRITE;
37488 +
37489 + preempt_disable();
37490 +
37491 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
37492 +
37493 + /* it's only a read/write if it's an actual entry, not a dir
37494 + (which are opened for readdir)
37495 + */
37496 +
37497 + /* convert the requested sysctl entry into a pathname */
37498 +
37499 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
37500 + len += strlen(tmp->procname);
37501 + len++;
37502 + depth++;
37503 + }
37504 +
37505 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
37506 + /* deny */
37507 + goto out;
37508 + }
37509 +
37510 + memset(path, 0, PAGE_SIZE);
37511 +
37512 + memcpy(path, proc_sys, strlen(proc_sys));
37513 +
37514 + pos += strlen(proc_sys);
37515 +
37516 + for (; depth > 0; depth--) {
37517 + path[pos] = '/';
37518 + pos++;
37519 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
37520 + if (depth == i) {
37521 + memcpy(path + pos, tmp->procname,
37522 + strlen(tmp->procname));
37523 + pos += strlen(tmp->procname);
37524 + }
37525 + i++;
37526 + }
37527 + }
37528 +
37529 + obj = gr_lookup_by_name(path, pos);
37530 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
37531 +
37532 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
37533 + ((err & mode) != mode))) {
37534 + __u32 new_mode = mode;
37535 +
37536 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
37537 +
37538 + err = 0;
37539 + gr_log_learn_sysctl(path, new_mode);
37540 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
37541 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
37542 + err = -ENOENT;
37543 + } else if (!(err & GR_FIND)) {
37544 + err = -ENOENT;
37545 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
37546 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
37547 + path, (mode & GR_READ) ? " reading" : "",
37548 + (mode & GR_WRITE) ? " writing" : "");
37549 + err = -EACCES;
37550 + } else if ((err & mode) != mode) {
37551 + err = -EACCES;
37552 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
37553 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
37554 + path, (mode & GR_READ) ? " reading" : "",
37555 + (mode & GR_WRITE) ? " writing" : "");
37556 + err = 0;
37557 + } else
37558 + err = 0;
37559 +
37560 + out:
37561 + preempt_enable();
37562 +
37563 + return err;
37564 +}
37565 +#endif
37566 +
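/*
 * Illustrative sketch (editor-added, not part of the patch): rebuilding the
 * "/proc/sys/..." pathname from a child-to-parent chain, as gr_handle_sysctl()
 * does above. The node type is a simplified stand-in for ctl_table.
 */
#include <stdio.h>
#include <string.h>

struct node {
	const char *procname;
	struct node *parent;
};

/* write "/proc/sys/<ancestors>/<leaf>" into buf */
static void sysctl_path(const struct node *leaf, char *buf, size_t size)
{
	const struct node *n;
	unsigned depth = 0, i, level;

	for (n = leaf; n; n = n->parent)
		depth++;

	snprintf(buf, size, "/proc/sys");
	for (level = depth; level > 0; level--) {
		strncat(buf, "/", size - strlen(buf) - 1);
		/* walk again to find the component at this depth (leaf is depth 1) */
		for (i = 1, n = leaf; n; n = n->parent, i++)
			if (i == level)
				strncat(buf, n->procname, size - strlen(buf) - 1);
	}
}

int main(void)
{
	struct node kernel = { "kernel", NULL };
	struct node ostype = { "ostype", &kernel };
	char path[256];

	sysctl_path(&ostype, path, sizeof(path));
	printf("%s\n", path); /* /proc/sys/kernel/ostype */
	return 0;
}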
37567 +int
37568 +gr_handle_proc_ptrace(struct task_struct *task)
37569 +{
37570 + struct file *filp;
37571 + struct task_struct *tmp = task;
37572 + struct task_struct *curtemp = current;
37573 + __u32 retmode;
37574 +
37575 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
37576 + if (unlikely(!(gr_status & GR_READY)))
37577 + return 0;
37578 +#endif
37579 +
37580 + read_lock(&tasklist_lock);
37581 + read_lock(&grsec_exec_file_lock);
37582 + filp = task->exec_file;
37583 +
37584 + while (tmp->pid > 0) {
37585 + if (tmp == curtemp)
37586 + break;
37587 + tmp = tmp->parent;
37588 + }
37589 +
37590 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
37591 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
37592 + read_unlock(&grsec_exec_file_lock);
37593 + read_unlock(&tasklist_lock);
37594 + return 1;
37595 + }
37596 +
37597 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
37598 + if (!(gr_status & GR_READY)) {
37599 + read_unlock(&grsec_exec_file_lock);
37600 + read_unlock(&tasklist_lock);
37601 + return 0;
37602 + }
37603 +#endif
37604 +
37605 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
37606 + read_unlock(&grsec_exec_file_lock);
37607 + read_unlock(&tasklist_lock);
37608 +
37609 + if (retmode & GR_NOPTRACE)
37610 + return 1;
37611 +
37612 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
37613 + && (current->acl != task->acl || (current->acl != current->role->root_label
37614 + && current->pid != task->pid)))
37615 + return 1;
37616 +
37617 + return 0;
37618 +}
37619 +
37620 +int
37621 +gr_handle_ptrace(struct task_struct *task, const long request)
37622 +{
37623 + struct task_struct *tmp = task;
37624 + struct task_struct *curtemp = current;
37625 + __u32 retmode;
37626 +
37627 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
37628 + if (unlikely(!(gr_status & GR_READY)))
37629 + return 0;
37630 +#endif
37631 +
37632 + read_lock(&tasklist_lock);
37633 + while (tmp->pid > 0) {
37634 + if (tmp == curtemp)
37635 + break;
37636 + tmp = tmp->parent;
37637 + }
37638 +
37639 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
37640 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
37641 + read_unlock(&tasklist_lock);
37642 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37643 + return 1;
37644 + }
37645 + read_unlock(&tasklist_lock);
37646 +
37647 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
37648 + if (!(gr_status & GR_READY))
37649 + return 0;
37650 +#endif
37651 +
37652 + read_lock(&grsec_exec_file_lock);
37653 + if (unlikely(!task->exec_file)) {
37654 + read_unlock(&grsec_exec_file_lock);
37655 + return 0;
37656 + }
37657 +
37658 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
37659 + read_unlock(&grsec_exec_file_lock);
37660 +
37661 + if (retmode & GR_NOPTRACE) {
37662 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37663 + return 1;
37664 + }
37665 +
37666 + if (retmode & GR_PTRACERD) {
37667 + switch (request) {
37668 + case PTRACE_POKETEXT:
37669 + case PTRACE_POKEDATA:
37670 + case PTRACE_POKEUSR:
37671 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
37672 + case PTRACE_SETREGS:
37673 + case PTRACE_SETFPREGS:
37674 +#endif
37675 +#ifdef CONFIG_X86
37676 + case PTRACE_SETFPXREGS:
37677 +#endif
37678 +#ifdef CONFIG_ALTIVEC
37679 + case PTRACE_SETVRREGS:
37680 +#endif
37681 + return 1;
37682 + default:
37683 + return 0;
37684 + }
37685 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
37686 + !(current->role->roletype & GR_ROLE_GOD) &&
37687 + (current->acl != task->acl)) {
37688 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37689 + return 1;
37690 + }
37691 +
37692 + return 0;
37693 +}
37694 +
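/*
 * Illustrative sketch (editor-added, not part of the patch): the ancestor
 * walk used by gr_handle_ptrace() and gr_handle_proc_ptrace() above, on a toy
 * process tree. The struct is an assumption, not the kernel task_struct.
 */
#include <stdio.h>

struct proc {
	int pid;
	struct proc *parent;
};

/* nonzero if "who" is "task" itself or one of its ancestors */
static int is_ancestor(const struct proc *task, const struct proc *who)
{
	const struct proc *p = task;
	while (p->pid > 0) {          /* pid 0 terminates the chain, as in the walk above */
		if (p == who)
			return 1;
		p = p->parent;
	}
	return p == who;
}

int main(void)
{
	struct proc swapper = { 0, NULL };
	struct proc init    = { 1, &swapper };
	struct proc shell   = { 100, &init };
	struct proc child   = { 200, &shell };

	printf("%d\n", is_ancestor(&child, &shell)); /* 1 */
	printf("%d\n", is_ancestor(&shell, &child)); /* 0 */
	return 0;
}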
37695 +static int is_writable_mmap(const struct file *filp)
37696 +{
37697 + struct task_struct *task = current;
37698 + struct acl_object_label *obj, *obj2;
37699 +
37700 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
37701 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode)) {
37702 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
37703 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
37704 + task->role->root_label);
37705 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
37706 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
37707 + return 1;
37708 + }
37709 + }
37710 + return 0;
37711 +}
37712 +
37713 +int
37714 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
37715 +{
37716 + __u32 mode;
37717 +
37718 + if (unlikely(!file || !(prot & PROT_EXEC)))
37719 + return 1;
37720 +
37721 + if (is_writable_mmap(file))
37722 + return 0;
37723 +
37724 + mode =
37725 + gr_search_file(file->f_path.dentry,
37726 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
37727 + file->f_path.mnt);
37728 +
37729 + if (!gr_tpe_allow(file))
37730 + return 0;
37731 +
37732 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
37733 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37734 + return 0;
37735 + } else if (unlikely(!(mode & GR_EXEC))) {
37736 + return 0;
37737 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
37738 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37739 + return 1;
37740 + }
37741 +
37742 + return 1;
37743 +}
37744 +
37745 +int
37746 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
37747 +{
37748 + __u32 mode;
37749 +
37750 + if (unlikely(!file || !(prot & PROT_EXEC)))
37751 + return 1;
37752 +
37753 + if (is_writable_mmap(file))
37754 + return 0;
37755 +
37756 + mode =
37757 + gr_search_file(file->f_path.dentry,
37758 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
37759 + file->f_path.mnt);
37760 +
37761 + if (!gr_tpe_allow(file))
37762 + return 0;
37763 +
37764 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
37765 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37766 + return 0;
37767 + } else if (unlikely(!(mode & GR_EXEC))) {
37768 + return 0;
37769 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
37770 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37771 + return 1;
37772 + }
37773 +
37774 + return 1;
37775 +}
37776 +
37777 +void
37778 +gr_acl_handle_psacct(struct task_struct *task, const long code)
37779 +{
37780 + unsigned long runtime;
37781 + unsigned long cputime;
37782 + unsigned int wday, cday;
37783 + __u8 whr, chr;
37784 + __u8 wmin, cmin;
37785 + __u8 wsec, csec;
37786 + struct timespec timeval;
37787 +
37788 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
37789 + !(task->acl->mode & GR_PROCACCT)))
37790 + return;
37791 +
37792 + do_posix_clock_monotonic_gettime(&timeval);
37793 + runtime = timeval.tv_sec - task->start_time.tv_sec;
37794 + wday = runtime / (3600 * 24);
37795 + runtime -= wday * (3600 * 24);
37796 + whr = runtime / 3600;
37797 + runtime -= whr * 3600;
37798 + wmin = runtime / 60;
37799 + runtime -= wmin * 60;
37800 + wsec = runtime;
37801 +
37802 + cputime = (task->utime + task->stime) / HZ;
37803 + cday = cputime / (3600 * 24);
37804 + cputime -= cday * (3600 * 24);
37805 + chr = cputime / 3600;
37806 + cputime -= chr * 3600;
37807 + cmin = cputime / 60;
37808 + cputime -= cmin * 60;
37809 + csec = cputime;
37810 +
37811 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
37812 +
37813 + return;
37814 +}
37815 +
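/*
 * Illustrative sketch (editor-added, not part of the patch): the wall-clock /
 * CPU-time breakdown computed by gr_acl_handle_psacct() above.
 */
#include <stdio.h>

static void split_seconds(unsigned long t, unsigned *d, unsigned *h,
                          unsigned *m, unsigned *s)
{
	*d = t / (3600 * 24); t -= (unsigned long)*d * 3600 * 24;
	*h = t / 3600;        t -= (unsigned long)*h * 3600;
	*m = t / 60;          t -= (unsigned long)*m * 60;
	*s = t;
}

int main(void)
{
	unsigned d, h, m, s;
	split_seconds(90061, &d, &h, &m, &s);
	printf("%u days %u:%02u:%02u\n", d, h, m, s); /* 1 days 1:01:01 */
	return 0;
}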
37816 +void gr_set_kernel_label(struct task_struct *task)
37817 +{
37818 + if (gr_status & GR_READY) {
37819 + task->role = kernel_role;
37820 + task->acl = kernel_role->root_label;
37821 + }
37822 + return;
37823 +}
37824 +
37825 +#ifdef CONFIG_TASKSTATS
37826 +int gr_is_taskstats_denied(int pid)
37827 +{
37828 + struct task_struct *task;
37829 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37830 + const struct cred *cred;
37831 +#endif
37832 + int ret = 0;
37833 +
37834 + /* restrict taskstats viewing to un-chrooted root users
37835 + who have the 'view' subject flag if the RBAC system is enabled
37836 + */
37837 +
37838 + rcu_read_lock();
37839 + read_lock(&tasklist_lock);
37840 + task = find_task_by_vpid(pid);
37841 + if (task) {
37842 +#ifdef CONFIG_GRKERNSEC_CHROOT
37843 + if (proc_is_chrooted(task))
37844 + ret = -EACCES;
37845 +#endif
37846 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37847 + cred = __task_cred(task);
37848 +#ifdef CONFIG_GRKERNSEC_PROC_USER
37849 + if (cred->uid != 0)
37850 + ret = -EACCES;
37851 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37852 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
37853 + ret = -EACCES;
37854 +#endif
37855 +#endif
37856 + if (gr_status & GR_READY) {
37857 + if (!(task->acl->mode & GR_VIEW))
37858 + ret = -EACCES;
37859 + }
37860 + } else
37861 + ret = -ENOENT;
37862 +
37863 + read_unlock(&tasklist_lock);
37864 + rcu_read_unlock();
37865 +
37866 + return ret;
37867 +}
37868 +#endif
37869 +
37870 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
37871 +{
37872 + struct task_struct *task = current;
37873 + struct dentry *dentry = file->f_path.dentry;
37874 + struct vfsmount *mnt = file->f_path.mnt;
37875 + struct acl_object_label *obj, *tmp;
37876 + struct acl_subject_label *subj;
37877 + unsigned int bufsize;
37878 + int is_not_root;
37879 + char *path;
37880 +
37881 + if (unlikely(!(gr_status & GR_READY)))
37882 + return 1;
37883 +
37884 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
37885 + return 1;
37886 +
37887 + /* ignore Eric Biederman */
37888 + if (IS_PRIVATE(dentry->d_inode))
37889 + return 1;
37890 +
37891 + subj = task->acl;
37892 + do {
37893 + obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj);
37894 + if (obj != NULL)
37895 + return (obj->mode & GR_FIND) ? 1 : 0;
37896 + } while ((subj = subj->parent_subject));
37897 +
37898 + /* this is purely an optimization, since we're looking for an object
37899 + for the directory we're doing a readdir on;
37900 + if it's possible for any globbed object to match the entry we're
37901 + filling into the directory, then the object we find here will be
37902 + an anchor point with attached globbed objects
37903 + */
37904 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
37905 + if (obj->globbed == NULL)
37906 + return (obj->mode & GR_FIND) ? 1 : 0;
37907 +
37908 + is_not_root = ((obj->filename[0] == '/') &&
37909 + (obj->filename[1] == '\0')) ? 0 : 1;
37910 + bufsize = PAGE_SIZE - namelen - is_not_root;
37911 +
37912 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
37913 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
37914 + return 1;
37915 +
37916 + preempt_disable();
37917 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
37918 + bufsize);
37919 +
37920 + bufsize = strlen(path);
37921 +
37922 + /* if base is "/", don't append an additional slash */
37923 + if (is_not_root)
37924 + *(path + bufsize) = '/';
37925 + memcpy(path + bufsize + is_not_root, name, namelen);
37926 + *(path + bufsize + namelen + is_not_root) = '\0';
37927 +
37928 + tmp = obj->globbed;
37929 + while (tmp) {
37930 + if (!glob_match(tmp->filename, path)) {
37931 + preempt_enable();
37932 + return (tmp->mode & GR_FIND) ? 1 : 0;
37933 + }
37934 + tmp = tmp->next;
37935 + }
37936 + preempt_enable();
37937 + return (obj->mode & GR_FIND) ? 1 : 0;
37938 +}
37939 +
37940 +EXPORT_SYMBOL(gr_learn_resource);
37941 +EXPORT_SYMBOL(gr_set_kernel_label);
37942 +#ifdef CONFIG_SECURITY
37943 +EXPORT_SYMBOL(gr_check_user_change);
37944 +EXPORT_SYMBOL(gr_check_group_change);
37945 +#endif
37946 +
37947 diff -urNp linux-2.6.34.1/grsecurity/gracl_alloc.c linux-2.6.34.1/grsecurity/gracl_alloc.c
37948 --- linux-2.6.34.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
37949 +++ linux-2.6.34.1/grsecurity/gracl_alloc.c 2010-07-07 09:04:56.000000000 -0400
37950 @@ -0,0 +1,105 @@
37951 +#include <linux/kernel.h>
37952 +#include <linux/mm.h>
37953 +#include <linux/slab.h>
37954 +#include <linux/vmalloc.h>
37955 +#include <linux/gracl.h>
37956 +#include <linux/grsecurity.h>
37957 +
37958 +static unsigned long alloc_stack_next = 1;
37959 +static unsigned long alloc_stack_size = 1;
37960 +static void **alloc_stack;
37961 +
37962 +static __inline__ int
37963 +alloc_pop(void)
37964 +{
37965 + if (alloc_stack_next == 1)
37966 + return 0;
37967 +
37968 + kfree(alloc_stack[alloc_stack_next - 2]);
37969 +
37970 + alloc_stack_next--;
37971 +
37972 + return 1;
37973 +}
37974 +
37975 +static __inline__ int
37976 +alloc_push(void *buf)
37977 +{
37978 + if (alloc_stack_next >= alloc_stack_size)
37979 + return 1;
37980 +
37981 + alloc_stack[alloc_stack_next - 1] = buf;
37982 +
37983 + alloc_stack_next++;
37984 +
37985 + return 0;
37986 +}
37987 +
37988 +void *
37989 +acl_alloc(unsigned long len)
37990 +{
37991 + void *ret = NULL;
37992 +
37993 + if (!len || len > PAGE_SIZE)
37994 + goto out;
37995 +
37996 + ret = kmalloc(len, GFP_KERNEL);
37997 +
37998 + if (ret) {
37999 + if (alloc_push(ret)) {
38000 + kfree(ret);
38001 + ret = NULL;
38002 + }
38003 + }
38004 +
38005 +out:
38006 + return ret;
38007 +}
38008 +
38009 +void *
38010 +acl_alloc_num(unsigned long num, unsigned long len)
38011 +{
38012 + if (!len || (num > (PAGE_SIZE / len)))
38013 + return NULL;
38014 +
38015 + return acl_alloc(num * len);
38016 +}
38017 +
38018 +void
38019 +acl_free_all(void)
38020 +{
38021 + if (gr_acl_is_enabled() || !alloc_stack)
38022 + return;
38023 +
38024 + while (alloc_pop()) ;
38025 +
38026 + if (alloc_stack) {
38027 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
38028 + kfree(alloc_stack);
38029 + else
38030 + vfree(alloc_stack);
38031 + }
38032 +
38033 + alloc_stack = NULL;
38034 + alloc_stack_size = 1;
38035 + alloc_stack_next = 1;
38036 +
38037 + return;
38038 +}
38039 +
38040 +int
38041 +acl_alloc_stack_init(unsigned long size)
38042 +{
38043 + if ((size * sizeof (void *)) <= PAGE_SIZE)
38044 + alloc_stack =
38045 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
38046 + else
38047 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
38048 +
38049 + alloc_stack_size = size;
38050 +
38051 + if (!alloc_stack)
38052 + return 0;
38053 + else
38054 + return 1;
38055 +}
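/*
 * Illustrative sketch (editor-added, not part of the patch): the idea behind
 * gracl_alloc.c above: remember every allocation on a stack so the whole
 * policy can be torn down with one call. Plain malloc/free stand in for the
 * kernel allocators, and the size checks are simplified.
 */
#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long next = 1, size = 1;

static int stack_init(unsigned long n)
{
	stack = malloc(n * sizeof(void *));
	size = n;
	return stack != NULL;
}

static void *tracked_alloc(size_t len)
{
	void *p;

	if (next >= size)
		return NULL;            /* stack full: refuse, caller handles it */
	p = malloc(len);
	if (p)
		stack[next++ - 1] = p;
	return p;
}

static void free_all(void)
{
	while (next > 1)
		free(stack[--next - 1]);
	free(stack);
	stack = NULL;
	next = size = 1;
}

int main(void)
{
	if (!stack_init(16))
		return 1;
	tracked_alloc(32);
	tracked_alloc(64);
	free_all();                     /* releases both allocations and the stack */
	puts("done");
	return 0;
}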
38056 diff -urNp linux-2.6.34.1/grsecurity/gracl_cap.c linux-2.6.34.1/grsecurity/gracl_cap.c
38057 --- linux-2.6.34.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
38058 +++ linux-2.6.34.1/grsecurity/gracl_cap.c 2010-07-07 09:04:56.000000000 -0400
38059 @@ -0,0 +1,138 @@
38060 +#include <linux/kernel.h>
38061 +#include <linux/module.h>
38062 +#include <linux/sched.h>
38063 +#include <linux/gracl.h>
38064 +#include <linux/grsecurity.h>
38065 +#include <linux/grinternal.h>
38066 +
38067 +static const char *captab_log[] = {
38068 + "CAP_CHOWN",
38069 + "CAP_DAC_OVERRIDE",
38070 + "CAP_DAC_READ_SEARCH",
38071 + "CAP_FOWNER",
38072 + "CAP_FSETID",
38073 + "CAP_KILL",
38074 + "CAP_SETGID",
38075 + "CAP_SETUID",
38076 + "CAP_SETPCAP",
38077 + "CAP_LINUX_IMMUTABLE",
38078 + "CAP_NET_BIND_SERVICE",
38079 + "CAP_NET_BROADCAST",
38080 + "CAP_NET_ADMIN",
38081 + "CAP_NET_RAW",
38082 + "CAP_IPC_LOCK",
38083 + "CAP_IPC_OWNER",
38084 + "CAP_SYS_MODULE",
38085 + "CAP_SYS_RAWIO",
38086 + "CAP_SYS_CHROOT",
38087 + "CAP_SYS_PTRACE",
38088 + "CAP_SYS_PACCT",
38089 + "CAP_SYS_ADMIN",
38090 + "CAP_SYS_BOOT",
38091 + "CAP_SYS_NICE",
38092 + "CAP_SYS_RESOURCE",
38093 + "CAP_SYS_TIME",
38094 + "CAP_SYS_TTY_CONFIG",
38095 + "CAP_MKNOD",
38096 + "CAP_LEASE",
38097 + "CAP_AUDIT_WRITE",
38098 + "CAP_AUDIT_CONTROL",
38099 + "CAP_SETFCAP",
38100 + "CAP_MAC_OVERRIDE",
38101 + "CAP_MAC_ADMIN"
38102 +};
38103 +
38104 +EXPORT_SYMBOL(gr_is_capable);
38105 +EXPORT_SYMBOL(gr_is_capable_nolog);
38106 +
38107 +int
38108 +gr_is_capable(const int cap)
38109 +{
38110 + struct task_struct *task = current;
38111 + const struct cred *cred = current_cred();
38112 + struct acl_subject_label *curracl;
38113 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
38114 + kernel_cap_t cap_audit = __cap_empty_set;
38115 +
38116 + if (!gr_acl_is_enabled())
38117 + return 1;
38118 +
38119 + curracl = task->acl;
38120 +
38121 + cap_drop = curracl->cap_lower;
38122 + cap_mask = curracl->cap_mask;
38123 + cap_audit = curracl->cap_invert_audit;
38124 +
38125 + while ((curracl = curracl->parent_subject)) {
38126 + /* if the cap isn't specified in the current computed mask but is specified in the
38127 + current level subject, and is lowered in the current level subject, then add
38128 + it to the set of dropped capabilities
38129 + otherwise, add the current level subject's mask to the current computed mask
38130 + */
38131 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
38132 + cap_raise(cap_mask, cap);
38133 + if (cap_raised(curracl->cap_lower, cap))
38134 + cap_raise(cap_drop, cap);
38135 + if (cap_raised(curracl->cap_invert_audit, cap))
38136 + cap_raise(cap_audit, cap);
38137 + }
38138 + }
38139 +
38140 + if (!cap_raised(cap_drop, cap)) {
38141 + if (cap_raised(cap_audit, cap))
38142 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
38143 + return 1;
38144 + }
38145 +
38146 + curracl = task->acl;
38147 +
38148 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
38149 + && cap_raised(cred->cap_effective, cap)) {
38150 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
38151 + task->role->roletype, cred->uid,
38152 + cred->gid, task->exec_file ?
38153 + gr_to_filename(task->exec_file->f_path.dentry,
38154 + task->exec_file->f_path.mnt) : curracl->filename,
38155 + curracl->filename, 0UL,
38156 + 0UL, "", (unsigned long) cap, &task->signal->curr_ip);
38157 + return 1;
38158 + }
38159 +
38160 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
38161 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
38162 + return 0;
38163 +}
38164 +
38165 +int
38166 +gr_is_capable_nolog(const int cap)
38167 +{
38168 + struct acl_subject_label *curracl;
38169 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
38170 +
38171 + if (!gr_acl_is_enabled())
38172 + return 1;
38173 +
38174 + curracl = current->acl;
38175 +
38176 + cap_drop = curracl->cap_lower;
38177 + cap_mask = curracl->cap_mask;
38178 +
38179 + while ((curracl = curracl->parent_subject)) {
38180 + /* if the cap isn't specified in the current computed mask but is specified in the
38181 + current level subject, and is lowered in the current level subject, then add
38182 + it to the set of dropped capabilities;
38183 + otherwise, add the current level subject's mask to the current computed mask
38184 + */
38185 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
38186 + cap_raise(cap_mask, cap);
38187 + if (cap_raised(curracl->cap_lower, cap))
38188 + cap_raise(cap_drop, cap);
38189 + }
38190 + }
38191 +
38192 + if (!cap_raised(cap_drop, cap))
38193 + return 1;
38194 +
38195 + return 0;
38196 +}
38197 +
38198 diff -urNp linux-2.6.34.1/grsecurity/gracl_fs.c linux-2.6.34.1/grsecurity/gracl_fs.c
38199 --- linux-2.6.34.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
38200 +++ linux-2.6.34.1/grsecurity/gracl_fs.c 2010-07-07 09:04:56.000000000 -0400
38201 @@ -0,0 +1,424 @@
38202 +#include <linux/kernel.h>
38203 +#include <linux/sched.h>
38204 +#include <linux/types.h>
38205 +#include <linux/fs.h>
38206 +#include <linux/file.h>
38207 +#include <linux/stat.h>
38208 +#include <linux/grsecurity.h>
38209 +#include <linux/grinternal.h>
38210 +#include <linux/gracl.h>
38211 +
38212 +__u32
38213 +gr_acl_handle_hidden_file(const struct dentry * dentry,
38214 + const struct vfsmount * mnt)
38215 +{
38216 + __u32 mode;
38217 +
38218 + if (unlikely(!dentry->d_inode))
38219 + return GR_FIND;
38220 +
38221 + mode =
38222 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
38223 +
38224 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
38225 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
38226 + return mode;
38227 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
38228 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
38229 + return 0;
38230 + } else if (unlikely(!(mode & GR_FIND)))
38231 + return 0;
38232 +
38233 + return GR_FIND;
38234 +}
38235 +
38236 +__u32
38237 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
38238 + const int fmode)
38239 +{
38240 + __u32 reqmode = GR_FIND;
38241 + __u32 mode;
38242 +
38243 + if (unlikely(!dentry->d_inode))
38244 + return reqmode;
38245 +
38246 + if (unlikely(fmode & O_APPEND))
38247 + reqmode |= GR_APPEND;
38248 + else if (unlikely(fmode & FMODE_WRITE))
38249 + reqmode |= GR_WRITE;
38250 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
38251 + reqmode |= GR_READ;
38252 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
38253 + reqmode &= ~GR_READ;
38254 + mode =
38255 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
38256 + mnt);
38257 +
38258 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38259 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
38260 + reqmode & GR_READ ? " reading" : "",
38261 + reqmode & GR_WRITE ? " writing" : reqmode &
38262 + GR_APPEND ? " appending" : "");
38263 + return reqmode;
38264 + } else
38265 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38266 + {
38267 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
38268 + reqmode & GR_READ ? " reading" : "",
38269 + reqmode & GR_WRITE ? " writing" : reqmode &
38270 + GR_APPEND ? " appending" : "");
38271 + return 0;
38272 + } else if (unlikely((mode & reqmode) != reqmode))
38273 + return 0;
38274 +
38275 + return reqmode;
38276 +}
38277 +
38278 +__u32
38279 +gr_acl_handle_creat(const struct dentry * dentry,
38280 + const struct dentry * p_dentry,
38281 + const struct vfsmount * p_mnt, const int fmode,
38282 + const int imode)
38283 +{
38284 + __u32 reqmode = GR_WRITE | GR_CREATE;
38285 + __u32 mode;
38286 +
38287 + if (unlikely(fmode & O_APPEND))
38288 + reqmode |= GR_APPEND;
38289 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
38290 + reqmode |= GR_READ;
38291 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
38292 + reqmode |= GR_SETID;
38293 +
38294 + mode =
38295 + gr_check_create(dentry, p_dentry, p_mnt,
38296 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
38297 +
38298 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38299 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
38300 + reqmode & GR_READ ? " reading" : "",
38301 + reqmode & GR_WRITE ? " writing" : reqmode &
38302 + GR_APPEND ? " appending" : "");
38303 + return reqmode;
38304 + } else
38305 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38306 + {
38307 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
38308 + reqmode & GR_READ ? " reading" : "",
38309 + reqmode & GR_WRITE ? " writing" : reqmode &
38310 + GR_APPEND ? " appending" : "");
38311 + return 0;
38312 + } else if (unlikely((mode & reqmode) != reqmode))
38313 + return 0;
38314 +
38315 + return reqmode;
38316 +}
38317 +
38318 +__u32
38319 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
38320 + const int fmode)
38321 +{
38322 + __u32 mode, reqmode = GR_FIND;
38323 +
38324 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
38325 + reqmode |= GR_EXEC;
38326 + if (fmode & S_IWOTH)
38327 + reqmode |= GR_WRITE;
38328 + if (fmode & S_IROTH)
38329 + reqmode |= GR_READ;
38330 +
38331 + mode =
38332 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
38333 + mnt);
38334 +
38335 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38336 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
38337 + reqmode & GR_READ ? " reading" : "",
38338 + reqmode & GR_WRITE ? " writing" : "",
38339 + reqmode & GR_EXEC ? " executing" : "");
38340 + return reqmode;
38341 + } else
38342 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38343 + {
38344 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
38345 + reqmode & GR_READ ? " reading" : "",
38346 + reqmode & GR_WRITE ? " writing" : "",
38347 + reqmode & GR_EXEC ? " executing" : "");
38348 + return 0;
38349 + } else if (unlikely((mode & reqmode) != reqmode))
38350 + return 0;
38351 +
38352 + return reqmode;
38353 +}
38354 +
38355 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
38356 +{
38357 + __u32 mode;
38358 +
38359 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
38360 +
38361 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
38362 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
38363 + return mode;
38364 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
38365 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
38366 + return 0;
38367 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
38368 + return 0;
38369 +
38370 + return (reqmode);
38371 +}
38372 +
38373 +__u32
38374 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
38375 +{
38376 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
38377 +}
38378 +
38379 +__u32
38380 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
38381 +{
38382 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
38383 +}
38384 +
38385 +__u32
38386 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
38387 +{
38388 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
38389 +}
38390 +
38391 +__u32
38392 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
38393 +{
38394 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
38395 +}
38396 +
38397 +__u32
38398 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
38399 + mode_t mode)
38400 +{
38401 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
38402 + return 1;
38403 +
38404 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
38405 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
38406 + GR_FCHMOD_ACL_MSG);
38407 + } else {
38408 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
38409 + }
38410 +}
38411 +
38412 +__u32
38413 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
38414 + mode_t mode)
38415 +{
38416 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
38417 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
38418 + GR_CHMOD_ACL_MSG);
38419 + } else {
38420 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
38421 + }
38422 +}
38423 +
38424 +__u32
38425 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
38426 +{
38427 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
38428 +}
38429 +
38430 +__u32
38431 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
38432 +{
38433 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
38434 +}
38435 +
38436 +__u32
38437 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
38438 +{
38439 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
38440 + GR_UNIXCONNECT_ACL_MSG);
38441 +}
38442 +
38443 +/* hardlinks require at minimum create permission,
38444 + any additional privilege required is based on the
38445 + privilege of the file being linked to
38446 +*/
38447 +__u32
38448 +gr_acl_handle_link(const struct dentry * new_dentry,
38449 + const struct dentry * parent_dentry,
38450 + const struct vfsmount * parent_mnt,
38451 + const struct dentry * old_dentry,
38452 + const struct vfsmount * old_mnt, const char *to)
38453 +{
38454 + __u32 mode;
38455 + __u32 needmode = GR_CREATE | GR_LINK;
38456 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
38457 +
38458 + mode =
38459 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
38460 + old_mnt);
38461 +
38462 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
38463 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
38464 + return mode;
38465 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
38466 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
38467 + return 0;
38468 + } else if (unlikely((mode & needmode) != needmode))
38469 + return 0;
38470 +
38471 + return 1;
38472 +}
38473 +
38474 +__u32
38475 +gr_acl_handle_symlink(const struct dentry * new_dentry,
38476 + const struct dentry * parent_dentry,
38477 + const struct vfsmount * parent_mnt, const char *from)
38478 +{
38479 + __u32 needmode = GR_WRITE | GR_CREATE;
38480 + __u32 mode;
38481 +
38482 + mode =
38483 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
38484 + GR_CREATE | GR_AUDIT_CREATE |
38485 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
38486 +
38487 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
38488 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
38489 + return mode;
38490 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
38491 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
38492 + return 0;
38493 + } else if (unlikely((mode & needmode) != needmode))
38494 + return 0;
38495 +
38496 + return (GR_WRITE | GR_CREATE);
38497 +}
38498 +
38499 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
38500 +{
38501 + __u32 mode;
38502 +
38503 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
38504 +
38505 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
38506 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
38507 + return mode;
38508 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
38509 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
38510 + return 0;
38511 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
38512 + return 0;
38513 +
38514 + return (reqmode);
38515 +}
38516 +
38517 +__u32
38518 +gr_acl_handle_mknod(const struct dentry * new_dentry,
38519 + const struct dentry * parent_dentry,
38520 + const struct vfsmount * parent_mnt,
38521 + const int mode)
38522 +{
38523 + __u32 reqmode = GR_WRITE | GR_CREATE;
38524 + if (unlikely(mode & (S_ISUID | S_ISGID)))
38525 + reqmode |= GR_SETID;
38526 +
38527 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
38528 + reqmode, GR_MKNOD_ACL_MSG);
38529 +}
38530 +
38531 +__u32
38532 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
38533 + const struct dentry *parent_dentry,
38534 + const struct vfsmount *parent_mnt)
38535 +{
38536 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
38537 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
38538 +}
38539 +
38540 +#define RENAME_CHECK_SUCCESS(old, new) \
38541 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
38542 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
38543 +
38544 +int
38545 +gr_acl_handle_rename(struct dentry *new_dentry,
38546 + struct dentry *parent_dentry,
38547 + const struct vfsmount *parent_mnt,
38548 + struct dentry *old_dentry,
38549 + struct inode *old_parent_inode,
38550 + struct vfsmount *old_mnt, const char *newname)
38551 +{
38552 + __u32 comp1, comp2;
38553 + int error = 0;
38554 +
38555 + if (unlikely(!gr_acl_is_enabled()))
38556 + return 0;
38557 +
38558 + if (!new_dentry->d_inode) {
38559 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
38560 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
38561 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
38562 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
38563 + GR_DELETE | GR_AUDIT_DELETE |
38564 + GR_AUDIT_READ | GR_AUDIT_WRITE |
38565 + GR_SUPPRESS, old_mnt);
38566 + } else {
38567 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
38568 + GR_CREATE | GR_DELETE |
38569 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
38570 + GR_AUDIT_READ | GR_AUDIT_WRITE |
38571 + GR_SUPPRESS, parent_mnt);
38572 + comp2 =
38573 + gr_search_file(old_dentry,
38574 + GR_READ | GR_WRITE | GR_AUDIT_READ |
38575 + GR_DELETE | GR_AUDIT_DELETE |
38576 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
38577 + }
38578 +
38579 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
38580 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
38581 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
38582 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
38583 + && !(comp2 & GR_SUPPRESS)) {
38584 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
38585 + error = -EACCES;
38586 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
38587 + error = -EACCES;
38588 +
38589 + return error;
38590 +}
38591 +
38592 +void
38593 +gr_acl_handle_exit(void)
38594 +{
38595 + u16 id;
38596 + char *rolename;
38597 + struct file *exec_file;
38598 +
38599 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
38600 + id = current->acl_role_id;
38601 + rolename = current->role->rolename;
38602 + gr_set_acls(1);
38603 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
38604 + }
38605 +
38606 + write_lock(&grsec_exec_file_lock);
38607 + exec_file = current->exec_file;
38608 + current->exec_file = NULL;
38609 + write_unlock(&grsec_exec_file_lock);
38610 +
38611 + if (exec_file)
38612 + fput(exec_file);
38613 +}
38614 +
38615 +int
38616 +gr_acl_handle_procpidmem(const struct task_struct *task)
38617 +{
38618 + if (unlikely(!gr_acl_is_enabled()))
38619 + return 0;
38620 +
38621 + if (task != current && task->acl->mode & GR_PROTPROCFD)
38622 + return -EACCES;
38623 +
38624 + return 0;
38625 +}
38626 diff -urNp linux-2.6.34.1/grsecurity/gracl_ip.c linux-2.6.34.1/grsecurity/gracl_ip.c
38627 --- linux-2.6.34.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
38628 +++ linux-2.6.34.1/grsecurity/gracl_ip.c 2010-07-07 09:04:56.000000000 -0400
38629 @@ -0,0 +1,339 @@
38630 +#include <linux/kernel.h>
38631 +#include <asm/uaccess.h>
38632 +#include <asm/errno.h>
38633 +#include <net/sock.h>
38634 +#include <linux/file.h>
38635 +#include <linux/fs.h>
38636 +#include <linux/net.h>
38637 +#include <linux/in.h>
38638 +#include <linux/skbuff.h>
38639 +#include <linux/ip.h>
38640 +#include <linux/udp.h>
38641 +#include <linux/smp_lock.h>
38642 +#include <linux/types.h>
38643 +#include <linux/sched.h>
38644 +#include <linux/netdevice.h>
38645 +#include <linux/inetdevice.h>
38646 +#include <linux/gracl.h>
38647 +#include <linux/grsecurity.h>
38648 +#include <linux/grinternal.h>
38649 +
38650 +#define GR_BIND 0x01
38651 +#define GR_CONNECT 0x02
38652 +#define GR_INVERT 0x04
38653 +#define GR_BINDOVERRIDE 0x08
38654 +#define GR_CONNECTOVERRIDE 0x10
38655 +
38656 +static const char * gr_protocols[256] = {
38657 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
38658 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
38659 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
38660 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
38661 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
38662 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
38663 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
38664 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
38665 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
38666 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
38667 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
38668 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
38669 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
38670 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
38671 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
38672 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
38673 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
38674 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
38675 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
38676 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
38677 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
38678 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
38679 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
38680 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
38681 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
38682 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
38683 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
38684 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
38685 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
38686 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
38687 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
38688 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
38689 + };
38690 +
38691 +static const char * gr_socktypes[11] = {
38692 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
38693 + "unknown:7", "unknown:8", "unknown:9", "packet"
38694 + };
38695 +
38696 +const char *
38697 +gr_proto_to_name(unsigned char proto)
38698 +{
38699 + return gr_protocols[proto];
38700 +}
38701 +
38702 +const char *
38703 +gr_socktype_to_name(unsigned char type)
38704 +{
38705 + return gr_socktypes[type];
38706 +}
38707 +
38708 +int
38709 +gr_search_socket(const int domain, const int type, const int protocol)
38710 +{
38711 + struct acl_subject_label *curr;
38712 + const struct cred *cred = current_cred();
38713 +
38714 + if (unlikely(!gr_acl_is_enabled()))
38715 + goto exit;
38716 +
38717 + if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
38718 + || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
38719 + goto exit; // let the kernel handle it
38720 +
38721 + curr = current->acl;
38722 +
38723 + if (!curr->ips)
38724 + goto exit;
38725 +
38726 + if ((curr->ip_type & (1 << type)) &&
38727 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
38728 + goto exit;
38729 +
38730 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
38731 + /* we don't place acls on raw sockets, and sometimes
38732 + dgram/ip sockets are opened for ioctl and not
38733 + bind/connect, so we'll fake a bind learn log */
38734 + if (type == SOCK_RAW || type == SOCK_PACKET) {
38735 + __u32 fakeip = 0;
38736 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38737 + current->role->roletype, cred->uid,
38738 + cred->gid, current->exec_file ?
38739 + gr_to_filename(current->exec_file->f_path.dentry,
38740 + current->exec_file->f_path.mnt) :
38741 + curr->filename, curr->filename,
38742 + &fakeip, 0, type,
38743 + protocol, GR_CONNECT, &current->signal->curr_ip);
38744 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
38745 + __u32 fakeip = 0;
38746 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38747 + current->role->roletype, cred->uid,
38748 + cred->gid, current->exec_file ?
38749 + gr_to_filename(current->exec_file->f_path.dentry,
38750 + current->exec_file->f_path.mnt) :
38751 + curr->filename, curr->filename,
38752 + &fakeip, 0, type,
38753 + protocol, GR_BIND, &current->signal->curr_ip);
38754 + }
38755 + /* we'll log when they use connect or bind */
38756 + goto exit;
38757 + }
38758 +
38759 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet",
38760 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
38761 +
38762 + return 0;
38763 + exit:
38764 + return 1;
38765 +}
38766 +
38767 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
38768 +{
38769 + if ((ip->mode & mode) &&
38770 + (ip_port >= ip->low) &&
38771 + (ip_port <= ip->high) &&
38772 + ((ntohl(ip_addr) & our_netmask) ==
38773 + (ntohl(our_addr) & our_netmask))
38774 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
38775 + && (ip->type & (1 << type))) {
38776 + if (ip->mode & GR_INVERT)
38777 + return 2; // specifically denied
38778 + else
38779 + return 1; // allowed
38780 + }
38781 +
38782 + return 0; // not specifically allowed, may continue parsing
38783 +}
38784 +
38785 +static int
38786 +gr_search_connectbind(const int full_mode, struct sock *sk,
38787 + struct sockaddr_in *addr, const int type)
38788 +{
38789 + char iface[IFNAMSIZ] = {0};
38790 + struct acl_subject_label *curr;
38791 + struct acl_ip_label *ip;
38792 + struct inet_sock *isk;
38793 + struct net_device *dev;
38794 + struct in_device *idev;
38795 + unsigned long i;
38796 + int ret;
38797 + int mode = full_mode & (GR_BIND | GR_CONNECT);
38798 + __u32 ip_addr = 0;
38799 + __u32 our_addr;
38800 + __u32 our_netmask;
38801 + char *p;
38802 + __u16 ip_port = 0;
38803 + const struct cred *cred = current_cred();
38804 +
38805 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
38806 + return 0;
38807 +
38808 + curr = current->acl;
38809 + isk = inet_sk(sk);
38810 +
38811 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
38812 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
38813 + addr->sin_addr.s_addr = curr->inaddr_any_override;
38814 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
38815 + struct sockaddr_in saddr;
38816 + int err;
38817 +
38818 + saddr.sin_family = AF_INET;
38819 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
38820 + saddr.sin_port = isk->inet_sport;
38821 +
38822 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
38823 + if (err)
38824 + return err;
38825 +
38826 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
38827 + if (err)
38828 + return err;
38829 + }
38830 +
38831 + if (!curr->ips)
38832 + return 0;
38833 +
38834 + ip_addr = addr->sin_addr.s_addr;
38835 + ip_port = ntohs(addr->sin_port);
38836 +
38837 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
38838 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38839 + current->role->roletype, cred->uid,
38840 + cred->gid, current->exec_file ?
38841 + gr_to_filename(current->exec_file->f_path.dentry,
38842 + current->exec_file->f_path.mnt) :
38843 + curr->filename, curr->filename,
38844 + &ip_addr, ip_port, type,
38845 + sk->sk_protocol, mode, &current->signal->curr_ip);
38846 + return 0;
38847 + }
38848 +
38849 + for (i = 0; i < curr->ip_num; i++) {
38850 + ip = *(curr->ips + i);
38851 + if (ip->iface != NULL) {
38852 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
38853 + p = strchr(iface, ':');
38854 + if (p != NULL)
38855 + *p = '\0';
38856 + dev = dev_get_by_name(sock_net(sk), iface);
38857 + if (dev == NULL)
38858 + continue;
38859 + idev = in_dev_get(dev);
38860 + if (idev == NULL) {
38861 + dev_put(dev);
38862 + continue;
38863 + }
38864 + rcu_read_lock();
38865 + for_ifa(idev) {
38866 + if (!strcmp(ip->iface, ifa->ifa_label)) {
38867 + our_addr = ifa->ifa_address;
38868 + our_netmask = 0xffffffff;
38869 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
38870 + if (ret == 1) {
38871 + rcu_read_unlock();
38872 + in_dev_put(idev);
38873 + dev_put(dev);
38874 + return 0;
38875 + } else if (ret == 2) {
38876 + rcu_read_unlock();
38877 + in_dev_put(idev);
38878 + dev_put(dev);
38879 + goto denied;
38880 + }
38881 + }
38882 + } endfor_ifa(idev);
38883 + rcu_read_unlock();
38884 + in_dev_put(idev);
38885 + dev_put(dev);
38886 + } else {
38887 + our_addr = ip->addr;
38888 + our_netmask = ip->netmask;
38889 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
38890 + if (ret == 1)
38891 + return 0;
38892 + else if (ret == 2)
38893 + goto denied;
38894 + }
38895 + }
38896 +
38897 +denied:
38898 + if (mode == GR_BIND)
38899 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
38900 + else if (mode == GR_CONNECT)
38901 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
38902 +
38903 + return -EACCES;
38904 +}
38905 +
38906 +int
38907 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
38908 +{
38909 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
38910 +}
38911 +
38912 +int
38913 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
38914 +{
38915 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
38916 +}
38917 +
38918 +int gr_search_listen(struct socket *sock)
38919 +{
38920 + struct sock *sk = sock->sk;
38921 + struct sockaddr_in addr;
38922 +
38923 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
38924 + addr.sin_port = inet_sk(sk)->inet_sport;
38925 +
38926 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
38927 +}
38928 +
38929 +int gr_search_accept(struct socket *sock)
38930 +{
38931 + struct sock *sk = sock->sk;
38932 + struct sockaddr_in addr;
38933 +
38934 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
38935 + addr.sin_port = inet_sk(sk)->inet_sport;
38936 +
38937 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
38938 +}
38939 +
38940 +int
38941 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
38942 +{
38943 + if (addr)
38944 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
38945 + else {
38946 + struct sockaddr_in sin;
38947 + const struct inet_sock *inet = inet_sk(sk);
38948 +
38949 + sin.sin_addr.s_addr = inet->inet_daddr;
38950 + sin.sin_port = inet->inet_dport;
38951 +
38952 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
38953 + }
38954 +}
38955 +
38956 +int
38957 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
38958 +{
38959 + struct sockaddr_in sin;
38960 +
38961 + if (unlikely(skb->len < sizeof (struct udphdr)))
38962 + return 0; // skip this packet
38963 +
38964 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
38965 + sin.sin_port = udp_hdr(skb)->source;
38966 +
38967 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
38968 +}
38969 diff -urNp linux-2.6.34.1/grsecurity/gracl_learn.c linux-2.6.34.1/grsecurity/gracl_learn.c
38970 --- linux-2.6.34.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
38971 +++ linux-2.6.34.1/grsecurity/gracl_learn.c 2010-07-07 09:04:56.000000000 -0400
38972 @@ -0,0 +1,211 @@
38973 +#include <linux/kernel.h>
38974 +#include <linux/mm.h>
38975 +#include <linux/sched.h>
38976 +#include <linux/poll.h>
38977 +#include <linux/smp_lock.h>
38978 +#include <linux/string.h>
38979 +#include <linux/file.h>
38980 +#include <linux/types.h>
38981 +#include <linux/vmalloc.h>
38982 +#include <linux/grinternal.h>
38983 +
38984 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
38985 + size_t count, loff_t *ppos);
38986 +extern int gr_acl_is_enabled(void);
38987 +
38988 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
38989 +static int gr_learn_attached;
38990 +
38991 +/* use a 512k buffer */
38992 +#define LEARN_BUFFER_SIZE (512 * 1024)
38993 +
38994 +static DEFINE_SPINLOCK(gr_learn_lock);
38995 +static DECLARE_MUTEX(gr_learn_user_sem);
38996 +
38997 +/* we need to maintain two buffers, so that the kernel context of grlearn
38998 + uses a semaphore around the userspace copying, and the other kernel contexts
38999 + use a spinlock when copying into the buffer, since they cannot sleep
39000 +*/
39001 +static char *learn_buffer;
39002 +static char *learn_buffer_user;
39003 +static int learn_buffer_len;
39004 +static int learn_buffer_user_len;
39005 +
39006 +static ssize_t
39007 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
39008 +{
39009 + DECLARE_WAITQUEUE(wait, current);
39010 + ssize_t retval = 0;
39011 +
39012 + add_wait_queue(&learn_wait, &wait);
39013 + set_current_state(TASK_INTERRUPTIBLE);
39014 + do {
39015 + down(&gr_learn_user_sem);
39016 + spin_lock(&gr_learn_lock);
39017 + if (learn_buffer_len)
39018 + break;
39019 + spin_unlock(&gr_learn_lock);
39020 + up(&gr_learn_user_sem);
39021 + if (file->f_flags & O_NONBLOCK) {
39022 + retval = -EAGAIN;
39023 + goto out;
39024 + }
39025 + if (signal_pending(current)) {
39026 + retval = -ERESTARTSYS;
39027 + goto out;
39028 + }
39029 +
39030 + schedule();
39031 + } while (1);
39032 +
39033 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
39034 + learn_buffer_user_len = learn_buffer_len;
39035 + retval = learn_buffer_len;
39036 + learn_buffer_len = 0;
39037 +
39038 + spin_unlock(&gr_learn_lock);
39039 +
39040 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
39041 + retval = -EFAULT;
39042 +
39043 + up(&gr_learn_user_sem);
39044 +out:
39045 + set_current_state(TASK_RUNNING);
39046 + remove_wait_queue(&learn_wait, &wait);
39047 + return retval;
39048 +}
39049 +
39050 +static unsigned int
39051 +poll_learn(struct file * file, poll_table * wait)
39052 +{
39053 + poll_wait(file, &learn_wait, wait);
39054 +
39055 + if (learn_buffer_len)
39056 + return (POLLIN | POLLRDNORM);
39057 +
39058 + return 0;
39059 +}
39060 +
39061 +void
39062 +gr_clear_learn_entries(void)
39063 +{
39064 + char *tmp;
39065 +
39066 + down(&gr_learn_user_sem);
39067 + if (learn_buffer != NULL) {
39068 + spin_lock(&gr_learn_lock);
39069 + tmp = learn_buffer;
39070 + learn_buffer = NULL;
39071 + spin_unlock(&gr_learn_lock);
39072 + vfree(tmp);
39073 + }
39074 + if (learn_buffer_user != NULL) {
39075 + vfree(learn_buffer_user);
39076 + learn_buffer_user = NULL;
39077 + }
39078 + learn_buffer_len = 0;
39079 + up(&gr_learn_user_sem);
39080 +
39081 + return;
39082 +}
39083 +
39084 +void
39085 +gr_add_learn_entry(const char *fmt, ...)
39086 +{
39087 + va_list args;
39088 + unsigned int len;
39089 +
39090 + if (!gr_learn_attached)
39091 + return;
39092 +
39093 + spin_lock(&gr_learn_lock);
39094 +
39095 + /* leave a gap at the end so we know when it's "full" but don't have to
39096 + compute the exact length of the string we're trying to append
39097 + */
39098 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
39099 + spin_unlock(&gr_learn_lock);
39100 + wake_up_interruptible(&learn_wait);
39101 + return;
39102 + }
39103 + if (learn_buffer == NULL) {
39104 + spin_unlock(&gr_learn_lock);
39105 + return;
39106 + }
39107 +
39108 + va_start(args, fmt);
39109 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
39110 + va_end(args);
39111 +
39112 + learn_buffer_len += len + 1;
39113 +
39114 + spin_unlock(&gr_learn_lock);
39115 + wake_up_interruptible(&learn_wait);
39116 +
39117 + return;
39118 +}
39119 +
39120 +static int
39121 +open_learn(struct inode *inode, struct file *file)
39122 +{
39123 + if (file->f_mode & FMODE_READ && gr_learn_attached)
39124 + return -EBUSY;
39125 + if (file->f_mode & FMODE_READ) {
39126 + int retval = 0;
39127 + down(&gr_learn_user_sem);
39128 + if (learn_buffer == NULL)
39129 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
39130 + if (learn_buffer_user == NULL)
39131 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
39132 + if (learn_buffer == NULL) {
39133 + retval = -ENOMEM;
39134 + goto out_error;
39135 + }
39136 + if (learn_buffer_user == NULL) {
39137 + retval = -ENOMEM;
39138 + goto out_error;
39139 + }
39140 + learn_buffer_len = 0;
39141 + learn_buffer_user_len = 0;
39142 + gr_learn_attached = 1;
39143 +out_error:
39144 + up(&gr_learn_user_sem);
39145 + return retval;
39146 + }
39147 + return 0;
39148 +}
39149 +
39150 +static int
39151 +close_learn(struct inode *inode, struct file *file)
39152 +{
39153 + char *tmp;
39154 +
39155 + if (file->f_mode & FMODE_READ) {
39156 + down(&gr_learn_user_sem);
39157 + if (learn_buffer != NULL) {
39158 + spin_lock(&gr_learn_lock);
39159 + tmp = learn_buffer;
39160 + learn_buffer = NULL;
39161 + spin_unlock(&gr_learn_lock);
39162 + vfree(tmp);
39163 + }
39164 + if (learn_buffer_user != NULL) {
39165 + vfree(learn_buffer_user);
39166 + learn_buffer_user = NULL;
39167 + }
39168 + learn_buffer_len = 0;
39169 + learn_buffer_user_len = 0;
39170 + gr_learn_attached = 0;
39171 + up(&gr_learn_user_sem);
39172 + }
39173 +
39174 + return 0;
39175 +}
39176 +
39177 +const struct file_operations grsec_fops = {
39178 + .read = read_learn,
39179 + .write = write_grsec_handler,
39180 + .open = open_learn,
39181 + .release = close_learn,
39182 + .poll = poll_learn,
39183 +};
39184 diff -urNp linux-2.6.34.1/grsecurity/gracl_res.c linux-2.6.34.1/grsecurity/gracl_res.c
39185 --- linux-2.6.34.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
39186 +++ linux-2.6.34.1/grsecurity/gracl_res.c 2010-07-07 09:04:56.000000000 -0400
39187 @@ -0,0 +1,68 @@
39188 +#include <linux/kernel.h>
39189 +#include <linux/sched.h>
39190 +#include <linux/gracl.h>
39191 +#include <linux/grinternal.h>
39192 +
39193 +static const char *restab_log[] = {
39194 + [RLIMIT_CPU] = "RLIMIT_CPU",
39195 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
39196 + [RLIMIT_DATA] = "RLIMIT_DATA",
39197 + [RLIMIT_STACK] = "RLIMIT_STACK",
39198 + [RLIMIT_CORE] = "RLIMIT_CORE",
39199 + [RLIMIT_RSS] = "RLIMIT_RSS",
39200 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
39201 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
39202 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
39203 + [RLIMIT_AS] = "RLIMIT_AS",
39204 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
39205 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
39206 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
39207 + [RLIMIT_NICE] = "RLIMIT_NICE",
39208 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
39209 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
39210 + [GR_CRASH_RES] = "RLIMIT_CRASH"
39211 +};
39212 +
39213 +void
39214 +gr_log_resource(const struct task_struct *task,
39215 + const int res, const unsigned long wanted, const int gt)
39216 +{
39217 + const struct cred *cred;
39218 + unsigned long rlim;
39219 +
39220 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
39221 + return;
39222 +
39223 + // not yet supported resource
39224 + if (unlikely(!restab_log[res]))
39225 + return;
39226 +
39227 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
39228 + rlim = task_rlimit_max(task, res);
39229 + else
39230 + rlim = task_rlimit(task, res);
39231 +
39232 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
39233 + return;
39234 +
39235 + rcu_read_lock();
39236 + cred = __task_cred(task);
39237 +
39238 + if (res == RLIMIT_NPROC &&
39239 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
39240 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
39241 + goto out_rcu_unlock;
39242 + else if (res == RLIMIT_MEMLOCK &&
39243 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
39244 + goto out_rcu_unlock;
39245 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
39246 + goto out_rcu_unlock;
39247 + rcu_read_unlock();
39248 +
39249 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
39250 +
39251 + return;
39252 +out_rcu_unlock:
39253 + rcu_read_unlock();
39254 + return;
39255 +}
39256 diff -urNp linux-2.6.34.1/grsecurity/gracl_segv.c linux-2.6.34.1/grsecurity/gracl_segv.c
39257 --- linux-2.6.34.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
39258 +++ linux-2.6.34.1/grsecurity/gracl_segv.c 2010-07-07 09:04:56.000000000 -0400
39259 @@ -0,0 +1,310 @@
39260 +#include <linux/kernel.h>
39261 +#include <linux/mm.h>
39262 +#include <asm/uaccess.h>
39263 +#include <asm/errno.h>
39264 +#include <asm/mman.h>
39265 +#include <net/sock.h>
39266 +#include <linux/file.h>
39267 +#include <linux/fs.h>
39268 +#include <linux/net.h>
39269 +#include <linux/in.h>
39270 +#include <linux/smp_lock.h>
39271 +#include <linux/slab.h>
39272 +#include <linux/types.h>
39273 +#include <linux/sched.h>
39274 +#include <linux/timer.h>
39275 +#include <linux/gracl.h>
39276 +#include <linux/grsecurity.h>
39277 +#include <linux/grinternal.h>
39278 +
39279 +static struct crash_uid *uid_set;
39280 +static unsigned short uid_used;
39281 +static DEFINE_SPINLOCK(gr_uid_lock);
39282 +extern rwlock_t gr_inode_lock;
39283 +extern struct acl_subject_label *
39284 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
39285 + struct acl_role_label *role);
39286 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
39287 +
39288 +int
39289 +gr_init_uidset(void)
39290 +{
39291 + uid_set =
39292 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
39293 + uid_used = 0;
39294 +
39295 + return uid_set ? 1 : 0;
39296 +}
39297 +
39298 +void
39299 +gr_free_uidset(void)
39300 +{
39301 + if (uid_set)
39302 + kfree(uid_set);
39303 +
39304 + return;
39305 +}
39306 +
39307 +int
39308 +gr_find_uid(const uid_t uid)
39309 +{
39310 + struct crash_uid *tmp = uid_set;
39311 + uid_t buid;
39312 + int low = 0, high = uid_used - 1, mid;
39313 +
39314 + while (high >= low) {
39315 + mid = (low + high) >> 1;
39316 + buid = tmp[mid].uid;
39317 + if (buid == uid)
39318 + return mid;
39319 + if (buid > uid)
39320 + high = mid - 1;
39321 + if (buid < uid)
39322 + low = mid + 1;
39323 + }
39324 +
39325 + return -1;
39326 +}
39327 +
39328 +static __inline__ void
39329 +gr_insertsort(void)
39330 +{
39331 + unsigned short i, j;
39332 + struct crash_uid index;
39333 +
39334 + for (i = 1; i < uid_used; i++) {
39335 + index = uid_set[i];
39336 + j = i;
39337 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
39338 + uid_set[j] = uid_set[j - 1];
39339 + j--;
39340 + }
39341 + uid_set[j] = index;
39342 + }
39343 +
39344 + return;
39345 +}
39346 +
39347 +static __inline__ void
39348 +gr_insert_uid(const uid_t uid, const unsigned long expires)
39349 +{
39350 + int loc;
39351 +
39352 + if (uid_used == GR_UIDTABLE_MAX)
39353 + return;
39354 +
39355 + loc = gr_find_uid(uid);
39356 +
39357 + if (loc >= 0) {
39358 + uid_set[loc].expires = expires;
39359 + return;
39360 + }
39361 +
39362 + uid_set[uid_used].uid = uid;
39363 + uid_set[uid_used].expires = expires;
39364 + uid_used++;
39365 +
39366 + gr_insertsort();
39367 +
39368 + return;
39369 +}
39370 +
39371 +void
39372 +gr_remove_uid(const unsigned short loc)
39373 +{
39374 + unsigned short i;
39375 +
39376 + for (i = loc + 1; i < uid_used; i++)
39377 + uid_set[i - 1] = uid_set[i];
39378 +
39379 + uid_used--;
39380 +
39381 + return;
39382 +}
39383 +
39384 +int
39385 +gr_check_crash_uid(const uid_t uid)
39386 +{
39387 + int loc;
39388 + int ret = 0;
39389 +
39390 + if (unlikely(!gr_acl_is_enabled()))
39391 + return 0;
39392 +
39393 + spin_lock(&gr_uid_lock);
39394 + loc = gr_find_uid(uid);
39395 +
39396 + if (loc < 0)
39397 + goto out_unlock;
39398 +
39399 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
39400 + gr_remove_uid(loc);
39401 + else
39402 + ret = 1;
39403 +
39404 +out_unlock:
39405 + spin_unlock(&gr_uid_lock);
39406 + return ret;
39407 +}
39408 +
39409 +static __inline__ int
39410 +proc_is_setxid(const struct cred *cred)
39411 +{
39412 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
39413 + cred->uid != cred->fsuid)
39414 + return 1;
39415 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
39416 + cred->gid != cred->fsgid)
39417 + return 1;
39418 +
39419 + return 0;
39420 +}
39421 +static __inline__ int
39422 +gr_fake_force_sig(int sig, struct task_struct *t)
39423 +{
39424 + unsigned long int flags;
39425 + int ret, blocked, ignored;
39426 + struct k_sigaction *action;
39427 +
39428 + spin_lock_irqsave(&t->sighand->siglock, flags);
39429 + action = &t->sighand->action[sig-1];
39430 + ignored = action->sa.sa_handler == SIG_IGN;
39431 + blocked = sigismember(&t->blocked, sig);
39432 + if (blocked || ignored) {
39433 + action->sa.sa_handler = SIG_DFL;
39434 + if (blocked) {
39435 + sigdelset(&t->blocked, sig);
39436 + recalc_sigpending_and_wake(t);
39437 + }
39438 + }
39439 + if (action->sa.sa_handler == SIG_DFL)
39440 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
39441 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
39442 +
39443 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
39444 +
39445 + return ret;
39446 +}
39447 +
39448 +void
39449 +gr_handle_crash(struct task_struct *task, const int sig)
39450 +{
39451 + struct acl_subject_label *curr;
39452 + struct acl_subject_label *curr2;
39453 + struct task_struct *tsk, *tsk2;
39454 + const struct cred *cred;
39455 + const struct cred *cred2;
39456 +
39457 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
39458 + return;
39459 +
39460 + if (unlikely(!gr_acl_is_enabled()))
39461 + return;
39462 +
39463 + curr = task->acl;
39464 +
39465 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
39466 + return;
39467 +
39468 + if (time_before_eq(curr->expires, get_seconds())) {
39469 + curr->expires = 0;
39470 + curr->crashes = 0;
39471 + }
39472 +
39473 + curr->crashes++;
39474 +
39475 + if (!curr->expires)
39476 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
39477 +
39478 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
39479 + time_after(curr->expires, get_seconds())) {
39480 + rcu_read_lock();
39481 + cred = __task_cred(task);
39482 + if (cred->uid && proc_is_setxid(cred)) {
39483 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
39484 + spin_lock(&gr_uid_lock);
39485 + gr_insert_uid(cred->uid, curr->expires);
39486 + spin_unlock(&gr_uid_lock);
39487 + curr->expires = 0;
39488 + curr->crashes = 0;
39489 + read_lock(&tasklist_lock);
39490 + do_each_thread(tsk2, tsk) {
39491 + cred2 = __task_cred(tsk);
39492 + if (tsk != task && cred2->uid == cred->uid)
39493 + gr_fake_force_sig(SIGKILL, tsk);
39494 + } while_each_thread(tsk2, tsk);
39495 + read_unlock(&tasklist_lock);
39496 + } else {
39497 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
39498 + read_lock(&tasklist_lock);
39499 + do_each_thread(tsk2, tsk) {
39500 + if (likely(tsk != task)) {
39501 + curr2 = tsk->acl;
39502 +
39503 + if (curr2->device == curr->device &&
39504 + curr2->inode == curr->inode)
39505 + gr_fake_force_sig(SIGKILL, tsk);
39506 + }
39507 + } while_each_thread(tsk2, tsk);
39508 + read_unlock(&tasklist_lock);
39509 + }
39510 + rcu_read_unlock();
39511 + }
39512 +
39513 + return;
39514 +}
39515 +
39516 +int
39517 +gr_check_crash_exec(const struct file *filp)
39518 +{
39519 + struct acl_subject_label *curr;
39520 +
39521 + if (unlikely(!gr_acl_is_enabled()))
39522 + return 0;
39523 +
39524 + read_lock(&gr_inode_lock);
39525 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
39526 + filp->f_path.dentry->d_inode->i_sb->s_dev,
39527 + current->role);
39528 + read_unlock(&gr_inode_lock);
39529 +
39530 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
39531 + (!curr->crashes && !curr->expires))
39532 + return 0;
39533 +
39534 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
39535 + time_after(curr->expires, get_seconds()))
39536 + return 1;
39537 + else if (time_before_eq(curr->expires, get_seconds())) {
39538 + curr->crashes = 0;
39539 + curr->expires = 0;
39540 + }
39541 +
39542 + return 0;
39543 +}
39544 +
39545 +void
39546 +gr_handle_alertkill(struct task_struct *task)
39547 +{
39548 + struct acl_subject_label *curracl;
39549 + __u32 curr_ip;
39550 + struct task_struct *p, *p2;
39551 +
39552 + if (unlikely(!gr_acl_is_enabled()))
39553 + return;
39554 +
39555 + curracl = task->acl;
39556 + curr_ip = task->signal->curr_ip;
39557 +
39558 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
39559 + read_lock(&tasklist_lock);
39560 + do_each_thread(p2, p) {
39561 + if (p->signal->curr_ip == curr_ip)
39562 + gr_fake_force_sig(SIGKILL, p);
39563 + } while_each_thread(p2, p);
39564 + read_unlock(&tasklist_lock);
39565 + } else if (curracl->mode & GR_KILLPROC)
39566 + gr_fake_force_sig(SIGKILL, task);
39567 +
39568 + return;
39569 +}
39570 diff -urNp linux-2.6.34.1/grsecurity/gracl_shm.c linux-2.6.34.1/grsecurity/gracl_shm.c
39571 --- linux-2.6.34.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
39572 +++ linux-2.6.34.1/grsecurity/gracl_shm.c 2010-07-07 09:04:56.000000000 -0400
39573 @@ -0,0 +1,40 @@
39574 +#include <linux/kernel.h>
39575 +#include <linux/mm.h>
39576 +#include <linux/sched.h>
39577 +#include <linux/file.h>
39578 +#include <linux/ipc.h>
39579 +#include <linux/gracl.h>
39580 +#include <linux/grsecurity.h>
39581 +#include <linux/grinternal.h>
39582 +
39583 +int
39584 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
39585 + const time_t shm_createtime, const uid_t cuid, const int shmid)
39586 +{
39587 + struct task_struct *task;
39588 +
39589 + if (!gr_acl_is_enabled())
39590 + return 1;
39591 +
39592 + rcu_read_lock();
39593 + read_lock(&tasklist_lock);
39594 +
39595 + task = find_task_by_vpid(shm_cprid);
39596 +
39597 + if (unlikely(!task))
39598 + task = find_task_by_vpid(shm_lapid);
39599 +
39600 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
39601 + (task->pid == shm_lapid)) &&
39602 + (task->acl->mode & GR_PROTSHM) &&
39603 + (task->acl != current->acl))) {
39604 + read_unlock(&tasklist_lock);
39605 + rcu_read_unlock();
39606 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
39607 + return 0;
39608 + }
39609 + read_unlock(&tasklist_lock);
39610 + rcu_read_unlock();
39611 +
39612 + return 1;
39613 +}
39614 diff -urNp linux-2.6.34.1/grsecurity/grsec_chdir.c linux-2.6.34.1/grsecurity/grsec_chdir.c
39615 --- linux-2.6.34.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
39616 +++ linux-2.6.34.1/grsecurity/grsec_chdir.c 2010-07-07 09:04:56.000000000 -0400
39617 @@ -0,0 +1,19 @@
39618 +#include <linux/kernel.h>
39619 +#include <linux/sched.h>
39620 +#include <linux/fs.h>
39621 +#include <linux/file.h>
39622 +#include <linux/grsecurity.h>
39623 +#include <linux/grinternal.h>
39624 +
39625 +void
39626 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
39627 +{
39628 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
39629 + if ((grsec_enable_chdir && grsec_enable_group &&
39630 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
39631 + !grsec_enable_group)) {
39632 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
39633 + }
39634 +#endif
39635 + return;
39636 +}
39637 diff -urNp linux-2.6.34.1/grsecurity/grsec_chroot.c linux-2.6.34.1/grsecurity/grsec_chroot.c
39638 --- linux-2.6.34.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
39639 +++ linux-2.6.34.1/grsecurity/grsec_chroot.c 2010-07-07 09:04:56.000000000 -0400
39640 @@ -0,0 +1,366 @@
39641 +#include <linux/kernel.h>
39642 +#include <linux/module.h>
39643 +#include <linux/sched.h>
39644 +#include <linux/file.h>
39645 +#include <linux/fs.h>
39646 +#include <linux/mount.h>
39647 +#include <linux/types.h>
39648 +#include <linux/pid_namespace.h>
39649 +#include <linux/grsecurity.h>
39650 +#include <linux/grinternal.h>
39651 +
39652 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
39653 +{
39654 +#ifdef CONFIG_GRKERNSEC
39655 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
39656 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
39657 + task->gr_is_chrooted = 1;
39658 + else
39659 + task->gr_is_chrooted = 0;
39660 +
39661 + task->gr_chroot_dentry = path->dentry;
39662 +#endif
39663 + return;
39664 +}
39665 +
39666 +void gr_clear_chroot_entries(struct task_struct *task)
39667 +{
39668 +#ifdef CONFIG_GRKERNSEC
39669 + task->gr_is_chrooted = 0;
39670 + task->gr_chroot_dentry = NULL;
39671 +#endif
39672 + return;
39673 +}
39674 +
39675 +int
39676 +gr_handle_chroot_unix(const pid_t pid)
39677 +{
39678 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
39679 + struct pid *spid = NULL;
39680 +
39681 + if (unlikely(!grsec_enable_chroot_unix))
39682 + return 1;
39683 +
39684 + if (likely(!proc_is_chrooted(current)))
39685 + return 1;
39686 +
39687 + rcu_read_lock();
39688 + read_lock(&tasklist_lock);
39689 +
39690 + spid = find_vpid(pid);
39691 + if (spid) {
39692 + struct task_struct *p;
39693 + p = pid_task(spid, PIDTYPE_PID);
39694 + if (unlikely(!have_same_root(current, p))) {
39695 + read_unlock(&tasklist_lock);
39696 + rcu_read_unlock();
39697 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
39698 + return 0;
39699 + }
39700 + }
39701 + read_unlock(&tasklist_lock);
39702 + rcu_read_unlock();
39703 +#endif
39704 + return 1;
39705 +}
39706 +
39707 +int
39708 +gr_handle_chroot_nice(void)
39709 +{
39710 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
39711 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
39712 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
39713 + return -EPERM;
39714 + }
39715 +#endif
39716 + return 0;
39717 +}
39718 +
39719 +int
39720 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
39721 +{
39722 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
39723 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
39724 + && proc_is_chrooted(current)) {
39725 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
39726 + return -EACCES;
39727 + }
39728 +#endif
39729 + return 0;
39730 +}
39731 +
39732 +int
39733 +gr_handle_chroot_rawio(const struct inode *inode)
39734 +{
39735 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
39736 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
39737 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
39738 + return 1;
39739 +#endif
39740 + return 0;
39741 +}
39742 +
39743 +int
39744 +gr_pid_is_chrooted(struct task_struct *p)
39745 +{
39746 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
39747 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
39748 + return 0;
39749 +
39750 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
39751 + !have_same_root(current, p)) {
39752 + return 1;
39753 + }
39754 +#endif
39755 + return 0;
39756 +}
39757 +
39758 +EXPORT_SYMBOL(gr_pid_is_chrooted);
39759 +
39760 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
39761 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
39762 +{
39763 + struct dentry *dentry = (struct dentry *)u_dentry;
39764 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
39765 + struct dentry *realroot;
39766 + struct vfsmount *realrootmnt;
39767 + struct dentry *currentroot;
39768 + struct vfsmount *currentmnt;
39769 + struct task_struct *reaper = &init_task;
39770 + int ret = 1;
39771 +
39772 + read_lock(&reaper->fs->lock);
39773 + realrootmnt = mntget(reaper->fs->root.mnt);
39774 + realroot = dget(reaper->fs->root.dentry);
39775 + read_unlock(&reaper->fs->lock);
39776 +
39777 + read_lock(&current->fs->lock);
39778 + currentmnt = mntget(current->fs->root.mnt);
39779 + currentroot = dget(current->fs->root.dentry);
39780 + read_unlock(&current->fs->lock);
39781 +
39782 + spin_lock(&dcache_lock);
39783 + for (;;) {
39784 + if (unlikely((dentry == realroot && mnt == realrootmnt)
39785 + || (dentry == currentroot && mnt == currentmnt)))
39786 + break;
39787 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
39788 + if (mnt->mnt_parent == mnt)
39789 + break;
39790 + dentry = mnt->mnt_mountpoint;
39791 + mnt = mnt->mnt_parent;
39792 + continue;
39793 + }
39794 + dentry = dentry->d_parent;
39795 + }
39796 + spin_unlock(&dcache_lock);
39797 +
39798 + dput(currentroot);
39799 + mntput(currentmnt);
39800 +
39801 + /* access is outside of chroot */
39802 + if (dentry == realroot && mnt == realrootmnt)
39803 + ret = 0;
39804 +
39805 + dput(realroot);
39806 + mntput(realrootmnt);
39807 + return ret;
39808 +}
39809 +#endif
39810 +
39811 +int
39812 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
39813 +{
39814 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
39815 + if (!grsec_enable_chroot_fchdir)
39816 + return 1;
39817 +
39818 + if (!proc_is_chrooted(current))
39819 + return 1;
39820 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
39821 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
39822 + return 0;
39823 + }
39824 +#endif
39825 + return 1;
39826 +}
39827 +
39828 +int
39829 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
39830 + const time_t shm_createtime)
39831 +{
39832 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
39833 + struct pid *pid = NULL;
39834 + time_t starttime;
39835 +
39836 + if (unlikely(!grsec_enable_chroot_shmat))
39837 + return 1;
39838 +
39839 + if (likely(!proc_is_chrooted(current)))
39840 + return 1;
39841 +
39842 + rcu_read_lock();
39843 + read_lock(&tasklist_lock);
39844 +
39845 + pid = find_vpid(shm_cprid);
39846 + if (pid) {
39847 + struct task_struct *p;
39848 + p = pid_task(pid, PIDTYPE_PID);
39849 + starttime = p->start_time.tv_sec;
39850 + if (unlikely(!have_same_root(current, p) &&
39851 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
39852 + read_unlock(&tasklist_lock);
39853 + rcu_read_unlock();
39854 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
39855 + return 0;
39856 + }
39857 + } else {
39858 + pid = find_vpid(shm_lapid);
39859 + if (pid) {
39860 + struct task_struct *p;
39861 + p = pid_task(pid, PIDTYPE_PID);
39862 + if (unlikely(!have_same_root(current, p))) {
39863 + read_unlock(&tasklist_lock);
39864 + rcu_read_unlock();
39865 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
39866 + return 0;
39867 + }
39868 + }
39869 + }
39870 +
39871 + read_unlock(&tasklist_lock);
39872 + rcu_read_unlock();
39873 +#endif
39874 + return 1;
39875 +}
39876 +
39877 +void
39878 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
39879 +{
39880 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
39881 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
39882 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
39883 +#endif
39884 + return;
39885 +}
39886 +
39887 +int
39888 +gr_handle_chroot_mknod(const struct dentry *dentry,
39889 + const struct vfsmount *mnt, const int mode)
39890 +{
39891 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
39892 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
39893 + proc_is_chrooted(current)) {
39894 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
39895 + return -EPERM;
39896 + }
39897 +#endif
39898 + return 0;
39899 +}
39900 +
39901 +int
39902 +gr_handle_chroot_mount(const struct dentry *dentry,
39903 + const struct vfsmount *mnt, const char *dev_name)
39904 +{
39905 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
39906 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
39907 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
39908 + return -EPERM;
39909 + }
39910 +#endif
39911 + return 0;
39912 +}
39913 +
39914 +int
39915 +gr_handle_chroot_pivot(void)
39916 +{
39917 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
39918 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
39919 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
39920 + return -EPERM;
39921 + }
39922 +#endif
39923 + return 0;
39924 +}
39925 +
39926 +int
39927 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
39928 +{
39929 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
39930 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
39931 + !gr_is_outside_chroot(dentry, mnt)) {
39932 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
39933 + return -EPERM;
39934 + }
39935 +#endif
39936 + return 0;
39937 +}
39938 +
39939 +int
39940 +gr_handle_chroot_caps(struct path *path)
39941 +{
39942 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
39943 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
39944 + (init_task.fs->root.dentry != path->dentry) &&
39945 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
39946 +
39947 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
39948 + const struct cred *old = current_cred();
39949 + struct cred *new = prepare_creds();
39950 + if (new == NULL)
39951 + return 1;
39952 +
39953 + new->cap_permitted = cap_drop(old->cap_permitted,
39954 + chroot_caps);
39955 + new->cap_inheritable = cap_drop(old->cap_inheritable,
39956 + chroot_caps);
39957 + new->cap_effective = cap_drop(old->cap_effective,
39958 + chroot_caps);
39959 +
39960 + commit_creds(new);
39961 +
39962 + return 0;
39963 + }
39964 +#endif
39965 + return 0;
39966 +}
39967 +
39968 +int
39969 +gr_handle_chroot_sysctl(const int op)
39970 +{
39971 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
39972 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
39973 + proc_is_chrooted(current))
39974 + return -EACCES;
39975 +#endif
39976 + return 0;
39977 +}
39978 +
39979 +void
39980 +gr_handle_chroot_chdir(struct path *path)
39981 +{
39982 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
39983 + if (grsec_enable_chroot_chdir)
39984 + set_fs_pwd(current->fs, path);
39985 +#endif
39986 + return;
39987 +}
39988 +
39989 +int
39990 +gr_handle_chroot_chmod(const struct dentry *dentry,
39991 + const struct vfsmount *mnt, const int mode)
39992 +{
39993 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
39994 + if (grsec_enable_chroot_chmod &&
39995 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
39996 + proc_is_chrooted(current)) {
39997 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
39998 + return -EPERM;
39999 + }
40000 +#endif
40001 + return 0;
40002 +}
40003 +
40004 +#ifdef CONFIG_SECURITY
40005 +EXPORT_SYMBOL(gr_handle_chroot_caps);
40006 +#endif
40007 diff -urNp linux-2.6.34.1/grsecurity/grsec_disabled.c linux-2.6.34.1/grsecurity/grsec_disabled.c
40008 --- linux-2.6.34.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
40009 +++ linux-2.6.34.1/grsecurity/grsec_disabled.c 2010-07-07 09:04:56.000000000 -0400
40010 @@ -0,0 +1,425 @@
40011 +#include <linux/kernel.h>
40012 +#include <linux/module.h>
40013 +#include <linux/sched.h>
40014 +#include <linux/file.h>
40015 +#include <linux/fs.h>
40016 +#include <linux/kdev_t.h>
40017 +#include <linux/net.h>
40018 +#include <linux/in.h>
40019 +#include <linux/ip.h>
40020 +#include <linux/skbuff.h>
40021 +#include <linux/sysctl.h>
40022 +
40023 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40024 +void
40025 +pax_set_initial_flags(struct linux_binprm *bprm)
40026 +{
40027 + return;
40028 +}
40029 +#endif
40030 +
40031 +#ifdef CONFIG_SYSCTL
40032 +__u32
40033 +gr_handle_sysctl(const struct ctl_table * table, const int op)
40034 +{
40035 + return 0;
40036 +}
40037 +#endif
40038 +
40039 +#ifdef CONFIG_TASKSTATS
40040 +int gr_is_taskstats_denied(int pid)
40041 +{
40042 + return 0;
40043 +}
40044 +#endif
40045 +
40046 +int
40047 +gr_acl_is_enabled(void)
40048 +{
40049 + return 0;
40050 +}
40051 +
40052 +int
40053 +gr_handle_rawio(const struct inode *inode)
40054 +{
40055 + return 0;
40056 +}
40057 +
40058 +void
40059 +gr_acl_handle_psacct(struct task_struct *task, const long code)
40060 +{
40061 + return;
40062 +}
40063 +
40064 +int
40065 +gr_handle_ptrace(struct task_struct *task, const long request)
40066 +{
40067 + return 0;
40068 +}
40069 +
40070 +int
40071 +gr_handle_proc_ptrace(struct task_struct *task)
40072 +{
40073 + return 0;
40074 +}
40075 +
40076 +void
40077 +gr_learn_resource(const struct task_struct *task,
40078 + const int res, const unsigned long wanted, const int gt)
40079 +{
40080 + return;
40081 +}
40082 +
40083 +int
40084 +gr_set_acls(const int type)
40085 +{
40086 + return 0;
40087 +}
40088 +
40089 +int
40090 +gr_check_hidden_task(const struct task_struct *tsk)
40091 +{
40092 + return 0;
40093 +}
40094 +
40095 +int
40096 +gr_check_protected_task(const struct task_struct *task)
40097 +{
40098 + return 0;
40099 +}
40100 +
40101 +void
40102 +gr_copy_label(struct task_struct *tsk)
40103 +{
40104 + return;
40105 +}
40106 +
40107 +void
40108 +gr_set_pax_flags(struct task_struct *task)
40109 +{
40110 + return;
40111 +}
40112 +
40113 +int
40114 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
40115 + const int unsafe_share)
40116 +{
40117 + return 0;
40118 +}
40119 +
40120 +void
40121 +gr_handle_delete(const ino_t ino, const dev_t dev)
40122 +{
40123 + return;
40124 +}
40125 +
40126 +void
40127 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
40128 +{
40129 + return;
40130 +}
40131 +
40132 +void
40133 +gr_handle_crash(struct task_struct *task, const int sig)
40134 +{
40135 + return;
40136 +}
40137 +
40138 +int
40139 +gr_check_crash_exec(const struct file *filp)
40140 +{
40141 + return 0;
40142 +}
40143 +
40144 +int
40145 +gr_check_crash_uid(const uid_t uid)
40146 +{
40147 + return 0;
40148 +}
40149 +
40150 +void
40151 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
40152 + struct dentry *old_dentry,
40153 + struct dentry *new_dentry,
40154 + struct vfsmount *mnt, const __u8 replace)
40155 +{
40156 + return;
40157 +}
40158 +
40159 +int
40160 +gr_search_socket(const int family, const int type, const int protocol)
40161 +{
40162 + return 1;
40163 +}
40164 +
40165 +int
40166 +gr_search_connectbind(const int mode, const struct socket *sock,
40167 + const struct sockaddr_in *addr)
40168 +{
40169 + return 0;
40170 +}
40171 +
40172 +int
40173 +gr_is_capable(const int cap)
40174 +{
40175 + return 1;
40176 +}
40177 +
40178 +int
40179 +gr_is_capable_nolog(const int cap)
40180 +{
40181 + return 1;
40182 +}
40183 +
40184 +void
40185 +gr_handle_alertkill(struct task_struct *task)
40186 +{
40187 + return;
40188 +}
40189 +
40190 +__u32
40191 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
40192 +{
40193 + return 1;
40194 +}
40195 +
40196 +__u32
40197 +gr_acl_handle_hidden_file(const struct dentry * dentry,
40198 + const struct vfsmount * mnt)
40199 +{
40200 + return 1;
40201 +}
40202 +
40203 +__u32
40204 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
40205 + const int fmode)
40206 +{
40207 + return 1;
40208 +}
40209 +
40210 +__u32
40211 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
40212 +{
40213 + return 1;
40214 +}
40215 +
40216 +__u32
40217 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
40218 +{
40219 + return 1;
40220 +}
40221 +
40222 +int
40223 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
40224 + unsigned int *vm_flags)
40225 +{
40226 + return 1;
40227 +}
40228 +
40229 +__u32
40230 +gr_acl_handle_truncate(const struct dentry * dentry,
40231 + const struct vfsmount * mnt)
40232 +{
40233 + return 1;
40234 +}
40235 +
40236 +__u32
40237 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
40238 +{
40239 + return 1;
40240 +}
40241 +
40242 +__u32
40243 +gr_acl_handle_access(const struct dentry * dentry,
40244 + const struct vfsmount * mnt, const int fmode)
40245 +{
40246 + return 1;
40247 +}
40248 +
40249 +__u32
40250 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
40251 + mode_t mode)
40252 +{
40253 + return 1;
40254 +}
40255 +
40256 +__u32
40257 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
40258 + mode_t mode)
40259 +{
40260 + return 1;
40261 +}
40262 +
40263 +__u32
40264 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
40265 +{
40266 + return 1;
40267 +}
40268 +
40269 +void
40270 +grsecurity_init(void)
40271 +{
40272 + return;
40273 +}
40274 +
40275 +__u32
40276 +gr_acl_handle_mknod(const struct dentry * new_dentry,
40277 + const struct dentry * parent_dentry,
40278 + const struct vfsmount * parent_mnt,
40279 + const int mode)
40280 +{
40281 + return 1;
40282 +}
40283 +
40284 +__u32
40285 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
40286 + const struct dentry * parent_dentry,
40287 + const struct vfsmount * parent_mnt)
40288 +{
40289 + return 1;
40290 +}
40291 +
40292 +__u32
40293 +gr_acl_handle_symlink(const struct dentry * new_dentry,
40294 + const struct dentry * parent_dentry,
40295 + const struct vfsmount * parent_mnt, const char *from)
40296 +{
40297 + return 1;
40298 +}
40299 +
40300 +__u32
40301 +gr_acl_handle_link(const struct dentry * new_dentry,
40302 + const struct dentry * parent_dentry,
40303 + const struct vfsmount * parent_mnt,
40304 + const struct dentry * old_dentry,
40305 + const struct vfsmount * old_mnt, const char *to)
40306 +{
40307 + return 1;
40308 +}
40309 +
40310 +int
40311 +gr_acl_handle_rename(const struct dentry *new_dentry,
40312 + const struct dentry *parent_dentry,
40313 + const struct vfsmount *parent_mnt,
40314 + const struct dentry *old_dentry,
40315 + const struct inode *old_parent_inode,
40316 + const struct vfsmount *old_mnt, const char *newname)
40317 +{
40318 + return 0;
40319 +}
40320 +
40321 +int
40322 +gr_acl_handle_filldir(const struct file *file, const char *name,
40323 + const int namelen, const ino_t ino)
40324 +{
40325 + return 1;
40326 +}
40327 +
40328 +int
40329 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
40330 + const time_t shm_createtime, const uid_t cuid, const int shmid)
40331 +{
40332 + return 1;
40333 +}
40334 +
40335 +int
40336 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
40337 +{
40338 + return 0;
40339 +}
40340 +
40341 +int
40342 +gr_search_accept(const struct socket *sock)
40343 +{
40344 + return 0;
40345 +}
40346 +
40347 +int
40348 +gr_search_listen(const struct socket *sock)
40349 +{
40350 + return 0;
40351 +}
40352 +
40353 +int
40354 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
40355 +{
40356 + return 0;
40357 +}
40358 +
40359 +__u32
40360 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
40361 +{
40362 + return 1;
40363 +}
40364 +
40365 +__u32
40366 +gr_acl_handle_creat(const struct dentry * dentry,
40367 + const struct dentry * p_dentry,
40368 + const struct vfsmount * p_mnt, const int fmode,
40369 + const int imode)
40370 +{
40371 + return 1;
40372 +}
40373 +
40374 +void
40375 +gr_acl_handle_exit(void)
40376 +{
40377 + return;
40378 +}
40379 +
40380 +int
40381 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
40382 +{
40383 + return 1;
40384 +}
40385 +
40386 +void
40387 +gr_set_role_label(const uid_t uid, const gid_t gid)
40388 +{
40389 + return;
40390 +}
40391 +
40392 +int
40393 +gr_acl_handle_procpidmem(const struct task_struct *task)
40394 +{
40395 + return 0;
40396 +}
40397 +
40398 +int
40399 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
40400 +{
40401 + return 0;
40402 +}
40403 +
40404 +int
40405 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
40406 +{
40407 + return 0;
40408 +}
40409 +
40410 +void
40411 +gr_set_kernel_label(struct task_struct *task)
40412 +{
40413 + return;
40414 +}
40415 +
40416 +int
40417 +gr_check_user_change(int real, int effective, int fs)
40418 +{
40419 + return 0;
40420 +}
40421 +
40422 +int
40423 +gr_check_group_change(int real, int effective, int fs)
40424 +{
40425 + return 0;
40426 +}
40427 +
40428 +EXPORT_SYMBOL(gr_is_capable);
40429 +EXPORT_SYMBOL(gr_is_capable_nolog);
40430 +EXPORT_SYMBOL(gr_learn_resource);
40431 +EXPORT_SYMBOL(gr_set_kernel_label);
40432 +#ifdef CONFIG_SECURITY
40433 +EXPORT_SYMBOL(gr_check_user_change);
40434 +EXPORT_SYMBOL(gr_check_group_change);
40435 +#endif
40436 diff -urNp linux-2.6.34.1/grsecurity/grsec_exec.c linux-2.6.34.1/grsecurity/grsec_exec.c
40437 --- linux-2.6.34.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
40438 +++ linux-2.6.34.1/grsecurity/grsec_exec.c 2010-07-07 09:04:56.000000000 -0400
40439 @@ -0,0 +1,88 @@
40440 +#include <linux/kernel.h>
40441 +#include <linux/sched.h>
40442 +#include <linux/file.h>
40443 +#include <linux/binfmts.h>
40444 +#include <linux/smp_lock.h>
40445 +#include <linux/fs.h>
40446 +#include <linux/types.h>
40447 +#include <linux/grdefs.h>
40448 +#include <linux/grinternal.h>
40449 +#include <linux/capability.h>
40450 +
40451 +#include <asm/uaccess.h>
40452 +
40453 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40454 +static char gr_exec_arg_buf[132];
40455 +static DECLARE_MUTEX(gr_exec_arg_sem);
40456 +#endif
40457 +
40458 +int
40459 +gr_handle_nproc(void)
40460 +{
40461 +#ifdef CONFIG_GRKERNSEC_EXECVE
40462 + const struct cred *cred = current_cred();
40463 + if (grsec_enable_execve && cred->user &&
40464 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
40465 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
40466 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
40467 + return -EAGAIN;
40468 + }
40469 +#endif
40470 + return 0;
40471 +}
40472 +
40473 +void
40474 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv)
40475 +{
40476 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40477 + char *grarg = gr_exec_arg_buf;
40478 + unsigned int i, x, execlen = 0;
40479 + char c;
40480 +
40481 + if (!((grsec_enable_execlog && grsec_enable_group &&
40482 + in_group_p(grsec_audit_gid))
40483 + || (grsec_enable_execlog && !grsec_enable_group)))
40484 + return;
40485 +
40486 + down(&gr_exec_arg_sem);
40487 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
40488 +
40489 + if (unlikely(argv == NULL))
40490 + goto log;
40491 +
40492 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
40493 + const char __user *p;
40494 + unsigned int len;
40495 +
40496 + if (copy_from_user(&p, argv + i, sizeof(p)))
40497 + goto log;
40498 + if (!p)
40499 + goto log;
40500 + len = strnlen_user(p, 128 - execlen);
40501 + if (len > 128 - execlen)
40502 + len = 128 - execlen;
40503 + else if (len > 0)
40504 + len--;
40505 + if (copy_from_user(grarg + execlen, p, len))
40506 + goto log;
40507 +
40508 + /* rewrite unprintable characters */
40509 + for (x = 0; x < len; x++) {
40510 + c = *(grarg + execlen + x);
40511 + if (c < 32 || c > 126)
40512 + *(grarg + execlen + x) = ' ';
40513 + }
40514 +
40515 + execlen += len;
40516 + *(grarg + execlen) = ' ';
40517 + *(grarg + execlen + 1) = '\0';
40518 + execlen++;
40519 + }
40520 +
40521 + log:
40522 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
40523 + bprm->file->f_path.mnt, grarg);
40524 + up(&gr_exec_arg_sem);
40525 +#endif
40526 + return;
40527 +}
40528 diff -urNp linux-2.6.34.1/grsecurity/grsec_fifo.c linux-2.6.34.1/grsecurity/grsec_fifo.c
40529 --- linux-2.6.34.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
40530 +++ linux-2.6.34.1/grsecurity/grsec_fifo.c 2010-07-07 09:04:56.000000000 -0400
40531 @@ -0,0 +1,24 @@
40532 +#include <linux/kernel.h>
40533 +#include <linux/sched.h>
40534 +#include <linux/fs.h>
40535 +#include <linux/file.h>
40536 +#include <linux/grinternal.h>
40537 +
40538 +int
40539 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
40540 + const struct dentry *dir, const int flag, const int acc_mode)
40541 +{
40542 +#ifdef CONFIG_GRKERNSEC_FIFO
40543 + const struct cred *cred = current_cred();
40544 +
40545 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
40546 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
40547 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
40548 + (cred->fsuid != dentry->d_inode->i_uid)) {
40549 + if (!generic_permission(dentry->d_inode, acc_mode, NULL))
40550 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
40551 + return -EACCES;
40552 + }
40553 +#endif
40554 + return 0;
40555 +}
40556 diff -urNp linux-2.6.34.1/grsecurity/grsec_fork.c linux-2.6.34.1/grsecurity/grsec_fork.c
40557 --- linux-2.6.34.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
40558 +++ linux-2.6.34.1/grsecurity/grsec_fork.c 2010-07-07 09:04:56.000000000 -0400
40559 @@ -0,0 +1,15 @@
40560 +#include <linux/kernel.h>
40561 +#include <linux/sched.h>
40562 +#include <linux/grsecurity.h>
40563 +#include <linux/grinternal.h>
40564 +#include <linux/errno.h>
40565 +
40566 +void
40567 +gr_log_forkfail(const int retval)
40568 +{
40569 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
40570 + if (grsec_enable_forkfail && retval != -ERESTARTNOINTR)
40571 + gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval);
40572 +#endif
40573 + return;
40574 +}
40575 diff -urNp linux-2.6.34.1/grsecurity/grsec_init.c linux-2.6.34.1/grsecurity/grsec_init.c
40576 --- linux-2.6.34.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
40577 +++ linux-2.6.34.1/grsecurity/grsec_init.c 2010-07-07 09:04:56.000000000 -0400
40578 @@ -0,0 +1,258 @@
40579 +#include <linux/kernel.h>
40580 +#include <linux/sched.h>
40581 +#include <linux/mm.h>
40582 +#include <linux/smp_lock.h>
40583 +#include <linux/gracl.h>
40584 +#include <linux/slab.h>
40585 +#include <linux/vmalloc.h>
40586 +#include <linux/percpu.h>
40587 +#include <linux/module.h>
40588 +
40589 +int grsec_enable_link;
40590 +int grsec_enable_dmesg;
40591 +int grsec_enable_harden_ptrace;
40592 +int grsec_enable_fifo;
40593 +int grsec_enable_execve;
40594 +int grsec_enable_execlog;
40595 +int grsec_enable_signal;
40596 +int grsec_enable_forkfail;
40597 +int grsec_enable_audit_ptrace;
40598 +int grsec_enable_time;
40599 +int grsec_enable_audit_textrel;
40600 +int grsec_enable_group;
40601 +int grsec_audit_gid;
40602 +int grsec_enable_chdir;
40603 +int grsec_enable_mount;
40604 +int grsec_enable_rofs;
40605 +int grsec_enable_chroot_findtask;
40606 +int grsec_enable_chroot_mount;
40607 +int grsec_enable_chroot_shmat;
40608 +int grsec_enable_chroot_fchdir;
40609 +int grsec_enable_chroot_double;
40610 +int grsec_enable_chroot_pivot;
40611 +int grsec_enable_chroot_chdir;
40612 +int grsec_enable_chroot_chmod;
40613 +int grsec_enable_chroot_mknod;
40614 +int grsec_enable_chroot_nice;
40615 +int grsec_enable_chroot_execlog;
40616 +int grsec_enable_chroot_caps;
40617 +int grsec_enable_chroot_sysctl;
40618 +int grsec_enable_chroot_unix;
40619 +int grsec_enable_tpe;
40620 +int grsec_tpe_gid;
40621 +int grsec_enable_blackhole;
40622 +#ifdef CONFIG_IPV6_MODULE
40623 +EXPORT_SYMBOL(grsec_enable_blackhole);
40624 +#endif
40625 +int grsec_lastack_retries;
40626 +int grsec_enable_tpe_all;
40627 +int grsec_enable_socket_all;
40628 +int grsec_socket_all_gid;
40629 +int grsec_enable_socket_client;
40630 +int grsec_socket_client_gid;
40631 +int grsec_enable_socket_server;
40632 +int grsec_socket_server_gid;
40633 +int grsec_resource_logging;
40634 +int grsec_disable_privio;
40635 +int grsec_lock;
40636 +
40637 +DEFINE_SPINLOCK(grsec_alert_lock);
40638 +unsigned long grsec_alert_wtime = 0;
40639 +unsigned long grsec_alert_fyet = 0;
40640 +
40641 +DEFINE_SPINLOCK(grsec_audit_lock);
40642 +
40643 +DEFINE_RWLOCK(grsec_exec_file_lock);
40644 +
40645 +char *gr_shared_page[4];
40646 +
40647 +char *gr_alert_log_fmt;
40648 +char *gr_audit_log_fmt;
40649 +char *gr_alert_log_buf;
40650 +char *gr_audit_log_buf;
40651 +
40652 +extern struct gr_arg *gr_usermode;
40653 +extern unsigned char *gr_system_salt;
40654 +extern unsigned char *gr_system_sum;
40655 +
40656 +void __init
40657 +grsecurity_init(void)
40658 +{
40659 + int j;
40660 + /* create the per-cpu shared pages */
40661 +
40662 +#ifdef CONFIG_X86
40663 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
40664 +#endif
40665 +
40666 + for (j = 0; j < 4; j++) {
40667 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
40668 + if (gr_shared_page[j] == NULL) {
40669 + panic("Unable to allocate grsecurity shared page");
40670 + return;
40671 + }
40672 + }
40673 +
40674 + /* allocate log buffers */
40675 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
40676 + if (!gr_alert_log_fmt) {
40677 + panic("Unable to allocate grsecurity alert log format buffer");
40678 + return;
40679 + }
40680 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
40681 + if (!gr_audit_log_fmt) {
40682 + panic("Unable to allocate grsecurity audit log format buffer");
40683 + return;
40684 + }
40685 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
40686 + if (!gr_alert_log_buf) {
40687 + panic("Unable to allocate grsecurity alert log buffer");
40688 + return;
40689 + }
40690 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
40691 + if (!gr_audit_log_buf) {
40692 + panic("Unable to allocate grsecurity audit log buffer");
40693 + return;
40694 + }
40695 +
40696 + /* allocate memory for authentication structure */
40697 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
40698 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
40699 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
40700 +
40701 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
40702 + panic("Unable to allocate grsecurity authentication structure");
40703 + return;
40704 + }
40705 +
40706 +
40707 +#ifdef CONFIG_GRKERNSEC_IO
40708 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
40709 + grsec_disable_privio = 1;
40710 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
40711 + grsec_disable_privio = 1;
40712 +#else
40713 + grsec_disable_privio = 0;
40714 +#endif
40715 +#endif
40716 +
40717 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
40718 +#ifndef CONFIG_GRKERNSEC_SYSCTL
40719 + grsec_lock = 1;
40720 +#endif
40721 +
40722 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
40723 + grsec_enable_audit_textrel = 1;
40724 +#endif
40725 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
40726 + grsec_enable_group = 1;
40727 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
40728 +#endif
40729 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
40730 + grsec_enable_chdir = 1;
40731 +#endif
40732 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
40733 + grsec_enable_harden_ptrace = 1;
40734 +#endif
40735 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
40736 + grsec_enable_mount = 1;
40737 +#endif
40738 +#ifdef CONFIG_GRKERNSEC_LINK
40739 + grsec_enable_link = 1;
40740 +#endif
40741 +#ifdef CONFIG_GRKERNSEC_DMESG
40742 + grsec_enable_dmesg = 1;
40743 +#endif
40744 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
40745 + grsec_enable_blackhole = 1;
40746 + grsec_lastack_retries = 4;
40747 +#endif
40748 +#ifdef CONFIG_GRKERNSEC_FIFO
40749 + grsec_enable_fifo = 1;
40750 +#endif
40751 +#ifdef CONFIG_GRKERNSEC_EXECVE
40752 + grsec_enable_execve = 1;
40753 +#endif
40754 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40755 + grsec_enable_execlog = 1;
40756 +#endif
40757 +#ifdef CONFIG_GRKERNSEC_SIGNAL
40758 + grsec_enable_signal = 1;
40759 +#endif
40760 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
40761 + grsec_enable_forkfail = 1;
40762 +#endif
40763 +#ifdef CONFIG_GRKERNSEC_TIME
40764 + grsec_enable_time = 1;
40765 +#endif
40766 +#ifdef CONFIG_GRKERNSEC_RESLOG
40767 + grsec_resource_logging = 1;
40768 +#endif
40769 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
40770 + grsec_enable_chroot_findtask = 1;
40771 +#endif
40772 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
40773 + grsec_enable_chroot_unix = 1;
40774 +#endif
40775 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
40776 + grsec_enable_chroot_mount = 1;
40777 +#endif
40778 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
40779 + grsec_enable_chroot_fchdir = 1;
40780 +#endif
40781 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
40782 + grsec_enable_chroot_shmat = 1;
40783 +#endif
40784 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
40785 + grsec_enable_audit_ptrace = 1;
40786 +#endif
40787 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
40788 + grsec_enable_chroot_double = 1;
40789 +#endif
40790 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
40791 + grsec_enable_chroot_pivot = 1;
40792 +#endif
40793 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
40794 + grsec_enable_chroot_chdir = 1;
40795 +#endif
40796 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
40797 + grsec_enable_chroot_chmod = 1;
40798 +#endif
40799 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
40800 + grsec_enable_chroot_mknod = 1;
40801 +#endif
40802 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
40803 + grsec_enable_chroot_nice = 1;
40804 +#endif
40805 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
40806 + grsec_enable_chroot_execlog = 1;
40807 +#endif
40808 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
40809 + grsec_enable_chroot_caps = 1;
40810 +#endif
40811 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
40812 + grsec_enable_chroot_sysctl = 1;
40813 +#endif
40814 +#ifdef CONFIG_GRKERNSEC_TPE
40815 + grsec_enable_tpe = 1;
40816 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
40817 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
40818 + grsec_enable_tpe_all = 1;
40819 +#endif
40820 +#endif
40821 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
40822 + grsec_enable_socket_all = 1;
40823 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
40824 +#endif
40825 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
40826 + grsec_enable_socket_client = 1;
40827 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
40828 +#endif
40829 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
40830 + grsec_enable_socket_server = 1;
40831 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
40832 +#endif
40833 +#endif
40834 +
40835 + return;
40836 +}
40837 diff -urNp linux-2.6.34.1/grsecurity/grsec_link.c linux-2.6.34.1/grsecurity/grsec_link.c
40838 --- linux-2.6.34.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
40839 +++ linux-2.6.34.1/grsecurity/grsec_link.c 2010-07-07 09:04:56.000000000 -0400
40840 @@ -0,0 +1,43 @@
40841 +#include <linux/kernel.h>
40842 +#include <linux/sched.h>
40843 +#include <linux/fs.h>
40844 +#include <linux/file.h>
40845 +#include <linux/grinternal.h>
40846 +
40847 +int
40848 +gr_handle_follow_link(const struct inode *parent,
40849 + const struct inode *inode,
40850 + const struct dentry *dentry, const struct vfsmount *mnt)
40851 +{
40852 +#ifdef CONFIG_GRKERNSEC_LINK
40853 + const struct cred *cred = current_cred();
40854 +
40855 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
40856 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
40857 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
40858 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
40859 + return -EACCES;
40860 + }
40861 +#endif
40862 + return 0;
40863 +}
40864 +
40865 +int
40866 +gr_handle_hardlink(const struct dentry *dentry,
40867 + const struct vfsmount *mnt,
40868 + struct inode *inode, const int mode, const char *to)
40869 +{
40870 +#ifdef CONFIG_GRKERNSEC_LINK
40871 + const struct cred *cred = current_cred();
40872 +
40873 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
40874 + (!S_ISREG(mode) || (mode & S_ISUID) ||
40875 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
40876 + (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) &&
40877 + !capable(CAP_FOWNER) && cred->uid) {
40878 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
40879 + return -EPERM;
40880 + }
40881 +#endif
40882 + return 0;
40883 +}
40884 diff -urNp linux-2.6.34.1/grsecurity/grsec_log.c linux-2.6.34.1/grsecurity/grsec_log.c
40885 --- linux-2.6.34.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
40886 +++ linux-2.6.34.1/grsecurity/grsec_log.c 2010-07-07 09:04:56.000000000 -0400
40887 @@ -0,0 +1,306 @@
40888 +#include <linux/kernel.h>
40889 +#include <linux/sched.h>
40890 +#include <linux/file.h>
40891 +#include <linux/tty.h>
40892 +#include <linux/fs.h>
40893 +#include <linux/grinternal.h>
40894 +
40895 +#ifdef CONFIG_TREE_PREEMPT_RCU
40896 +#define DISABLE_PREEMPT() preempt_disable()
40897 +#define ENABLE_PREEMPT() preempt_enable()
40898 +#else
40899 +#define DISABLE_PREEMPT()
40900 +#define ENABLE_PREEMPT()
40901 +#endif
40902 +
40903 +#define BEGIN_LOCKS(x) \
40904 + DISABLE_PREEMPT(); \
40905 + rcu_read_lock(); \
40906 + read_lock(&tasklist_lock); \
40907 + read_lock(&grsec_exec_file_lock); \
40908 + if (x != GR_DO_AUDIT) \
40909 + spin_lock(&grsec_alert_lock); \
40910 + else \
40911 + spin_lock(&grsec_audit_lock)
40912 +
40913 +#define END_LOCKS(x) \
40914 + if (x != GR_DO_AUDIT) \
40915 + spin_unlock(&grsec_alert_lock); \
40916 + else \
40917 + spin_unlock(&grsec_audit_lock); \
40918 + read_unlock(&grsec_exec_file_lock); \
40919 + read_unlock(&tasklist_lock); \
40920 + rcu_read_unlock(); \
40921 + ENABLE_PREEMPT(); \
40922 + if (x == GR_DONT_AUDIT) \
40923 + gr_handle_alertkill(current)
40924 +
40925 +enum {
40926 + FLOODING,
40927 + NO_FLOODING
40928 +};
40929 +
40930 +extern char *gr_alert_log_fmt;
40931 +extern char *gr_audit_log_fmt;
40932 +extern char *gr_alert_log_buf;
40933 +extern char *gr_audit_log_buf;
40934 +
40935 +static int gr_log_start(int audit)
40936 +{
40937 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
40938 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
40939 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
40940 +
40941 + if (audit == GR_DO_AUDIT)
40942 + goto set_fmt;
40943 +
40944 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
40945 + grsec_alert_wtime = jiffies;
40946 + grsec_alert_fyet = 0;
40947 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
40948 + grsec_alert_fyet++;
40949 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
40950 + grsec_alert_wtime = jiffies;
40951 + grsec_alert_fyet++;
40952 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
40953 + return FLOODING;
40954 + } else return FLOODING;
40955 +
40956 +set_fmt:
40957 + memset(buf, 0, PAGE_SIZE);
40958 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
40959 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
40960 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
40961 + } else if (current->signal->curr_ip) {
40962 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
40963 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
40964 + } else if (gr_acl_is_enabled()) {
40965 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
40966 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
40967 + } else {
40968 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
40969 + strcpy(buf, fmt);
40970 + }
40971 +
40972 + return NO_FLOODING;
40973 +}
40974 +
40975 +static void gr_log_middle(int audit, const char *msg, va_list ap)
40976 + __attribute__ ((format (printf, 2, 0)));
40977 +
40978 +static void gr_log_middle(int audit, const char *msg, va_list ap)
40979 +{
40980 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
40981 + unsigned int len = strlen(buf);
40982 +
40983 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
40984 +
40985 + return;
40986 +}
40987 +
40988 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
40989 + __attribute__ ((format (printf, 2, 3)));
40990 +
40991 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
40992 +{
40993 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
40994 + unsigned int len = strlen(buf);
40995 + va_list ap;
40996 +
40997 + va_start(ap, msg);
40998 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
40999 + va_end(ap);
41000 +
41001 + return;
41002 +}
41003 +
41004 +static void gr_log_end(int audit)
41005 +{
41006 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
41007 + unsigned int len = strlen(buf);
41008 +
41009 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->parent)));
41010 + printk("%s\n", buf);
41011 +
41012 + return;
41013 +}
41014 +
41015 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
41016 +{
41017 + int logtype;
41018 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
41019 + char *str1, *str2, *str3;
41020 + void *voidptr;
41021 + int num1, num2;
41022 + unsigned long ulong1, ulong2;
41023 + struct dentry *dentry;
41024 + struct vfsmount *mnt;
41025 + struct file *file;
41026 + struct task_struct *task;
41027 + const struct cred *cred, *pcred;
41028 + va_list ap;
41029 +
41030 + BEGIN_LOCKS(audit);
41031 + logtype = gr_log_start(audit);
41032 + if (logtype == FLOODING) {
41033 + END_LOCKS(audit);
41034 + return;
41035 + }
41036 + va_start(ap, argtypes);
41037 + switch (argtypes) {
41038 + case GR_TTYSNIFF:
41039 + task = va_arg(ap, struct task_struct *);
41040 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid);
41041 + break;
41042 + case GR_SYSCTL_HIDDEN:
41043 + str1 = va_arg(ap, char *);
41044 + gr_log_middle_varargs(audit, msg, result, str1);
41045 + break;
41046 + case GR_RBAC:
41047 + dentry = va_arg(ap, struct dentry *);
41048 + mnt = va_arg(ap, struct vfsmount *);
41049 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
41050 + break;
41051 + case GR_RBAC_STR:
41052 + dentry = va_arg(ap, struct dentry *);
41053 + mnt = va_arg(ap, struct vfsmount *);
41054 + str1 = va_arg(ap, char *);
41055 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
41056 + break;
41057 + case GR_STR_RBAC:
41058 + str1 = va_arg(ap, char *);
41059 + dentry = va_arg(ap, struct dentry *);
41060 + mnt = va_arg(ap, struct vfsmount *);
41061 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
41062 + break;
41063 + case GR_RBAC_MODE2:
41064 + dentry = va_arg(ap, struct dentry *);
41065 + mnt = va_arg(ap, struct vfsmount *);
41066 + str1 = va_arg(ap, char *);
41067 + str2 = va_arg(ap, char *);
41068 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
41069 + break;
41070 + case GR_RBAC_MODE3:
41071 + dentry = va_arg(ap, struct dentry *);
41072 + mnt = va_arg(ap, struct vfsmount *);
41073 + str1 = va_arg(ap, char *);
41074 + str2 = va_arg(ap, char *);
41075 + str3 = va_arg(ap, char *);
41076 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
41077 + break;
41078 + case GR_FILENAME:
41079 + dentry = va_arg(ap, struct dentry *);
41080 + mnt = va_arg(ap, struct vfsmount *);
41081 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
41082 + break;
41083 + case GR_STR_FILENAME:
41084 + str1 = va_arg(ap, char *);
41085 + dentry = va_arg(ap, struct dentry *);
41086 + mnt = va_arg(ap, struct vfsmount *);
41087 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
41088 + break;
41089 + case GR_FILENAME_STR:
41090 + dentry = va_arg(ap, struct dentry *);
41091 + mnt = va_arg(ap, struct vfsmount *);
41092 + str1 = va_arg(ap, char *);
41093 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
41094 + break;
41095 + case GR_FILENAME_TWO_INT:
41096 + dentry = va_arg(ap, struct dentry *);
41097 + mnt = va_arg(ap, struct vfsmount *);
41098 + num1 = va_arg(ap, int);
41099 + num2 = va_arg(ap, int);
41100 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
41101 + break;
41102 + case GR_FILENAME_TWO_INT_STR:
41103 + dentry = va_arg(ap, struct dentry *);
41104 + mnt = va_arg(ap, struct vfsmount *);
41105 + num1 = va_arg(ap, int);
41106 + num2 = va_arg(ap, int);
41107 + str1 = va_arg(ap, char *);
41108 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
41109 + break;
41110 + case GR_TEXTREL:
41111 + file = va_arg(ap, struct file *);
41112 + ulong1 = va_arg(ap, unsigned long);
41113 + ulong2 = va_arg(ap, unsigned long);
41114 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
41115 + break;
41116 + case GR_PTRACE:
41117 + task = va_arg(ap, struct task_struct *);
41118 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
41119 + break;
41120 + case GR_RESOURCE:
41121 + task = va_arg(ap, struct task_struct *);
41122 + cred = __task_cred(task);
41123 + pcred = __task_cred(task->parent);
41124 + ulong1 = va_arg(ap, unsigned long);
41125 + str1 = va_arg(ap, char *);
41126 + ulong2 = va_arg(ap, unsigned long);
41127 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41128 + break;
41129 + case GR_CAP:
41130 + task = va_arg(ap, struct task_struct *);
41131 + cred = __task_cred(task);
41132 + pcred = __task_cred(task->parent);
41133 + str1 = va_arg(ap, char *);
41134 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41135 + break;
41136 + case GR_SIG:
41137 + str1 = va_arg(ap, char *);
41138 + voidptr = va_arg(ap, void *);
41139 + gr_log_middle_varargs(audit, msg, str1, voidptr);
41140 + break;
41141 + case GR_SIG2:
41142 + task = va_arg(ap, struct task_struct *);
41143 + cred = __task_cred(task);
41144 + pcred = __task_cred(task->parent);
41145 + num1 = va_arg(ap, int);
41146 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41147 + break;
41148 + case GR_CRASH1:
41149 + task = va_arg(ap, struct task_struct *);
41150 + cred = __task_cred(task);
41151 + pcred = __task_cred(task->parent);
41152 + ulong1 = va_arg(ap, unsigned long);
41153 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
41154 + break;
41155 + case GR_CRASH2:
41156 + task = va_arg(ap, struct task_struct *);
41157 + cred = __task_cred(task);
41158 + pcred = __task_cred(task->parent);
41159 + ulong1 = va_arg(ap, unsigned long);
41160 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
41161 + break;
41162 + case GR_PSACCT:
41163 + {
41164 + unsigned int wday, cday;
41165 + __u8 whr, chr;
41166 + __u8 wmin, cmin;
41167 + __u8 wsec, csec;
41168 + char cur_tty[64] = { 0 };
41169 + char parent_tty[64] = { 0 };
41170 +
41171 + task = va_arg(ap, struct task_struct *);
41172 + wday = va_arg(ap, unsigned int);
41173 + cday = va_arg(ap, unsigned int);
41174 + whr = va_arg(ap, int);
41175 + chr = va_arg(ap, int);
41176 + wmin = va_arg(ap, int);
41177 + cmin = va_arg(ap, int);
41178 + wsec = va_arg(ap, int);
41179 + csec = va_arg(ap, int);
41180 + ulong1 = va_arg(ap, unsigned long);
41181 + cred = __task_cred(task);
41182 + pcred = __task_cred(task->parent);
41183 +
41184 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, &task->parent->signal->curr_ip, tty_name(task->parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41185 + }
41186 + break;
41187 + default:
41188 + gr_log_middle(audit, msg, ap);
41189 + }
41190 + va_end(ap);
41191 + gr_log_end(audit);
41192 + END_LOCKS(audit);
41193 +}
41194 diff -urNp linux-2.6.34.1/grsecurity/grsec_mem.c linux-2.6.34.1/grsecurity/grsec_mem.c
41195 --- linux-2.6.34.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
41196 +++ linux-2.6.34.1/grsecurity/grsec_mem.c 2010-07-07 09:04:56.000000000 -0400
41197 @@ -0,0 +1,85 @@
41198 +#include <linux/kernel.h>
41199 +#include <linux/sched.h>
41200 +#include <linux/mm.h>
41201 +#include <linux/mman.h>
41202 +#include <linux/grinternal.h>
41203 +
41204 +void
41205 +gr_handle_ioperm(void)
41206 +{
41207 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
41208 + return;
41209 +}
41210 +
41211 +void
41212 +gr_handle_iopl(void)
41213 +{
41214 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
41215 + return;
41216 +}
41217 +
41218 +void
41219 +gr_handle_mem_write(void)
41220 +{
41221 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG);
41222 + return;
41223 +}
41224 +
41225 +void
41226 +gr_handle_kmem_write(void)
41227 +{
41228 + gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG);
41229 + return;
41230 +}
41231 +
41232 +void
41233 +gr_handle_open_port(void)
41234 +{
41235 + gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG);
41236 + return;
41237 +}
41238 +
41239 +int
41240 +gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
41241 +{
41242 + unsigned long start, end;
41243 +
41244 + start = offset;
41245 + end = start + vma->vm_end - vma->vm_start;
41246 +
41247 + if (start > end) {
41248 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
41249 + return -EPERM;
41250 + }
41251 +
41252 + /* allowed ranges : ISA I/O BIOS */
41253 + if ((start >= __pa(high_memory))
41254 +#if defined(CONFIG_X86) || defined(CONFIG_PPC)
41255 + || (start >= 0x000a0000 && end <= 0x00100000)
41256 + || (start >= 0x00000000 && end <= 0x00001000)
41257 +#endif
41258 + )
41259 + return 0;
41260 +
41261 + if (vma->vm_flags & VM_WRITE) {
41262 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
41263 + return -EPERM;
41264 + } else
41265 + vma->vm_flags &= ~VM_MAYWRITE;
41266 +
41267 + return 0;
41268 +}
41269 +
41270 +void
41271 +gr_log_nonroot_mod_load(const char *modname)
41272 +{
41273 + gr_log_str(GR_DONT_AUDIT, GR_NONROOT_MODLOAD_MSG, modname);
41274 + return;
41275 +}
41276 +
41277 +void
41278 +gr_handle_vm86(void)
41279 +{
41280 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
41281 + return;
41282 +}
41283 diff -urNp linux-2.6.34.1/grsecurity/grsec_mount.c linux-2.6.34.1/grsecurity/grsec_mount.c
41284 --- linux-2.6.34.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
41285 +++ linux-2.6.34.1/grsecurity/grsec_mount.c 2010-07-07 09:04:56.000000000 -0400
41286 @@ -0,0 +1,62 @@
41287 +#include <linux/kernel.h>
41288 +#include <linux/sched.h>
41289 +#include <linux/mount.h>
41290 +#include <linux/grsecurity.h>
41291 +#include <linux/grinternal.h>
41292 +
41293 +void
41294 +gr_log_remount(const char *devname, const int retval)
41295 +{
41296 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41297 + if (grsec_enable_mount && (retval >= 0))
41298 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
41299 +#endif
41300 + return;
41301 +}
41302 +
41303 +void
41304 +gr_log_unmount(const char *devname, const int retval)
41305 +{
41306 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41307 + if (grsec_enable_mount && (retval >= 0))
41308 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
41309 +#endif
41310 + return;
41311 +}
41312 +
41313 +void
41314 +gr_log_mount(const char *from, const char *to, const int retval)
41315 +{
41316 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41317 + if (grsec_enable_mount && (retval >= 0))
41318 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
41319 +#endif
41320 + return;
41321 +}
41322 +
41323 +int
41324 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
41325 +{
41326 +#ifdef CONFIG_GRKERNSEC_ROFS
41327 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
41328 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
41329 + return -EPERM;
41330 + } else
41331 + return 0;
41332 +#endif
41333 + return 0;
41334 +}
41335 +
41336 +int
41337 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
41338 +{
41339 +#ifdef CONFIG_GRKERNSEC_ROFS
41340 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
41341 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
41342 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
41343 + return -EPERM;
41344 + } else
41345 + return 0;
41346 +#endif
41347 + return 0;
41348 +}
41349 diff -urNp linux-2.6.34.1/grsecurity/grsec_ptrace.c linux-2.6.34.1/grsecurity/grsec_ptrace.c
41350 --- linux-2.6.34.1/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
41351 +++ linux-2.6.34.1/grsecurity/grsec_ptrace.c 2010-07-07 09:04:56.000000000 -0400
41352 @@ -0,0 +1,14 @@
41353 +#include <linux/kernel.h>
41354 +#include <linux/sched.h>
41355 +#include <linux/grinternal.h>
41356 +#include <linux/grsecurity.h>
41357 +
41358 +void
41359 +gr_audit_ptrace(struct task_struct *task)
41360 +{
41361 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
41362 + if (grsec_enable_audit_ptrace)
41363 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
41364 +#endif
41365 + return;
41366 +}
41367 diff -urNp linux-2.6.34.1/grsecurity/grsec_sig.c linux-2.6.34.1/grsecurity/grsec_sig.c
41368 --- linux-2.6.34.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
41369 +++ linux-2.6.34.1/grsecurity/grsec_sig.c 2010-07-07 09:04:56.000000000 -0400
41370 @@ -0,0 +1,65 @@
41371 +#include <linux/kernel.h>
41372 +#include <linux/sched.h>
41373 +#include <linux/delay.h>
41374 +#include <linux/grsecurity.h>
41375 +#include <linux/grinternal.h>
41376 +
41377 +char *signames[] = {
41378 + [SIGSEGV] = "Segmentation fault",
41379 + [SIGILL] = "Illegal instruction",
41380 + [SIGABRT] = "Abort",
41381 + [SIGBUS] = "Invalid alignment/Bus error"
41382 +};
41383 +
41384 +void
41385 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
41386 +{
41387 +#ifdef CONFIG_GRKERNSEC_SIGNAL
41388 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
41389 + (sig == SIGABRT) || (sig == SIGBUS))) {
41390 + if (t->pid == current->pid) {
41391 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
41392 + } else {
41393 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
41394 + }
41395 + }
41396 +#endif
41397 + return;
41398 +}
41399 +
41400 +int
41401 +gr_handle_signal(const struct task_struct *p, const int sig)
41402 +{
41403 +#ifdef CONFIG_GRKERNSEC
41404 + if (current->pid > 1 && gr_check_protected_task(p)) {
41405 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
41406 + return -EPERM;
41407 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
41408 + return -EPERM;
41409 + }
41410 +#endif
41411 + return 0;
41412 +}
41413 +
41414 +void gr_handle_brute_attach(struct task_struct *p)
41415 +{
41416 +#ifdef CONFIG_GRKERNSEC_BRUTE
41417 + read_lock(&tasklist_lock);
41418 + read_lock(&grsec_exec_file_lock);
41419 + if (p->parent && p->parent->exec_file == p->exec_file)
41420 + p->parent->brute = 1;
41421 + read_unlock(&grsec_exec_file_lock);
41422 + read_unlock(&tasklist_lock);
41423 +#endif
41424 + return;
41425 +}
41426 +
41427 +void gr_handle_brute_check(void)
41428 +{
41429 +#ifdef CONFIG_GRKERNSEC_BRUTE
41430 + if (current->brute)
41431 + msleep(30 * 1000);
41432 +#endif
41433 + return;
41434 +}
41435 +
41436 diff -urNp linux-2.6.34.1/grsecurity/grsec_sock.c linux-2.6.34.1/grsecurity/grsec_sock.c
41437 --- linux-2.6.34.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
41438 +++ linux-2.6.34.1/grsecurity/grsec_sock.c 2010-07-07 09:04:56.000000000 -0400
41439 @@ -0,0 +1,271 @@
41440 +#include <linux/kernel.h>
41441 +#include <linux/module.h>
41442 +#include <linux/sched.h>
41443 +#include <linux/file.h>
41444 +#include <linux/net.h>
41445 +#include <linux/in.h>
41446 +#include <linux/ip.h>
41447 +#include <net/sock.h>
41448 +#include <net/inet_sock.h>
41449 +#include <linux/grsecurity.h>
41450 +#include <linux/grinternal.h>
41451 +#include <linux/gracl.h>
41452 +
41453 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
41454 +EXPORT_SYMBOL(gr_cap_rtnetlink);
41455 +
41456 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
41457 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
41458 +
41459 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
41460 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
41461 +
41462 +#ifdef CONFIG_UNIX_MODULE
41463 +EXPORT_SYMBOL(gr_acl_handle_unix);
41464 +EXPORT_SYMBOL(gr_acl_handle_mknod);
41465 +EXPORT_SYMBOL(gr_handle_chroot_unix);
41466 +EXPORT_SYMBOL(gr_handle_create);
41467 +#endif
41468 +
41469 +#ifdef CONFIG_GRKERNSEC
41470 +#define gr_conn_table_size 32749
41471 +struct conn_table_entry {
41472 + struct conn_table_entry *next;
41473 + struct signal_struct *sig;
41474 +};
41475 +
41476 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
41477 +DEFINE_SPINLOCK(gr_conn_table_lock);
41478 +
41479 +extern const char * gr_socktype_to_name(unsigned char type);
41480 +extern const char * gr_proto_to_name(unsigned char proto);
41481 +
41482 +static __inline__ int
41483 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
41484 +{
41485 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
41486 +}
41487 +
41488 +static __inline__ int
41489 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
41490 + __u16 sport, __u16 dport)
41491 +{
41492 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
41493 + sig->gr_sport == sport && sig->gr_dport == dport))
41494 + return 1;
41495 + else
41496 + return 0;
41497 +}
41498 +
41499 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
41500 +{
41501 + struct conn_table_entry **match;
41502 + unsigned int index;
41503 +
41504 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
41505 + sig->gr_sport, sig->gr_dport,
41506 + gr_conn_table_size);
41507 +
41508 + newent->sig = sig;
41509 +
41510 + match = &gr_conn_table[index];
41511 + newent->next = *match;
41512 + *match = newent;
41513 +
41514 + return;
41515 +}
41516 +
41517 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
41518 +{
41519 + struct conn_table_entry *match, *last = NULL;
41520 + unsigned int index;
41521 +
41522 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
41523 + sig->gr_sport, sig->gr_dport,
41524 + gr_conn_table_size);
41525 +
41526 + match = gr_conn_table[index];
41527 + while (match && !conn_match(match->sig,
41528 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
41529 + sig->gr_dport)) {
41530 + last = match;
41531 + match = match->next;
41532 + }
41533 +
41534 + if (match) {
41535 + if (last)
41536 + last->next = match->next;
41537 + else
41538 + gr_conn_table[index] = NULL;
41539 + kfree(match);
41540 + }
41541 +
41542 + return;
41543 +}
41544 +
41545 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
41546 + __u16 sport, __u16 dport)
41547 +{
41548 + struct conn_table_entry *match;
41549 + unsigned int index;
41550 +
41551 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
41552 +
41553 + match = gr_conn_table[index];
41554 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
41555 + match = match->next;
41556 +
41557 + if (match)
41558 + return match->sig;
41559 + else
41560 + return NULL;
41561 +}
41562 +
41563 +#endif
41564 +
41565 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
41566 +{
41567 +#ifdef CONFIG_GRKERNSEC
41568 + struct signal_struct *sig = task->signal;
41569 + struct conn_table_entry *newent;
41570 +
41571 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
41572 + if (newent == NULL)
41573 + return;
41574 + /* no bh lock needed since we are called with bh disabled */
41575 + spin_lock(&gr_conn_table_lock);
41576 + gr_del_task_from_ip_table_nolock(sig);
41577 + sig->gr_saddr = inet->inet_rcv_saddr;
41578 + sig->gr_daddr = inet->inet_daddr;
41579 + sig->gr_sport = inet->inet_sport;
41580 + sig->gr_dport = inet->inet_dport;
41581 + gr_add_to_task_ip_table_nolock(sig, newent);
41582 + spin_unlock(&gr_conn_table_lock);
41583 +#endif
41584 + return;
41585 +}
41586 +
41587 +void gr_del_task_from_ip_table(struct task_struct *task)
41588 +{
41589 +#ifdef CONFIG_GRKERNSEC
41590 + spin_lock_bh(&gr_conn_table_lock);
41591 + gr_del_task_from_ip_table_nolock(task->signal);
41592 + spin_unlock_bh(&gr_conn_table_lock);
41593 +#endif
41594 + return;
41595 +}
41596 +
41597 +void
41598 +gr_attach_curr_ip(const struct sock *sk)
41599 +{
41600 +#ifdef CONFIG_GRKERNSEC
41601 + struct signal_struct *p, *set;
41602 + const struct inet_sock *inet = inet_sk(sk);
41603 +
41604 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
41605 + return;
41606 +
41607 + set = current->signal;
41608 +
41609 + spin_lock_bh(&gr_conn_table_lock);
41610 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
41611 + inet->inet_dport, inet->inet_sport);
41612 + if (unlikely(p != NULL)) {
41613 + set->curr_ip = p->curr_ip;
41614 + set->used_accept = 1;
41615 + gr_del_task_from_ip_table_nolock(p);
41616 + spin_unlock_bh(&gr_conn_table_lock);
41617 + return;
41618 + }
41619 + spin_unlock_bh(&gr_conn_table_lock);
41620 +
41621 + set->curr_ip = inet->inet_daddr;
41622 + set->used_accept = 1;
41623 +#endif
41624 + return;
41625 +}
41626 +
41627 +int
41628 +gr_handle_sock_all(const int family, const int type, const int protocol)
41629 +{
41630 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
41631 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
41632 + (family != AF_UNIX) && (family != AF_LOCAL)) {
41633 + gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol));
41634 + return -EACCES;
41635 + }
41636 +#endif
41637 + return 0;
41638 +}
41639 +
41640 +int
41641 +gr_handle_sock_server(const struct sockaddr *sck)
41642 +{
41643 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
41644 + if (grsec_enable_socket_server &&
41645 + in_group_p(grsec_socket_server_gid) &&
41646 + sck && (sck->sa_family != AF_UNIX) &&
41647 + (sck->sa_family != AF_LOCAL)) {
41648 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
41649 + return -EACCES;
41650 + }
41651 +#endif
41652 + return 0;
41653 +}
41654 +
41655 +int
41656 +gr_handle_sock_server_other(const struct sock *sck)
41657 +{
41658 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
41659 + if (grsec_enable_socket_server &&
41660 + in_group_p(grsec_socket_server_gid) &&
41661 + sck && (sck->sk_family != AF_UNIX) &&
41662 + (sck->sk_family != AF_LOCAL)) {
41663 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
41664 + return -EACCES;
41665 + }
41666 +#endif
41667 + return 0;
41668 +}
41669 +
41670 +int
41671 +gr_handle_sock_client(const struct sockaddr *sck)
41672 +{
41673 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
41674 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
41675 + sck && (sck->sa_family != AF_UNIX) &&
41676 + (sck->sa_family != AF_LOCAL)) {
41677 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
41678 + return -EACCES;
41679 + }
41680 +#endif
41681 + return 0;
41682 +}
41683 +
41684 +kernel_cap_t
41685 +gr_cap_rtnetlink(struct sock *sock)
41686 +{
41687 +#ifdef CONFIG_GRKERNSEC
41688 + if (!gr_acl_is_enabled())
41689 + return current_cap();
41690 + else if (sock->sk_protocol == NETLINK_ISCSI &&
41691 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
41692 + gr_is_capable(CAP_SYS_ADMIN))
41693 + return current_cap();
41694 + else if (sock->sk_protocol == NETLINK_AUDIT &&
41695 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
41696 + gr_is_capable(CAP_AUDIT_WRITE) &&
41697 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
41698 + gr_is_capable(CAP_AUDIT_CONTROL))
41699 + return current_cap();
41700 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
41701 + ((sock->sk_protocol == NETLINK_ROUTE) ?
41702 + gr_is_capable_nolog(CAP_NET_ADMIN) :
41703 + gr_is_capable(CAP_NET_ADMIN)))
41704 + return current_cap();
41705 + else
41706 + return __cap_empty_set;
41707 +#else
41708 + return current_cap();
41709 +#endif
41710 +}
41711 diff -urNp linux-2.6.34.1/grsecurity/grsec_sysctl.c linux-2.6.34.1/grsecurity/grsec_sysctl.c
41712 --- linux-2.6.34.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
41713 +++ linux-2.6.34.1/grsecurity/grsec_sysctl.c 2010-07-07 09:04:56.000000000 -0400
41714 @@ -0,0 +1,415 @@
41715 +#include <linux/kernel.h>
41716 +#include <linux/sched.h>
41717 +#include <linux/sysctl.h>
41718 +#include <linux/grsecurity.h>
41719 +#include <linux/grinternal.h>
41720 +
41721 +int
41722 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
41723 +{
41724 +#ifdef CONFIG_GRKERNSEC_SYSCTL
41725 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
41726 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
41727 + return -EACCES;
41728 + }
41729 +#endif
41730 + return 0;
41731 +}
41732 +
41733 +#ifdef CONFIG_GRKERNSEC_ROFS
41734 +static int __maybe_unused one = 1;
41735 +#endif
41736 +
41737 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
41738 +ctl_table grsecurity_table[] = {
41739 +#ifdef CONFIG_GRKERNSEC_SYSCTL
41740 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
41741 +#ifdef CONFIG_GRKERNSEC_IO
41742 + {
41743 + .procname = "disable_priv_io",
41744 + .data = &grsec_disable_privio,
41745 + .maxlen = sizeof(int),
41746 + .mode = 0600,
41747 + .proc_handler = &proc_dointvec,
41748 + },
41749 +#endif
41750 +#endif
41751 +#ifdef CONFIG_GRKERNSEC_LINK
41752 + {
41753 + .procname = "linking_restrictions",
41754 + .data = &grsec_enable_link,
41755 + .maxlen = sizeof(int),
41756 + .mode = 0600,
41757 + .proc_handler = &proc_dointvec,
41758 + },
41759 +#endif
41760 +#ifdef CONFIG_GRKERNSEC_FIFO
41761 + {
41762 + .procname = "fifo_restrictions",
41763 + .data = &grsec_enable_fifo,
41764 + .maxlen = sizeof(int),
41765 + .mode = 0600,
41766 + .proc_handler = &proc_dointvec,
41767 + },
41768 +#endif
41769 +#ifdef CONFIG_GRKERNSEC_EXECVE
41770 + {
41771 + .procname = "execve_limiting",
41772 + .data = &grsec_enable_execve,
41773 + .maxlen = sizeof(int),
41774 + .mode = 0600,
41775 + .proc_handler = &proc_dointvec,
41776 + },
41777 +#endif
41778 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
41779 + {
41780 + .procname = "ip_blackhole",
41781 + .data = &grsec_enable_blackhole,
41782 + .maxlen = sizeof(int),
41783 + .mode = 0600,
41784 + .proc_handler = &proc_dointvec,
41785 + },
41786 + {
41787 + .procname = "lastack_retries",
41788 + .data = &grsec_lastack_retries,
41789 + .maxlen = sizeof(int),
41790 + .mode = 0600,
41791 + .proc_handler = &proc_dointvec,
41792 + },
41793 +#endif
41794 +#ifdef CONFIG_GRKERNSEC_EXECLOG
41795 + {
41796 + .procname = "exec_logging",
41797 + .data = &grsec_enable_execlog,
41798 + .maxlen = sizeof(int),
41799 + .mode = 0600,
41800 + .proc_handler = &proc_dointvec,
41801 + },
41802 +#endif
41803 +#ifdef CONFIG_GRKERNSEC_SIGNAL
41804 + {
41805 + .procname = "signal_logging",
41806 + .data = &grsec_enable_signal,
41807 + .maxlen = sizeof(int),
41808 + .mode = 0600,
41809 + .proc_handler = &proc_dointvec,
41810 + },
41811 +#endif
41812 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
41813 + {
41814 + .procname = "forkfail_logging",
41815 + .data = &grsec_enable_forkfail,
41816 + .maxlen = sizeof(int),
41817 + .mode = 0600,
41818 + .proc_handler = &proc_dointvec,
41819 + },
41820 +#endif
41821 +#ifdef CONFIG_GRKERNSEC_TIME
41822 + {
41823 + .procname = "timechange_logging",
41824 + .data = &grsec_enable_time,
41825 + .maxlen = sizeof(int),
41826 + .mode = 0600,
41827 + .proc_handler = &proc_dointvec,
41828 + },
41829 +#endif
41830 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
41831 + {
41832 + .procname = "chroot_deny_shmat",
41833 + .data = &grsec_enable_chroot_shmat,
41834 + .maxlen = sizeof(int),
41835 + .mode = 0600,
41836 + .proc_handler = &proc_dointvec,
41837 + },
41838 +#endif
41839 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
41840 + {
41841 + .procname = "chroot_deny_unix",
41842 + .data = &grsec_enable_chroot_unix,
41843 + .maxlen = sizeof(int),
41844 + .mode = 0600,
41845 + .proc_handler = &proc_dointvec,
41846 + },
41847 +#endif
41848 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
41849 + {
41850 + .procname = "chroot_deny_mount",
41851 + .data = &grsec_enable_chroot_mount,
41852 + .maxlen = sizeof(int),
41853 + .mode = 0600,
41854 + .proc_handler = &proc_dointvec,
41855 + },
41856 +#endif
41857 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
41858 + {
41859 + .procname = "chroot_deny_fchdir",
41860 + .data = &grsec_enable_chroot_fchdir,
41861 + .maxlen = sizeof(int),
41862 + .mode = 0600,
41863 + .proc_handler = &proc_dointvec,
41864 + },
41865 +#endif
41866 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
41867 + {
41868 + .procname = "chroot_deny_chroot",
41869 + .data = &grsec_enable_chroot_double,
41870 + .maxlen = sizeof(int),
41871 + .mode = 0600,
41872 + .proc_handler = &proc_dointvec,
41873 + },
41874 +#endif
41875 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
41876 + {
41877 + .procname = "chroot_deny_pivot",
41878 + .data = &grsec_enable_chroot_pivot,
41879 + .maxlen = sizeof(int),
41880 + .mode = 0600,
41881 + .proc_handler = &proc_dointvec,
41882 + },
41883 +#endif
41884 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
41885 + {
41886 + .procname = "chroot_enforce_chdir",
41887 + .data = &grsec_enable_chroot_chdir,
41888 + .maxlen = sizeof(int),
41889 + .mode = 0600,
41890 + .proc_handler = &proc_dointvec,
41891 + },
41892 +#endif
41893 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
41894 + {
41895 + .procname = "chroot_deny_chmod",
41896 + .data = &grsec_enable_chroot_chmod,
41897 + .maxlen = sizeof(int),
41898 + .mode = 0600,
41899 + .proc_handler = &proc_dointvec,
41900 + },
41901 +#endif
41902 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
41903 + {
41904 + .procname = "chroot_deny_mknod",
41905 + .data = &grsec_enable_chroot_mknod,
41906 + .maxlen = sizeof(int),
41907 + .mode = 0600,
41908 + .proc_handler = &proc_dointvec,
41909 + },
41910 +#endif
41911 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
41912 + {
41913 + .procname = "chroot_restrict_nice",
41914 + .data = &grsec_enable_chroot_nice,
41915 + .maxlen = sizeof(int),
41916 + .mode = 0600,
41917 + .proc_handler = &proc_dointvec,
41918 + },
41919 +#endif
41920 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
41921 + {
41922 + .procname = "chroot_execlog",
41923 + .data = &grsec_enable_chroot_execlog,
41924 + .maxlen = sizeof(int),
41925 + .mode = 0600,
41926 + .proc_handler = &proc_dointvec,
41927 + },
41928 +#endif
41929 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41930 + {
41931 + .procname = "chroot_caps",
41932 + .data = &grsec_enable_chroot_caps,
41933 + .maxlen = sizeof(int),
41934 + .mode = 0600,
41935 + .proc_handler = &proc_dointvec,
41936 + },
41937 +#endif
41938 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
41939 + {
41940 + .procname = "chroot_deny_sysctl",
41941 + .data = &grsec_enable_chroot_sysctl,
41942 + .maxlen = sizeof(int),
41943 + .mode = 0600,
41944 + .proc_handler = &proc_dointvec,
41945 + },
41946 +#endif
41947 +#ifdef CONFIG_GRKERNSEC_TPE
41948 + {
41949 + .procname = "tpe",
41950 + .data = &grsec_enable_tpe,
41951 + .maxlen = sizeof(int),
41952 + .mode = 0600,
41953 + .proc_handler = &proc_dointvec,
41954 + },
41955 + {
41956 + .procname = "tpe_gid",
41957 + .data = &grsec_tpe_gid,
41958 + .maxlen = sizeof(int),
41959 + .mode = 0600,
41960 + .proc_handler = &proc_dointvec,
41961 + },
41962 +#endif
41963 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
41964 + {
41965 + .procname = "tpe_restrict_all",
41966 + .data = &grsec_enable_tpe_all,
41967 + .maxlen = sizeof(int),
41968 + .mode = 0600,
41969 + .proc_handler = &proc_dointvec,
41970 + },
41971 +#endif
41972 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
41973 + {
41974 + .procname = "socket_all",
41975 + .data = &grsec_enable_socket_all,
41976 + .maxlen = sizeof(int),
41977 + .mode = 0600,
41978 + .proc_handler = &proc_dointvec,
41979 + },
41980 + {
41981 + .procname = "socket_all_gid",
41982 + .data = &grsec_socket_all_gid,
41983 + .maxlen = sizeof(int),
41984 + .mode = 0600,
41985 + .proc_handler = &proc_dointvec,
41986 + },
41987 +#endif
41988 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
41989 + {
41990 + .procname = "socket_client",
41991 + .data = &grsec_enable_socket_client,
41992 + .maxlen = sizeof(int),
41993 + .mode = 0600,
41994 + .proc_handler = &proc_dointvec,
41995 + },
41996 + {
41997 + .procname = "socket_client_gid",
41998 + .data = &grsec_socket_client_gid,
41999 + .maxlen = sizeof(int),
42000 + .mode = 0600,
42001 + .proc_handler = &proc_dointvec,
42002 + },
42003 +#endif
42004 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
42005 + {
42006 + .procname = "socket_server",
42007 + .data = &grsec_enable_socket_server,
42008 + .maxlen = sizeof(int),
42009 + .mode = 0600,
42010 + .proc_handler = &proc_dointvec,
42011 + },
42012 + {
42013 + .procname = "socket_server_gid",
42014 + .data = &grsec_socket_server_gid,
42015 + .maxlen = sizeof(int),
42016 + .mode = 0600,
42017 + .proc_handler = &proc_dointvec,
42018 + },
42019 +#endif
42020 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
42021 + {
42022 + .procname = "audit_group",
42023 + .data = &grsec_enable_group,
42024 + .maxlen = sizeof(int),
42025 + .mode = 0600,
42026 + .proc_handler = &proc_dointvec,
42027 + },
42028 + {
42029 + .procname = "audit_gid",
42030 + .data = &grsec_audit_gid,
42031 + .maxlen = sizeof(int),
42032 + .mode = 0600,
42033 + .proc_handler = &proc_dointvec,
42034 + },
42035 +#endif
42036 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
42037 + {
42038 + .procname = "audit_chdir",
42039 + .data = &grsec_enable_chdir,
42040 + .maxlen = sizeof(int),
42041 + .mode = 0600,
42042 + .proc_handler = &proc_dointvec,
42043 + },
42044 +#endif
42045 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
42046 + {
42047 + .procname = "audit_mount",
42048 + .data = &grsec_enable_mount,
42049 + .maxlen = sizeof(int),
42050 + .mode = 0600,
42051 + .proc_handler = &proc_dointvec,
42052 + },
42053 +#endif
42054 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
42055 + {
42056 + .procname = "audit_textrel",
42057 + .data = &grsec_enable_audit_textrel,
42058 + .maxlen = sizeof(int),
42059 + .mode = 0600,
42060 + .proc_handler = &proc_dointvec,
42061 + },
42062 +#endif
42063 +#ifdef CONFIG_GRKERNSEC_DMESG
42064 + {
42065 + .procname = "dmesg",
42066 + .data = &grsec_enable_dmesg,
42067 + .maxlen = sizeof(int),
42068 + .mode = 0600,
42069 + .proc_handler = &proc_dointvec,
42070 + },
42071 +#endif
42072 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
42073 + {
42074 + .procname = "chroot_findtask",
42075 + .data = &grsec_enable_chroot_findtask,
42076 + .maxlen = sizeof(int),
42077 + .mode = 0600,
42078 + .proc_handler = &proc_dointvec,
42079 + },
42080 +#endif
42081 +#ifdef CONFIG_GRKERNSEC_RESLOG
42082 + {
42083 + .procname = "resource_logging",
42084 + .data = &grsec_resource_logging,
42085 + .maxlen = sizeof(int),
42086 + .mode = 0600,
42087 + .proc_handler = &proc_dointvec,
42088 + },
42089 +#endif
42090 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
42091 + {
42092 + .procname = "audit_ptrace",
42093 + .data = &grsec_enable_audit_ptrace,
42094 + .maxlen = sizeof(int),
42095 + .mode = 0600,
42096 + .proc_handler = &proc_dointvec,
42097 + },
42098 +#endif
42099 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
42100 + {
42101 + .procname = "harden_ptrace",
42102 + .data = &grsec_enable_harden_ptrace,
42103 + .maxlen = sizeof(int),
42104 + .mode = 0600,
42105 + .proc_handler = &proc_dointvec,
42106 + },
42107 +#endif
42108 + {
42109 + .procname = "grsec_lock",
42110 + .data = &grsec_lock,
42111 + .maxlen = sizeof(int),
42112 + .mode = 0600,
42113 + .proc_handler = &proc_dointvec,
42114 + },
42115 +#endif
42116 +#ifdef CONFIG_GRKERNSEC_ROFS
42117 + {
42118 + .procname = "romount_protect",
42119 + .data = &grsec_enable_rofs,
42120 + .maxlen = sizeof(int),
42121 + .mode = 0600,
42122 + .proc_handler = &proc_dointvec_minmax,
42123 + .extra1 = &one,
42124 + .extra2 = &one,
42125 + },
42126 +#endif
42127 + { }
42128 +};
42129 +#endif
42130 diff -urNp linux-2.6.34.1/grsecurity/grsec_textrel.c linux-2.6.34.1/grsecurity/grsec_textrel.c
42131 --- linux-2.6.34.1/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500
42132 +++ linux-2.6.34.1/grsecurity/grsec_textrel.c 2010-07-07 09:04:56.000000000 -0400
42133 @@ -0,0 +1,16 @@
42134 +#include <linux/kernel.h>
42135 +#include <linux/sched.h>
42136 +#include <linux/mm.h>
42137 +#include <linux/file.h>
42138 +#include <linux/grinternal.h>
42139 +#include <linux/grsecurity.h>
42140 +
42141 +void
42142 +gr_log_textrel(struct vm_area_struct * vma)
42143 +{
42144 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
42145 + if (grsec_enable_audit_textrel)
42146 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
42147 +#endif
42148 + return;
42149 +}
42150 diff -urNp linux-2.6.34.1/grsecurity/grsec_time.c linux-2.6.34.1/grsecurity/grsec_time.c
42151 --- linux-2.6.34.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
42152 +++ linux-2.6.34.1/grsecurity/grsec_time.c 2010-07-07 09:04:56.000000000 -0400
42153 @@ -0,0 +1,13 @@
42154 +#include <linux/kernel.h>
42155 +#include <linux/sched.h>
42156 +#include <linux/grinternal.h>
42157 +
42158 +void
42159 +gr_log_timechange(void)
42160 +{
42161 +#ifdef CONFIG_GRKERNSEC_TIME
42162 + if (grsec_enable_time)
42163 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
42164 +#endif
42165 + return;
42166 +}
42167 diff -urNp linux-2.6.34.1/grsecurity/grsec_tpe.c linux-2.6.34.1/grsecurity/grsec_tpe.c
42168 --- linux-2.6.34.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
42169 +++ linux-2.6.34.1/grsecurity/grsec_tpe.c 2010-07-07 09:04:56.000000000 -0400
42170 @@ -0,0 +1,38 @@
42171 +#include <linux/kernel.h>
42172 +#include <linux/sched.h>
42173 +#include <linux/file.h>
42174 +#include <linux/fs.h>
42175 +#include <linux/grinternal.h>
42176 +
42177 +extern int gr_acl_tpe_check(void);
42178 +
42179 +int
42180 +gr_tpe_allow(const struct file *file)
42181 +{
42182 +#ifdef CONFIG_GRKERNSEC
42183 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
42184 + const struct cred *cred = current_cred();
42185 +
42186 + if (cred->uid && ((grsec_enable_tpe &&
42187 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
42188 + !in_group_p(grsec_tpe_gid)
42189 +#else
42190 + in_group_p(grsec_tpe_gid)
42191 +#endif
42192 + ) || gr_acl_tpe_check()) &&
42193 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
42194 + (inode->i_mode & S_IWOTH))))) {
42195 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
42196 + return 0;
42197 + }
42198 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
42199 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
42200 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
42201 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
42202 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
42203 + return 0;
42204 + }
42205 +#endif
42206 +#endif
42207 + return 1;
42208 +}
42209 diff -urNp linux-2.6.34.1/grsecurity/grsum.c linux-2.6.34.1/grsecurity/grsum.c
42210 --- linux-2.6.34.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
42211 +++ linux-2.6.34.1/grsecurity/grsum.c 2010-07-07 09:04:56.000000000 -0400
42212 @@ -0,0 +1,61 @@
42213 +#include <linux/err.h>
42214 +#include <linux/kernel.h>
42215 +#include <linux/sched.h>
42216 +#include <linux/mm.h>
42217 +#include <linux/scatterlist.h>
42218 +#include <linux/crypto.h>
42219 +#include <linux/gracl.h>
42220 +
42221 +
42222 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
42223 +#error "crypto and sha256 must be built into the kernel"
42224 +#endif
42225 +
42226 +int
42227 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
42228 +{
42229 + char *p;
42230 + struct crypto_hash *tfm;
42231 + struct hash_desc desc;
42232 + struct scatterlist sg;
42233 + unsigned char temp_sum[GR_SHA_LEN];
42234 + volatile int retval = 0;
42235 + volatile int dummy = 0;
42236 + unsigned int i;
42237 +
42238 + sg_init_table(&sg, 1);
42239 +
42240 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
42241 + if (IS_ERR(tfm)) {
42242 + /* should never happen, since sha256 should be built in */
42243 + return 1;
42244 + }
42245 +
42246 + desc.tfm = tfm;
42247 + desc.flags = 0;
42248 +
42249 + crypto_hash_init(&desc);
42250 +
42251 + p = salt;
42252 + sg_set_buf(&sg, p, GR_SALT_LEN);
42253 + crypto_hash_update(&desc, &sg, sg.length);
42254 +
42255 + p = entry->pw;
42256 + sg_set_buf(&sg, p, strlen(p));
42257 +
42258 + crypto_hash_update(&desc, &sg, sg.length);
42259 +
42260 + crypto_hash_final(&desc, temp_sum);
42261 +
42262 + memset(entry->pw, 0, GR_PW_LEN);
42263 +
42264 + for (i = 0; i < GR_SHA_LEN; i++)
42265 + if (sum[i] != temp_sum[i])
42266 + retval = 1;
42267 + else
42268 + dummy = 1; // waste a cycle
42269 +
42270 + crypto_free_hash(tfm);
42271 +
42272 + return retval;
42273 +}
42274 diff -urNp linux-2.6.34.1/include/acpi/acoutput.h linux-2.6.34.1/include/acpi/acoutput.h
42275 --- linux-2.6.34.1/include/acpi/acoutput.h 2010-07-05 14:24:10.000000000 -0400
42276 +++ linux-2.6.34.1/include/acpi/acoutput.h 2010-07-07 09:04:56.000000000 -0400
42277 @@ -266,8 +266,8 @@
42278 * leaving no executable debug code!
42279 */
42280 #define ACPI_FUNCTION_NAME(a)
42281 -#define ACPI_DEBUG_PRINT(pl)
42282 -#define ACPI_DEBUG_PRINT_RAW(pl)
42283 +#define ACPI_DEBUG_PRINT(pl) do {} while (0)
42284 +#define ACPI_DEBUG_PRINT_RAW(pl) do {} while (0)
42285
42286 #endif /* ACPI_DEBUG_OUTPUT */
42287
42288 diff -urNp linux-2.6.34.1/include/acpi/acpi_drivers.h linux-2.6.34.1/include/acpi/acpi_drivers.h
42289 --- linux-2.6.34.1/include/acpi/acpi_drivers.h 2010-07-05 14:24:10.000000000 -0400
42290 +++ linux-2.6.34.1/include/acpi/acpi_drivers.h 2010-07-07 09:04:56.000000000 -0400
42291 @@ -122,8 +122,8 @@ int acpi_processor_set_thermal_limit(acp
42292 Dock Station
42293 -------------------------------------------------------------------------- */
42294 struct acpi_dock_ops {
42295 - acpi_notify_handler handler;
42296 - acpi_notify_handler uevent;
42297 + const acpi_notify_handler handler;
42298 + const acpi_notify_handler uevent;
42299 };
42300
42301 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
42302 @@ -131,7 +131,7 @@ extern int is_dock_device(acpi_handle ha
42303 extern int register_dock_notifier(struct notifier_block *nb);
42304 extern void unregister_dock_notifier(struct notifier_block *nb);
42305 extern int register_hotplug_dock_device(acpi_handle handle,
42306 - struct acpi_dock_ops *ops,
42307 + const struct acpi_dock_ops *ops,
42308 void *context);
42309 extern void unregister_hotplug_dock_device(acpi_handle handle);
42310 #else
42311 @@ -147,7 +147,7 @@ static inline void unregister_dock_notif
42312 {
42313 }
42314 static inline int register_hotplug_dock_device(acpi_handle handle,
42315 - struct acpi_dock_ops *ops,
42316 + const struct acpi_dock_ops *ops,
42317 void *context)
42318 {
42319 return -ENODEV;
42320 diff -urNp linux-2.6.34.1/include/asm-generic/atomic-long.h linux-2.6.34.1/include/asm-generic/atomic-long.h
42321 --- linux-2.6.34.1/include/asm-generic/atomic-long.h 2010-07-05 14:24:10.000000000 -0400
42322 +++ linux-2.6.34.1/include/asm-generic/atomic-long.h 2010-07-07 09:04:56.000000000 -0400
42323 @@ -22,6 +22,12 @@
42324
42325 typedef atomic64_t atomic_long_t;
42326
42327 +#ifdef CONFIG_PAX_REFCOUNT
42328 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
42329 +#else
42330 +typedef atomic64_t atomic_long_unchecked_t;
42331 +#endif
42332 +
42333 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
42334
42335 static inline long atomic_long_read(atomic_long_t *l)
42336 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
42337 return (long)atomic64_read(v);
42338 }
42339
42340 +#ifdef CONFIG_PAX_REFCOUNT
42341 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
42342 +{
42343 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42344 +
42345 + return (long)atomic64_read_unchecked(v);
42346 +}
42347 +#endif
42348 +
42349 static inline void atomic_long_set(atomic_long_t *l, long i)
42350 {
42351 atomic64_t *v = (atomic64_t *)l;
42352 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
42353 atomic64_set(v, i);
42354 }
42355
42356 +#ifdef CONFIG_PAX_REFCOUNT
42357 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
42358 +{
42359 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42360 +
42361 + atomic64_set_unchecked(v, i);
42362 +}
42363 +#endif
42364 +
42365 static inline void atomic_long_inc(atomic_long_t *l)
42366 {
42367 atomic64_t *v = (atomic64_t *)l;
42368 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
42369 atomic64_inc(v);
42370 }
42371
42372 +#ifdef CONFIG_PAX_REFCOUNT
42373 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
42374 +{
42375 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42376 +
42377 + atomic64_inc_unchecked(v);
42378 +}
42379 +#endif
42380 +
42381 static inline void atomic_long_dec(atomic_long_t *l)
42382 {
42383 atomic64_t *v = (atomic64_t *)l;
42384 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
42385 atomic64_dec(v);
42386 }
42387
42388 +#ifdef CONFIG_PAX_REFCOUNT
42389 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
42390 +{
42391 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42392 +
42393 + atomic64_dec_unchecked(v);
42394 +}
42395 +#endif
42396 +
42397 static inline void atomic_long_add(long i, atomic_long_t *l)
42398 {
42399 atomic64_t *v = (atomic64_t *)l;
42400 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
42401 atomic64_add(i, v);
42402 }
42403
42404 +#ifdef CONFIG_PAX_REFCOUNT
42405 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
42406 +{
42407 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42408 +
42409 + atomic64_add_unchecked(i, v);
42410 +}
42411 +#endif
42412 +
42413 static inline void atomic_long_sub(long i, atomic_long_t *l)
42414 {
42415 atomic64_t *v = (atomic64_t *)l;
42416 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
42417 return (long)atomic64_inc_return(v);
42418 }
42419
42420 +#ifdef CONFIG_PAX_REFCOUNT
42421 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
42422 +{
42423 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
42424 +
42425 + return (long)atomic64_inc_return_unchecked(v);
42426 +}
42427 +#endif
42428 +
42429 static inline long atomic_long_dec_return(atomic_long_t *l)
42430 {
42431 atomic64_t *v = (atomic64_t *)l;
42432 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
42433
42434 typedef atomic_t atomic_long_t;
42435
42436 +#ifdef CONFIG_PAX_REFCOUNT
42437 +typedef atomic_unchecked_t atomic_long_unchecked_t;
42438 +#else
42439 +typedef atomic_t atomic_long_unchecked_t;
42440 +#endif
42441 +
42442 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
42443 static inline long atomic_long_read(atomic_long_t *l)
42444 {
42445 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
42446 return (long)atomic_read(v);
42447 }
42448
42449 +#ifdef CONFIG_PAX_REFCOUNT
42450 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
42451 +{
42452 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42453 +
42454 + return (long)atomic_read_unchecked(v);
42455 +}
42456 +#endif
42457 +
42458 static inline void atomic_long_set(atomic_long_t *l, long i)
42459 {
42460 atomic_t *v = (atomic_t *)l;
42461 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
42462 atomic_set(v, i);
42463 }
42464
42465 +#ifdef CONFIG_PAX_REFCOUNT
42466 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
42467 +{
42468 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42469 +
42470 + atomic_set_unchecked(v, i);
42471 +}
42472 +#endif
42473 +
42474 static inline void atomic_long_inc(atomic_long_t *l)
42475 {
42476 atomic_t *v = (atomic_t *)l;
42477 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
42478 atomic_inc(v);
42479 }
42480
42481 +#ifdef CONFIG_PAX_REFCOUNT
42482 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
42483 +{
42484 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42485 +
42486 + atomic_inc_unchecked(v);
42487 +}
42488 +#endif
42489 +
42490 static inline void atomic_long_dec(atomic_long_t *l)
42491 {
42492 atomic_t *v = (atomic_t *)l;
42493 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
42494 atomic_dec(v);
42495 }
42496
42497 +#ifdef CONFIG_PAX_REFCOUNT
42498 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
42499 +{
42500 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42501 +
42502 + atomic_dec_unchecked(v);
42503 +}
42504 +#endif
42505 +
42506 static inline void atomic_long_add(long i, atomic_long_t *l)
42507 {
42508 atomic_t *v = (atomic_t *)l;
42509 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
42510 atomic_add(i, v);
42511 }
42512
42513 +#ifdef CONFIG_PAX_REFCOUNT
42514 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
42515 +{
42516 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42517 +
42518 + atomic_add_unchecked(i, v);
42519 +}
42520 +#endif
42521 +
42522 static inline void atomic_long_sub(long i, atomic_long_t *l)
42523 {
42524 atomic_t *v = (atomic_t *)l;
42525 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
42526 return (long)atomic_inc_return(v);
42527 }
42528
42529 +#ifdef CONFIG_PAX_REFCOUNT
42530 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
42531 +{
42532 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
42533 +
42534 + return (long)atomic_inc_return_unchecked(v);
42535 +}
42536 +#endif
42537 +
42538 static inline long atomic_long_dec_return(atomic_long_t *l)
42539 {
42540 atomic_t *v = (atomic_t *)l;
42541 @@ -255,4 +375,35 @@ static inline long atomic_long_add_unles
42542
42543 #endif /* BITS_PER_LONG == 64 */
42544
42545 +#ifdef CONFIG_PAX_REFCOUNT
42546 +static inline void pax_refcount_needs_these_functions(void)
42547 +{
42548 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
42549 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
42550 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
42551 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
42552 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
42553 +
42554 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
42555 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
42556 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
42557 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
42558 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
42559 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
42560 +}
42561 +#else
42562 +#define atomic_read_unchecked(v) atomic_read(v)
42563 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
42564 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
42565 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
42566 +#define atomic_inc_unchecked(v) atomic_inc(v)
42567 +
42568 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
42569 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
42570 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
42571 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
42572 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
42573 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
42574 +#endif
42575 +
42576 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
42577 diff -urNp linux-2.6.34.1/include/asm-generic/dma-mapping-common.h linux-2.6.34.1/include/asm-generic/dma-mapping-common.h
42578 --- linux-2.6.34.1/include/asm-generic/dma-mapping-common.h 2010-07-05 14:24:10.000000000 -0400
42579 +++ linux-2.6.34.1/include/asm-generic/dma-mapping-common.h 2010-07-07 09:04:56.000000000 -0400
42580 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
42581 enum dma_data_direction dir,
42582 struct dma_attrs *attrs)
42583 {
42584 - struct dma_map_ops *ops = get_dma_ops(dev);
42585 + const struct dma_map_ops *ops = get_dma_ops(dev);
42586 dma_addr_t addr;
42587
42588 kmemcheck_mark_initialized(ptr, size);
42589 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
42590 enum dma_data_direction dir,
42591 struct dma_attrs *attrs)
42592 {
42593 - struct dma_map_ops *ops = get_dma_ops(dev);
42594 + const struct dma_map_ops *ops = get_dma_ops(dev);
42595
42596 BUG_ON(!valid_dma_direction(dir));
42597 if (ops->unmap_page)
42598 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
42599 int nents, enum dma_data_direction dir,
42600 struct dma_attrs *attrs)
42601 {
42602 - struct dma_map_ops *ops = get_dma_ops(dev);
42603 + const struct dma_map_ops *ops = get_dma_ops(dev);
42604 int i, ents;
42605 struct scatterlist *s;
42606
42607 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
42608 int nents, enum dma_data_direction dir,
42609 struct dma_attrs *attrs)
42610 {
42611 - struct dma_map_ops *ops = get_dma_ops(dev);
42612 + const struct dma_map_ops *ops = get_dma_ops(dev);
42613
42614 BUG_ON(!valid_dma_direction(dir));
42615 debug_dma_unmap_sg(dev, sg, nents, dir);
42616 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
42617 size_t offset, size_t size,
42618 enum dma_data_direction dir)
42619 {
42620 - struct dma_map_ops *ops = get_dma_ops(dev);
42621 + const struct dma_map_ops *ops = get_dma_ops(dev);
42622 dma_addr_t addr;
42623
42624 kmemcheck_mark_initialized(page_address(page) + offset, size);
42625 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
42626 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
42627 size_t size, enum dma_data_direction dir)
42628 {
42629 - struct dma_map_ops *ops = get_dma_ops(dev);
42630 + const struct dma_map_ops *ops = get_dma_ops(dev);
42631
42632 BUG_ON(!valid_dma_direction(dir));
42633 if (ops->unmap_page)
42634 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
42635 size_t size,
42636 enum dma_data_direction dir)
42637 {
42638 - struct dma_map_ops *ops = get_dma_ops(dev);
42639 + const struct dma_map_ops *ops = get_dma_ops(dev);
42640
42641 BUG_ON(!valid_dma_direction(dir));
42642 if (ops->sync_single_for_cpu)
42643 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
42644 dma_addr_t addr, size_t size,
42645 enum dma_data_direction dir)
42646 {
42647 - struct dma_map_ops *ops = get_dma_ops(dev);
42648 + const struct dma_map_ops *ops = get_dma_ops(dev);
42649
42650 BUG_ON(!valid_dma_direction(dir));
42651 if (ops->sync_single_for_device)
42652 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
42653 size_t size,
42654 enum dma_data_direction dir)
42655 {
42656 - struct dma_map_ops *ops = get_dma_ops(dev);
42657 + const struct dma_map_ops *ops = get_dma_ops(dev);
42658
42659 BUG_ON(!valid_dma_direction(dir));
42660 if (ops->sync_single_range_for_cpu) {
42661 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
42662 size_t size,
42663 enum dma_data_direction dir)
42664 {
42665 - struct dma_map_ops *ops = get_dma_ops(dev);
42666 + const struct dma_map_ops *ops = get_dma_ops(dev);
42667
42668 BUG_ON(!valid_dma_direction(dir));
42669 if (ops->sync_single_range_for_device) {
42670 @@ -155,7 +155,7 @@ static inline void
42671 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
42672 int nelems, enum dma_data_direction dir)
42673 {
42674 - struct dma_map_ops *ops = get_dma_ops(dev);
42675 + const struct dma_map_ops *ops = get_dma_ops(dev);
42676
42677 BUG_ON(!valid_dma_direction(dir));
42678 if (ops->sync_sg_for_cpu)
42679 @@ -167,7 +167,7 @@ static inline void
42680 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
42681 int nelems, enum dma_data_direction dir)
42682 {
42683 - struct dma_map_ops *ops = get_dma_ops(dev);
42684 + const struct dma_map_ops *ops = get_dma_ops(dev);
42685
42686 BUG_ON(!valid_dma_direction(dir));
42687 if (ops->sync_sg_for_device)
42688 diff -urNp linux-2.6.34.1/include/asm-generic/futex.h linux-2.6.34.1/include/asm-generic/futex.h
42689 --- linux-2.6.34.1/include/asm-generic/futex.h 2010-07-05 14:24:10.000000000 -0400
42690 +++ linux-2.6.34.1/include/asm-generic/futex.h 2010-07-07 09:04:56.000000000 -0400
42691 @@ -6,7 +6,7 @@
42692 #include <asm/errno.h>
42693
42694 static inline int
42695 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
42696 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
42697 {
42698 int op = (encoded_op >> 28) & 7;
42699 int cmp = (encoded_op >> 24) & 15;
42700 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
42701 }
42702
42703 static inline int
42704 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
42705 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
42706 {
42707 return -ENOSYS;
42708 }
42709 diff -urNp linux-2.6.34.1/include/asm-generic/int-l64.h linux-2.6.34.1/include/asm-generic/int-l64.h
42710 --- linux-2.6.34.1/include/asm-generic/int-l64.h 2010-07-05 14:24:10.000000000 -0400
42711 +++ linux-2.6.34.1/include/asm-generic/int-l64.h 2010-07-07 09:04:56.000000000 -0400
42712 @@ -46,6 +46,8 @@ typedef unsigned int u32;
42713 typedef signed long s64;
42714 typedef unsigned long u64;
42715
42716 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
42717 +
42718 #define S8_C(x) x
42719 #define U8_C(x) x ## U
42720 #define S16_C(x) x
42721 diff -urNp linux-2.6.34.1/include/asm-generic/int-ll64.h linux-2.6.34.1/include/asm-generic/int-ll64.h
42722 --- linux-2.6.34.1/include/asm-generic/int-ll64.h 2010-07-05 14:24:10.000000000 -0400
42723 +++ linux-2.6.34.1/include/asm-generic/int-ll64.h 2010-07-07 09:04:56.000000000 -0400
42724 @@ -51,6 +51,8 @@ typedef unsigned int u32;
42725 typedef signed long long s64;
42726 typedef unsigned long long u64;
42727
42728 +typedef unsigned long long intoverflow_t;
42729 +
42730 #define S8_C(x) x
42731 #define U8_C(x) x ## U
42732 #define S16_C(x) x
42733 diff -urNp linux-2.6.34.1/include/asm-generic/kmap_types.h linux-2.6.34.1/include/asm-generic/kmap_types.h
42734 --- linux-2.6.34.1/include/asm-generic/kmap_types.h 2010-07-05 14:24:10.000000000 -0400
42735 +++ linux-2.6.34.1/include/asm-generic/kmap_types.h 2010-07-07 09:04:56.000000000 -0400
42736 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
42737 KMAP_D(16) KM_IRQ_PTE,
42738 KMAP_D(17) KM_NMI,
42739 KMAP_D(18) KM_NMI_PTE,
42740 -KMAP_D(19) KM_TYPE_NR
42741 +KMAP_D(19) KM_CLEARPAGE,
42742 +KMAP_D(20) KM_TYPE_NR
42743 };
42744
42745 #undef KMAP_D
42746 diff -urNp linux-2.6.34.1/include/asm-generic/pgtable.h linux-2.6.34.1/include/asm-generic/pgtable.h
42747 --- linux-2.6.34.1/include/asm-generic/pgtable.h 2010-07-05 14:24:10.000000000 -0400
42748 +++ linux-2.6.34.1/include/asm-generic/pgtable.h 2010-07-07 09:04:56.000000000 -0400
42749 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
42750 unsigned long size);
42751 #endif
42752
42753 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
42754 +static inline unsigned long pax_open_kernel(void) { return 0; }
42755 +#endif
42756 +
42757 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
42758 +static inline unsigned long pax_close_kernel(void) { return 0; }
42759 +#endif
42760 +
42761 #endif /* !__ASSEMBLY__ */
42762
42763 #endif /* _ASM_GENERIC_PGTABLE_H */
42764 diff -urNp linux-2.6.34.1/include/asm-generic/vmlinux.lds.h linux-2.6.34.1/include/asm-generic/vmlinux.lds.h
42765 --- linux-2.6.34.1/include/asm-generic/vmlinux.lds.h 2010-07-05 14:24:10.000000000 -0400
42766 +++ linux-2.6.34.1/include/asm-generic/vmlinux.lds.h 2010-07-07 09:04:56.000000000 -0400
42767 @@ -203,6 +203,7 @@
42768 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
42769 VMLINUX_SYMBOL(__start_rodata) = .; \
42770 *(.rodata) *(.rodata.*) \
42771 + *(.data.read_only) \
42772 *(__vermagic) /* Kernel version magic */ \
42773 *(__markers_strings) /* Markers: strings */ \
42774 *(__tracepoints_strings)/* Tracepoints: strings */ \
42775 @@ -660,22 +661,24 @@
42776 * section in the linker script will go there too. @phdr should have
42777 * a leading colon.
42778 *
42779 - * Note that this macros defines __per_cpu_load as an absolute symbol.
42780 + * Note that this macros defines per_cpu_load as an absolute symbol.
42781 * If there is no need to put the percpu section at a predetermined
42782 * address, use PERCPU().
42783 */
42784 #define PERCPU_VADDR(vaddr, phdr) \
42785 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
42786 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
42787 + per_cpu_load = .; \
42788 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
42789 - LOAD_OFFSET) { \
42790 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
42791 VMLINUX_SYMBOL(__per_cpu_start) = .; \
42792 *(.data.percpu.first) \
42793 - *(.data.percpu.page_aligned) \
42794 *(.data.percpu) \
42795 + . = ALIGN(PAGE_SIZE); \
42796 + *(.data.percpu.page_aligned) \
42797 *(.data.percpu.shared_aligned) \
42798 VMLINUX_SYMBOL(__per_cpu_end) = .; \
42799 } phdr \
42800 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
42801 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
42802
42803 /**
42804 * PERCPU - define output section for percpu area, simple version
42805 diff -urNp linux-2.6.34.1/include/drm/drmP.h linux-2.6.34.1/include/drm/drmP.h
42806 --- linux-2.6.34.1/include/drm/drmP.h 2010-07-05 14:24:10.000000000 -0400
42807 +++ linux-2.6.34.1/include/drm/drmP.h 2010-07-07 09:04:56.000000000 -0400
42808 @@ -808,7 +808,7 @@ struct drm_driver {
42809 void (*vgaarb_irq)(struct drm_device *dev, bool state);
42810
42811 /* Driver private ops for this object */
42812 - struct vm_operations_struct *gem_vm_ops;
42813 + const struct vm_operations_struct *gem_vm_ops;
42814
42815 int major;
42816 int minor;
42817 @@ -917,7 +917,7 @@ struct drm_device {
42818
42819 /** \name Usage Counters */
42820 /*@{ */
42821 - int open_count; /**< Outstanding files open */
42822 + atomic_t open_count; /**< Outstanding files open */
42823 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
42824 atomic_t vma_count; /**< Outstanding vma areas open */
42825 int buf_use; /**< Buffers in use -- cannot alloc */
42826 @@ -928,7 +928,7 @@ struct drm_device {
42827 /*@{ */
42828 unsigned long counters;
42829 enum drm_stat_type types[15];
42830 - atomic_t counts[15];
42831 + atomic_unchecked_t counts[15];
42832 /*@} */
42833
42834 struct list_head filelist;
42835 diff -urNp linux-2.6.34.1/include/drm/drm_pciids.h linux-2.6.34.1/include/drm/drm_pciids.h
42836 --- linux-2.6.34.1/include/drm/drm_pciids.h 2010-07-05 14:24:10.000000000 -0400
42837 +++ linux-2.6.34.1/include/drm/drm_pciids.h 2010-07-07 09:04:56.000000000 -0400
42838 @@ -412,7 +412,7 @@
42839 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
42840 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
42841 {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
42842 - {0, 0, 0}
42843 + {0, 0, 0, 0, 0, 0}
42844
42845 #define r128_PCI_IDS \
42846 {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42847 @@ -452,14 +452,14 @@
42848 {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42849 {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42850 {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42851 - {0, 0, 0}
42852 + {0, 0, 0, 0, 0, 0}
42853
42854 #define mga_PCI_IDS \
42855 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
42856 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
42857 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
42858 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
42859 - {0, 0, 0}
42860 + {0, 0, 0, 0, 0, 0}
42861
42862 #define mach64_PCI_IDS \
42863 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42864 @@ -482,7 +482,7 @@
42865 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42866 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42867 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42868 - {0, 0, 0}
42869 + {0, 0, 0, 0, 0, 0}
42870
42871 #define sisdrv_PCI_IDS \
42872 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42873 @@ -493,7 +493,7 @@
42874 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42875 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
42876 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
42877 - {0, 0, 0}
42878 + {0, 0, 0, 0, 0, 0}
42879
42880 #define tdfx_PCI_IDS \
42881 {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42882 @@ -502,7 +502,7 @@
42883 {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42884 {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42885 {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42886 - {0, 0, 0}
42887 + {0, 0, 0, 0, 0, 0}
42888
42889 #define viadrv_PCI_IDS \
42890 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42891 @@ -514,14 +514,14 @@
42892 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42893 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
42894 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
42895 - {0, 0, 0}
42896 + {0, 0, 0, 0, 0, 0}
42897
42898 #define i810_PCI_IDS \
42899 {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42900 {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42901 {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42902 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42903 - {0, 0, 0}
42904 + {0, 0, 0, 0, 0, 0}
42905
42906 #define i830_PCI_IDS \
42907 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42908 @@ -529,11 +529,11 @@
42909 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42910 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42911 {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42912 - {0, 0, 0}
42913 + {0, 0, 0, 0, 0, 0}
42914
42915 #define gamma_PCI_IDS \
42916 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
42917 - {0, 0, 0}
42918 + {0, 0, 0, 0, 0, 0}
42919
42920 #define savage_PCI_IDS \
42921 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
42922 @@ -559,10 +559,10 @@
42923 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
42924 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
42925 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
42926 - {0, 0, 0}
42927 + {0, 0, 0, 0, 0, 0}
42928
42929 #define ffb_PCI_IDS \
42930 - {0, 0, 0}
42931 + {0, 0, 0, 0, 0, 0}
42932
42933 #define i915_PCI_IDS \
42934 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
42935 @@ -596,4 +596,4 @@
42936 {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
42937 {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
42938 {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
42939 - {0, 0, 0}
42940 + {0, 0, 0, 0, 0, 0}
42941 diff -urNp linux-2.6.34.1/include/linux/a.out.h linux-2.6.34.1/include/linux/a.out.h
42942 --- linux-2.6.34.1/include/linux/a.out.h 2010-07-05 14:24:10.000000000 -0400
42943 +++ linux-2.6.34.1/include/linux/a.out.h 2010-07-07 09:04:56.000000000 -0400
42944 @@ -39,6 +39,14 @@ enum machine_type {
42945 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
42946 };
42947
42948 +/* Constants for the N_FLAGS field */
42949 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
42950 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
42951 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
42952 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
42953 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
42954 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
42955 +
42956 #if !defined (N_MAGIC)
42957 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
42958 #endif
42959 diff -urNp linux-2.6.34.1/include/linux/atmdev.h linux-2.6.34.1/include/linux/atmdev.h
42960 --- linux-2.6.34.1/include/linux/atmdev.h 2010-07-05 14:24:10.000000000 -0400
42961 +++ linux-2.6.34.1/include/linux/atmdev.h 2010-07-07 09:04:56.000000000 -0400
42962 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
42963 #endif
42964
42965 struct k_atm_aal_stats {
42966 -#define __HANDLE_ITEM(i) atomic_t i
42967 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
42968 __AAL_STAT_ITEMS
42969 #undef __HANDLE_ITEM
42970 };
42971 diff -urNp linux-2.6.34.1/include/linux/binfmts.h linux-2.6.34.1/include/linux/binfmts.h
42972 --- linux-2.6.34.1/include/linux/binfmts.h 2010-07-05 14:24:10.000000000 -0400
42973 +++ linux-2.6.34.1/include/linux/binfmts.h 2010-07-07 09:04:56.000000000 -0400
42974 @@ -87,6 +87,7 @@ struct linux_binfmt {
42975 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
42976 int (*load_shlib)(struct file *);
42977 int (*core_dump)(struct coredump_params *cprm);
42978 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
42979 unsigned long min_coredump; /* minimal dump size */
42980 int hasvdso;
42981 };
42982 diff -urNp linux-2.6.34.1/include/linux/blkdev.h linux-2.6.34.1/include/linux/blkdev.h
42983 --- linux-2.6.34.1/include/linux/blkdev.h 2010-07-05 14:24:10.000000000 -0400
42984 +++ linux-2.6.34.1/include/linux/blkdev.h 2010-07-07 09:04:56.000000000 -0400
42985 @@ -1275,19 +1275,19 @@ static inline int blk_integrity_rq(struc
42986 #endif /* CONFIG_BLK_DEV_INTEGRITY */
42987
42988 struct block_device_operations {
42989 - int (*open) (struct block_device *, fmode_t);
42990 - int (*release) (struct gendisk *, fmode_t);
42991 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
42992 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
42993 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
42994 - int (*direct_access) (struct block_device *, sector_t,
42995 + int (* const open) (struct block_device *, fmode_t);
42996 + int (* const release) (struct gendisk *, fmode_t);
42997 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
42998 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
42999 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
43000 + int (* const direct_access) (struct block_device *, sector_t,
43001 void **, unsigned long *);
43002 - int (*media_changed) (struct gendisk *);
43003 - unsigned long long (*set_capacity) (struct gendisk *,
43004 + int (* const media_changed) (struct gendisk *);
43005 + unsigned long long (* const set_capacity) (struct gendisk *,
43006 unsigned long long);
43007 - int (*revalidate_disk) (struct gendisk *);
43008 - int (*getgeo)(struct block_device *, struct hd_geometry *);
43009 - struct module *owner;
43010 + int (* const revalidate_disk) (struct gendisk *);
43011 + int (* const getgeo)(struct block_device *, struct hd_geometry *);
43012 + struct module * const owner;
43013 };
43014
43015 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
43016 diff -urNp linux-2.6.34.1/include/linux/cache.h linux-2.6.34.1/include/linux/cache.h
43017 --- linux-2.6.34.1/include/linux/cache.h 2010-07-05 14:24:10.000000000 -0400
43018 +++ linux-2.6.34.1/include/linux/cache.h 2010-07-07 09:04:56.000000000 -0400
43019 @@ -16,6 +16,10 @@
43020 #define __read_mostly
43021 #endif
43022
43023 +#ifndef __read_only
43024 +#define __read_only __read_mostly
43025 +#endif
43026 +
43027 #ifndef ____cacheline_aligned
43028 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
43029 #endif
43030 diff -urNp linux-2.6.34.1/include/linux/capability.h linux-2.6.34.1/include/linux/capability.h
43031 --- linux-2.6.34.1/include/linux/capability.h 2010-07-05 14:24:10.000000000 -0400
43032 +++ linux-2.6.34.1/include/linux/capability.h 2010-07-07 09:04:56.000000000 -0400
43033 @@ -561,6 +561,7 @@ extern const kernel_cap_t __cap_init_eff
43034 (security_real_capable_noaudit((t), (cap)) == 0)
43035
43036 extern int capable(int cap);
43037 +int capable_nolog(int cap);
43038
43039 /* audit system wants to get cap info from files as well */
43040 struct dentry;
43041 diff -urNp linux-2.6.34.1/include/linux/compiler-gcc4.h linux-2.6.34.1/include/linux/compiler-gcc4.h
43042 --- linux-2.6.34.1/include/linux/compiler-gcc4.h 2010-07-05 14:24:10.000000000 -0400
43043 +++ linux-2.6.34.1/include/linux/compiler-gcc4.h 2010-07-07 09:04:56.000000000 -0400
43044 @@ -50,6 +50,10 @@
43045 #define unreachable() __builtin_unreachable()
43046 #endif
43047
43048 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
43049 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
43050 +#define __bos0(ptr) __bos((ptr), 0)
43051 +#define __bos1(ptr) __bos((ptr), 1)
43052 #endif
43053
43054 #if __GNUC_MINOR__ > 0
43055 diff -urNp linux-2.6.34.1/include/linux/compiler.h linux-2.6.34.1/include/linux/compiler.h
43056 --- linux-2.6.34.1/include/linux/compiler.h 2010-07-05 14:24:10.000000000 -0400
43057 +++ linux-2.6.34.1/include/linux/compiler.h 2010-07-07 09:04:56.000000000 -0400
43058 @@ -267,6 +267,22 @@ void ftrace_likely_update(struct ftrace_
43059 #define __cold
43060 #endif
43061
43062 +#ifndef __alloc_size
43063 +#define __alloc_size
43064 +#endif
43065 +
43066 +#ifndef __bos
43067 +#define __bos
43068 +#endif
43069 +
43070 +#ifndef __bos0
43071 +#define __bos0
43072 +#endif
43073 +
43074 +#ifndef __bos1
43075 +#define __bos1
43076 +#endif
43077 +
43078 /* Simple shorthand for a section definition */
43079 #ifndef __section
43080 # define __section(S) __attribute__ ((__section__(#S)))
43081 diff -urNp linux-2.6.34.1/include/linux/decompress/mm.h linux-2.6.34.1/include/linux/decompress/mm.h
43082 --- linux-2.6.34.1/include/linux/decompress/mm.h 2010-07-05 14:24:10.000000000 -0400
43083 +++ linux-2.6.34.1/include/linux/decompress/mm.h 2010-07-07 09:04:56.000000000 -0400
43084 @@ -78,7 +78,7 @@ static void free(void *where)
43085 * warnings when not needed (indeed large_malloc / large_free are not
43086 * needed by inflate */
43087
43088 -#define malloc(a) kmalloc(a, GFP_KERNEL)
43089 +#define malloc(a) kmalloc((a), GFP_KERNEL)
43090 #define free(a) kfree(a)
43091
43092 #define large_malloc(a) vmalloc(a)
43093 diff -urNp linux-2.6.34.1/include/linux/dma-mapping.h linux-2.6.34.1/include/linux/dma-mapping.h
43094 --- linux-2.6.34.1/include/linux/dma-mapping.h 2010-07-05 14:24:10.000000000 -0400
43095 +++ linux-2.6.34.1/include/linux/dma-mapping.h 2010-07-07 09:04:56.000000000 -0400
43096 @@ -16,50 +16,50 @@ enum dma_data_direction {
43097 };
43098
43099 struct dma_map_ops {
43100 - void* (*alloc_coherent)(struct device *dev, size_t size,
43101 + void* (* const alloc_coherent)(struct device *dev, size_t size,
43102 dma_addr_t *dma_handle, gfp_t gfp);
43103 - void (*free_coherent)(struct device *dev, size_t size,
43104 + void (* const free_coherent)(struct device *dev, size_t size,
43105 void *vaddr, dma_addr_t dma_handle);
43106 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
43107 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
43108 unsigned long offset, size_t size,
43109 enum dma_data_direction dir,
43110 struct dma_attrs *attrs);
43111 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
43112 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
43113 size_t size, enum dma_data_direction dir,
43114 struct dma_attrs *attrs);
43115 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
43116 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
43117 int nents, enum dma_data_direction dir,
43118 struct dma_attrs *attrs);
43119 - void (*unmap_sg)(struct device *dev,
43120 + void (* const unmap_sg)(struct device *dev,
43121 struct scatterlist *sg, int nents,
43122 enum dma_data_direction dir,
43123 struct dma_attrs *attrs);
43124 - void (*sync_single_for_cpu)(struct device *dev,
43125 + void (* const sync_single_for_cpu)(struct device *dev,
43126 dma_addr_t dma_handle, size_t size,
43127 enum dma_data_direction dir);
43128 - void (*sync_single_for_device)(struct device *dev,
43129 + void (* const sync_single_for_device)(struct device *dev,
43130 dma_addr_t dma_handle, size_t size,
43131 enum dma_data_direction dir);
43132 - void (*sync_single_range_for_cpu)(struct device *dev,
43133 + void (* const sync_single_range_for_cpu)(struct device *dev,
43134 dma_addr_t dma_handle,
43135 unsigned long offset,
43136 size_t size,
43137 enum dma_data_direction dir);
43138 - void (*sync_single_range_for_device)(struct device *dev,
43139 + void (* const sync_single_range_for_device)(struct device *dev,
43140 dma_addr_t dma_handle,
43141 unsigned long offset,
43142 size_t size,
43143 enum dma_data_direction dir);
43144 - void (*sync_sg_for_cpu)(struct device *dev,
43145 + void (* const sync_sg_for_cpu)(struct device *dev,
43146 struct scatterlist *sg, int nents,
43147 enum dma_data_direction dir);
43148 - void (*sync_sg_for_device)(struct device *dev,
43149 + void (* const sync_sg_for_device)(struct device *dev,
43150 struct scatterlist *sg, int nents,
43151 enum dma_data_direction dir);
43152 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
43153 - int (*dma_supported)(struct device *dev, u64 mask);
43154 - int (*set_dma_mask)(struct device *dev, u64 mask);
43155 - int is_phys;
43156 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
43157 + int (* const dma_supported)(struct device *dev, u64 mask);
43158 + int (* set_dma_mask)(struct device *dev, u64 mask);
43159 + const int is_phys;
43160 };
43161
43162 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
43163 diff -urNp linux-2.6.34.1/include/linux/elf.h linux-2.6.34.1/include/linux/elf.h
43164 --- linux-2.6.34.1/include/linux/elf.h 2010-07-05 14:24:10.000000000 -0400
43165 +++ linux-2.6.34.1/include/linux/elf.h 2010-07-07 09:04:56.000000000 -0400
43166 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
43167 #define PT_GNU_EH_FRAME 0x6474e550
43168
43169 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
43170 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
43171 +
43172 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
43173 +
43174 +/* Constants for the e_flags field */
43175 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
43176 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
43177 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
43178 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
43179 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
43180 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
43181
43182 /*
43183 * Extended Numbering
43184 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
43185 #define DT_DEBUG 21
43186 #define DT_TEXTREL 22
43187 #define DT_JMPREL 23
43188 +#define DT_FLAGS 30
43189 + #define DF_TEXTREL 0x00000004
43190 #define DT_ENCODING 32
43191 #define OLD_DT_LOOS 0x60000000
43192 #define DT_LOOS 0x6000000d
43193 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
43194 #define PF_W 0x2
43195 #define PF_X 0x1
43196
43197 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
43198 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
43199 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
43200 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
43201 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
43202 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
43203 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
43204 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
43205 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
43206 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
43207 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
43208 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
43209 +
43210 typedef struct elf32_phdr{
43211 Elf32_Word p_type;
43212 Elf32_Off p_offset;
43213 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
43214 #define EI_OSABI 7
43215 #define EI_PAD 8
43216
43217 +#define EI_PAX 14
43218 +
43219 #define ELFMAG0 0x7f /* EI_MAG */
43220 #define ELFMAG1 'E'
43221 #define ELFMAG2 'L'
43222 @@ -420,6 +448,7 @@ extern Elf32_Dyn _DYNAMIC [];
43223 #define elf_note elf32_note
43224 #define elf_addr_t Elf32_Off
43225 #define Elf_Half Elf32_Half
43226 +#define elf_dyn Elf32_Dyn
43227
43228 #else
43229
43230 @@ -430,6 +459,7 @@ extern Elf64_Dyn _DYNAMIC [];
43231 #define elf_note elf64_note
43232 #define elf_addr_t Elf64_Off
43233 #define Elf_Half Elf64_Half
43234 +#define elf_dyn Elf64_Dyn
43235
43236 #endif
43237
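Note: the elf.h additions above define the PT_PAX_FLAGS program header type and the PF_*/PF_NO* soft-mode bits that a userspace tool such as paxctl toggles per binary. A hedged userspace sketch that scans a file's program headers for that entry; it assumes a 64-bit ELF whose byte order matches the host, decodes only the MPROTECT pair, and carries its own PT_PAX_FLAGS fallback since the system <elf.h> may not define it:

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
#endif
#define PF_MPROTECT   (1U << 8)         /* values from the hunk above */
#define PF_NOMPROTECT (1U << 9)

int main(int argc, char **argv)
{
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        FILE *f;
        int i;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
                return 1;
        }
        f = fopen(argv[1], "rb");
        if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
                return 1;
        for (i = 0; i < eh.e_phnum; i++) {
                fseek(f, (long)(eh.e_phoff + (unsigned long)i * eh.e_phentsize), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1)
                        break;
                if (ph.p_type == PT_PAX_FLAGS)
                        printf("PT_PAX_FLAGS: MPROTECT %s\n",
                               (ph.p_flags & PF_NOMPROTECT) ? "forced off" :
                               (ph.p_flags & PF_MPROTECT)   ? "forced on"  : "default");
        }
        fclose(f);
        return 0;
}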
43238 diff -urNp linux-2.6.34.1/include/linux/fs.h linux-2.6.34.1/include/linux/fs.h
43239 --- linux-2.6.34.1/include/linux/fs.h 2010-07-05 14:24:10.000000000 -0400
43240 +++ linux-2.6.34.1/include/linux/fs.h 2010-07-07 09:04:56.000000000 -0400
43241 @@ -90,6 +90,11 @@ struct inodes_stat_t {
43242 /* Expect random access pattern */
43243 #define FMODE_RANDOM ((__force fmode_t)0x1000)
43244
43245 +/* Hack for grsec so as not to require read permission simply to execute
43246 + * a binary
43247 + */
43248 +#define FMODE_GREXEC ((__force fmode_t)0x2000)
43249 +
43250 /*
43251 * The below are the various read and write types that we support. Some of
43252 * them include behavioral modifiers that send information down to the
43253 @@ -570,41 +575,41 @@ typedef int (*read_actor_t)(read_descrip
43254 unsigned long, unsigned long);
43255
43256 struct address_space_operations {
43257 - int (*writepage)(struct page *page, struct writeback_control *wbc);
43258 - int (*readpage)(struct file *, struct page *);
43259 - void (*sync_page)(struct page *);
43260 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
43261 + int (* const readpage)(struct file *, struct page *);
43262 + void (* const sync_page)(struct page *);
43263
43264 /* Write back some dirty pages from this mapping. */
43265 - int (*writepages)(struct address_space *, struct writeback_control *);
43266 + int (* const writepages)(struct address_space *, struct writeback_control *);
43267
43268 /* Set a page dirty. Return true if this dirtied it */
43269 - int (*set_page_dirty)(struct page *page);
43270 + int (* const set_page_dirty)(struct page *page);
43271
43272 - int (*readpages)(struct file *filp, struct address_space *mapping,
43273 + int (* const readpages)(struct file *filp, struct address_space *mapping,
43274 struct list_head *pages, unsigned nr_pages);
43275
43276 - int (*write_begin)(struct file *, struct address_space *mapping,
43277 + int (* const write_begin)(struct file *, struct address_space *mapping,
43278 loff_t pos, unsigned len, unsigned flags,
43279 struct page **pagep, void **fsdata);
43280 - int (*write_end)(struct file *, struct address_space *mapping,
43281 + int (* const write_end)(struct file *, struct address_space *mapping,
43282 loff_t pos, unsigned len, unsigned copied,
43283 struct page *page, void *fsdata);
43284
43285 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
43286 - sector_t (*bmap)(struct address_space *, sector_t);
43287 - void (*invalidatepage) (struct page *, unsigned long);
43288 - int (*releasepage) (struct page *, gfp_t);
43289 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
43290 + sector_t (* const bmap)(struct address_space *, sector_t);
43291 + void (* const invalidatepage) (struct page *, unsigned long);
43292 + int (* const releasepage) (struct page *, gfp_t);
43293 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
43294 loff_t offset, unsigned long nr_segs);
43295 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
43296 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
43297 void **, unsigned long *);
43298 /* migrate the contents of a page to the specified target */
43299 - int (*migratepage) (struct address_space *,
43300 + int (* const migratepage) (struct address_space *,
43301 struct page *, struct page *);
43302 - int (*launder_page) (struct page *);
43303 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
43304 + int (* const launder_page) (struct page *);
43305 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
43306 unsigned long);
43307 - int (*error_remove_page)(struct address_space *, struct page *);
43308 + int (* const error_remove_page)(struct address_space *, struct page *);
43309 };
43310
43311 /*
43312 @@ -1032,19 +1037,19 @@ static inline int file_check_writeable(s
43313 typedef struct files_struct *fl_owner_t;
43314
43315 struct file_lock_operations {
43316 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
43317 - void (*fl_release_private)(struct file_lock *);
43318 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
43319 + void (* const fl_release_private)(struct file_lock *);
43320 };
43321
43322 struct lock_manager_operations {
43323 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
43324 - void (*fl_notify)(struct file_lock *); /* unblock callback */
43325 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
43326 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
43327 - void (*fl_release_private)(struct file_lock *);
43328 - void (*fl_break)(struct file_lock *);
43329 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
43330 - int (*fl_change)(struct file_lock **, int);
43331 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
43332 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
43333 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
43334 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
43335 + void (* const fl_release_private)(struct file_lock *);
43336 + void (* const fl_break)(struct file_lock *);
43337 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
43338 + int (* const fl_change)(struct file_lock **, int);
43339 };
43340
43341 struct lock_manager {
43342 @@ -1439,7 +1444,7 @@ struct fiemap_extent_info {
43343 unsigned int fi_flags; /* Flags as passed from user */
43344 unsigned int fi_extents_mapped; /* Number of mapped extents */
43345 unsigned int fi_extents_max; /* Size of fiemap_extent array */
43346 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
43347 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
43348 * array */
43349 };
43350 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
43351 @@ -1556,30 +1561,30 @@ extern ssize_t vfs_writev(struct file *,
43352 unsigned long, loff_t *);
43353
43354 struct super_operations {
43355 - struct inode *(*alloc_inode)(struct super_block *sb);
43356 - void (*destroy_inode)(struct inode *);
43357 + struct inode *(* const alloc_inode)(struct super_block *sb);
43358 + void (* const destroy_inode)(struct inode *);
43359
43360 - void (*dirty_inode) (struct inode *);
43361 - int (*write_inode) (struct inode *, struct writeback_control *wbc);
43362 - void (*drop_inode) (struct inode *);
43363 - void (*delete_inode) (struct inode *);
43364 - void (*put_super) (struct super_block *);
43365 - void (*write_super) (struct super_block *);
43366 - int (*sync_fs)(struct super_block *sb, int wait);
43367 - int (*freeze_fs) (struct super_block *);
43368 - int (*unfreeze_fs) (struct super_block *);
43369 - int (*statfs) (struct dentry *, struct kstatfs *);
43370 - int (*remount_fs) (struct super_block *, int *, char *);
43371 - void (*clear_inode) (struct inode *);
43372 - void (*umount_begin) (struct super_block *);
43373 + void (* const dirty_inode) (struct inode *);
43374 + int (* const write_inode) (struct inode *, struct writeback_control *wbc);
43375 + void (* const drop_inode) (struct inode *);
43376 + void (* const delete_inode) (struct inode *);
43377 + void (* const put_super) (struct super_block *);
43378 + void (* const write_super) (struct super_block *);
43379 + int (* const sync_fs)(struct super_block *sb, int wait);
43380 + int (* const freeze_fs) (struct super_block *);
43381 + int (* const unfreeze_fs) (struct super_block *);
43382 + int (* const statfs) (struct dentry *, struct kstatfs *);
43383 + int (* const remount_fs) (struct super_block *, int *, char *);
43384 + void (* const clear_inode) (struct inode *);
43385 + void (* const umount_begin) (struct super_block *);
43386
43387 - int (*show_options)(struct seq_file *, struct vfsmount *);
43388 - int (*show_stats)(struct seq_file *, struct vfsmount *);
43389 + int (* const show_options)(struct seq_file *, struct vfsmount *);
43390 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
43391 #ifdef CONFIG_QUOTA
43392 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
43393 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
43394 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
43395 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
43396 #endif
43397 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
43398 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
43399 };
43400
43401 /*
43402 diff -urNp linux-2.6.34.1/include/linux/fs_struct.h linux-2.6.34.1/include/linux/fs_struct.h
43403 --- linux-2.6.34.1/include/linux/fs_struct.h 2010-07-05 14:24:10.000000000 -0400
43404 +++ linux-2.6.34.1/include/linux/fs_struct.h 2010-07-07 09:04:56.000000000 -0400
43405 @@ -4,7 +4,7 @@
43406 #include <linux/path.h>
43407
43408 struct fs_struct {
43409 - int users;
43410 + atomic_t users;
43411 rwlock_t lock;
43412 int umask;
43413 int in_exec;
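Note: the fs_struct hunk above replaces the plain int reference count with an atomic_t. The standalone userspace analogue below shows the update-loss problem a plain int counter has under concurrency, with a C11 atomic standing in for atomic_t/atomic_inc(); the counter names are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_users;                 /* like the old "int users"     */
static atomic_int atomic_users;         /* stand-in for atomic_t users  */

static void *worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 1000000; i++) {
                plain_users++;                          /* racy read-modify-write */
                atomic_fetch_add(&atomic_users, 1);     /* like atomic_inc()      */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("plain=%d atomic=%d (expected 2000000)\n",
               plain_users, atomic_load(&atomic_users));
        return 0;
}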
43414 diff -urNp linux-2.6.34.1/include/linux/genhd.h linux-2.6.34.1/include/linux/genhd.h
43415 --- linux-2.6.34.1/include/linux/genhd.h 2010-07-05 14:24:10.000000000 -0400
43416 +++ linux-2.6.34.1/include/linux/genhd.h 2010-07-07 09:04:56.000000000 -0400
43417 @@ -162,7 +162,7 @@ struct gendisk {
43418
43419 struct timer_rand_state *random;
43420
43421 - atomic_t sync_io; /* RAID */
43422 + atomic_unchecked_t sync_io; /* RAID */
43423 struct work_struct async_notify;
43424 #ifdef CONFIG_BLK_DEV_INTEGRITY
43425 struct blk_integrity *integrity;
43426 diff -urNp linux-2.6.34.1/include/linux/gracl.h linux-2.6.34.1/include/linux/gracl.h
43427 --- linux-2.6.34.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
43428 +++ linux-2.6.34.1/include/linux/gracl.h 2010-07-07 09:04:56.000000000 -0400
43429 @@ -0,0 +1,310 @@
43430 +#ifndef GR_ACL_H
43431 +#define GR_ACL_H
43432 +
43433 +#include <linux/grdefs.h>
43434 +#include <linux/resource.h>
43435 +#include <linux/capability.h>
43436 +#include <linux/dcache.h>
43437 +#include <asm/resource.h>
43438 +
43439 +/* Major status information */
43440 +
43441 +#define GR_VERSION "grsecurity 2.2.0"
43442 +#define GRSECURITY_VERSION 0x2200
43443 +
43444 +enum {
43445 + GR_SHUTDOWN = 0,
43446 + GR_ENABLE = 1,
43447 + GR_SPROLE = 2,
43448 + GR_RELOAD = 3,
43449 + GR_SEGVMOD = 4,
43450 + GR_STATUS = 5,
43451 + GR_UNSPROLE = 6,
43452 + GR_PASSSET = 7,
43453 + GR_SPROLEPAM = 8,
43454 +};
43455 +
43456 +/* Password setup definitions
43457 + * kernel/grhash.c */
43458 +enum {
43459 + GR_PW_LEN = 128,
43460 + GR_SALT_LEN = 16,
43461 + GR_SHA_LEN = 32,
43462 +};
43463 +
43464 +enum {
43465 + GR_SPROLE_LEN = 64,
43466 +};
43467 +
43468 +#define GR_NLIMITS 32
43469 +
43470 +/* Begin Data Structures */
43471 +
43472 +struct sprole_pw {
43473 + unsigned char *rolename;
43474 + unsigned char salt[GR_SALT_LEN];
43475 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
43476 +};
43477 +
43478 +struct name_entry {
43479 + __u32 key;
43480 + ino_t inode;
43481 + dev_t device;
43482 + char *name;
43483 + __u16 len;
43484 + __u8 deleted;
43485 + struct name_entry *prev;
43486 + struct name_entry *next;
43487 +};
43488 +
43489 +struct inodev_entry {
43490 + struct name_entry *nentry;
43491 + struct inodev_entry *prev;
43492 + struct inodev_entry *next;
43493 +};
43494 +
43495 +struct acl_role_db {
43496 + struct acl_role_label **r_hash;
43497 + __u32 r_size;
43498 +};
43499 +
43500 +struct inodev_db {
43501 + struct inodev_entry **i_hash;
43502 + __u32 i_size;
43503 +};
43504 +
43505 +struct name_db {
43506 + struct name_entry **n_hash;
43507 + __u32 n_size;
43508 +};
43509 +
43510 +struct crash_uid {
43511 + uid_t uid;
43512 + unsigned long expires;
43513 +};
43514 +
43515 +struct gr_hash_struct {
43516 + void **table;
43517 + void **nametable;
43518 + void *first;
43519 + __u32 table_size;
43520 + __u32 used_size;
43521 + int type;
43522 +};
43523 +
43524 +/* Userspace Grsecurity ACL data structures */
43525 +
43526 +struct acl_subject_label {
43527 + char *filename;
43528 + ino_t inode;
43529 + dev_t device;
43530 + __u32 mode;
43531 + kernel_cap_t cap_mask;
43532 + kernel_cap_t cap_lower;
43533 + kernel_cap_t cap_invert_audit;
43534 +
43535 + struct rlimit res[GR_NLIMITS];
43536 + __u32 resmask;
43537 +
43538 + __u8 user_trans_type;
43539 + __u8 group_trans_type;
43540 + uid_t *user_transitions;
43541 + gid_t *group_transitions;
43542 + __u16 user_trans_num;
43543 + __u16 group_trans_num;
43544 +
43545 + __u32 ip_proto[8];
43546 + __u32 ip_type;
43547 + struct acl_ip_label **ips;
43548 + __u32 ip_num;
43549 + __u32 inaddr_any_override;
43550 +
43551 + __u32 crashes;
43552 + unsigned long expires;
43553 +
43554 + struct acl_subject_label *parent_subject;
43555 + struct gr_hash_struct *hash;
43556 + struct acl_subject_label *prev;
43557 + struct acl_subject_label *next;
43558 +
43559 + struct acl_object_label **obj_hash;
43560 + __u32 obj_hash_size;
43561 + __u16 pax_flags;
43562 +};
43563 +
43564 +struct role_allowed_ip {
43565 + __u32 addr;
43566 + __u32 netmask;
43567 +
43568 + struct role_allowed_ip *prev;
43569 + struct role_allowed_ip *next;
43570 +};
43571 +
43572 +struct role_transition {
43573 + char *rolename;
43574 +
43575 + struct role_transition *prev;
43576 + struct role_transition *next;
43577 +};
43578 +
43579 +struct acl_role_label {
43580 + char *rolename;
43581 + uid_t uidgid;
43582 + __u16 roletype;
43583 +
43584 + __u16 auth_attempts;
43585 + unsigned long expires;
43586 +
43587 + struct acl_subject_label *root_label;
43588 + struct gr_hash_struct *hash;
43589 +
43590 + struct acl_role_label *prev;
43591 + struct acl_role_label *next;
43592 +
43593 + struct role_transition *transitions;
43594 + struct role_allowed_ip *allowed_ips;
43595 + uid_t *domain_children;
43596 + __u16 domain_child_num;
43597 +
43598 + struct acl_subject_label **subj_hash;
43599 + __u32 subj_hash_size;
43600 +};
43601 +
43602 +struct user_acl_role_db {
43603 + struct acl_role_label **r_table;
43604 + __u32 num_pointers; /* Number of allocations to track */
43605 + __u32 num_roles; /* Number of roles */
43606 + __u32 num_domain_children; /* Number of domain children */
43607 + __u32 num_subjects; /* Number of subjects */
43608 + __u32 num_objects; /* Number of objects */
43609 +};
43610 +
43611 +struct acl_object_label {
43612 + char *filename;
43613 + ino_t inode;
43614 + dev_t device;
43615 + __u32 mode;
43616 +
43617 + struct acl_subject_label *nested;
43618 + struct acl_object_label *globbed;
43619 +
43620 + /* next two structures not used */
43621 +
43622 + struct acl_object_label *prev;
43623 + struct acl_object_label *next;
43624 +};
43625 +
43626 +struct acl_ip_label {
43627 + char *iface;
43628 + __u32 addr;
43629 + __u32 netmask;
43630 + __u16 low, high;
43631 + __u8 mode;
43632 + __u32 type;
43633 + __u32 proto[8];
43634 +
43635 + /* next two structures not used */
43636 +
43637 + struct acl_ip_label *prev;
43638 + struct acl_ip_label *next;
43639 +};
43640 +
43641 +struct gr_arg {
43642 + struct user_acl_role_db role_db;
43643 + unsigned char pw[GR_PW_LEN];
43644 + unsigned char salt[GR_SALT_LEN];
43645 + unsigned char sum[GR_SHA_LEN];
43646 + unsigned char sp_role[GR_SPROLE_LEN];
43647 + struct sprole_pw *sprole_pws;
43648 + dev_t segv_device;
43649 + ino_t segv_inode;
43650 + uid_t segv_uid;
43651 + __u16 num_sprole_pws;
43652 + __u16 mode;
43653 +};
43654 +
43655 +struct gr_arg_wrapper {
43656 + struct gr_arg *arg;
43657 + __u32 version;
43658 + __u32 size;
43659 +};
43660 +
43661 +struct subject_map {
43662 + struct acl_subject_label *user;
43663 + struct acl_subject_label *kernel;
43664 + struct subject_map *prev;
43665 + struct subject_map *next;
43666 +};
43667 +
43668 +struct acl_subj_map_db {
43669 + struct subject_map **s_hash;
43670 + __u32 s_size;
43671 +};
43672 +
43673 +/* End Data Structures Section */
43674 +
43675 +/* Hash functions generated by empirical testing by Brad Spengler
43676 + Makes good use of the low bits of the inode. Generally 0-1 times
43677 + in loop for successful match. 0-3 for unsuccessful match.
43678 + Shift/add algorithm with modulus of table size and an XOR*/
43679 +
43680 +static __inline__ unsigned int
43681 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
43682 +{
43683 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
43684 +}
43685 +
43686 + static __inline__ unsigned int
43686 +static __inline__ unsigned int
43687 +shash(const struct acl_subject_label *userp, const unsigned int sz)
43688 +{
43689 + return ((const unsigned long)userp % sz);
43690 +}
43691 +
43692 +static __inline__ unsigned int
43693 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
43694 +{
43695 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
43696 +}
43697 +
43698 +static __inline__ unsigned int
43699 +nhash(const char *name, const __u16 len, const unsigned int sz)
43700 +{
43701 + return full_name_hash((const unsigned char *)name, len) % sz;
43702 +}
43703 +
43704 +#define FOR_EACH_ROLE_START(role) \
43705 + role = role_list; \
43706 + while (role) {
43707 +
43708 +#define FOR_EACH_ROLE_END(role) \
43709 + role = role->prev; \
43710 + }
43711 +
43712 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
43713 + subj = NULL; \
43714 + iter = 0; \
43715 + while (iter < role->subj_hash_size) { \
43716 + if (subj == NULL) \
43717 + subj = role->subj_hash[iter]; \
43718 + if (subj == NULL) { \
43719 + iter++; \
43720 + continue; \
43721 + }
43722 +
43723 +#define FOR_EACH_SUBJECT_END(subj,iter) \
43724 + subj = subj->next; \
43725 + if (subj == NULL) \
43726 + iter++; \
43727 + }
43728 +
43729 +
43730 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
43731 + subj = role->hash->first; \
43732 + while (subj != NULL) {
43733 +
43734 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
43735 + subj = subj->next; \
43736 + }
43737 +
43738 +#endif
43739 +
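Note: the comment above the hash helpers explains the design: a shift/add scheme XORed with the key and reduced modulo the table size, tuned so lookups usually touch at most a few chain entries. The standalone sketch below reproduces rhash() and fhash() with plain C types to show how sample uids and inode/device pairs spread over a table; the 127-slot table size and the sample values are invented:

#include <stdio.h>

static unsigned int rhash(unsigned int uid, unsigned short type, unsigned int sz)
{
        return ((((uid + type) << (16 + type)) ^ uid) % sz);
}

static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        const unsigned int sz = 127;    /* hypothetical r_size / i_size */
        unsigned int uid;

        for (uid = 1000; uid < 1005; uid++)
                printf("rhash(uid=%u, type=1) -> bucket %u\n", uid, rhash(uid, 1, sz));
        printf("fhash(ino=42, dev=2049)  -> bucket %u\n", fhash(42, 2049, sz));
        return 0;
}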
43740 diff -urNp linux-2.6.34.1/include/linux/gralloc.h linux-2.6.34.1/include/linux/gralloc.h
43741 --- linux-2.6.34.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
43742 +++ linux-2.6.34.1/include/linux/gralloc.h 2010-07-07 09:04:56.000000000 -0400
43743 @@ -0,0 +1,9 @@
43744 +#ifndef __GRALLOC_H
43745 +#define __GRALLOC_H
43746 +
43747 +void acl_free_all(void);
43748 +int acl_alloc_stack_init(unsigned long size);
43749 +void *acl_alloc(unsigned long len);
43750 +void *acl_alloc_num(unsigned long num, unsigned long len);
43751 +
43752 +#endif
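Note: gralloc.h only declares the interface; the implementation lives elsewhere in the patch. As a rough mental model (an assumption about its shape, not a copy of the real code), it behaves like a one-shot arena: size it once, hand out chunks, release everything in one call. A self-contained userspace sketch of that model:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *acl_slab;
static unsigned long acl_size, acl_used;

int acl_alloc_stack_init(unsigned long size)
{
        acl_slab = calloc(1, size);
        acl_size = size;
        acl_used = 0;
        return acl_slab != NULL;
}

void *acl_alloc(unsigned long len)
{
        void *p;

        if (!acl_slab || acl_used + len > acl_size)
                return NULL;
        p = acl_slab + acl_used;        /* bump allocation, no per-object free */
        acl_used += len;
        return p;
}

void *acl_alloc_num(unsigned long num, unsigned long len)
{
        return acl_alloc(num * len);    /* no overflow check in this sketch */
}

void acl_free_all(void)
{
        free(acl_slab);
        acl_slab = NULL;
        acl_size = acl_used = 0;
}

int main(void)
{
        char *name;

        if (!acl_alloc_stack_init(4096))
                return 1;
        name = acl_alloc(16);
        if (name) {
                strcpy(name, "role_admin");
                printf("allocated: %s\n", name);
        }
        acl_free_all();
        return 0;
}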
43753 diff -urNp linux-2.6.34.1/include/linux/grdefs.h linux-2.6.34.1/include/linux/grdefs.h
43754 --- linux-2.6.34.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
43755 +++ linux-2.6.34.1/include/linux/grdefs.h 2010-07-07 09:04:56.000000000 -0400
43756 @@ -0,0 +1,136 @@
43757 +#ifndef GRDEFS_H
43758 +#define GRDEFS_H
43759 +
43760 +/* Begin grsecurity status declarations */
43761 +
43762 +enum {
43763 + GR_READY = 0x01,
43764 + GR_STATUS_INIT = 0x00 // disabled state
43765 +};
43766 +
43767 +/* Begin ACL declarations */
43768 +
43769 +/* Role flags */
43770 +
43771 +enum {
43772 + GR_ROLE_USER = 0x0001,
43773 + GR_ROLE_GROUP = 0x0002,
43774 + GR_ROLE_DEFAULT = 0x0004,
43775 + GR_ROLE_SPECIAL = 0x0008,
43776 + GR_ROLE_AUTH = 0x0010,
43777 + GR_ROLE_NOPW = 0x0020,
43778 + GR_ROLE_GOD = 0x0040,
43779 + GR_ROLE_LEARN = 0x0080,
43780 + GR_ROLE_TPE = 0x0100,
43781 + GR_ROLE_DOMAIN = 0x0200,
43782 + GR_ROLE_PAM = 0x0400
43783 +};
43784 +
43785 +/* ACL Subject and Object mode flags */
43786 +enum {
43787 + GR_DELETED = 0x80000000
43788 +};
43789 +
43790 +/* ACL Object-only mode flags */
43791 +enum {
43792 + GR_READ = 0x00000001,
43793 + GR_APPEND = 0x00000002,
43794 + GR_WRITE = 0x00000004,
43795 + GR_EXEC = 0x00000008,
43796 + GR_FIND = 0x00000010,
43797 + GR_INHERIT = 0x00000020,
43798 + GR_SETID = 0x00000040,
43799 + GR_CREATE = 0x00000080,
43800 + GR_DELETE = 0x00000100,
43801 + GR_LINK = 0x00000200,
43802 + GR_AUDIT_READ = 0x00000400,
43803 + GR_AUDIT_APPEND = 0x00000800,
43804 + GR_AUDIT_WRITE = 0x00001000,
43805 + GR_AUDIT_EXEC = 0x00002000,
43806 + GR_AUDIT_FIND = 0x00004000,
43807 + GR_AUDIT_INHERIT= 0x00008000,
43808 + GR_AUDIT_SETID = 0x00010000,
43809 + GR_AUDIT_CREATE = 0x00020000,
43810 + GR_AUDIT_DELETE = 0x00040000,
43811 + GR_AUDIT_LINK = 0x00080000,
43812 + GR_PTRACERD = 0x00100000,
43813 + GR_NOPTRACE = 0x00200000,
43814 + GR_SUPPRESS = 0x00400000,
43815 + GR_NOLEARN = 0x00800000
43816 +};
43817 +
43818 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
43819 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
43820 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
43821 +
43822 +/* ACL subject-only mode flags */
43823 +enum {
43824 + GR_KILL = 0x00000001,
43825 + GR_VIEW = 0x00000002,
43826 + GR_PROTECTED = 0x00000004,
43827 + GR_LEARN = 0x00000008,
43828 + GR_OVERRIDE = 0x00000010,
43829 + /* just a placeholder, this mode is only used in userspace */
43830 + GR_DUMMY = 0x00000020,
43831 + GR_PROTSHM = 0x00000040,
43832 + GR_KILLPROC = 0x00000080,
43833 + GR_KILLIPPROC = 0x00000100,
43834 + /* just a placeholder, this mode is only used in userspace */
43835 + GR_NOTROJAN = 0x00000200,
43836 + GR_PROTPROCFD = 0x00000400,
43837 + GR_PROCACCT = 0x00000800,
43838 + GR_RELAXPTRACE = 0x00001000,
43839 + GR_NESTED = 0x00002000,
43840 + GR_INHERITLEARN = 0x00004000,
43841 + GR_PROCFIND = 0x00008000,
43842 + GR_POVERRIDE = 0x00010000,
43843 + GR_KERNELAUTH = 0x00020000,
43844 +};
43845 +
43846 +enum {
43847 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
43848 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
43849 + GR_PAX_ENABLE_MPROTECT = 0x0004,
43850 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
43851 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
43852 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
43853 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
43854 + GR_PAX_DISABLE_MPROTECT = 0x0400,
43855 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
43856 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
43857 +};
43858 +
43859 +enum {
43860 + GR_ID_USER = 0x01,
43861 + GR_ID_GROUP = 0x02,
43862 +};
43863 +
43864 +enum {
43865 + GR_ID_ALLOW = 0x01,
43866 + GR_ID_DENY = 0x02,
43867 +};
43868 +
43869 +#define GR_CRASH_RES 31
43870 +#define GR_UIDTABLE_MAX 500
43871 +
43872 +/* begin resource learning section */
43873 +enum {
43874 + GR_RLIM_CPU_BUMP = 60,
43875 + GR_RLIM_FSIZE_BUMP = 50000,
43876 + GR_RLIM_DATA_BUMP = 10000,
43877 + GR_RLIM_STACK_BUMP = 1000,
43878 + GR_RLIM_CORE_BUMP = 10000,
43879 + GR_RLIM_RSS_BUMP = 500000,
43880 + GR_RLIM_NPROC_BUMP = 1,
43881 + GR_RLIM_NOFILE_BUMP = 5,
43882 + GR_RLIM_MEMLOCK_BUMP = 50000,
43883 + GR_RLIM_AS_BUMP = 500000,
43884 + GR_RLIM_LOCKS_BUMP = 2,
43885 + GR_RLIM_SIGPENDING_BUMP = 5,
43886 + GR_RLIM_MSGQUEUE_BUMP = 10000,
43887 + GR_RLIM_NICE_BUMP = 1,
43888 + GR_RLIM_RTPRIO_BUMP = 1,
43889 + GR_RLIM_RTTIME_BUMP = 1000000
43890 +};
43891 +
43892 +#endif
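Note: the GR_PAX_ENABLE_*/GR_PAX_DISABLE_* pairs above give each subject a three-state override per PaX feature: force on, force off, or fall through to the kernel default. A small sketch of that decoding; the resolve helper is illustrative, not the kernel's:

#include <stdio.h>

#define GR_PAX_ENABLE_MPROTECT  0x0004
#define GR_PAX_DISABLE_MPROTECT 0x0400

static const char *resolve_mprotect(unsigned short pax_flags)
{
        if (pax_flags & GR_PAX_DISABLE_MPROTECT)
                return "forced off";
        if (pax_flags & GR_PAX_ENABLE_MPROTECT)
                return "forced on";
        return "kernel default";
}

int main(void)
{
        printf("subject A: %s\n", resolve_mprotect(GR_PAX_ENABLE_MPROTECT));
        printf("subject B: %s\n", resolve_mprotect(GR_PAX_DISABLE_MPROTECT));
        printf("subject C: %s\n", resolve_mprotect(0));
        return 0;
}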
43893 diff -urNp linux-2.6.34.1/include/linux/grinternal.h linux-2.6.34.1/include/linux/grinternal.h
43894 --- linux-2.6.34.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
43895 +++ linux-2.6.34.1/include/linux/grinternal.h 2010-07-07 09:04:56.000000000 -0400
43896 @@ -0,0 +1,211 @@
43897 +#ifndef __GRINTERNAL_H
43898 +#define __GRINTERNAL_H
43899 +
43900 +#ifdef CONFIG_GRKERNSEC
43901 +
43902 +#include <linux/fs.h>
43903 +#include <linux/mnt_namespace.h>
43904 +#include <linux/nsproxy.h>
43905 +#include <linux/gracl.h>
43906 +#include <linux/grdefs.h>
43907 +#include <linux/grmsg.h>
43908 +
43909 +void gr_add_learn_entry(const char *fmt, ...)
43910 + __attribute__ ((format (printf, 1, 2)));
43911 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
43912 + const struct vfsmount *mnt);
43913 +__u32 gr_check_create(const struct dentry *new_dentry,
43914 + const struct dentry *parent,
43915 + const struct vfsmount *mnt, const __u32 mode);
43916 +int gr_check_protected_task(const struct task_struct *task);
43917 +__u32 to_gr_audit(const __u32 reqmode);
43918 +int gr_set_acls(const int type);
43919 +
43920 +int gr_acl_is_enabled(void);
43921 +char gr_roletype_to_char(void);
43922 +
43923 +void gr_handle_alertkill(struct task_struct *task);
43924 +char *gr_to_filename(const struct dentry *dentry,
43925 + const struct vfsmount *mnt);
43926 +char *gr_to_filename1(const struct dentry *dentry,
43927 + const struct vfsmount *mnt);
43928 +char *gr_to_filename2(const struct dentry *dentry,
43929 + const struct vfsmount *mnt);
43930 +char *gr_to_filename3(const struct dentry *dentry,
43931 + const struct vfsmount *mnt);
43932 +
43933 +extern int grsec_enable_harden_ptrace;
43934 +extern int grsec_enable_link;
43935 +extern int grsec_enable_fifo;
43936 +extern int grsec_enable_execve;
43937 +extern int grsec_enable_shm;
43938 +extern int grsec_enable_execlog;
43939 +extern int grsec_enable_signal;
43940 +extern int grsec_enable_audit_ptrace;
43941 +extern int grsec_enable_forkfail;
43942 +extern int grsec_enable_time;
43943 +extern int grsec_enable_rofs;
43944 +extern int grsec_enable_chroot_shmat;
43945 +extern int grsec_enable_chroot_findtask;
43946 +extern int grsec_enable_chroot_mount;
43947 +extern int grsec_enable_chroot_double;
43948 +extern int grsec_enable_chroot_pivot;
43949 +extern int grsec_enable_chroot_chdir;
43950 +extern int grsec_enable_chroot_chmod;
43951 +extern int grsec_enable_chroot_mknod;
43952 +extern int grsec_enable_chroot_fchdir;
43953 +extern int grsec_enable_chroot_nice;
43954 +extern int grsec_enable_chroot_execlog;
43955 +extern int grsec_enable_chroot_caps;
43956 +extern int grsec_enable_chroot_sysctl;
43957 +extern int grsec_enable_chroot_unix;
43958 +extern int grsec_enable_tpe;
43959 +extern int grsec_tpe_gid;
43960 +extern int grsec_enable_tpe_all;
43961 +extern int grsec_enable_sidcaps;
43962 +extern int grsec_enable_socket_all;
43963 +extern int grsec_socket_all_gid;
43964 +extern int grsec_enable_socket_client;
43965 +extern int grsec_socket_client_gid;
43966 +extern int grsec_enable_socket_server;
43967 +extern int grsec_socket_server_gid;
43968 +extern int grsec_audit_gid;
43969 +extern int grsec_enable_group;
43970 +extern int grsec_enable_audit_textrel;
43971 +extern int grsec_enable_mount;
43972 +extern int grsec_enable_chdir;
43973 +extern int grsec_resource_logging;
43974 +extern int grsec_enable_blackhole;
43975 +extern int grsec_lastack_retries;
43976 +extern int grsec_lock;
43977 +
43978 +extern spinlock_t grsec_alert_lock;
43979 +extern unsigned long grsec_alert_wtime;
43980 +extern unsigned long grsec_alert_fyet;
43981 +
43982 +extern spinlock_t grsec_audit_lock;
43983 +
43984 +extern rwlock_t grsec_exec_file_lock;
43985 +
43986 +#define gr_task_fullpath(tsk) (tsk->exec_file ? \
43987 + gr_to_filename2(tsk->exec_file->f_path.dentry, \
43988 + tsk->exec_file->f_vfsmnt) : "/")
43989 +
43990 +#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
43991 + gr_to_filename3(tsk->parent->exec_file->f_path.dentry, \
43992 + tsk->parent->exec_file->f_vfsmnt) : "/")
43993 +
43994 +#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
43995 + gr_to_filename(tsk->exec_file->f_path.dentry, \
43996 + tsk->exec_file->f_vfsmnt) : "/")
43997 +
43998 +#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
43999 + gr_to_filename1(tsk->parent->exec_file->f_path.dentry, \
44000 + tsk->parent->exec_file->f_vfsmnt) : "/")
44001 +
44002 +#define proc_is_chrooted(tsk_a) (tsk_a->gr_is_chrooted)
44003 +
44004 +#define have_same_root(tsk_a,tsk_b) (tsk_a->gr_chroot_dentry == tsk_b->gr_chroot_dentry)
44005 +
44006 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), task->comm, \
44007 + task->pid, cred->uid, \
44008 + cred->euid, cred->gid, cred->egid, \
44009 + gr_parent_task_fullpath(task), \
44010 + task->parent->comm, task->parent->pid, \
44011 + pcred->uid, pcred->euid, \
44012 + pcred->gid, pcred->egid
44013 +
44014 +#define GR_CHROOT_CAPS {{ \
44015 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
44016 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
44017 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
44018 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
44019 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
44020 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
44021 +
44022 +#define security_learn(normal_msg,args...) \
44023 +({ \
44024 + read_lock(&grsec_exec_file_lock); \
44025 + gr_add_learn_entry(normal_msg "\n", ## args); \
44026 + read_unlock(&grsec_exec_file_lock); \
44027 +})
44028 +
44029 +enum {
44030 + GR_DO_AUDIT,
44031 + GR_DONT_AUDIT,
44032 + GR_DONT_AUDIT_GOOD
44033 +};
44034 +
44035 +enum {
44036 + GR_TTYSNIFF,
44037 + GR_RBAC,
44038 + GR_RBAC_STR,
44039 + GR_STR_RBAC,
44040 + GR_RBAC_MODE2,
44041 + GR_RBAC_MODE3,
44042 + GR_FILENAME,
44043 + GR_SYSCTL_HIDDEN,
44044 + GR_NOARGS,
44045 + GR_ONE_INT,
44046 + GR_ONE_INT_TWO_STR,
44047 + GR_ONE_STR,
44048 + GR_STR_INT,
44049 + GR_TWO_INT,
44050 + GR_THREE_INT,
44051 + GR_FIVE_INT_TWO_STR,
44052 + GR_TWO_STR,
44053 + GR_THREE_STR,
44054 + GR_FOUR_STR,
44055 + GR_STR_FILENAME,
44056 + GR_FILENAME_STR,
44057 + GR_FILENAME_TWO_INT,
44058 + GR_FILENAME_TWO_INT_STR,
44059 + GR_TEXTREL,
44060 + GR_PTRACE,
44061 + GR_RESOURCE,
44062 + GR_CAP,
44063 + GR_SIG,
44064 + GR_SIG2,
44065 + GR_CRASH1,
44066 + GR_CRASH2,
44067 + GR_PSACCT
44068 +};
44069 +
44070 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
44071 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
44072 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
44073 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
44074 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
44075 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
44076 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
44077 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
44078 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
44079 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
44080 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
44081 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
44082 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
44083 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
44084 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
44085 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
44086 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
44087 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
44088 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
44089 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
44090 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
44091 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
44092 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
44093 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
44094 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
44095 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
44096 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
44097 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
44098 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
44099 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
44100 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
44101 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
44102 +
44103 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
44104 +
44105 +#endif
44106 +
44107 +#endif
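Note: all of the gr_log_* macros above funnel into one variadic sink, gr_log_varargs(), with the argtypes enum telling it how to pull the arguments back off the list. The userspace sketch below mocks up that dispatch pattern for just two argument layouts; the format strings are simplified stand-ins, not the real grmsg.h definitions:

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };

static void gr_log_varargs_demo(const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        switch (argtypes) {
        case GR_ONE_INT:
                printf(msg, va_arg(ap, int));
                break;
        case GR_TWO_STR: {
                const char *a = va_arg(ap, const char *);
                const char *b = va_arg(ap, const char *);
                printf(msg, a, b);
                break;
        }
        }
        va_end(ap);
        putchar('\n');
}

#define gr_log_int(msg, num)        gr_log_varargs_demo(msg, GR_ONE_INT, num)
#define gr_log_str_str(msg, s1, s2) gr_log_varargs_demo(msg, GR_TWO_STR, s1, s2)

int main(void)
{
        gr_log_int("failed fork with errno %d by ", 11);
        gr_log_str_str("mount of %s to %s by ", "/dev/sdb1", "/mnt");
        return 0;
}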
44108 diff -urNp linux-2.6.34.1/include/linux/grmsg.h linux-2.6.34.1/include/linux/grmsg.h
44109 --- linux-2.6.34.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
44110 +++ linux-2.6.34.1/include/linux/grmsg.h 2010-07-07 09:04:56.000000000 -0400
44111 @@ -0,0 +1,108 @@
44112 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
44113 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
44114 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
44115 +#define GR_STOPMOD_MSG "denied modification of module state by "
44116 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
44117 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
44118 +#define GR_IOPERM_MSG "denied use of ioperm() by "
44119 +#define GR_IOPL_MSG "denied use of iopl() by "
44120 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
44121 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
44122 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
44123 +#define GR_KMEM_MSG "denied write of /dev/kmem by "
44124 +#define GR_PORT_OPEN_MSG "denied open of /dev/port by "
44125 +#define GR_MEM_WRITE_MSG "denied write of /dev/mem by "
44126 +#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by "
44127 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
44128 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
44129 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
44130 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
44131 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
44132 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
44133 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
44134 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
44135 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
44136 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
44137 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
44138 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
44139 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
44140 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
44141 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
44142 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
44143 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
44144 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
44145 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
44146 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
44147 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
44148 +#define GR_NPROC_MSG "denied overstep of process limit by "
44149 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
44150 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
44151 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
44152 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
44153 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
44154 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
44155 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
44156 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
44157 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
44158 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
44159 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
44160 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
44161 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
44162 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
44163 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
44164 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
44165 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
44166 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
44167 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
44168 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
44169 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
44170 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
44171 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
44172 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
44173 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
44174 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
44175 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
44176 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
44177 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
44178 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
44179 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
44180 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
44181 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
44182 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
44183 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
44184 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
44185 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
44186 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
44187 +#define GR_FAILFORK_MSG "failed fork with errno %d by "
44188 +#define GR_NICE_CHROOT_MSG "denied priority change by "
44189 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
44190 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
44191 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
44192 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
44193 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
44194 +#define GR_TIME_MSG "time set by "
44195 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
44196 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
44197 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
44198 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
44199 +#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by "
44200 +#define GR_BIND_MSG "denied bind() by "
44201 +#define GR_CONNECT_MSG "denied connect() by "
44202 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
44203 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
44204 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
44205 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
44206 +#define GR_CAP_ACL_MSG "use of %s denied for "
44207 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
44208 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
44209 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
44210 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
44211 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
44212 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
44213 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
44214 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
44215 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
44216 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
44217 +#define GR_NONROOT_MODLOAD_MSG "denied kernel module auto-load of %.64s by "
44218 +#define GR_VM86_MSG "denied use of vm86 by "
44219 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
44220 diff -urNp linux-2.6.34.1/include/linux/grsecurity.h linux-2.6.34.1/include/linux/grsecurity.h
44221 --- linux-2.6.34.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
44222 +++ linux-2.6.34.1/include/linux/grsecurity.h 2010-07-07 09:04:56.000000000 -0400
44223 @@ -0,0 +1,201 @@
44224 +#ifndef GR_SECURITY_H
44225 +#define GR_SECURITY_H
44226 +#include <linux/fs.h>
44227 +#include <linux/fs_struct.h>
44228 +#include <linux/binfmts.h>
44229 +#include <linux/gracl.h>
44230 +
44231 +/* notify of brain-dead configs */
44232 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
44233 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
44234 +#endif
44235 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
44236 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
44237 +#endif
44238 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
44239 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
44240 +#endif
44241 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
44242 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
44243 +#endif
44244 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
44245 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
44246 +#endif
44247 +
44248 +void gr_handle_brute_attach(struct task_struct *p);
44249 +void gr_handle_brute_check(void);
44250 +
44251 +char gr_roletype_to_char(void);
44252 +
44253 +int gr_check_user_change(int real, int effective, int fs);
44254 +int gr_check_group_change(int real, int effective, int fs);
44255 +
44256 +void gr_del_task_from_ip_table(struct task_struct *p);
44257 +
44258 +int gr_pid_is_chrooted(struct task_struct *p);
44259 +int gr_handle_chroot_nice(void);
44260 +int gr_handle_chroot_sysctl(const int op);
44261 +int gr_handle_chroot_setpriority(struct task_struct *p,
44262 + const int niceval);
44263 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
44264 +int gr_handle_chroot_chroot(const struct dentry *dentry,
44265 + const struct vfsmount *mnt);
44266 +int gr_handle_chroot_caps(struct path *path);
44267 +void gr_handle_chroot_chdir(struct path *path);
44268 +int gr_handle_chroot_chmod(const struct dentry *dentry,
44269 + const struct vfsmount *mnt, const int mode);
44270 +int gr_handle_chroot_mknod(const struct dentry *dentry,
44271 + const struct vfsmount *mnt, const int mode);
44272 +int gr_handle_chroot_mount(const struct dentry *dentry,
44273 + const struct vfsmount *mnt,
44274 + const char *dev_name);
44275 +int gr_handle_chroot_pivot(void);
44276 +int gr_handle_chroot_unix(const pid_t pid);
44277 +
44278 +int gr_handle_rawio(const struct inode *inode);
44279 +int gr_handle_nproc(void);
44280 +
44281 +void gr_handle_ioperm(void);
44282 +void gr_handle_iopl(void);
44283 +
44284 +int gr_tpe_allow(const struct file *file);
44285 +
44286 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
44287 +void gr_clear_chroot_entries(struct task_struct *task);
44288 +
44289 +void gr_log_forkfail(const int retval);
44290 +void gr_log_timechange(void);
44291 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
44292 +void gr_log_chdir(const struct dentry *dentry,
44293 + const struct vfsmount *mnt);
44294 +void gr_log_chroot_exec(const struct dentry *dentry,
44295 + const struct vfsmount *mnt);
44296 +void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
44297 +void gr_log_remount(const char *devname, const int retval);
44298 +void gr_log_unmount(const char *devname, const int retval);
44299 +void gr_log_mount(const char *from, const char *to, const int retval);
44300 +void gr_log_textrel(struct vm_area_struct *vma);
44301 +
44302 +int gr_handle_follow_link(const struct inode *parent,
44303 + const struct inode *inode,
44304 + const struct dentry *dentry,
44305 + const struct vfsmount *mnt);
44306 +int gr_handle_fifo(const struct dentry *dentry,
44307 + const struct vfsmount *mnt,
44308 + const struct dentry *dir, const int flag,
44309 + const int acc_mode);
44310 +int gr_handle_hardlink(const struct dentry *dentry,
44311 + const struct vfsmount *mnt,
44312 + struct inode *inode,
44313 + const int mode, const char *to);
44314 +
44315 +int gr_is_capable(const int cap);
44316 +int gr_is_capable_nolog(const int cap);
44317 +void gr_learn_resource(const struct task_struct *task, const int limit,
44318 + const unsigned long wanted, const int gt);
44319 +void gr_copy_label(struct task_struct *tsk);
44320 +void gr_handle_crash(struct task_struct *task, const int sig);
44321 +int gr_handle_signal(const struct task_struct *p, const int sig);
44322 +int gr_check_crash_uid(const uid_t uid);
44323 +int gr_check_protected_task(const struct task_struct *task);
44324 +int gr_acl_handle_mmap(const struct file *file,
44325 + const unsigned long prot);
44326 +int gr_acl_handle_mprotect(const struct file *file,
44327 + const unsigned long prot);
44328 +int gr_check_hidden_task(const struct task_struct *tsk);
44329 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
44330 + const struct vfsmount *mnt);
44331 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
44332 + const struct vfsmount *mnt);
44333 +__u32 gr_acl_handle_access(const struct dentry *dentry,
44334 + const struct vfsmount *mnt, const int fmode);
44335 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
44336 + const struct vfsmount *mnt, mode_t mode);
44337 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
44338 + const struct vfsmount *mnt, mode_t mode);
44339 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
44340 + const struct vfsmount *mnt);
44341 +int gr_handle_ptrace(struct task_struct *task, const long request);
44342 +int gr_handle_proc_ptrace(struct task_struct *task);
44343 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
44344 + const struct vfsmount *mnt);
44345 +int gr_check_crash_exec(const struct file *filp);
44346 +int gr_acl_is_enabled(void);
44347 +void gr_set_kernel_label(struct task_struct *task);
44348 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
44349 + const gid_t gid);
44350 +int gr_set_proc_label(const struct dentry *dentry,
44351 + const struct vfsmount *mnt,
44352 + const int unsafe_share);
44353 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
44354 + const struct vfsmount *mnt);
44355 +__u32 gr_acl_handle_open(const struct dentry *dentry,
44356 + const struct vfsmount *mnt, const int fmode);
44357 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
44358 + const struct dentry *p_dentry,
44359 + const struct vfsmount *p_mnt, const int fmode,
44360 + const int imode);
44361 +void gr_handle_create(const struct dentry *dentry,
44362 + const struct vfsmount *mnt);
44363 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
44364 + const struct dentry *parent_dentry,
44365 + const struct vfsmount *parent_mnt,
44366 + const int mode);
44367 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
44368 + const struct dentry *parent_dentry,
44369 + const struct vfsmount *parent_mnt);
44370 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
44371 + const struct vfsmount *mnt);
44372 +void gr_handle_delete(const ino_t ino, const dev_t dev);
44373 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
44374 + const struct vfsmount *mnt);
44375 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
44376 + const struct dentry *parent_dentry,
44377 + const struct vfsmount *parent_mnt,
44378 + const char *from);
44379 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
44380 + const struct dentry *parent_dentry,
44381 + const struct vfsmount *parent_mnt,
44382 + const struct dentry *old_dentry,
44383 + const struct vfsmount *old_mnt, const char *to);
44384 +int gr_acl_handle_rename(struct dentry *new_dentry,
44385 + struct dentry *parent_dentry,
44386 + const struct vfsmount *parent_mnt,
44387 + struct dentry *old_dentry,
44388 + struct inode *old_parent_inode,
44389 + struct vfsmount *old_mnt, const char *newname);
44390 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44391 + struct dentry *old_dentry,
44392 + struct dentry *new_dentry,
44393 + struct vfsmount *mnt, const __u8 replace);
44394 +__u32 gr_check_link(const struct dentry *new_dentry,
44395 + const struct dentry *parent_dentry,
44396 + const struct vfsmount *parent_mnt,
44397 + const struct dentry *old_dentry,
44398 + const struct vfsmount *old_mnt);
44399 +int gr_acl_handle_filldir(const struct file *file, const char *name,
44400 + const unsigned int namelen, const ino_t ino);
44401 +
44402 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
44403 + const struct vfsmount *mnt);
44404 +void gr_acl_handle_exit(void);
44405 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
44406 +int gr_acl_handle_procpidmem(const struct task_struct *task);
44407 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
44408 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
44409 +void gr_audit_ptrace(struct task_struct *task);
44410 +
44411 +#ifdef CONFIG_GRKERNSEC
44412 +void gr_log_nonroot_mod_load(const char *modname);
44413 +void gr_handle_vm86(void);
44414 +void gr_handle_mem_write(void);
44415 +void gr_handle_kmem_write(void);
44416 +void gr_handle_open_port(void);
44417 +int gr_handle_mem_mmap(const unsigned long offset,
44418 + struct vm_area_struct *vma);
44419 +
44420 +extern int grsec_enable_dmesg;
44421 +extern int grsec_disable_privio;
44422 +#endif
44423 +
44424 +#endif
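Note: the #error block at the top of grsecurity.h ("notify of brain-dead configs") is a compile-time consistency check: selecting an umbrella option without any of its backends stops the build with a readable message instead of producing a silently useless kernel. The same pattern in self-contained form, with placeholder macros rather than real kernel config symbols:

#include <stdio.h>

#define CONFIG_FEATURE_X 1
#define CONFIG_FEATURE_X_BACKEND_A 1
/* #define CONFIG_FEATURE_X_BACKEND_B 1 */

#if defined(CONFIG_FEATURE_X) && \
    !defined(CONFIG_FEATURE_X_BACKEND_A) && \
    !defined(CONFIG_FEATURE_X_BACKEND_B)
#error "CONFIG_FEATURE_X enabled, but no backend is enabled."
#endif

int main(void)
{
        puts("configuration is consistent");
        return 0;
}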
44425 diff -urNp linux-2.6.34.1/include/linux/grsock.h linux-2.6.34.1/include/linux/grsock.h
44426 --- linux-2.6.34.1/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
44427 +++ linux-2.6.34.1/include/linux/grsock.h 2010-07-07 09:04:56.000000000 -0400
44428 @@ -0,0 +1,19 @@
44429 +#ifndef __GRSOCK_H
44430 +#define __GRSOCK_H
44431 +
44432 +extern void gr_attach_curr_ip(const struct sock *sk);
44433 +extern int gr_handle_sock_all(const int family, const int type,
44434 + const int protocol);
44435 +extern int gr_handle_sock_server(const struct sockaddr *sck);
44436 +extern int gr_handle_sock_server_other(const struct sock *sck);
44437 +extern int gr_handle_sock_client(const struct sockaddr *sck);
44438 +extern int gr_search_connect(struct socket * sock,
44439 + struct sockaddr_in * addr);
44440 +extern int gr_search_bind(struct socket * sock,
44441 + struct sockaddr_in * addr);
44442 +extern int gr_search_listen(struct socket * sock);
44443 +extern int gr_search_accept(struct socket * sock);
44444 +extern int gr_search_socket(const int domain, const int type,
44445 + const int protocol);
44446 +
44447 +#endif
44448 diff -urNp linux-2.6.34.1/include/linux/hdpu_features.h linux-2.6.34.1/include/linux/hdpu_features.h
44449 --- linux-2.6.34.1/include/linux/hdpu_features.h 2010-07-05 14:24:10.000000000 -0400
44450 +++ linux-2.6.34.1/include/linux/hdpu_features.h 2010-07-07 09:04:56.000000000 -0400
44451 @@ -3,7 +3,7 @@
44452 struct cpustate_t {
44453 spinlock_t lock;
44454 int excl;
44455 - int open_count;
44456 + atomic_t open_count;
44457 unsigned char cached_val;
44458 int inited;
44459 unsigned long *set_addr;
44460 diff -urNp linux-2.6.34.1/include/linux/highmem.h linux-2.6.34.1/include/linux/highmem.h
44461 --- linux-2.6.34.1/include/linux/highmem.h 2010-07-05 14:24:10.000000000 -0400
44462 +++ linux-2.6.34.1/include/linux/highmem.h 2010-07-07 09:04:56.000000000 -0400
44463 @@ -143,6 +143,18 @@ static inline void clear_highpage(struct
44464 kunmap_atomic(kaddr, KM_USER0);
44465 }
44466
44467 +static inline void sanitize_highpage(struct page *page)
44468 +{
44469 + void *kaddr;
44470 + unsigned long flags;
44471 +
44472 + local_irq_save(flags);
44473 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
44474 + clear_page(kaddr);
44475 + kunmap_atomic(kaddr, KM_CLEARPAGE);
44476 + local_irq_restore(flags);
44477 +}
44478 +
44479 static inline void zero_user_segments(struct page *page,
44480 unsigned start1, unsigned end1,
44481 unsigned start2, unsigned end2)
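Note: sanitize_highpage(), added above, clears a page through the KM_CLEARPAGE atomic kmap slot with interrupts disabled; the callers are not part of this hunk, but the point of such a helper is to scrub page contents so they cannot linger and leak to the next user. A loose userspace analogue of the idea (scrub before release; a real userspace scrubber would need explicit_bzero() or similar so the compiler cannot drop the store):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static void sanitize_and_free(void *page)
{
        memset(page, 0, PAGE_SIZE);     /* like clear_page() on the kmapped page */
        free(page);
}

int main(void)
{
        char *page = malloc(PAGE_SIZE);

        if (!page)
                return 1;
        strcpy(page, "secret key material");
        sanitize_and_free(page);        /* contents wiped before release */
        puts("page sanitized and freed");
        return 0;
}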
44482 diff -urNp linux-2.6.34.1/include/linux/interrupt.h linux-2.6.34.1/include/linux/interrupt.h
44483 --- linux-2.6.34.1/include/linux/interrupt.h 2010-07-05 14:24:10.000000000 -0400
44484 +++ linux-2.6.34.1/include/linux/interrupt.h 2010-07-07 09:04:56.000000000 -0400
44485 @@ -357,7 +357,7 @@ enum
44486 /* map softirq index to softirq name. update 'softirq_to_name' in
44487 * kernel/softirq.c when adding a new softirq.
44488 */
44489 -extern char *softirq_to_name[NR_SOFTIRQS];
44490 +extern const char * const softirq_to_name[NR_SOFTIRQS];
44491
44492 /* softirq mask and active fields moved to irq_cpustat_t in
44493 * asm/hardirq.h to get better cache usage. KAO
44494 @@ -365,12 +365,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
44495
44496 struct softirq_action
44497 {
44498 - void (*action)(struct softirq_action *);
44499 + void (*action)(void);
44500 };
44501
44502 asmlinkage void do_softirq(void);
44503 asmlinkage void __do_softirq(void);
44504 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
44505 +extern void open_softirq(int nr, void (*action)(void));
44506 extern void softirq_init(void);
44507 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
44508 extern void raise_softirq_irqoff(unsigned int nr);
44509 diff -urNp linux-2.6.34.1/include/linux/jbd.h linux-2.6.34.1/include/linux/jbd.h
44510 --- linux-2.6.34.1/include/linux/jbd.h 2010-07-05 14:24:10.000000000 -0400
44511 +++ linux-2.6.34.1/include/linux/jbd.h 2010-07-07 09:04:56.000000000 -0400
44512 @@ -67,7 +67,7 @@ extern u8 journal_enable_debug;
44513 } \
44514 } while (0)
44515 #else
44516 -#define jbd_debug(f, a...) /**/
44517 +#define jbd_debug(f, a...) do {} while (0)
44518 #endif
44519
44520 static inline void *jbd_alloc(size_t size, gfp_t flags)
44521 diff -urNp linux-2.6.34.1/include/linux/jbd2.h linux-2.6.34.1/include/linux/jbd2.h
44522 --- linux-2.6.34.1/include/linux/jbd2.h 2010-07-05 14:24:10.000000000 -0400
44523 +++ linux-2.6.34.1/include/linux/jbd2.h 2010-07-07 09:04:56.000000000 -0400
44524 @@ -67,7 +67,7 @@ extern u8 jbd2_journal_enable_debug;
44525 } \
44526 } while (0)
44527 #else
44528 -#define jbd_debug(f, a...) /**/
44529 +#define jbd_debug(f, a...) do {} while (0)
44530 #endif
44531
44532 extern void *jbd2_alloc(size_t size, gfp_t flags);
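
Aside (not part of the patch): the jbd.h and jbd2.h hunks above replace the empty /**/ expansion of jbd_debug() with do {} while (0), the same change applied to DPRINTK/VPRINTK and the SCTP debug macros further down. The point of the idiom is that a disabled macro still behaves as exactly one statement. A small standalone illustration with invented DBG_* names:

#include <stdio.h>

/* A debug macro that expands to more than one statement breaks if/else
 * pairing when used without braces; wrapping the body in do { } while (0)
 * keeps it a single statement that still demands a trailing semicolon.
 * The disabled do {} while (0) form behaves the same way, so code that
 * compiles with debugging on also compiles with it off. */
#define DBG_BAD(msg)   printf("%s\n", msg); fflush(stdout)
#define DBG_GOOD(msg)  do { printf("%s\n", msg); fflush(stdout); } while (0)

int main(void)
{
    int quiet = 1;

    /* With DBG_BAD this would expand to
     *   if (!quiet) printf(...); fflush(stdout); else ...
     * which does not compile.  DBG_GOOD stays one statement. */
    if (!quiet)
        DBG_GOOD("debugging");
    else
        printf("quiet mode\n");

    return 0;
}
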
44533 diff -urNp linux-2.6.34.1/include/linux/kallsyms.h linux-2.6.34.1/include/linux/kallsyms.h
44534 --- linux-2.6.34.1/include/linux/kallsyms.h 2010-07-05 14:24:10.000000000 -0400
44535 +++ linux-2.6.34.1/include/linux/kallsyms.h 2010-07-07 09:04:56.000000000 -0400
44536 @@ -15,7 +15,8 @@
44537
44538 struct module;
44539
44540 -#ifdef CONFIG_KALLSYMS
44541 +#ifndef __INCLUDED_BY_HIDESYM
44542 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44543 /* Lookup the address for a symbol. Returns 0 if not found. */
44544 unsigned long kallsyms_lookup_name(const char *name);
44545
44546 @@ -92,6 +93,9 @@ static inline int lookup_symbol_attrs(un
44547 /* Stupid that this does nothing, but I didn't create this mess. */
44548 #define __print_symbol(fmt, addr)
44549 #endif /*CONFIG_KALLSYMS*/
44550 +#else /* when included by kallsyms.c, with HIDESYM enabled */
44551 +extern void __print_symbol(const char *fmt, unsigned long address);
44552 +#endif
44553
44554 /* This macro allows us to keep printk typechecking */
44555 static void __check_printsym_format(const char *fmt, ...)
44556 diff -urNp linux-2.6.34.1/include/linux/kgdb.h linux-2.6.34.1/include/linux/kgdb.h
44557 --- linux-2.6.34.1/include/linux/kgdb.h 2010-07-05 14:24:10.000000000 -0400
44558 +++ linux-2.6.34.1/include/linux/kgdb.h 2010-07-07 09:04:56.000000000 -0400
44559 @@ -250,20 +250,20 @@ struct kgdb_arch {
44560 */
44561 struct kgdb_io {
44562 const char *name;
44563 - int (*read_char) (void);
44564 - void (*write_char) (u8);
44565 - void (*flush) (void);
44566 - int (*init) (void);
44567 - void (*pre_exception) (void);
44568 - void (*post_exception) (void);
44569 + int (* const read_char) (void);
44570 + void (* const write_char) (u8);
44571 + void (* const flush) (void);
44572 + int (* const init) (void);
44573 + void (* const pre_exception) (void);
44574 + void (* const post_exception) (void);
44575 };
44576
44577 -extern struct kgdb_arch arch_kgdb_ops;
44578 +extern const struct kgdb_arch arch_kgdb_ops;
44579
44580 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
44581
44582 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
44583 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
44584 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
44585 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
44586
44587 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
44588 extern int kgdb_mem2hex(char *mem, char *buf, int count);
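
Aside (not part of the patch): the kgdb hunk shows the pattern used throughout this patch, where function-pointer members and the ops structures holding them are constified so initialized tables can live in read-only data. A minimal standalone sketch of the effect, using made-up io_ops names rather than kernel types:

#include <stdio.h>

/* An "ops" table with const function-pointer members: defining the
 * object itself const lets the toolchain place it in .rodata, and any
 * later attempt to overwrite a handler is rejected at compile time. */
struct io_ops {
    int  (* const read_char)(void);
    void (* const write_char)(char c);
};

static int  dummy_read(void)    { return -1; }
static void dummy_write(char c) { (void)c; }

static const struct io_ops dummy_ops = {
    .read_char  = dummy_read,
    .write_char = dummy_write,
};

int main(void)
{
    /* dummy_ops.write_char = dummy_write;   would not compile */
    dummy_ops.write_char('x');
    printf("read returned %d\n", dummy_ops.read_char());
    return 0;
}
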
44589 diff -urNp linux-2.6.34.1/include/linux/kvm_host.h linux-2.6.34.1/include/linux/kvm_host.h
44590 --- linux-2.6.34.1/include/linux/kvm_host.h 2010-07-05 14:24:10.000000000 -0400
44591 +++ linux-2.6.34.1/include/linux/kvm_host.h 2010-07-07 09:04:56.000000000 -0400
44592 @@ -237,7 +237,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
44593 void vcpu_load(struct kvm_vcpu *vcpu);
44594 void vcpu_put(struct kvm_vcpu *vcpu);
44595
44596 -int kvm_init(void *opaque, unsigned int vcpu_size,
44597 +int kvm_init(const void *opaque, unsigned int vcpu_size,
44598 struct module *module);
44599 void kvm_exit(void);
44600
44601 @@ -355,7 +355,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
44602 struct kvm_guest_debug *dbg);
44603 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
44604
44605 -int kvm_arch_init(void *opaque);
44606 +int kvm_arch_init(const void *opaque);
44607 void kvm_arch_exit(void);
44608
44609 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
44610 diff -urNp linux-2.6.34.1/include/linux/libata.h linux-2.6.34.1/include/linux/libata.h
44611 --- linux-2.6.34.1/include/linux/libata.h 2010-07-05 14:24:10.000000000 -0400
44612 +++ linux-2.6.34.1/include/linux/libata.h 2010-07-07 09:04:56.000000000 -0400
44613 @@ -64,11 +64,11 @@
44614 #ifdef ATA_VERBOSE_DEBUG
44615 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
44616 #else
44617 -#define VPRINTK(fmt, args...)
44618 +#define VPRINTK(fmt, args...) do {} while (0)
44619 #endif /* ATA_VERBOSE_DEBUG */
44620 #else
44621 -#define DPRINTK(fmt, args...)
44622 -#define VPRINTK(fmt, args...)
44623 +#define DPRINTK(fmt, args...) do {} while (0)
44624 +#define VPRINTK(fmt, args...) do {} while (0)
44625 #endif /* ATA_DEBUG */
44626
44627 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
44628 @@ -525,11 +525,11 @@ struct ata_ioports {
44629
44630 struct ata_host {
44631 spinlock_t lock;
44632 - struct device *dev;
44633 + struct device *dev;
44634 void __iomem * const *iomap;
44635 unsigned int n_ports;
44636 void *private_data;
44637 - struct ata_port_operations *ops;
44638 + const struct ata_port_operations *ops;
44639 unsigned long flags;
44640 #ifdef CONFIG_ATA_ACPI
44641 acpi_handle acpi_handle;
44642 @@ -711,7 +711,7 @@ struct ata_link {
44643
44644 struct ata_port {
44645 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
44646 - struct ata_port_operations *ops;
44647 + const struct ata_port_operations *ops;
44648 spinlock_t *lock;
44649 /* Flags owned by the EH context. Only EH should touch these once the
44650 port is active */
44651 @@ -894,7 +894,7 @@ struct ata_port_info {
44652 unsigned long pio_mask;
44653 unsigned long mwdma_mask;
44654 unsigned long udma_mask;
44655 - struct ata_port_operations *port_ops;
44656 + const struct ata_port_operations *port_ops;
44657 void *private_data;
44658 };
44659
44660 @@ -918,7 +918,7 @@ extern const unsigned long sata_deb_timi
44661 extern const unsigned long sata_deb_timing_hotplug[];
44662 extern const unsigned long sata_deb_timing_long[];
44663
44664 -extern struct ata_port_operations ata_dummy_port_ops;
44665 +extern const struct ata_port_operations ata_dummy_port_ops;
44666 extern const struct ata_port_info ata_dummy_port_info;
44667
44668 static inline const unsigned long *
44669 @@ -964,7 +964,7 @@ extern int ata_host_activate(struct ata_
44670 struct scsi_host_template *sht);
44671 extern void ata_host_detach(struct ata_host *host);
44672 extern void ata_host_init(struct ata_host *, struct device *,
44673 - unsigned long, struct ata_port_operations *);
44674 + unsigned long, const struct ata_port_operations *);
44675 extern int ata_scsi_detect(struct scsi_host_template *sht);
44676 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
44677 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
44678 diff -urNp linux-2.6.34.1/include/linux/lockd/bind.h linux-2.6.34.1/include/linux/lockd/bind.h
44679 --- linux-2.6.34.1/include/linux/lockd/bind.h 2010-07-05 14:24:10.000000000 -0400
44680 +++ linux-2.6.34.1/include/linux/lockd/bind.h 2010-07-07 09:04:56.000000000 -0400
44681 @@ -23,13 +23,13 @@ struct svc_rqst;
44682 * This is the set of functions for lockd->nfsd communication
44683 */
44684 struct nlmsvc_binding {
44685 - __be32 (*fopen)(struct svc_rqst *,
44686 + __be32 (* const fopen)(struct svc_rqst *,
44687 struct nfs_fh *,
44688 struct file **);
44689 - void (*fclose)(struct file *);
44690 + void (* const fclose)(struct file *);
44691 };
44692
44693 -extern struct nlmsvc_binding * nlmsvc_ops;
44694 +extern const struct nlmsvc_binding * nlmsvc_ops;
44695
44696 /*
44697 * Similar to nfs_client_initdata, but without the NFS-specific
44698 diff -urNp linux-2.6.34.1/include/linux/mm.h linux-2.6.34.1/include/linux/mm.h
44699 --- linux-2.6.34.1/include/linux/mm.h 2010-07-05 14:24:10.000000000 -0400
44700 +++ linux-2.6.34.1/include/linux/mm.h 2010-07-07 09:04:56.000000000 -0400
44701 @@ -103,7 +103,14 @@ extern unsigned int kobjsize(const void
44702
44703 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
44704 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
44705 +
44706 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
44707 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
44708 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
44709 +#else
44710 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
44711 +#endif
44712 +
44713 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
44714 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
44715
44716 @@ -1006,6 +1013,8 @@ struct shrinker {
44717 extern void register_shrinker(struct shrinker *);
44718 extern void unregister_shrinker(struct shrinker *);
44719
44720 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
44721 +
44722 int vma_wants_writenotify(struct vm_area_struct *vma);
44723
44724 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
44725 @@ -1282,6 +1291,7 @@ out:
44726 }
44727
44728 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
44729 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
44730
44731 extern unsigned long do_brk(unsigned long, unsigned long);
44732
44733 @@ -1336,6 +1346,10 @@ extern struct vm_area_struct * find_vma(
44734 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
44735 struct vm_area_struct **pprev);
44736
44737 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
44738 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
44739 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
44740 +
44741 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
44742 NULL if none. Assume start_addr < end_addr. */
44743 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
44744 @@ -1352,7 +1366,6 @@ static inline unsigned long vma_pages(st
44745 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
44746 }
44747
44748 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
44749 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
44750 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
44751 unsigned long pfn, unsigned long size, pgprot_t);
44752 @@ -1462,10 +1475,16 @@ extern int unpoison_memory(unsigned long
44753 extern int sysctl_memory_failure_early_kill;
44754 extern int sysctl_memory_failure_recovery;
44755 extern void shake_page(struct page *p, int access);
44756 -extern atomic_long_t mce_bad_pages;
44757 +extern atomic_long_unchecked_t mce_bad_pages;
44758 extern int soft_offline_page(struct page *page, int flags);
44759
44760 extern void dump_page(struct page *page);
44761
44762 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44763 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
44764 +#else
44765 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
44766 +#endif
44767 +
44768 #endif /* __KERNEL__ */
44769 #endif /* _LINUX_MM_H */
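
Aside (not part of the patch): the tail of the mm.h hunk adds track_exec_limit() behind CONFIG_ARCH_TRACK_EXEC_LIMIT with a static inline no-op fallback, so callers never need their own #ifdef. A tiny sketch of that compile-out stub pattern, with an invented CONFIG_TRACK_LIMIT name:

#include <stdio.h>

/* Stub pattern: callers always call track_limit(); when the feature is
 * configured out, the empty static inline disappears at compile time.
 * CONFIG_TRACK_LIMIT is a placeholder name for this sketch. */
#ifdef CONFIG_TRACK_LIMIT
void track_limit(unsigned long start, unsigned long end);
#else
static inline void track_limit(unsigned long start, unsigned long end)
{
    (void)start;
    (void)end;
}
#endif

int main(void)
{
    track_limit(0x1000, 0x2000);    /* no #ifdef needed at the call site */
    printf("done\n");
    return 0;
}
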
44770 diff -urNp linux-2.6.34.1/include/linux/mm_types.h linux-2.6.34.1/include/linux/mm_types.h
44771 --- linux-2.6.34.1/include/linux/mm_types.h 2010-07-05 14:24:10.000000000 -0400
44772 +++ linux-2.6.34.1/include/linux/mm_types.h 2010-07-07 09:04:56.000000000 -0400
44773 @@ -183,6 +183,8 @@ struct vm_area_struct {
44774 #ifdef CONFIG_NUMA
44775 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
44776 #endif
44777 +
44778 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
44779 };
44780
44781 struct core_thread {
44782 @@ -310,6 +312,24 @@ struct mm_struct {
44783 #ifdef CONFIG_MMU_NOTIFIER
44784 struct mmu_notifier_mm *mmu_notifier_mm;
44785 #endif
44786 +
44787 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44788 + unsigned long pax_flags;
44789 +#endif
44790 +
44791 +#ifdef CONFIG_PAX_DLRESOLVE
44792 + unsigned long call_dl_resolve;
44793 +#endif
44794 +
44795 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
44796 + unsigned long call_syscall;
44797 +#endif
44798 +
44799 +#ifdef CONFIG_PAX_ASLR
44800 + unsigned long delta_mmap; /* randomized offset */
44801 + unsigned long delta_stack; /* randomized offset */
44802 +#endif
44803 +
44804 };
44805
44806 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
44807 diff -urNp linux-2.6.34.1/include/linux/mmu_notifier.h linux-2.6.34.1/include/linux/mmu_notifier.h
44808 --- linux-2.6.34.1/include/linux/mmu_notifier.h 2010-07-05 14:24:10.000000000 -0400
44809 +++ linux-2.6.34.1/include/linux/mmu_notifier.h 2010-07-07 09:04:56.000000000 -0400
44810 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
44811 */
44812 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
44813 ({ \
44814 - pte_t __pte; \
44815 + pte_t ___pte; \
44816 struct vm_area_struct *___vma = __vma; \
44817 unsigned long ___address = __address; \
44818 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
44819 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
44820 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
44821 - __pte; \
44822 + ___pte; \
44823 })
44824
44825 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
44826 diff -urNp linux-2.6.34.1/include/linux/mod_devicetable.h linux-2.6.34.1/include/linux/mod_devicetable.h
44827 --- linux-2.6.34.1/include/linux/mod_devicetable.h 2010-07-05 14:24:10.000000000 -0400
44828 +++ linux-2.6.34.1/include/linux/mod_devicetable.h 2010-07-07 09:04:56.000000000 -0400
44829 @@ -12,7 +12,7 @@
44830 typedef unsigned long kernel_ulong_t;
44831 #endif
44832
44833 -#define PCI_ANY_ID (~0)
44834 +#define PCI_ANY_ID ((__u16)~0)
44835
44836 struct pci_device_id {
44837 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
44838 @@ -131,7 +131,7 @@ struct usb_device_id {
44839 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
44840 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
44841
44842 -#define HID_ANY_ID (~0)
44843 +#define HID_ANY_ID (~0U)
44844
44845 struct hid_device_id {
44846 __u16 bus;
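
Aside (not part of the patch): the PCI_ANY_ID/HID_ANY_ID hunk gives the wildcard sentinels an explicit width and signedness instead of a bare (~0). A standalone illustration of one trap an untyped ~0 sentinel can run into when matched against a narrower field, using plain stdint types instead of __u16/__u32:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID_PLAIN  (~0)             /* type int, value -1 */
#define ANY_ID_U16    ((uint16_t)~0)   /* value 0xffff */

int main(void)
{
    uint16_t id = 0xffff;              /* "match anything" stored in a 16-bit field */

    /* id promotes to int 65535, ANY_ID_PLAIN is int -1: no match. */
    printf("plain ~0 matches: %d\n", id == ANY_ID_PLAIN);

    /* Both sides promote to int 65535: match, as intended. */
    printf("cast  ~0 matches: %d\n", id == ANY_ID_U16);
    return 0;
}
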
44847 diff -urNp linux-2.6.34.1/include/linux/module.h linux-2.6.34.1/include/linux/module.h
44848 --- linux-2.6.34.1/include/linux/module.h 2010-07-05 14:24:10.000000000 -0400
44849 +++ linux-2.6.34.1/include/linux/module.h 2010-07-07 09:04:56.000000000 -0400
44850 @@ -290,16 +290,16 @@ struct module
44851 int (*init)(void);
44852
44853 /* If this is non-NULL, vfree after init() returns */
44854 - void *module_init;
44855 + void *module_init_rx, *module_init_rw;
44856
44857 /* Here is the actual code + data, vfree'd on unload. */
44858 - void *module_core;
44859 + void *module_core_rx, *module_core_rw;
44860
44861 /* Here are the sizes of the init and core sections */
44862 - unsigned int init_size, core_size;
44863 + unsigned int init_size_rw, core_size_rw;
44864
44865 /* The size of the executable code in each section. */
44866 - unsigned int init_text_size, core_text_size;
44867 + unsigned int init_size_rx, core_size_rx;
44868
44869 /* Arch-specific module values */
44870 struct mod_arch_specific arch;
44871 @@ -399,16 +399,46 @@ bool is_module_address(unsigned long add
44872 bool is_module_percpu_address(unsigned long addr);
44873 bool is_module_text_address(unsigned long addr);
44874
44875 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
44876 +{
44877 +
44878 +#ifdef CONFIG_PAX_KERNEXEC
44879 + if (ktla_ktva(addr) >= (unsigned long)start &&
44880 + ktla_ktva(addr) < (unsigned long)start + size)
44881 + return 1;
44882 +#endif
44883 +
44884 + return ((void *)addr >= start && (void *)addr < start + size);
44885 +}
44886 +
44887 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
44888 +{
44889 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
44890 +}
44891 +
44892 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
44893 +{
44894 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
44895 +}
44896 +
44897 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
44898 +{
44899 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
44900 +}
44901 +
44902 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
44903 +{
44904 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
44905 +}
44906 +
44907 static inline int within_module_core(unsigned long addr, struct module *mod)
44908 {
44909 - return (unsigned long)mod->module_core <= addr &&
44910 - addr < (unsigned long)mod->module_core + mod->core_size;
44911 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
44912 }
44913
44914 static inline int within_module_init(unsigned long addr, struct module *mod)
44915 {
44916 - return (unsigned long)mod->module_init <= addr &&
44917 - addr < (unsigned long)mod->module_init + mod->init_size;
44918 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
44919 }
44920
44921 /* Search for module by name: must hold module_mutex. */
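
Aside (not part of the patch): with the module core and init areas split into rx and rw halves, the old within_module_core()/within_module_init() checks become wrappers over a shared range helper. A compact userspace model of that membership test; static buffers stand in for the module regions, and the ktla_ktva() translation used under CONFIG_PAX_KERNEXEC is left out:

#include <stdio.h>
#include <stdbool.h>

struct fake_module {
    void *core_rx;  unsigned long core_size_rx;   /* executable half */
    void *core_rw;  unsigned long core_size_rw;   /* writable-data half */
};

static bool within_range(unsigned long addr, void *start, unsigned long size)
{
    unsigned long s = (unsigned long)start;

    return addr >= s && addr < s + size;
}

/* "In the module core" now means "in either half". */
static bool within_module_core(unsigned long addr, const struct fake_module *mod)
{
    return within_range(addr, mod->core_rx, mod->core_size_rx) ||
           within_range(addr, mod->core_rw, mod->core_size_rw);
}

int main(void)
{
    static char text[256], data[128];
    struct fake_module mod = { text, sizeof(text), data, sizeof(data) };

    printf("%d %d %d\n",
           within_module_core((unsigned long)(text + 10), &mod),  /* 1 */
           within_module_core((unsigned long)(data + 10), &mod),  /* 1 */
           within_module_core((unsigned long)&mod, &mod));        /* 0 */
    return 0;
}
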
44922 diff -urNp linux-2.6.34.1/include/linux/moduleloader.h linux-2.6.34.1/include/linux/moduleloader.h
44923 --- linux-2.6.34.1/include/linux/moduleloader.h 2010-07-05 14:24:10.000000000 -0400
44924 +++ linux-2.6.34.1/include/linux/moduleloader.h 2010-07-07 09:04:56.000000000 -0400
44925 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
44926 sections. Returns NULL on failure. */
44927 void *module_alloc(unsigned long size);
44928
44929 +#ifdef CONFIG_PAX_KERNEXEC
44930 +void *module_alloc_exec(unsigned long size);
44931 +#else
44932 +#define module_alloc_exec(x) module_alloc(x)
44933 +#endif
44934 +
44935 /* Free memory returned from module_alloc. */
44936 void module_free(struct module *mod, void *module_region);
44937
44938 +#ifdef CONFIG_PAX_KERNEXEC
44939 +void module_free_exec(struct module *mod, void *module_region);
44940 +#else
44941 +#define module_free_exec(x, y) module_free((x), (y))
44942 +#endif
44943 +
44944 /* Apply the given relocation to the (simplified) ELF. Return -error
44945 or 0. */
44946 int apply_relocate(Elf_Shdr *sechdrs,
44947 diff -urNp linux-2.6.34.1/include/linux/namei.h linux-2.6.34.1/include/linux/namei.h
44948 --- linux-2.6.34.1/include/linux/namei.h 2010-07-05 14:24:10.000000000 -0400
44949 +++ linux-2.6.34.1/include/linux/namei.h 2010-07-07 09:04:56.000000000 -0400
44950 @@ -22,7 +22,7 @@ struct nameidata {
44951 unsigned int flags;
44952 int last_type;
44953 unsigned depth;
44954 - char *saved_names[MAX_NESTED_LINKS + 1];
44955 + const char *saved_names[MAX_NESTED_LINKS + 1];
44956
44957 /* Intent data */
44958 union {
44959 @@ -81,12 +81,12 @@ extern int follow_up(struct path *);
44960 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
44961 extern void unlock_rename(struct dentry *, struct dentry *);
44962
44963 -static inline void nd_set_link(struct nameidata *nd, char *path)
44964 +static inline void nd_set_link(struct nameidata *nd, const char *path)
44965 {
44966 nd->saved_names[nd->depth] = path;
44967 }
44968
44969 -static inline char *nd_get_link(struct nameidata *nd)
44970 +static inline const char *nd_get_link(const struct nameidata *nd)
44971 {
44972 return nd->saved_names[nd->depth];
44973 }
44974 diff -urNp linux-2.6.34.1/include/linux/oprofile.h linux-2.6.34.1/include/linux/oprofile.h
44975 --- linux-2.6.34.1/include/linux/oprofile.h 2010-07-05 14:24:10.000000000 -0400
44976 +++ linux-2.6.34.1/include/linux/oprofile.h 2010-07-07 09:04:56.000000000 -0400
44977 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
44978 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
44979 char const * name, ulong * val);
44980
44981 -/** Create a file for read-only access to an atomic_t. */
44982 +/** Create a file for read-only access to an atomic_unchecked_t. */
44983 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
44984 - char const * name, atomic_t * val);
44985 + char const * name, atomic_unchecked_t * val);
44986
44987 /** create a directory */
44988 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
44989 diff -urNp linux-2.6.34.1/include/linux/pipe_fs_i.h linux-2.6.34.1/include/linux/pipe_fs_i.h
44990 --- linux-2.6.34.1/include/linux/pipe_fs_i.h 2010-07-05 14:24:10.000000000 -0400
44991 +++ linux-2.6.34.1/include/linux/pipe_fs_i.h 2010-07-07 09:04:56.000000000 -0400
44992 @@ -46,9 +46,9 @@ struct pipe_inode_info {
44993 wait_queue_head_t wait;
44994 unsigned int nrbufs, curbuf;
44995 struct page *tmp_page;
44996 - unsigned int readers;
44997 - unsigned int writers;
44998 - unsigned int waiting_writers;
44999 + atomic_t readers;
45000 + atomic_t writers;
45001 + atomic_t waiting_writers;
45002 unsigned int r_counter;
45003 unsigned int w_counter;
45004 struct fasync_struct *fasync_readers;
45005 diff -urNp linux-2.6.34.1/include/linux/poison.h linux-2.6.34.1/include/linux/poison.h
45006 --- linux-2.6.34.1/include/linux/poison.h 2010-07-05 14:24:10.000000000 -0400
45007 +++ linux-2.6.34.1/include/linux/poison.h 2010-07-07 09:04:56.000000000 -0400
45008 @@ -19,8 +19,8 @@
45009 * under normal circumstances, used to verify that nobody uses
45010 * non-initialized list entries.
45011 */
45012 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
45013 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
45014 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
45015 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
45016
45017 /********** include/linux/timer.h **********/
45018 /*
45019 diff -urNp linux-2.6.34.1/include/linux/proc_fs.h linux-2.6.34.1/include/linux/proc_fs.h
45020 --- linux-2.6.34.1/include/linux/proc_fs.h 2010-07-05 14:24:10.000000000 -0400
45021 +++ linux-2.6.34.1/include/linux/proc_fs.h 2010-07-07 09:04:56.000000000 -0400
45022 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
45023 return proc_create_data(name, mode, parent, proc_fops, NULL);
45024 }
45025
45026 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
45027 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
45028 +{
45029 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45030 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
45031 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45032 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
45033 +#else
45034 + return proc_create_data(name, mode, parent, proc_fops, NULL);
45035 +#endif
45036 +}
45037 +
45038 +
45039 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
45040 mode_t mode, struct proc_dir_entry *base,
45041 read_proc_t *read_proc, void * data)
45042 diff -urNp linux-2.6.34.1/include/linux/random.h linux-2.6.34.1/include/linux/random.h
45043 --- linux-2.6.34.1/include/linux/random.h 2010-07-05 14:24:10.000000000 -0400
45044 +++ linux-2.6.34.1/include/linux/random.h 2010-07-07 09:04:56.000000000 -0400
45045 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
45046 u32 random32(void);
45047 void srandom32(u32 seed);
45048
45049 +static inline unsigned long pax_get_random_long(void)
45050 +{
45051 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
45052 +}
45053 +
45054 #endif /* __KERNEL___ */
45055
45056 #endif /* _LINUX_RANDOM_H */
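
Aside (not part of the patch): pax_get_random_long() composes a long from one 32-bit draw on 32-bit targets and two on 64-bit targets. A standalone sketch of the same composition; rand() is only a placeholder for random32() and is not a cryptographic source:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Placeholder 32-bit source, standing in for the kernel's random32(). */
static uint32_t get_random_u32(void)
{
    return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* One draw fills a 32-bit long; a second draw fills the high half when
 * unsigned long is 64 bits wide.  The shift is done in uint64_t so the
 * expression is well defined on either word size. */
static unsigned long get_random_ulong(void)
{
    unsigned long val = get_random_u32();

    if (sizeof(unsigned long) > 4)
        val |= (unsigned long)((uint64_t)get_random_u32() << 32);

    return val;
}

int main(void)
{
    srand(1);
    printf("%lx\n", get_random_ulong());
    return 0;
}
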
45057 diff -urNp linux-2.6.34.1/include/linux/reiserfs_fs.h linux-2.6.34.1/include/linux/reiserfs_fs.h
45058 --- linux-2.6.34.1/include/linux/reiserfs_fs.h 2010-07-05 14:24:10.000000000 -0400
45059 +++ linux-2.6.34.1/include/linux/reiserfs_fs.h 2010-07-07 09:04:56.000000000 -0400
45060 @@ -1404,7 +1404,7 @@ static inline loff_t max_reiserfs_offset
45061 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
45062
45063 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
45064 -#define get_generation(s) atomic_read (&fs_generation(s))
45065 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
45066 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
45067 #define __fs_changed(gen,s) (gen != get_generation (s))
45068 #define fs_changed(gen,s) \
45069 @@ -1616,24 +1616,24 @@ static inline struct super_block *sb_fro
45070 */
45071
45072 struct item_operations {
45073 - int (*bytes_number) (struct item_head * ih, int block_size);
45074 - void (*decrement_key) (struct cpu_key *);
45075 - int (*is_left_mergeable) (struct reiserfs_key * ih,
45076 + int (* const bytes_number) (struct item_head * ih, int block_size);
45077 + void (* const decrement_key) (struct cpu_key *);
45078 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
45079 unsigned long bsize);
45080 - void (*print_item) (struct item_head *, char *item);
45081 - void (*check_item) (struct item_head *, char *item);
45082 + void (* const print_item) (struct item_head *, char *item);
45083 + void (* const check_item) (struct item_head *, char *item);
45084
45085 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
45086 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
45087 int is_affected, int insert_size);
45088 - int (*check_left) (struct virtual_item * vi, int free,
45089 + int (* const check_left) (struct virtual_item * vi, int free,
45090 int start_skip, int end_skip);
45091 - int (*check_right) (struct virtual_item * vi, int free);
45092 - int (*part_size) (struct virtual_item * vi, int from, int to);
45093 - int (*unit_num) (struct virtual_item * vi);
45094 - void (*print_vi) (struct virtual_item * vi);
45095 + int (* const check_right) (struct virtual_item * vi, int free);
45096 + int (* const part_size) (struct virtual_item * vi, int from, int to);
45097 + int (* const unit_num) (struct virtual_item * vi);
45098 + void (* const print_vi) (struct virtual_item * vi);
45099 };
45100
45101 -extern struct item_operations *item_ops[TYPE_ANY + 1];
45102 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
45103
45104 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
45105 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
45106 diff -urNp linux-2.6.34.1/include/linux/reiserfs_fs_sb.h linux-2.6.34.1/include/linux/reiserfs_fs_sb.h
45107 --- linux-2.6.34.1/include/linux/reiserfs_fs_sb.h 2010-07-05 14:24:10.000000000 -0400
45108 +++ linux-2.6.34.1/include/linux/reiserfs_fs_sb.h 2010-07-07 09:04:56.000000000 -0400
45109 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
45110 /* Comment? -Hans */
45111 wait_queue_head_t s_wait;
45112 /* To be obsoleted soon by per buffer seals.. -Hans */
45113 - atomic_t s_generation_counter; // increased by one every time the
45114 + atomic_unchecked_t s_generation_counter; // increased by one every time the
45115 // tree gets re-balanced
45116 unsigned long s_properties; /* File system properties. Currently holds
45117 on-disk FS format */
45118 diff -urNp linux-2.6.34.1/include/linux/sched.h linux-2.6.34.1/include/linux/sched.h
45119 --- linux-2.6.34.1/include/linux/sched.h 2010-07-05 14:24:10.000000000 -0400
45120 +++ linux-2.6.34.1/include/linux/sched.h 2010-07-07 09:04:56.000000000 -0400
45121 @@ -101,6 +101,7 @@ struct bio_list;
45122 struct fs_struct;
45123 struct bts_context;
45124 struct perf_event_context;
45125 +struct linux_binprm;
45126
45127 /*
45128 * List of flags we want to share for kernel threads,
45129 @@ -628,6 +629,15 @@ struct signal_struct {
45130 struct tty_audit_buf *tty_audit_buf;
45131 #endif
45132
45133 +#ifdef CONFIG_GRKERNSEC
45134 + u32 curr_ip;
45135 + u32 gr_saddr;
45136 + u32 gr_daddr;
45137 + u16 gr_sport;
45138 + u16 gr_dport;
45139 + u8 used_accept:1;
45140 +#endif
45141 +
45142 int oom_adj; /* OOM kill score adjustment (bit shift) */
45143 };
45144
45145 @@ -1169,7 +1179,7 @@ struct rcu_node;
45146
45147 struct task_struct {
45148 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
45149 - void *stack;
45150 + struct thread_info *stack;
45151 atomic_t usage;
45152 unsigned int flags; /* per process flags, defined below */
45153 unsigned int ptrace;
45154 @@ -1283,8 +1293,8 @@ struct task_struct {
45155 struct list_head thread_group;
45156
45157 struct completion *vfork_done; /* for vfork() */
45158 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
45159 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
45160 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
45161 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
45162
45163 cputime_t utime, stime, utimescaled, stimescaled;
45164 cputime_t gtime;
45165 @@ -1300,16 +1310,6 @@ struct task_struct {
45166 struct task_cputime cputime_expires;
45167 struct list_head cpu_timers[3];
45168
45169 -/* process credentials */
45170 - const struct cred *real_cred; /* objective and real subjective task
45171 - * credentials (COW) */
45172 - const struct cred *cred; /* effective (overridable) subjective task
45173 - * credentials (COW) */
45174 - struct mutex cred_guard_mutex; /* guard against foreign influences on
45175 - * credential calculations
45176 - * (notably. ptrace) */
45177 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
45178 -
45179 char comm[TASK_COMM_LEN]; /* executable name excluding path
45180 - access with [gs]et_task_comm (which lock
45181 it with task_lock())
45182 @@ -1393,6 +1393,15 @@ struct task_struct {
45183 int softirqs_enabled;
45184 int softirq_context;
45185 #endif
45186 +
45187 +/* process credentials */
45188 + const struct cred *real_cred; /* objective and real subjective task
45189 + * credentials (COW) */
45190 + struct mutex cred_guard_mutex; /* guard against foreign influences on
45191 + * credential calculations
45192 + * (notably. ptrace) */
45193 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
45194 +
45195 #ifdef CONFIG_LOCKDEP
45196 # define MAX_LOCK_DEPTH 48UL
45197 u64 curr_chain_key;
45198 @@ -1413,6 +1422,9 @@ struct task_struct {
45199
45200 struct backing_dev_info *backing_dev_info;
45201
45202 + const struct cred *cred; /* effective (overridable) subjective task
45203 + * credentials (COW) */
45204 +
45205 struct io_context *io_context;
45206
45207 unsigned long ptrace_message;
45208 @@ -1476,6 +1488,20 @@ struct task_struct {
45209 unsigned long default_timer_slack_ns;
45210
45211 struct list_head *scm_work_list;
45212 +
45213 +#ifdef CONFIG_GRKERNSEC
45214 + /* grsecurity */
45215 + struct dentry *gr_chroot_dentry;
45216 + struct acl_subject_label *acl;
45217 + struct acl_role_label *role;
45218 + struct file *exec_file;
45219 + u16 acl_role_id;
45220 + u8 acl_sp_role;
45221 + u8 is_writable;
45222 + u8 brute;
45223 + u8 gr_is_chrooted;
45224 +#endif
45225 +
45226 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
45227 /* Index of current stored address in ret_stack */
45228 int curr_ret_stack;
45229 @@ -1507,6 +1533,52 @@ struct task_struct {
45230 #endif
45231 };
45232
45233 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
45234 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
45235 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
45236 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
45237 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
45238 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
45239 +
45240 +#ifdef CONFIG_PAX_SOFTMODE
45241 +extern unsigned int pax_softmode;
45242 +#endif
45243 +
45244 +extern int pax_check_flags(unsigned long *);
45245 +
45246 +/* if tsk != current then task_lock must be held on it */
45247 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45248 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
45249 +{
45250 + if (likely(tsk->mm))
45251 + return tsk->mm->pax_flags;
45252 + else
45253 + return 0UL;
45254 +}
45255 +
45256 +/* if tsk != current then task_lock must be held on it */
45257 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
45258 +{
45259 + if (likely(tsk->mm)) {
45260 + tsk->mm->pax_flags = flags;
45261 + return 0;
45262 + }
45263 + return -EINVAL;
45264 +}
45265 +#endif
45266 +
45267 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
45268 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
45269 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
45270 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
45271 +#endif
45272 +
45273 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
45274 +void pax_report_insns(void *pc, void *sp);
45275 +void pax_report_refcount_overflow(struct pt_regs *regs);
45276 +void pax_report_leak_to_user(const void *ptr, unsigned long len);
45277 +void pax_report_overflow_from_user(const void *ptr, unsigned long len);
45278 +
45279 /* Future-safe accessor for struct task_struct's cpus_allowed. */
45280 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
45281
45282 @@ -2108,7 +2180,7 @@ extern void __cleanup_sighand(struct sig
45283 extern void exit_itimers(struct signal_struct *);
45284 extern void flush_itimer_signals(void);
45285
45286 -extern NORET_TYPE void do_group_exit(int);
45287 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
45288
45289 extern void daemonize(const char *, ...);
45290 extern int allow_signal(int);
45291 @@ -2221,8 +2293,8 @@ static inline void unlock_task_sighand(s
45292
45293 #ifndef __HAVE_THREAD_FUNCTIONS
45294
45295 -#define task_thread_info(task) ((struct thread_info *)(task)->stack)
45296 -#define task_stack_page(task) ((task)->stack)
45297 +#define task_thread_info(task) ((task)->stack)
45298 +#define task_stack_page(task) ((void *)(task)->stack)
45299
45300 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
45301 {
45302 @@ -2237,13 +2309,31 @@ static inline unsigned long *end_of_stac
45303
45304 #endif
45305
45306 -static inline int object_is_on_stack(void *obj)
45307 +static inline int object_starts_on_stack(void *obj)
45308 {
45309 - void *stack = task_stack_page(current);
45310 + const void *stack = task_stack_page(current);
45311
45312 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
45313 }
45314
45315 +/* 0: not at all, 1: fully, -1: partially (implies an error) */
45316 +static inline int object_is_on_stack(const void *obj, unsigned long len)
45317 +{
45318 + const void *stack = task_stack_page(current);
45319 + const void *stackend = stack + THREAD_SIZE;
45320 +
45321 + if (obj + len < obj)
45322 + return -1;
45323 +
45324 + if (stack <= obj && obj + len <= stackend)
45325 + return 1;
45326 +
45327 + if (obj + len <= stack || stackend <= obj)
45328 + return 0;
45329 +
45330 + return -1;
45331 +}
45332 +
45333 extern void thread_info_cache_init(void);
45334
45335 #ifdef CONFIG_DEBUG_STACK_USAGE
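
Aside (not part of the patch): the sched.h hunk renames the old boolean helper to object_starts_on_stack() and adds a tri-state object_is_on_stack() that also reports objects straddling the stack or lengths that wrap around. A userspace model of that overlap test, with a static buffer standing in for the thread stack:

#include <stdio.h>

#define FAKE_STACK_SIZE 8192UL
static char fake_stack[FAKE_STACK_SIZE];

/* 0: not on the stack at all, 1: fully contained,
 * -1: partial overlap or a length that wraps around (both are errors). */
static int object_is_on_stack(const void *obj, unsigned long len)
{
    unsigned long start    = (unsigned long)obj;
    unsigned long stack    = (unsigned long)fake_stack;
    unsigned long stackend = stack + FAKE_STACK_SIZE;

    if (start + len < start)                        /* obj + len wrapped */
        return -1;
    if (stack <= start && start + len <= stackend)  /* fully inside */
        return 1;
    if (start + len <= stack || stackend <= start)  /* fully outside */
        return 0;
    return -1;                                      /* straddles a boundary */
}

int main(void)
{
    static char elsewhere[64];

    printf("%d\n", object_is_on_stack(fake_stack + 16, 32));                  /* 1 */
    printf("%d\n", object_is_on_stack(elsewhere, sizeof(elsewhere)));         /* 0 */
    printf("%d\n", object_is_on_stack(fake_stack + FAKE_STACK_SIZE - 8, 64)); /* -1 */
    return 0;
}
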
45336 diff -urNp linux-2.6.34.1/include/linux/screen_info.h linux-2.6.34.1/include/linux/screen_info.h
45337 --- linux-2.6.34.1/include/linux/screen_info.h 2010-07-05 14:24:10.000000000 -0400
45338 +++ linux-2.6.34.1/include/linux/screen_info.h 2010-07-07 09:04:56.000000000 -0400
45339 @@ -43,7 +43,8 @@ struct screen_info {
45340 __u16 pages; /* 0x32 */
45341 __u16 vesa_attributes; /* 0x34 */
45342 __u32 capabilities; /* 0x36 */
45343 - __u8 _reserved[6]; /* 0x3a */
45344 + __u16 vesapm_size; /* 0x3a */
45345 + __u8 _reserved[4]; /* 0x3c */
45346 } __attribute__((packed));
45347
45348 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
45349 diff -urNp linux-2.6.34.1/include/linux/security.h linux-2.6.34.1/include/linux/security.h
45350 --- linux-2.6.34.1/include/linux/security.h 2010-07-05 14:24:10.000000000 -0400
45351 +++ linux-2.6.34.1/include/linux/security.h 2010-07-07 09:04:57.000000000 -0400
45352 @@ -34,6 +34,7 @@
45353 #include <linux/key.h>
45354 #include <linux/xfrm.h>
45355 #include <linux/slab.h>
45356 +#include <linux/grsecurity.h>
45357 #include <net/flow.h>
45358
45359 /* Maximum number of letters for an LSM name string */
45360 diff -urNp linux-2.6.34.1/include/linux/shm.h linux-2.6.34.1/include/linux/shm.h
45361 --- linux-2.6.34.1/include/linux/shm.h 2010-07-05 14:24:10.000000000 -0400
45362 +++ linux-2.6.34.1/include/linux/shm.h 2010-07-07 09:04:57.000000000 -0400
45363 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
45364 pid_t shm_cprid;
45365 pid_t shm_lprid;
45366 struct user_struct *mlock_user;
45367 +#ifdef CONFIG_GRKERNSEC
45368 + time_t shm_createtime;
45369 + pid_t shm_lapid;
45370 +#endif
45371 };
45372
45373 /* shm_mode upper byte flags */
45374 diff -urNp linux-2.6.34.1/include/linux/slab.h linux-2.6.34.1/include/linux/slab.h
45375 --- linux-2.6.34.1/include/linux/slab.h 2010-07-05 14:24:10.000000000 -0400
45376 +++ linux-2.6.34.1/include/linux/slab.h 2010-07-07 09:04:57.000000000 -0400
45377 @@ -11,6 +11,7 @@
45378
45379 #include <linux/gfp.h>
45380 #include <linux/types.h>
45381 +#include <linux/err.h>
45382
45383 /*
45384 * Flags to pass to kmem_cache_create().
45385 @@ -87,10 +88,13 @@
45386 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
45387 * Both make kfree a no-op.
45388 */
45389 -#define ZERO_SIZE_PTR ((void *)16)
45390 +#define ZERO_SIZE_PTR \
45391 +({ \
45392 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
45393 + (void *)(-MAX_ERRNO-1L); \
45394 +})
45395
45396 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
45397 - (unsigned long)ZERO_SIZE_PTR)
45398 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
45399
45400 /*
45401 * struct kmem_cache related prototypes
45402 @@ -144,6 +148,7 @@ void * __must_check krealloc(const void
45403 void kfree(const void *);
45404 void kzfree(const void *);
45405 size_t ksize(const void *);
45406 +void check_object_size(const void *ptr, unsigned long n, bool to);
45407
45408 /*
45409 * Allocator specific definitions. These are mainly used to establish optimized
45410 @@ -334,4 +339,37 @@ static inline void *kzalloc_node(size_t
45411
45412 void __init kmem_cache_init_late(void);
45413
45414 +#define kmalloc(x, y) \
45415 +({ \
45416 + void *___retval; \
45417 + intoverflow_t ___x = (intoverflow_t)x; \
45418 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
45419 + ___retval = NULL; \
45420 + else \
45421 + ___retval = kmalloc((size_t)___x, (y)); \
45422 + ___retval; \
45423 +})
45424 +
45425 +#define kmalloc_node(x, y, z) \
45426 +({ \
45427 + void *___retval; \
45428 + intoverflow_t ___x = (intoverflow_t)x; \
45429 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
45430 + ___retval = NULL; \
45431 + else \
45432 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
45433 + ___retval; \
45434 +})
45435 +
45436 +#define kzalloc(x, y) \
45437 +({ \
45438 + void *___retval; \
45439 + intoverflow_t ___x = (intoverflow_t)x; \
45440 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
45441 + ___retval = NULL; \
45442 + else \
45443 + ___retval = kzalloc((size_t)___x, (y)); \
45444 + ___retval; \
45445 +})
45446 +
45447 #endif /* _LINUX_SLAB_H */
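
Aside (not part of the patch): two details of the slab.h hunk are easy to miss. ZERO_SIZE_PTR moves to just below the error-pointer range, and ZERO_OR_NULL_PTR() then covers both NULL and that whole top range with a single unsigned comparison, because 0 - 1 wraps to ULONG_MAX. A standalone check of the equivalence, using plain unsigned long values in place of pointers:

#include <stdio.h>
#include <limits.h>

#define MAX_ERRNO      4095UL
#define ZERO_SIZE_VAL  (ULONG_MAX - MAX_ERRNO)   /* == (unsigned long)(-MAX_ERRNO - 1) */

/* Obvious form: NULL, or anywhere in the reserved top range. */
static int is_zero_or_null_slow(unsigned long x)
{
    return x == 0 || x >= ZERO_SIZE_VAL;
}

/* Patched form: one comparison; x == 0 wraps to ULONG_MAX and lands
 * above ZERO_SIZE_VAL - 1 together with the reserved range. */
static int is_zero_or_null_fast(unsigned long x)
{
    return x - 1 >= ZERO_SIZE_VAL - 1;
}

int main(void)
{
    unsigned long samples[] = { 0, 1, 4096, ZERO_SIZE_VAL - 1, ZERO_SIZE_VAL, ULONG_MAX };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%#lx: slow=%d fast=%d\n", samples[i],
               is_zero_or_null_slow(samples[i]),
               is_zero_or_null_fast(samples[i]));
    return 0;
}
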
45448 diff -urNp linux-2.6.34.1/include/linux/slub_def.h linux-2.6.34.1/include/linux/slub_def.h
45449 --- linux-2.6.34.1/include/linux/slub_def.h 2010-07-05 14:24:10.000000000 -0400
45450 +++ linux-2.6.34.1/include/linux/slub_def.h 2010-07-07 09:04:57.000000000 -0400
45451 @@ -79,7 +79,7 @@ struct kmem_cache {
45452 struct kmem_cache_order_objects max;
45453 struct kmem_cache_order_objects min;
45454 gfp_t allocflags; /* gfp flags to use on each alloc */
45455 - int refcount; /* Refcount for slab cache destroy */
45456 + atomic_t refcount; /* Refcount for slab cache destroy */
45457 void (*ctor)(void *);
45458 int inuse; /* Offset to metadata */
45459 int align; /* Alignment */
45460 diff -urNp linux-2.6.34.1/include/linux/sonet.h linux-2.6.34.1/include/linux/sonet.h
45461 --- linux-2.6.34.1/include/linux/sonet.h 2010-07-05 14:24:10.000000000 -0400
45462 +++ linux-2.6.34.1/include/linux/sonet.h 2010-07-07 09:04:57.000000000 -0400
45463 @@ -61,7 +61,7 @@ struct sonet_stats {
45464 #include <asm/atomic.h>
45465
45466 struct k_sonet_stats {
45467 -#define __HANDLE_ITEM(i) atomic_t i
45468 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
45469 __SONET_ITEMS
45470 #undef __HANDLE_ITEM
45471 };
45472 diff -urNp linux-2.6.34.1/include/linux/suspend.h linux-2.6.34.1/include/linux/suspend.h
45473 --- linux-2.6.34.1/include/linux/suspend.h 2010-07-05 14:24:10.000000000 -0400
45474 +++ linux-2.6.34.1/include/linux/suspend.h 2010-07-07 09:04:57.000000000 -0400
45475 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
45476 * which require special recovery actions in that situation.
45477 */
45478 struct platform_suspend_ops {
45479 - int (*valid)(suspend_state_t state);
45480 - int (*begin)(suspend_state_t state);
45481 - int (*prepare)(void);
45482 - int (*prepare_late)(void);
45483 - int (*enter)(suspend_state_t state);
45484 - void (*wake)(void);
45485 - void (*finish)(void);
45486 - void (*end)(void);
45487 - void (*recover)(void);
45488 + int (* const valid)(suspend_state_t state);
45489 + int (* const begin)(suspend_state_t state);
45490 + int (* const prepare)(void);
45491 + int (* const prepare_late)(void);
45492 + int (* const enter)(suspend_state_t state);
45493 + void (* const wake)(void);
45494 + void (* const finish)(void);
45495 + void (* const end)(void);
45496 + void (* const recover)(void);
45497 };
45498
45499 #ifdef CONFIG_SUSPEND
45500 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
45501 * suspend_set_ops - set platform dependent suspend operations
45502 * @ops: The new suspend operations to set.
45503 */
45504 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
45505 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
45506 extern int suspend_valid_only_mem(suspend_state_t state);
45507
45508 /**
45509 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
45510 #else /* !CONFIG_SUSPEND */
45511 #define suspend_valid_only_mem NULL
45512
45513 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
45514 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
45515 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
45516 #endif /* !CONFIG_SUSPEND */
45517
45518 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
45519 * platforms which require special recovery actions in that situation.
45520 */
45521 struct platform_hibernation_ops {
45522 - int (*begin)(void);
45523 - void (*end)(void);
45524 - int (*pre_snapshot)(void);
45525 - void (*finish)(void);
45526 - int (*prepare)(void);
45527 - int (*enter)(void);
45528 - void (*leave)(void);
45529 - int (*pre_restore)(void);
45530 - void (*restore_cleanup)(void);
45531 - void (*recover)(void);
45532 + int (* const begin)(void);
45533 + void (* const end)(void);
45534 + int (* const pre_snapshot)(void);
45535 + void (* const finish)(void);
45536 + int (* const prepare)(void);
45537 + int (* const enter)(void);
45538 + void (* const leave)(void);
45539 + int (* const pre_restore)(void);
45540 + void (* const restore_cleanup)(void);
45541 + void (* const recover)(void);
45542 };
45543
45544 #ifdef CONFIG_HIBERNATION
45545 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
45546 extern void swsusp_unset_page_free(struct page *);
45547 extern unsigned long get_safe_page(gfp_t gfp_mask);
45548
45549 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
45550 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
45551 extern int hibernate(void);
45552 extern bool system_entering_hibernation(void);
45553 #else /* CONFIG_HIBERNATION */
45554 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
45555 static inline void swsusp_set_page_free(struct page *p) {}
45556 static inline void swsusp_unset_page_free(struct page *p) {}
45557
45558 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
45559 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
45560 static inline int hibernate(void) { return -ENOSYS; }
45561 static inline bool system_entering_hibernation(void) { return false; }
45562 #endif /* CONFIG_HIBERNATION */
45563 diff -urNp linux-2.6.34.1/include/linux/sysctl.h linux-2.6.34.1/include/linux/sysctl.h
45564 --- linux-2.6.34.1/include/linux/sysctl.h 2010-07-05 14:24:10.000000000 -0400
45565 +++ linux-2.6.34.1/include/linux/sysctl.h 2010-07-07 09:04:57.000000000 -0400
45566 @@ -155,7 +155,11 @@ enum
45567 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
45568 };
45569
45570 -
45571 +#ifdef CONFIG_PAX_SOFTMODE
45572 +enum {
45573 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
45574 +};
45575 +#endif
45576
45577 /* CTL_VM names: */
45578 enum
45579 diff -urNp linux-2.6.34.1/include/linux/sysfs.h linux-2.6.34.1/include/linux/sysfs.h
45580 --- linux-2.6.34.1/include/linux/sysfs.h 2010-07-05 14:24:10.000000000 -0400
45581 +++ linux-2.6.34.1/include/linux/sysfs.h 2010-07-07 09:04:57.000000000 -0400
45582 @@ -113,8 +113,8 @@ struct bin_attribute {
45583 #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
45584
45585 struct sysfs_ops {
45586 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
45587 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
45588 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
45589 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
45590 };
45591
45592 struct sysfs_dirent;
45593 diff -urNp linux-2.6.34.1/include/linux/thread_info.h linux-2.6.34.1/include/linux/thread_info.h
45594 --- linux-2.6.34.1/include/linux/thread_info.h 2010-07-05 14:24:10.000000000 -0400
45595 +++ linux-2.6.34.1/include/linux/thread_info.h 2010-07-07 09:04:57.000000000 -0400
45596 @@ -23,7 +23,7 @@ struct restart_block {
45597 };
45598 /* For futex_wait and futex_wait_requeue_pi */
45599 struct {
45600 - u32 *uaddr;
45601 + u32 __user *uaddr;
45602 u32 val;
45603 u32 flags;
45604 u32 bitset;
45605 diff -urNp linux-2.6.34.1/include/linux/tty.h linux-2.6.34.1/include/linux/tty.h
45606 --- linux-2.6.34.1/include/linux/tty.h 2010-07-05 14:24:10.000000000 -0400
45607 +++ linux-2.6.34.1/include/linux/tty.h 2010-07-07 09:04:57.000000000 -0400
45608 @@ -13,6 +13,7 @@
45609 #include <linux/tty_driver.h>
45610 #include <linux/tty_ldisc.h>
45611 #include <linux/mutex.h>
45612 +#include <linux/poll.h>
45613
45614 #include <asm/system.h>
45615
45616 @@ -452,7 +453,6 @@ extern int tty_perform_flush(struct tty_
45617 extern dev_t tty_devnum(struct tty_struct *tty);
45618 extern void proc_clear_tty(struct task_struct *p);
45619 extern struct tty_struct *get_current_tty(void);
45620 -extern void tty_default_fops(struct file_operations *fops);
45621 extern struct tty_struct *alloc_tty_struct(void);
45622 extern void free_tty_struct(struct tty_struct *tty);
45623 extern void initialize_tty_struct(struct tty_struct *tty,
45624 @@ -513,6 +513,18 @@ extern void tty_ldisc_begin(void);
45625 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
45626 extern void tty_ldisc_enable(struct tty_struct *tty);
45627
45628 +/* tty_io.c */
45629 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
45630 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
45631 +extern unsigned int tty_poll(struct file *, poll_table *);
45632 +#ifdef CONFIG_COMPAT
45633 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
45634 + unsigned long arg);
45635 +#else
45636 +#define tty_compat_ioctl NULL
45637 +#endif
45638 +extern int tty_release(struct inode *, struct file *);
45639 +extern int tty_fasync(int fd, struct file *filp, int on);
45640
45641 /* n_tty.c */
45642 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
45643 diff -urNp linux-2.6.34.1/include/linux/tty_ldisc.h linux-2.6.34.1/include/linux/tty_ldisc.h
45644 --- linux-2.6.34.1/include/linux/tty_ldisc.h 2010-07-05 14:24:10.000000000 -0400
45645 +++ linux-2.6.34.1/include/linux/tty_ldisc.h 2010-07-07 09:04:57.000000000 -0400
45646 @@ -147,7 +147,7 @@ struct tty_ldisc_ops {
45647
45648 struct module *owner;
45649
45650 - int refcount;
45651 + atomic_t refcount;
45652 };
45653
45654 struct tty_ldisc {
45655 diff -urNp linux-2.6.34.1/include/linux/types.h linux-2.6.34.1/include/linux/types.h
45656 --- linux-2.6.34.1/include/linux/types.h 2010-07-05 14:24:10.000000000 -0400
45657 +++ linux-2.6.34.1/include/linux/types.h 2010-07-07 09:04:57.000000000 -0400
45658 @@ -191,10 +191,26 @@ typedef struct {
45659 volatile int counter;
45660 } atomic_t;
45661
45662 +#ifdef CONFIG_PAX_REFCOUNT
45663 +typedef struct {
45664 + volatile int counter;
45665 +} atomic_unchecked_t;
45666 +#else
45667 +typedef atomic_t atomic_unchecked_t;
45668 +#endif
45669 +
45670 #ifdef CONFIG_64BIT
45671 typedef struct {
45672 volatile long counter;
45673 } atomic64_t;
45674 +
45675 +#ifdef CONFIG_PAX_REFCOUNT
45676 +typedef struct {
45677 + volatile long counter;
45678 +} atomic64_unchecked_t;
45679 +#else
45680 +typedef atomic64_t atomic64_unchecked_t;
45681 +#endif
45682 #endif
45683
45684 struct ustat {
45685 diff -urNp linux-2.6.34.1/include/linux/uaccess.h linux-2.6.34.1/include/linux/uaccess.h
45686 --- linux-2.6.34.1/include/linux/uaccess.h 2010-07-05 14:24:10.000000000 -0400
45687 +++ linux-2.6.34.1/include/linux/uaccess.h 2010-07-07 09:04:57.000000000 -0400
45688 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
45689 long ret; \
45690 mm_segment_t old_fs = get_fs(); \
45691 \
45692 - set_fs(KERNEL_DS); \
45693 pagefault_disable(); \
45694 + set_fs(KERNEL_DS); \
45695 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
45696 - pagefault_enable(); \
45697 set_fs(old_fs); \
45698 + pagefault_enable(); \
45699 ret; \
45700 })
45701
45702 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
45703 * Safely read from address @src to the buffer at @dst. If a kernel fault
45704 * happens, handle that and return -EFAULT.
45705 */
45706 -extern long probe_kernel_read(void *dst, void *src, size_t size);
45707 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
45708 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
45709 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
45710
45711 /*
45712 * probe_kernel_write(): safely attempt to write to a location
45713 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
45714 * Safely write to address @dst from the buffer at @src. If a kernel fault
45715 * happens, handle that and return -EFAULT.
45716 */
45717 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
45718 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
45719 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
45720 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
45721
45722 #endif /* __LINUX_UACCESS_H__ */
45723 diff -urNp linux-2.6.34.1/include/linux/vmalloc.h linux-2.6.34.1/include/linux/vmalloc.h
45724 --- linux-2.6.34.1/include/linux/vmalloc.h 2010-07-05 14:24:10.000000000 -0400
45725 +++ linux-2.6.34.1/include/linux/vmalloc.h 2010-07-07 09:04:57.000000000 -0400
45726 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
45727 #define VM_MAP 0x00000004 /* vmap()ed pages */
45728 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
45729 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
45730 +
45731 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
45732 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
45733 +#endif
45734 +
45735 /* bits [20..32] reserved for arch specific ioremap internals */
45736
45737 /*
45738 @@ -121,4 +126,81 @@ struct vm_struct **pcpu_get_vm_areas(con
45739
45740 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
45741
45742 +#define vmalloc(x) \
45743 +({ \
45744 + void *___retval; \
45745 + intoverflow_t ___x = (intoverflow_t)x; \
45746 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
45747 + ___retval = NULL; \
45748 + else \
45749 + ___retval = vmalloc((unsigned long)___x); \
45750 + ___retval; \
45751 +})
45752 +
45753 +#define __vmalloc(x, y, z) \
45754 +({ \
45755 + void *___retval; \
45756 + intoverflow_t ___x = (intoverflow_t)x; \
45757 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
45758 + ___retval = NULL; \
45759 + else \
45760 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
45761 + ___retval; \
45762 +})
45763 +
45764 +#define vmalloc_user(x) \
45765 +({ \
45766 + void *___retval; \
45767 + intoverflow_t ___x = (intoverflow_t)x; \
45768 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
45769 + ___retval = NULL; \
45770 + else \
45771 + ___retval = vmalloc_user((unsigned long)___x); \
45772 + ___retval; \
45773 +})
45774 +
45775 +#define vmalloc_exec(x) \
45776 +({ \
45777 + void *___retval; \
45778 + intoverflow_t ___x = (intoverflow_t)x; \
45779 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
45780 + ___retval = NULL; \
45781 + else \
45782 + ___retval = vmalloc_exec((unsigned long)___x); \
45783 + ___retval; \
45784 +})
45785 +
45786 +#define vmalloc_node(x, y) \
45787 +({ \
45788 + void *___retval; \
45789 + intoverflow_t ___x = (intoverflow_t)x; \
45790 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
45791 + ___retval = NULL; \
45792 + else \
45793 + ___retval = vmalloc_node((unsigned long)___x, (y));\
45794 + ___retval; \
45795 +})
45796 +
45797 +#define vmalloc_32(x) \
45798 +({ \
45799 + void *___retval; \
45800 + intoverflow_t ___x = (intoverflow_t)x; \
45801 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
45802 + ___retval = NULL; \
45803 + else \
45804 + ___retval = vmalloc_32((unsigned long)___x); \
45805 + ___retval; \
45806 +})
45807 +
45808 +#define vmalloc_32_user(x) \
45809 +({ \
45810 + void *___retval; \
45811 + intoverflow_t ___x = (intoverflow_t)x; \
45812 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
45813 + ___retval = NULL; \
45814 + else \
45815 + ___retval = vmalloc_32_user((unsigned long)___x);\
45816 + ___retval; \
45817 +})
45818 +
45819 #endif /* _LINUX_VMALLOC_H */
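
Aside (not part of the patch): the vmalloc.h wrappers, like the kmalloc ones earlier, follow one template: evaluate the requested size in the wider intoverflow_t, refuse anything that no longer fits the allocator's native size type, and only then call the real function. A compact userspace model of that template around malloc(), with invented checked_alloc/raw_alloc names and a GNU statement expression as in the patch:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Model: the "real" allocator takes a 32-bit size; the wrapper checks the
 * request in a wider type first, the way the patch pits intoverflow_t
 * against unsigned long. */
typedef uint32_t alloc_size_t;
typedef uint64_t sizecheck_t;

static void *raw_alloc(alloc_size_t size)
{
    return malloc(size);
}

#define checked_alloc(x)                                        \
({                                                              \
    void *___retval;                                            \
    sizecheck_t ___x = (sizecheck_t)(x);                        \
    if (___x > UINT32_MAX) {                                    \
        fprintf(stderr, "checked_alloc: size overflow\n");      \
        ___retval = NULL;                                       \
    } else {                                                    \
        ___retval = raw_alloc((alloc_size_t)___x);              \
    }                                                           \
    ___retval;                                                  \
})

int main(void)
{
    sizecheck_t huge = (sizecheck_t)UINT32_MAX + 1;  /* one past the native size type */
    void *ok  = checked_alloc(64);
    void *bad = checked_alloc(huge);                 /* caught by the wide check */

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}
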
45820 diff -urNp linux-2.6.34.1/include/net/irda/ircomm_tty.h linux-2.6.34.1/include/net/irda/ircomm_tty.h
45821 --- linux-2.6.34.1/include/net/irda/ircomm_tty.h 2010-07-05 14:24:10.000000000 -0400
45822 +++ linux-2.6.34.1/include/net/irda/ircomm_tty.h 2010-07-07 09:04:57.000000000 -0400
45823 @@ -105,8 +105,8 @@ struct ircomm_tty_cb {
45824 unsigned short close_delay;
45825 unsigned short closing_wait; /* time to wait before closing */
45826
45827 - int open_count;
45828 - int blocked_open; /* # of blocked opens */
45829 + atomic_t open_count;
45830 + atomic_t blocked_open; /* # of blocked opens */
45831
45832 /* Protect concurent access to :
45833 * o self->open_count
45834 diff -urNp linux-2.6.34.1/include/net/neighbour.h linux-2.6.34.1/include/net/neighbour.h
45835 --- linux-2.6.34.1/include/net/neighbour.h 2010-07-05 14:24:10.000000000 -0400
45836 +++ linux-2.6.34.1/include/net/neighbour.h 2010-07-07 09:04:57.000000000 -0400
45837 @@ -116,12 +116,12 @@ struct neighbour {
45838
45839 struct neigh_ops {
45840 int family;
45841 - void (*solicit)(struct neighbour *, struct sk_buff*);
45842 - void (*error_report)(struct neighbour *, struct sk_buff*);
45843 - int (*output)(struct sk_buff*);
45844 - int (*connected_output)(struct sk_buff*);
45845 - int (*hh_output)(struct sk_buff*);
45846 - int (*queue_xmit)(struct sk_buff*);
45847 + void (* const solicit)(struct neighbour *, struct sk_buff*);
45848 + void (* const error_report)(struct neighbour *, struct sk_buff*);
45849 + int (* const output)(struct sk_buff*);
45850 + int (* const connected_output)(struct sk_buff*);
45851 + int (* const hh_output)(struct sk_buff*);
45852 + int (* const queue_xmit)(struct sk_buff*);
45853 };
45854
45855 struct pneigh_entry {
45856 diff -urNp linux-2.6.34.1/include/net/sctp/sctp.h linux-2.6.34.1/include/net/sctp/sctp.h
45857 --- linux-2.6.34.1/include/net/sctp/sctp.h 2010-07-05 14:24:10.000000000 -0400
45858 +++ linux-2.6.34.1/include/net/sctp/sctp.h 2010-07-07 09:04:57.000000000 -0400
45859 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
45860
45861 #else /* SCTP_DEBUG */
45862
45863 -#define SCTP_DEBUG_PRINTK(whatever...)
45864 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
45865 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
45866 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
45867 #define SCTP_ENABLE_DEBUG
45868 #define SCTP_DISABLE_DEBUG
45869 #define SCTP_ASSERT(expr, str, func)
45870 diff -urNp linux-2.6.34.1/include/net/tcp.h linux-2.6.34.1/include/net/tcp.h
45871 --- linux-2.6.34.1/include/net/tcp.h 2010-07-05 14:24:10.000000000 -0400
45872 +++ linux-2.6.34.1/include/net/tcp.h 2010-07-07 09:04:57.000000000 -0400
45873 @@ -1395,6 +1395,7 @@ enum tcp_seq_states {
45874 struct tcp_seq_afinfo {
45875 char *name;
45876 sa_family_t family;
45877 + /* cannot be const */
45878 struct file_operations seq_fops;
45879 struct seq_operations seq_ops;
45880 };
45881 diff -urNp linux-2.6.34.1/include/net/udp.h linux-2.6.34.1/include/net/udp.h
45882 --- linux-2.6.34.1/include/net/udp.h 2010-07-05 14:24:10.000000000 -0400
45883 +++ linux-2.6.34.1/include/net/udp.h 2010-07-07 09:04:57.000000000 -0400
45884 @@ -221,6 +221,7 @@ struct udp_seq_afinfo {
45885 char *name;
45886 sa_family_t family;
45887 struct udp_table *udp_table;
45888 + /* cannot be const */
45889 struct file_operations seq_fops;
45890 struct seq_operations seq_ops;
45891 };
45892 diff -urNp linux-2.6.34.1/include/sound/ac97_codec.h linux-2.6.34.1/include/sound/ac97_codec.h
45893 --- linux-2.6.34.1/include/sound/ac97_codec.h 2010-07-05 14:24:10.000000000 -0400
45894 +++ linux-2.6.34.1/include/sound/ac97_codec.h 2010-07-07 09:04:57.000000000 -0400
45895 @@ -419,15 +419,15 @@
45896 struct snd_ac97;
45897
45898 struct snd_ac97_build_ops {
45899 - int (*build_3d) (struct snd_ac97 *ac97);
45900 - int (*build_specific) (struct snd_ac97 *ac97);
45901 - int (*build_spdif) (struct snd_ac97 *ac97);
45902 - int (*build_post_spdif) (struct snd_ac97 *ac97);
45903 + int (* const build_3d) (struct snd_ac97 *ac97);
45904 + int (* const build_specific) (struct snd_ac97 *ac97);
45905 + int (* const build_spdif) (struct snd_ac97 *ac97);
45906 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
45907 #ifdef CONFIG_PM
45908 - void (*suspend) (struct snd_ac97 *ac97);
45909 - void (*resume) (struct snd_ac97 *ac97);
45910 + void (* const suspend) (struct snd_ac97 *ac97);
45911 + void (* const resume) (struct snd_ac97 *ac97);
45912 #endif
45913 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
45914 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
45915 };
45916
45917 struct snd_ac97_bus_ops {
45918 @@ -477,7 +477,7 @@ struct snd_ac97_template {
45919
45920 struct snd_ac97 {
45921 /* -- lowlevel (hardware) driver specific -- */
45922 - struct snd_ac97_build_ops * build_ops;
45923 + const struct snd_ac97_build_ops * build_ops;
45924 void *private_data;
45925 void (*private_free) (struct snd_ac97 *ac97);
45926 /* --- */
45927 diff -urNp linux-2.6.34.1/include/trace/events/irq.h linux-2.6.34.1/include/trace/events/irq.h
45928 --- linux-2.6.34.1/include/trace/events/irq.h 2010-07-05 14:24:10.000000000 -0400
45929 +++ linux-2.6.34.1/include/trace/events/irq.h 2010-07-07 09:04:57.000000000 -0400
45930 @@ -34,7 +34,7 @@
45931 */
45932 TRACE_EVENT(irq_handler_entry,
45933
45934 - TP_PROTO(int irq, struct irqaction *action),
45935 + TP_PROTO(int irq, const struct irqaction *action),
45936
45937 TP_ARGS(irq, action),
45938
45939 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
45940 */
45941 TRACE_EVENT(irq_handler_exit,
45942
45943 - TP_PROTO(int irq, struct irqaction *action, int ret),
45944 + TP_PROTO(int irq, const struct irqaction *action, int ret),
45945
45946 TP_ARGS(irq, action, ret),
45947
45948 @@ -84,7 +84,7 @@ TRACE_EVENT(irq_handler_exit,
45949
45950 DECLARE_EVENT_CLASS(softirq,
45951
45952 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45953 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
45954
45955 TP_ARGS(h, vec),
45956
45957 @@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(softirq,
45958 */
45959 DEFINE_EVENT(softirq, softirq_entry,
45960
45961 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45962 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
45963
45964 TP_ARGS(h, vec)
45965 );
45966 @@ -131,7 +131,7 @@ DEFINE_EVENT(softirq, softirq_entry,
45967 */
45968 DEFINE_EVENT(softirq, softirq_exit,
45969
45970 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45971 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
45972
45973 TP_ARGS(h, vec)
45974 );
45975 diff -urNp linux-2.6.34.1/include/video/uvesafb.h linux-2.6.34.1/include/video/uvesafb.h
45976 --- linux-2.6.34.1/include/video/uvesafb.h 2010-07-05 14:24:10.000000000 -0400
45977 +++ linux-2.6.34.1/include/video/uvesafb.h 2010-07-07 09:04:57.000000000 -0400
45978 @@ -177,6 +177,7 @@ struct uvesafb_par {
45979 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
45980 u8 pmi_setpal; /* PMI for palette changes */
45981 u16 *pmi_base; /* protected mode interface location */
45982 + u8 *pmi_code; /* protected mode code location */
45983 void *pmi_start;
45984 void *pmi_pal;
45985 u8 *vbe_state_orig; /*
45986 diff -urNp linux-2.6.34.1/init/Kconfig linux-2.6.34.1/init/Kconfig
45987 --- linux-2.6.34.1/init/Kconfig 2010-07-05 14:24:10.000000000 -0400
45988 +++ linux-2.6.34.1/init/Kconfig 2010-07-07 09:04:57.000000000 -0400
45989 @@ -1029,7 +1029,7 @@ config SLUB_DEBUG
45990
45991 config COMPAT_BRK
45992 bool "Disable heap randomization"
45993 - default y
45994 + default n
45995 help
45996 Randomizing heap placement makes heap exploits harder, but it
45997 also breaks ancient binaries (including anything libc5 based).
45998 diff -urNp linux-2.6.34.1/init/do_mounts.c linux-2.6.34.1/init/do_mounts.c
45999 --- linux-2.6.34.1/init/do_mounts.c 2010-07-05 14:24:10.000000000 -0400
46000 +++ linux-2.6.34.1/init/do_mounts.c 2010-07-07 09:04:57.000000000 -0400
46001 @@ -217,11 +217,11 @@ static void __init get_fs_names(char *pa
46002
46003 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
46004 {
46005 - int err = sys_mount(name, "/root", fs, flags, data);
46006 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
46007 if (err)
46008 return err;
46009
46010 - sys_chdir("/root");
46011 + sys_chdir((__force char __user *)"/root");
46012 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
46013 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
46014 current->fs->pwd.mnt->mnt_sb->s_type->name,
46015 @@ -312,18 +312,18 @@ void __init change_floppy(char *fmt, ...
46016 va_start(args, fmt);
46017 vsprintf(buf, fmt, args);
46018 va_end(args);
46019 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
46020 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
46021 if (fd >= 0) {
46022 sys_ioctl(fd, FDEJECT, 0);
46023 sys_close(fd);
46024 }
46025 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
46026 - fd = sys_open("/dev/console", O_RDWR, 0);
46027 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
46028 if (fd >= 0) {
46029 sys_ioctl(fd, TCGETS, (long)&termios);
46030 termios.c_lflag &= ~ICANON;
46031 sys_ioctl(fd, TCSETSF, (long)&termios);
46032 - sys_read(fd, &c, 1);
46033 + sys_read(fd, (char __user *)&c, 1);
46034 termios.c_lflag |= ICANON;
46035 sys_ioctl(fd, TCSETSF, (long)&termios);
46036 sys_close(fd);
46037 @@ -417,6 +417,6 @@ void __init prepare_namespace(void)
46038 mount_root();
46039 out:
46040 devtmpfs_mount("dev");
46041 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
46042 - sys_chroot(".");
46043 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
46044 + sys_chroot((__force char __user *)".");
46045 }
46046 diff -urNp linux-2.6.34.1/init/do_mounts.h linux-2.6.34.1/init/do_mounts.h
46047 --- linux-2.6.34.1/init/do_mounts.h 2010-07-05 14:24:10.000000000 -0400
46048 +++ linux-2.6.34.1/init/do_mounts.h 2010-07-07 09:04:57.000000000 -0400
46049 @@ -15,15 +15,15 @@ extern int root_mountflags;
46050
46051 static inline int create_dev(char *name, dev_t dev)
46052 {
46053 - sys_unlink(name);
46054 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
46055 + sys_unlink((__force char __user *)name);
46056 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
46057 }
46058
46059 #if BITS_PER_LONG == 32
46060 static inline u32 bstat(char *name)
46061 {
46062 struct stat64 stat;
46063 - if (sys_stat64(name, &stat) != 0)
46064 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
46065 return 0;
46066 if (!S_ISBLK(stat.st_mode))
46067 return 0;
46068 diff -urNp linux-2.6.34.1/init/do_mounts_initrd.c linux-2.6.34.1/init/do_mounts_initrd.c
46069 --- linux-2.6.34.1/init/do_mounts_initrd.c 2010-07-05 14:24:10.000000000 -0400
46070 +++ linux-2.6.34.1/init/do_mounts_initrd.c 2010-07-07 09:04:57.000000000 -0400
46071 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
46072 create_dev("/dev/root.old", Root_RAM0);
46073 /* mount initrd on rootfs' /root */
46074 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
46075 - sys_mkdir("/old", 0700);
46076 - root_fd = sys_open("/", 0, 0);
46077 - old_fd = sys_open("/old", 0, 0);
46078 + sys_mkdir((__force const char __user *)"/old", 0700);
46079 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
46080 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
46081 /* move initrd over / and chdir/chroot in initrd root */
46082 - sys_chdir("/root");
46083 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
46084 - sys_chroot(".");
46085 + sys_chdir((__force const char __user *)"/root");
46086 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
46087 + sys_chroot((__force const char __user *)".");
46088
46089 /*
46090 * In case that a resume from disk is carried out by linuxrc or one of
46091 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
46092
46093 /* move initrd to rootfs' /old */
46094 sys_fchdir(old_fd);
46095 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
46096 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
46097 /* switch root and cwd back to / of rootfs */
46098 sys_fchdir(root_fd);
46099 - sys_chroot(".");
46100 + sys_chroot((__force const char __user *)".");
46101 sys_close(old_fd);
46102 sys_close(root_fd);
46103
46104 if (new_decode_dev(real_root_dev) == Root_RAM0) {
46105 - sys_chdir("/old");
46106 + sys_chdir((__force const char __user *)"/old");
46107 return;
46108 }
46109
46110 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
46111 mount_root();
46112
46113 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
46114 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
46115 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
46116 if (!error)
46117 printk("okay\n");
46118 else {
46119 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
46120 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
46121 if (error == -ENOENT)
46122 printk("/initrd does not exist. Ignored.\n");
46123 else
46124 printk("failed\n");
46125 printk(KERN_NOTICE "Unmounting old root\n");
46126 - sys_umount("/old", MNT_DETACH);
46127 + sys_umount((__force char __user *)"/old", MNT_DETACH);
46128 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
46129 if (fd < 0) {
46130 error = fd;
46131 @@ -115,11 +115,11 @@ int __init initrd_load(void)
46132 * mounted in the normal path.
46133 */
46134 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
46135 - sys_unlink("/initrd.image");
46136 + sys_unlink((__force const char __user *)"/initrd.image");
46137 handle_initrd();
46138 return 1;
46139 }
46140 }
46141 - sys_unlink("/initrd.image");
46142 + sys_unlink((__force const char __user *)"/initrd.image");
46143 return 0;
46144 }
46145 diff -urNp linux-2.6.34.1/init/do_mounts_md.c linux-2.6.34.1/init/do_mounts_md.c
46146 --- linux-2.6.34.1/init/do_mounts_md.c 2010-07-05 14:24:10.000000000 -0400
46147 +++ linux-2.6.34.1/init/do_mounts_md.c 2010-07-07 09:04:57.000000000 -0400
46148 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
46149 partitioned ? "_d" : "", minor,
46150 md_setup_args[ent].device_names);
46151
46152 - fd = sys_open(name, 0, 0);
46153 + fd = sys_open((__force char __user *)name, 0, 0);
46154 if (fd < 0) {
46155 printk(KERN_ERR "md: open failed - cannot start "
46156 "array %s\n", name);
46157 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
46158 * array without it
46159 */
46160 sys_close(fd);
46161 - fd = sys_open(name, 0, 0);
46162 + fd = sys_open((__force char __user *)name, 0, 0);
46163 sys_ioctl(fd, BLKRRPART, 0);
46164 }
46165 sys_close(fd);
46166 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
46167
46168 wait_for_device_probe();
46169
46170 - fd = sys_open("/dev/md0", 0, 0);
46171 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
46172 if (fd >= 0) {
46173 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
46174 sys_close(fd);
46175 diff -urNp linux-2.6.34.1/init/initramfs.c linux-2.6.34.1/init/initramfs.c
46176 --- linux-2.6.34.1/init/initramfs.c 2010-07-05 14:24:10.000000000 -0400
46177 +++ linux-2.6.34.1/init/initramfs.c 2010-07-07 09:04:57.000000000 -0400
46178 @@ -74,7 +74,7 @@ static void __init free_hash(void)
46179 }
46180 }
46181
46182 -static long __init do_utime(char __user *filename, time_t mtime)
46183 +static long __init do_utime(__force char __user *filename, time_t mtime)
46184 {
46185 struct timespec t[2];
46186
46187 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
46188 struct dir_entry *de, *tmp;
46189 list_for_each_entry_safe(de, tmp, &dir_list, list) {
46190 list_del(&de->list);
46191 - do_utime(de->name, de->mtime);
46192 + do_utime((__force char __user *)de->name, de->mtime);
46193 kfree(de->name);
46194 kfree(de);
46195 }
46196 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
46197 if (nlink >= 2) {
46198 char *old = find_link(major, minor, ino, mode, collected);
46199 if (old)
46200 - return (sys_link(old, collected) < 0) ? -1 : 1;
46201 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
46202 }
46203 return 0;
46204 }
46205 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
46206 {
46207 struct stat st;
46208
46209 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
46210 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
46211 if (S_ISDIR(st.st_mode))
46212 - sys_rmdir(path);
46213 + sys_rmdir((__force char __user *)path);
46214 else
46215 - sys_unlink(path);
46216 + sys_unlink((__force char __user *)path);
46217 }
46218 }
46219
46220 @@ -305,7 +305,7 @@ static int __init do_name(void)
46221 int openflags = O_WRONLY|O_CREAT;
46222 if (ml != 1)
46223 openflags |= O_TRUNC;
46224 - wfd = sys_open(collected, openflags, mode);
46225 + wfd = sys_open((__force char __user *)collected, openflags, mode);
46226
46227 if (wfd >= 0) {
46228 sys_fchown(wfd, uid, gid);
46229 @@ -317,17 +317,17 @@ static int __init do_name(void)
46230 }
46231 }
46232 } else if (S_ISDIR(mode)) {
46233 - sys_mkdir(collected, mode);
46234 - sys_chown(collected, uid, gid);
46235 - sys_chmod(collected, mode);
46236 + sys_mkdir((__force char __user *)collected, mode);
46237 + sys_chown((__force char __user *)collected, uid, gid);
46238 + sys_chmod((__force char __user *)collected, mode);
46239 dir_add(collected, mtime);
46240 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
46241 S_ISFIFO(mode) || S_ISSOCK(mode)) {
46242 if (maybe_link() == 0) {
46243 - sys_mknod(collected, mode, rdev);
46244 - sys_chown(collected, uid, gid);
46245 - sys_chmod(collected, mode);
46246 - do_utime(collected, mtime);
46247 + sys_mknod((__force char __user *)collected, mode, rdev);
46248 + sys_chown((__force char __user *)collected, uid, gid);
46249 + sys_chmod((__force char __user *)collected, mode);
46250 + do_utime((__force char __user *)collected, mtime);
46251 }
46252 }
46253 return 0;
46254 @@ -336,15 +336,15 @@ static int __init do_name(void)
46255 static int __init do_copy(void)
46256 {
46257 if (count >= body_len) {
46258 - sys_write(wfd, victim, body_len);
46259 + sys_write(wfd, (__force char __user *)victim, body_len);
46260 sys_close(wfd);
46261 - do_utime(vcollected, mtime);
46262 + do_utime((__force char __user *)vcollected, mtime);
46263 kfree(vcollected);
46264 eat(body_len);
46265 state = SkipIt;
46266 return 0;
46267 } else {
46268 - sys_write(wfd, victim, count);
46269 + sys_write(wfd, (__force char __user *)victim, count);
46270 body_len -= count;
46271 eat(count);
46272 return 1;
46273 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
46274 {
46275 collected[N_ALIGN(name_len) + body_len] = '\0';
46276 clean_path(collected, 0);
46277 - sys_symlink(collected + N_ALIGN(name_len), collected);
46278 - sys_lchown(collected, uid, gid);
46279 - do_utime(collected, mtime);
46280 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
46281 + sys_lchown((__force char __user *)collected, uid, gid);
46282 + do_utime((__force char __user *)collected, mtime);
46283 state = SkipIt;
46284 next_state = Reset;
46285 return 0;
46286 diff -urNp linux-2.6.34.1/init/main.c linux-2.6.34.1/init/main.c
46287 --- linux-2.6.34.1/init/main.c 2010-07-05 14:24:10.000000000 -0400
46288 +++ linux-2.6.34.1/init/main.c 2010-07-07 09:04:57.000000000 -0400
46289 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
46290 #ifdef CONFIG_TC
46291 extern void tc_init(void);
46292 #endif
46293 +extern void grsecurity_init(void);
46294
46295 enum system_states system_state __read_mostly;
46296 EXPORT_SYMBOL(system_state);
46297 @@ -197,6 +198,50 @@ static int __init set_reset_devices(char
46298
46299 __setup("reset_devices", set_reset_devices);
46300
46301 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
46302 +extern void pax_enter_kernel_user(void);
46303 +extern void pax_exit_kernel_user(void);
46304 +extern pgdval_t clone_pgd_mask;
46305 +#endif
46306 +
46307 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
46308 +static int __init setup_pax_nouderef(char *str)
46309 +{
46310 +#ifdef CONFIG_X86_32
46311 + unsigned int cpu;
46312 +
46313 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
46314 + get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].type = 3;
46315 + get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].limit = 0xf;
46316 + }
46317 + asm("mov %0, %%ds" : : "r" (__KERNEL_DS) : "memory");
46318 + asm("mov %0, %%es" : : "r" (__KERNEL_DS) : "memory");
46319 + asm("mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
46320 +#else
46321 + char *p;
46322 + p = (char *)pax_enter_kernel_user;
46323 + *p = 0xc3;
46324 + p = (char *)pax_exit_kernel_user;
46325 + *p = 0xc3;
46326 + clone_pgd_mask = ~(pgdval_t)0UL;
46327 +#endif
46328 +
46329 + return 0;
46330 +}
46331 +early_param("pax_nouderef", setup_pax_nouderef);
46332 +#endif
46333 +
46334 +#ifdef CONFIG_PAX_SOFTMODE
46335 +unsigned int pax_softmode;
46336 +
46337 +static int __init setup_pax_softmode(char *str)
46338 +{
46339 + get_option(&str, &pax_softmode);
46340 + return 1;
46341 +}
46342 +__setup("pax_softmode=", setup_pax_softmode);
46343 +#endif
46344 +
46345 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
46346 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
46347 static const char *panic_later, *panic_param;
46348 @@ -713,52 +758,53 @@ int initcall_debug;
46349 core_param(initcall_debug, initcall_debug, bool, 0644);
46350
46351 static char msgbuf[64];
46352 -static struct boot_trace_call call;
46353 -static struct boot_trace_ret ret;
46354 +static struct boot_trace_call trace_call;
46355 +static struct boot_trace_ret trace_ret;
46356
46357 int do_one_initcall(initcall_t fn)
46358 {
46359 int count = preempt_count();
46360 ktime_t calltime, delta, rettime;
46361 + const char *msg1 = "", *msg2 = "";
46362
46363 if (initcall_debug) {
46364 - call.caller = task_pid_nr(current);
46365 - printk("calling %pF @ %i\n", fn, call.caller);
46366 + trace_call.caller = task_pid_nr(current);
46367 + printk("calling %pF @ %i\n", fn, trace_call.caller);
46368 calltime = ktime_get();
46369 - trace_boot_call(&call, fn);
46370 + trace_boot_call(&trace_call, fn);
46371 enable_boot_trace();
46372 }
46373
46374 - ret.result = fn();
46375 + trace_ret.result = fn();
46376
46377 if (initcall_debug) {
46378 disable_boot_trace();
46379 rettime = ktime_get();
46380 delta = ktime_sub(rettime, calltime);
46381 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
46382 - trace_boot_ret(&ret, fn);
46383 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
46384 + trace_boot_ret(&trace_ret, fn);
46385 printk("initcall %pF returned %d after %Ld usecs\n", fn,
46386 - ret.result, ret.duration);
46387 + trace_ret.result, trace_ret.duration);
46388 }
46389
46390 msgbuf[0] = 0;
46391
46392 - if (ret.result && ret.result != -ENODEV && initcall_debug)
46393 - sprintf(msgbuf, "error code %d ", ret.result);
46394 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
46395 + sprintf(msgbuf, "error code %d ", trace_ret.result);
46396
46397 if (preempt_count() != count) {
46398 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
46399 + msg1 = " preemption imbalance";
46400 preempt_count() = count;
46401 }
46402 if (irqs_disabled()) {
46403 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
46404 + msg2 = " disabled interrupts";
46405 local_irq_enable();
46406 }
46407 - if (msgbuf[0]) {
46408 - printk("initcall %pF returned with %s\n", fn, msgbuf);
46409 + if (msgbuf[0] || *msg1 || *msg2) {
46410 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
46411 }
46412
46413 - return ret.result;
46414 + return trace_ret.result;
46415 }
46416
46417
46418 @@ -886,7 +932,7 @@ static int __init kernel_init(void * unu
46419 do_basic_setup();
46420
46421 /* Open the /dev/console on the rootfs, this should never fail */
46422 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
46423 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
46424 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
46425
46426 (void) sys_dup(0);
46427 @@ -899,11 +945,13 @@ static int __init kernel_init(void * unu
46428 if (!ramdisk_execute_command)
46429 ramdisk_execute_command = "/init";
46430
46431 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
46432 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
46433 ramdisk_execute_command = NULL;
46434 prepare_namespace();
46435 }
46436
46437 + grsecurity_init();
46438 +
46439 /*
46440 * Ok, we have completed the initial bootup, and
46441 * we're essentially up and running. Get rid of the
46442 diff -urNp linux-2.6.34.1/init/noinitramfs.c linux-2.6.34.1/init/noinitramfs.c
46443 --- linux-2.6.34.1/init/noinitramfs.c 2010-07-05 14:24:10.000000000 -0400
46444 +++ linux-2.6.34.1/init/noinitramfs.c 2010-07-07 09:04:57.000000000 -0400
46445 @@ -29,17 +29,17 @@ static int __init default_rootfs(void)
46446 {
46447 int err;
46448
46449 - err = sys_mkdir("/dev", 0755);
46450 + err = sys_mkdir((const char __user *)"/dev", 0755);
46451 if (err < 0)
46452 goto out;
46453
46454 - err = sys_mknod((const char __user *) "/dev/console",
46455 + err = sys_mknod((__force const char __user *) "/dev/console",
46456 S_IFCHR | S_IRUSR | S_IWUSR,
46457 new_encode_dev(MKDEV(5, 1)));
46458 if (err < 0)
46459 goto out;
46460
46461 - err = sys_mkdir("/root", 0700);
46462 + err = sys_mkdir((const char __user *)"/root", 0700);
46463 if (err < 0)
46464 goto out;
46465
46466 diff -urNp linux-2.6.34.1/ipc/mqueue.c linux-2.6.34.1/ipc/mqueue.c
46467 --- linux-2.6.34.1/ipc/mqueue.c 2010-07-05 14:24:10.000000000 -0400
46468 +++ linux-2.6.34.1/ipc/mqueue.c 2010-07-07 09:04:57.000000000 -0400
46469 @@ -153,6 +153,7 @@ static struct inode *mqueue_get_inode(st
46470 mq_bytes = (mq_msg_tblsz +
46471 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
46472
46473 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
46474 spin_lock(&mq_lock);
46475 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
46476 u->mq_bytes + mq_bytes >
46477 diff -urNp linux-2.6.34.1/ipc/shm.c linux-2.6.34.1/ipc/shm.c
46478 --- linux-2.6.34.1/ipc/shm.c 2010-07-05 14:24:10.000000000 -0400
46479 +++ linux-2.6.34.1/ipc/shm.c 2010-07-07 09:04:57.000000000 -0400
46480 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
46481 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
46482 #endif
46483
46484 +#ifdef CONFIG_GRKERNSEC
46485 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
46486 + const time_t shm_createtime, const uid_t cuid,
46487 + const int shmid);
46488 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
46489 + const time_t shm_createtime);
46490 +#endif
46491 +
46492 void shm_init_ns(struct ipc_namespace *ns)
46493 {
46494 ns->shm_ctlmax = SHMMAX;
46495 @@ -398,6 +406,14 @@ static int newseg(struct ipc_namespace *
46496 shp->shm_lprid = 0;
46497 shp->shm_atim = shp->shm_dtim = 0;
46498 shp->shm_ctim = get_seconds();
46499 +#ifdef CONFIG_GRKERNSEC
46500 + {
46501 + struct timespec timeval;
46502 + do_posix_clock_monotonic_gettime(&timeval);
46503 +
46504 + shp->shm_createtime = timeval.tv_sec;
46505 + }
46506 +#endif
46507 shp->shm_segsz = size;
46508 shp->shm_nattch = 0;
46509 shp->shm_file = file;
46510 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
46511 if (err)
46512 goto out_unlock;
46513
46514 +#ifdef CONFIG_GRKERNSEC
46515 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
46516 + shp->shm_perm.cuid, shmid) ||
46517 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
46518 + err = -EACCES;
46519 + goto out_unlock;
46520 + }
46521 +#endif
46522 +
46523 path = shp->shm_file->f_path;
46524 path_get(&path);
46525 shp->shm_nattch++;
46526 +#ifdef CONFIG_GRKERNSEC
46527 + shp->shm_lapid = current->pid;
46528 +#endif
46529 size = i_size_read(path.dentry->d_inode);
46530 shm_unlock(shp);
46531
46532 diff -urNp linux-2.6.34.1/kernel/acct.c linux-2.6.34.1/kernel/acct.c
46533 --- linux-2.6.34.1/kernel/acct.c 2010-07-05 14:24:10.000000000 -0400
46534 +++ linux-2.6.34.1/kernel/acct.c 2010-07-07 09:04:57.000000000 -0400
46535 @@ -580,7 +580,7 @@ static void do_acct_process(struct bsd_a
46536 */
46537 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46538 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
46539 - file->f_op->write(file, (char *)&ac,
46540 + file->f_op->write(file, (__force char __user *)&ac,
46541 sizeof(acct_t), &file->f_pos);
46542 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
46543 set_fs(fs);
46544 diff -urNp linux-2.6.34.1/kernel/capability.c linux-2.6.34.1/kernel/capability.c
46545 --- linux-2.6.34.1/kernel/capability.c 2010-07-05 14:24:10.000000000 -0400
46546 +++ linux-2.6.34.1/kernel/capability.c 2010-07-07 09:04:57.000000000 -0400
46547 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
46548 * before modification is attempted and the application
46549 * fails.
46550 */
46551 + if (tocopy > ARRAY_SIZE(kdata))
46552 + return -EFAULT;
46553 +
46554 if (copy_to_user(dataptr, kdata, tocopy
46555 * sizeof(struct __user_cap_data_struct))) {
46556 return -EFAULT;
46557 @@ -307,10 +310,21 @@ int capable(int cap)
46558 BUG();
46559 }
46560
46561 - if (security_capable(cap) == 0) {
46562 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
46563 + current->flags |= PF_SUPERPRIV;
46564 + return 1;
46565 + }
46566 + return 0;
46567 +}
46568 +
46569 +int capable_nolog(int cap)
46570 +{
46571 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
46572 current->flags |= PF_SUPERPRIV;
46573 return 1;
46574 }
46575 return 0;
46576 }
46577 +
46578 EXPORT_SYMBOL(capable);
46579 +EXPORT_SYMBOL(capable_nolog);
46580 diff -urNp linux-2.6.34.1/kernel/configs.c linux-2.6.34.1/kernel/configs.c
46581 --- linux-2.6.34.1/kernel/configs.c 2010-07-05 14:24:10.000000000 -0400
46582 +++ linux-2.6.34.1/kernel/configs.c 2010-07-07 09:04:57.000000000 -0400
46583 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
46584 struct proc_dir_entry *entry;
46585
46586 /* create the current config file */
46587 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46588 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
46589 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
46590 + &ikconfig_file_ops);
46591 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46592 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
46593 + &ikconfig_file_ops);
46594 +#endif
46595 +#else
46596 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
46597 &ikconfig_file_ops);
46598 +#endif
46599 +
46600 if (!entry)
46601 return -ENOMEM;
46602
46603 diff -urNp linux-2.6.34.1/kernel/cpu.c linux-2.6.34.1/kernel/cpu.c
46604 --- linux-2.6.34.1/kernel/cpu.c 2010-07-05 14:24:10.000000000 -0400
46605 +++ linux-2.6.34.1/kernel/cpu.c 2010-07-07 09:04:57.000000000 -0400
46606 @@ -20,7 +20,7 @@
46607 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
46608 static DEFINE_MUTEX(cpu_add_remove_lock);
46609
46610 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
46611 +static RAW_NOTIFIER_HEAD(cpu_chain);
46612
46613 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
46614 * Should always be manipulated under cpu_add_remove_lock
46615 diff -urNp linux-2.6.34.1/kernel/cred.c linux-2.6.34.1/kernel/cred.c
46616 --- linux-2.6.34.1/kernel/cred.c 2010-07-05 14:24:10.000000000 -0400
46617 +++ linux-2.6.34.1/kernel/cred.c 2010-07-07 09:04:57.000000000 -0400
46618 @@ -527,6 +527,8 @@ int commit_creds(struct cred *new)
46619
46620 get_cred(new); /* we will require a ref for the subj creds too */
46621
46622 + gr_set_role_label(task, new->uid, new->gid);
46623 +
46624 /* dumpability changes */
46625 if (old->euid != new->euid ||
46626 old->egid != new->egid ||
46627 diff -urNp linux-2.6.34.1/kernel/exit.c linux-2.6.34.1/kernel/exit.c
46628 --- linux-2.6.34.1/kernel/exit.c 2010-07-05 14:24:10.000000000 -0400
46629 +++ linux-2.6.34.1/kernel/exit.c 2010-07-07 09:04:57.000000000 -0400
46630 @@ -57,6 +57,10 @@
46631 #include <asm/mmu_context.h>
46632 #include "cred-internals.h"
46633
46634 +#ifdef CONFIG_GRKERNSEC
46635 +extern rwlock_t grsec_exec_file_lock;
46636 +#endif
46637 +
46638 static void exit_mm(struct task_struct * tsk);
46639
46640 static void __unhash_process(struct task_struct *p)
46641 @@ -170,6 +174,8 @@ void release_task(struct task_struct * p
46642 struct task_struct *leader;
46643 int zap_leader;
46644 repeat:
46645 + gr_del_task_from_ip_table(p);
46646 +
46647 tracehook_prepare_release_task(p);
46648 /* don't need to get the RCU readlock here - the process is dead and
46649 * can't be modifying its own credentials. But shut RCU-lockdep up */
46650 @@ -339,11 +345,22 @@ static void reparent_to_kthreadd(void)
46651 {
46652 write_lock_irq(&tasklist_lock);
46653
46654 +#ifdef CONFIG_GRKERNSEC
46655 + write_lock(&grsec_exec_file_lock);
46656 + if (current->exec_file) {
46657 + fput(current->exec_file);
46658 + current->exec_file = NULL;
46659 + }
46660 + write_unlock(&grsec_exec_file_lock);
46661 +#endif
46662 +
46663 ptrace_unlink(current);
46664 /* Reparent to init */
46665 current->real_parent = current->parent = kthreadd_task;
46666 list_move_tail(&current->sibling, &current->real_parent->children);
46667
46668 + gr_set_kernel_label(current);
46669 +
46670 /* Set the exit signal to SIGCHLD so we signal init on exit */
46671 current->exit_signal = SIGCHLD;
46672
46673 @@ -395,7 +412,7 @@ int allow_signal(int sig)
46674 * know it'll be handled, so that they don't get converted to
46675 * SIGKILL or just silently dropped.
46676 */
46677 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
46678 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
46679 recalc_sigpending();
46680 spin_unlock_irq(&current->sighand->siglock);
46681 return 0;
46682 @@ -431,6 +448,17 @@ void daemonize(const char *name, ...)
46683 vsnprintf(current->comm, sizeof(current->comm), name, args);
46684 va_end(args);
46685
46686 +#ifdef CONFIG_GRKERNSEC
46687 + write_lock(&grsec_exec_file_lock);
46688 + if (current->exec_file) {
46689 + fput(current->exec_file);
46690 + current->exec_file = NULL;
46691 + }
46692 + write_unlock(&grsec_exec_file_lock);
46693 +#endif
46694 +
46695 + gr_set_kernel_label(current);
46696 +
46697 /*
46698 * If we were started as result of loading a module, close all of the
46699 * user space pages. We don't need them, and if we didn't close them
46700 @@ -971,6 +999,9 @@ NORET_TYPE void do_exit(long code)
46701 tsk->exit_code = code;
46702 taskstats_exit(tsk, group_dead);
46703
46704 + gr_acl_handle_psacct(tsk, code);
46705 + gr_acl_handle_exit();
46706 +
46707 exit_mm(tsk);
46708
46709 if (group_dead)
46710 diff -urNp linux-2.6.34.1/kernel/fork.c linux-2.6.34.1/kernel/fork.c
46711 --- linux-2.6.34.1/kernel/fork.c 2010-07-05 14:24:10.000000000 -0400
46712 +++ linux-2.6.34.1/kernel/fork.c 2010-07-07 09:04:57.000000000 -0400
46713 @@ -263,7 +263,7 @@ static struct task_struct *dup_task_stru
46714 *stackend = STACK_END_MAGIC; /* for overflow detection */
46715
46716 #ifdef CONFIG_CC_STACKPROTECTOR
46717 - tsk->stack_canary = get_random_int();
46718 + tsk->stack_canary = pax_get_random_long();
46719 #endif
46720
46721 /* One for us, one for whoever does the "release_task()" (usually parent) */
46722 @@ -303,8 +303,8 @@ static int dup_mmap(struct mm_struct *mm
46723 mm->locked_vm = 0;
46724 mm->mmap = NULL;
46725 mm->mmap_cache = NULL;
46726 - mm->free_area_cache = oldmm->mmap_base;
46727 - mm->cached_hole_size = ~0UL;
46728 + mm->free_area_cache = oldmm->free_area_cache;
46729 + mm->cached_hole_size = oldmm->cached_hole_size;
46730 mm->map_count = 0;
46731 cpumask_clear(mm_cpumask(mm));
46732 mm->mm_rb = RB_ROOT;
46733 @@ -347,6 +347,7 @@ static int dup_mmap(struct mm_struct *mm
46734 tmp->vm_flags &= ~VM_LOCKED;
46735 tmp->vm_mm = mm;
46736 tmp->vm_next = NULL;
46737 + tmp->vm_mirror = NULL;
46738 file = tmp->vm_file;
46739 if (file) {
46740 struct inode *inode = file->f_path.dentry->d_inode;
46741 @@ -393,6 +394,31 @@ static int dup_mmap(struct mm_struct *mm
46742 if (retval)
46743 goto out;
46744 }
46745 +
46746 +#ifdef CONFIG_PAX_SEGMEXEC
46747 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
46748 + struct vm_area_struct *mpnt_m;
46749 +
46750 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
46751 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
46752 +
46753 + if (!mpnt->vm_mirror)
46754 + continue;
46755 +
46756 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
46757 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
46758 + mpnt->vm_mirror = mpnt_m;
46759 + } else {
46760 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
46761 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
46762 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
46763 + mpnt->vm_mirror->vm_mirror = mpnt;
46764 + }
46765 + }
46766 + BUG_ON(mpnt_m);
46767 + }
46768 +#endif
46769 +
46770 /* a new mm has just been created */
46771 arch_dup_mmap(oldmm, mm);
46772 retval = 0;
46773 @@ -744,13 +770,14 @@ static int copy_fs(unsigned long clone_f
46774 write_unlock(&fs->lock);
46775 return -EAGAIN;
46776 }
46777 - fs->users++;
46778 + atomic_inc(&fs->users);
46779 write_unlock(&fs->lock);
46780 return 0;
46781 }
46782 tsk->fs = copy_fs_struct(fs);
46783 if (!tsk->fs)
46784 return -ENOMEM;
46785 + gr_set_chroot_entries(tsk, &tsk->fs->root);
46786 return 0;
46787 }
46788
46789 @@ -1009,10 +1036,13 @@ static struct task_struct *copy_process(
46790 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
46791 #endif
46792 retval = -EAGAIN;
46793 +
46794 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
46795 +
46796 if (atomic_read(&p->real_cred->user->processes) >=
46797 task_rlimit(p, RLIMIT_NPROC)) {
46798 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
46799 - p->real_cred->user != INIT_USER)
46800 + if (p->real_cred->user != INIT_USER &&
46801 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
46802 goto bad_fork_free;
46803 }
46804
46805 @@ -1168,6 +1198,8 @@ static struct task_struct *copy_process(
46806 goto bad_fork_free_pid;
46807 }
46808
46809 + gr_copy_label(p);
46810 +
46811 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
46812 /*
46813 * Clear TID on mm_release()?
46814 @@ -1320,6 +1352,8 @@ bad_fork_cleanup_count:
46815 bad_fork_free:
46816 free_task(p);
46817 fork_out:
46818 + gr_log_forkfail(retval);
46819 +
46820 return ERR_PTR(retval);
46821 }
46822
46823 @@ -1413,6 +1447,8 @@ long do_fork(unsigned long clone_flags,
46824 if (clone_flags & CLONE_PARENT_SETTID)
46825 put_user(nr, parent_tidptr);
46826
46827 + gr_handle_brute_check();
46828 +
46829 if (clone_flags & CLONE_VFORK) {
46830 p->vfork_done = &vfork;
46831 init_completion(&vfork);
46832 @@ -1545,7 +1581,7 @@ static int unshare_fs(unsigned long unsh
46833 return 0;
46834
46835 /* don't need lock here; in the worst case we'll do useless copy */
46836 - if (fs->users == 1)
46837 + if (atomic_read(&fs->users) == 1)
46838 return 0;
46839
46840 *new_fsp = copy_fs_struct(fs);
46841 @@ -1668,7 +1704,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
46842 fs = current->fs;
46843 write_lock(&fs->lock);
46844 current->fs = new_fs;
46845 - if (--fs->users)
46846 + gr_set_chroot_entries(current, &current->fs->root);
46847 + if (atomic_dec_return(&fs->users))
46848 new_fs = NULL;
46849 else
46850 new_fs = fs;
46851 diff -urNp linux-2.6.34.1/kernel/futex.c linux-2.6.34.1/kernel/futex.c
46852 --- linux-2.6.34.1/kernel/futex.c 2010-07-05 14:24:10.000000000 -0400
46853 +++ linux-2.6.34.1/kernel/futex.c 2010-07-07 09:04:57.000000000 -0400
46854 @@ -54,6 +54,7 @@
46855 #include <linux/mount.h>
46856 #include <linux/pagemap.h>
46857 #include <linux/syscalls.h>
46858 +#include <linux/ptrace.h>
46859 #include <linux/signal.h>
46860 #include <linux/module.h>
46861 #include <linux/magic.h>
46862 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
46863 struct page *page;
46864 int err;
46865
46866 +#ifdef CONFIG_PAX_SEGMEXEC
46867 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
46868 + return -EFAULT;
46869 +#endif
46870 +
46871 /*
46872 * The futex address must be "naturally" aligned.
46873 */
46874 @@ -1852,7 +1858,7 @@ retry:
46875
46876 restart = &current_thread_info()->restart_block;
46877 restart->fn = futex_wait_restart;
46878 - restart->futex.uaddr = (u32 *)uaddr;
46879 + restart->futex.uaddr = uaddr;
46880 restart->futex.val = val;
46881 restart->futex.time = abs_time->tv64;
46882 restart->futex.bitset = bitset;
46883 @@ -2385,7 +2391,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
46884 {
46885 struct robust_list_head __user *head;
46886 unsigned long ret;
46887 - const struct cred *cred = current_cred(), *pcred;
46888 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
46889 + const struct cred *cred = current_cred();
46890 + const struct cred *pcred;
46891 +#endif
46892
46893 if (!futex_cmpxchg_enabled)
46894 return -ENOSYS;
46895 @@ -2401,11 +2410,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
46896 if (!p)
46897 goto err_unlock;
46898 ret = -EPERM;
46899 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46900 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
46901 + goto err_unlock;
46902 +#else
46903 pcred = __task_cred(p);
46904 if (cred->euid != pcred->euid &&
46905 cred->euid != pcred->uid &&
46906 !capable(CAP_SYS_PTRACE))
46907 goto err_unlock;
46908 +#endif
46909 head = p->robust_list;
46910 rcu_read_unlock();
46911 }
46912 @@ -2467,7 +2481,7 @@ retry:
46913 */
46914 static inline int fetch_robust_entry(struct robust_list __user **entry,
46915 struct robust_list __user * __user *head,
46916 - int *pi)
46917 + unsigned int *pi)
46918 {
46919 unsigned long uentry;
46920
46921 diff -urNp linux-2.6.34.1/kernel/futex_compat.c linux-2.6.34.1/kernel/futex_compat.c
46922 --- linux-2.6.34.1/kernel/futex_compat.c 2010-07-05 14:24:10.000000000 -0400
46923 +++ linux-2.6.34.1/kernel/futex_compat.c 2010-07-07 09:04:57.000000000 -0400
46924 @@ -10,6 +10,7 @@
46925 #include <linux/compat.h>
46926 #include <linux/nsproxy.h>
46927 #include <linux/futex.h>
46928 +#include <linux/ptrace.h>
46929
46930 #include <asm/uaccess.h>
46931
46932 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
46933 {
46934 struct compat_robust_list_head __user *head;
46935 unsigned long ret;
46936 - const struct cred *cred = current_cred(), *pcred;
46937 + const struct cred *cred = current_cred();
46938 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
46939 + const struct cred *pcred;
46940 +#endif
46941
46942 if (!futex_cmpxchg_enabled)
46943 return -ENOSYS;
46944 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
46945 if (!p)
46946 goto err_unlock;
46947 ret = -EPERM;
46948 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46949 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
46950 + goto err_unlock;
46951 +#else
46952 pcred = __task_cred(p);
46953 if (cred->euid != pcred->euid &&
46954 cred->euid != pcred->uid &&
46955 !capable(CAP_SYS_PTRACE))
46956 goto err_unlock;
46957 +#endif
46958 head = p->compat_robust_list;
46959 rcu_read_unlock();
46960 }
46961 diff -urNp linux-2.6.34.1/kernel/gcov/base.c linux-2.6.34.1/kernel/gcov/base.c
46962 --- linux-2.6.34.1/kernel/gcov/base.c 2010-07-05 14:24:10.000000000 -0400
46963 +++ linux-2.6.34.1/kernel/gcov/base.c 2010-07-07 09:04:57.000000000 -0400
46964 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
46965 }
46966
46967 #ifdef CONFIG_MODULES
46968 -static inline int within(void *addr, void *start, unsigned long size)
46969 -{
46970 - return ((addr >= start) && (addr < start + size));
46971 -}
46972 -
46973 /* Update list and generate events when modules are unloaded. */
46974 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
46975 void *data)
46976 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
46977 prev = NULL;
46978 /* Remove entries located in module from linked list. */
46979 for (info = gcov_info_head; info; info = info->next) {
46980 - if (within(info, mod->module_core, mod->core_size)) {
46981 + if (within_module_core_rw((unsigned long)info, mod)) {
46982 if (prev)
46983 prev->next = info->next;
46984 else
46985 diff -urNp linux-2.6.34.1/kernel/hrtimer.c linux-2.6.34.1/kernel/hrtimer.c
46986 --- linux-2.6.34.1/kernel/hrtimer.c 2010-07-05 14:24:10.000000000 -0400
46987 +++ linux-2.6.34.1/kernel/hrtimer.c 2010-07-07 09:04:57.000000000 -0400
46988 @@ -1398,7 +1398,7 @@ void hrtimer_peek_ahead_timers(void)
46989 local_irq_restore(flags);
46990 }
46991
46992 -static void run_hrtimer_softirq(struct softirq_action *h)
46993 +static void run_hrtimer_softirq(void)
46994 {
46995 hrtimer_peek_ahead_timers();
46996 }
46997 diff -urNp linux-2.6.34.1/kernel/kallsyms.c linux-2.6.34.1/kernel/kallsyms.c
46998 --- linux-2.6.34.1/kernel/kallsyms.c 2010-07-05 14:24:10.000000000 -0400
46999 +++ linux-2.6.34.1/kernel/kallsyms.c 2010-07-07 09:04:57.000000000 -0400
47000 @@ -11,6 +11,9 @@
47001 * Changed the compression method from stem compression to "table lookup"
47002 * compression (see scripts/kallsyms.c for a more complete description)
47003 */
47004 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47005 +#define __INCLUDED_BY_HIDESYM 1
47006 +#endif
47007 #include <linux/kallsyms.h>
47008 #include <linux/module.h>
47009 #include <linux/init.h>
47010 @@ -52,6 +55,9 @@ extern const unsigned long kallsyms_mark
47011
47012 static inline int is_kernel_inittext(unsigned long addr)
47013 {
47014 + if (system_state != SYSTEM_BOOTING)
47015 + return 0;
47016 +
47017 if (addr >= (unsigned long)_sinittext
47018 && addr <= (unsigned long)_einittext)
47019 return 1;
47020 @@ -68,6 +74,26 @@ static inline int is_kernel_text(unsigne
47021
47022 static inline int is_kernel(unsigned long addr)
47023 {
47024 + if (is_kernel_inittext(addr))
47025 + return 1;
47026 +
47027 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
47028 +#ifdef CONFIG_MODULES
47029 + if ((unsigned long)MODULES_EXEC_VADDR <= ktla_ktva(addr) && ktla_ktva(addr) <= (unsigned long)MODULES_EXEC_END)
47030 + return 0;
47031 +#endif
47032 +
47033 + if (is_kernel_text(addr))
47034 + return 1;
47035 +
47036 + if (ktla_ktva((unsigned long)_stext) <= addr && addr < ktla_ktva((unsigned long)_etext))
47037 + return 1;
47038 +
47039 + if ((addr >= (unsigned long)_sdata && addr <= (unsigned long)_end))
47040 + return 1;
47041 + return in_gate_area_no_task(addr);
47042 +#endif
47043 +
47044 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
47045 return 1;
47046 return in_gate_area_no_task(addr);
47047 @@ -415,7 +441,6 @@ static unsigned long get_ksymbol_core(st
47048
47049 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
47050 {
47051 - iter->name[0] = '\0';
47052 iter->nameoff = get_symbol_offset(new_pos);
47053 iter->pos = new_pos;
47054 }
47055 @@ -463,6 +488,11 @@ static int s_show(struct seq_file *m, vo
47056 {
47057 struct kallsym_iter *iter = m->private;
47058
47059 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47060 + if (current_uid())
47061 + return 0;
47062 +#endif
47063 +
47064 /* Some debugging symbols have no name. Ignore them. */
47065 if (!iter->name[0])
47066 return 0;
47067 @@ -503,7 +533,7 @@ static int kallsyms_open(struct inode *i
47068 struct kallsym_iter *iter;
47069 int ret;
47070
47071 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
47072 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
47073 if (!iter)
47074 return -ENOMEM;
47075 reset_iter(iter, 0);
47076 diff -urNp linux-2.6.34.1/kernel/kgdb.c linux-2.6.34.1/kernel/kgdb.c
47077 --- linux-2.6.34.1/kernel/kgdb.c 2010-07-05 14:24:10.000000000 -0400
47078 +++ linux-2.6.34.1/kernel/kgdb.c 2010-07-07 09:04:57.000000000 -0400
47079 @@ -93,7 +93,7 @@ static int kgdb_io_module_registered;
47080 /* Guard for recursive entry */
47081 static int exception_level;
47082
47083 -static struct kgdb_io *kgdb_io_ops;
47084 +static const struct kgdb_io *kgdb_io_ops;
47085 static DEFINE_SPINLOCK(kgdb_registration_lock);
47086
47087 /* kgdb console driver is loaded */
47088 @@ -1665,7 +1665,7 @@ static void kgdb_initial_breakpoint(void
47089 *
47090 * Register it with the KGDB core.
47091 */
47092 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
47093 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
47094 {
47095 int err;
47096
47097 @@ -1710,7 +1710,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
47098 *
47099 * Unregister it with the KGDB core.
47100 */
47101 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
47102 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
47103 {
47104 BUG_ON(kgdb_connected);
47105
47106 diff -urNp linux-2.6.34.1/kernel/kmod.c linux-2.6.34.1/kernel/kmod.c
47107 --- linux-2.6.34.1/kernel/kmod.c 2010-07-05 14:24:10.000000000 -0400
47108 +++ linux-2.6.34.1/kernel/kmod.c 2010-07-07 09:04:57.000000000 -0400
47109 @@ -90,6 +90,18 @@ int __request_module(bool wait, const ch
47110 if (ret)
47111 return ret;
47112
47113 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
47114 + /* we could do a tighter check here, but some distros
47115 + are taking it upon themselves to remove CAP_SYS_MODULE
47116 + from even root-running apps which cause modules to be
47117 + auto-loaded
47118 + */
47119 + if (current_uid()) {
47120 + gr_log_nonroot_mod_load(module_name);
47121 + return -EPERM;
47122 + }
47123 +#endif
47124 +
47125 /* If modprobe needs a service that is in a module, we get a recursive
47126 * loop. Limit the number of running kmod threads to max_threads/2 or
47127 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
47128 diff -urNp linux-2.6.34.1/kernel/kprobes.c linux-2.6.34.1/kernel/kprobes.c
47129 --- linux-2.6.34.1/kernel/kprobes.c 2010-07-05 14:24:10.000000000 -0400
47130 +++ linux-2.6.34.1/kernel/kprobes.c 2010-07-07 09:04:57.000000000 -0400
47131 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
47132 * kernel image and loaded module images reside. This is required
47133 * so x86_64 can correctly handle the %rip-relative fixups.
47134 */
47135 - kip->insns = module_alloc(PAGE_SIZE);
47136 + kip->insns = module_alloc_exec(PAGE_SIZE);
47137 if (!kip->insns) {
47138 kfree(kip);
47139 return NULL;
47140 @@ -223,7 +223,7 @@ static int __kprobes collect_one_slot(st
47141 */
47142 if (!list_is_singular(&kip->list)) {
47143 list_del(&kip->list);
47144 - module_free(NULL, kip->insns);
47145 + module_free_exec(NULL, kip->insns);
47146 kfree(kip);
47147 }
47148 return 1;
47149 @@ -1643,7 +1643,7 @@ static int __init init_kprobes(void)
47150 {
47151 int i, err = 0;
47152 unsigned long offset = 0, size = 0;
47153 - char *modname, namebuf[128];
47154 + char *modname, namebuf[KSYM_NAME_LEN];
47155 const char *symbol_name;
47156 void *addr;
47157 struct kprobe_blackpoint *kb;
47158 @@ -1769,7 +1769,7 @@ static int __kprobes show_kprobe_addr(st
47159 const char *sym = NULL;
47160 unsigned int i = *(loff_t *) v;
47161 unsigned long offset = 0;
47162 - char *modname, namebuf[128];
47163 + char *modname, namebuf[KSYM_NAME_LEN];
47164
47165 head = &kprobe_table[i];
47166 preempt_disable();
47167 diff -urNp linux-2.6.34.1/kernel/lockdep.c linux-2.6.34.1/kernel/lockdep.c
47168 --- linux-2.6.34.1/kernel/lockdep.c 2010-07-05 14:24:10.000000000 -0400
47169 +++ linux-2.6.34.1/kernel/lockdep.c 2010-07-07 09:04:57.000000000 -0400
47170 @@ -584,6 +584,10 @@ static int static_obj(void *obj)
47171 end = (unsigned long) &_end,
47172 addr = (unsigned long) obj;
47173
47174 +#ifdef CONFIG_PAX_KERNEXEC
47175 + start = ktla_ktva(start);
47176 +#endif
47177 +
47178 /*
47179 * static variable?
47180 */
47181 @@ -709,6 +713,7 @@ register_lock_class(struct lockdep_map *
47182 if (!static_obj(lock->key)) {
47183 debug_locks_off();
47184 printk("INFO: trying to register non-static key.\n");
47185 + printk("lock:%pS key:%pS.\n", lock, lock->key);
47186 printk("the code is fine but needs lockdep annotation.\n");
47187 printk("turning off the locking correctness validator.\n");
47188 dump_stack();
47189 diff -urNp linux-2.6.34.1/kernel/lockdep_proc.c linux-2.6.34.1/kernel/lockdep_proc.c
47190 --- linux-2.6.34.1/kernel/lockdep_proc.c 2010-07-05 14:24:10.000000000 -0400
47191 +++ linux-2.6.34.1/kernel/lockdep_proc.c 2010-07-07 09:04:57.000000000 -0400
47192 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
47193
47194 static void print_name(struct seq_file *m, struct lock_class *class)
47195 {
47196 - char str[128];
47197 + char str[KSYM_NAME_LEN];
47198 const char *name = class->name;
47199
47200 if (!name) {
47201 diff -urNp linux-2.6.34.1/kernel/module.c linux-2.6.34.1/kernel/module.c
47202 --- linux-2.6.34.1/kernel/module.c 2010-07-05 14:24:10.000000000 -0400
47203 +++ linux-2.6.34.1/kernel/module.c 2010-07-07 09:04:57.000000000 -0400
47204 @@ -89,7 +89,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
47205 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
47206
47207 /* Bounds of module allocation, for speeding __module_address */
47208 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
47209 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
47210 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
47211
47212 int register_module_notifier(struct notifier_block * nb)
47213 {
47214 @@ -245,7 +246,7 @@ bool each_symbol(bool (*fn)(const struct
47215 return true;
47216
47217 list_for_each_entry_rcu(mod, &modules, list) {
47218 - struct symsearch arr[] = {
47219 + struct symsearch modarr[] = {
47220 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
47221 NOT_GPL_ONLY, false },
47222 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
47223 @@ -267,7 +268,7 @@ bool each_symbol(bool (*fn)(const struct
47224 #endif
47225 };
47226
47227 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
47228 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
47229 return true;
47230 }
47231 return false;
47232 @@ -378,7 +379,7 @@ static inline void __percpu *mod_percpu(
47233 static int percpu_modalloc(struct module *mod,
47234 unsigned long size, unsigned long align)
47235 {
47236 - if (align > PAGE_SIZE) {
47237 + if (align-1 >= PAGE_SIZE) {
47238 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
47239 mod->name, align, PAGE_SIZE);
47240 align = PAGE_SIZE;
47241 @@ -1465,7 +1466,8 @@ static void free_module(struct module *m
47242 destroy_params(mod->kp, mod->num_kp);
47243
47244 /* This may be NULL, but that's OK */
47245 - module_free(mod, mod->module_init);
47246 + module_free(mod, mod->module_init_rw);
47247 + module_free_exec(mod, mod->module_init_rx);
47248 kfree(mod->args);
47249 percpu_modfree(mod);
47250 #if defined(CONFIG_MODULE_UNLOAD)
47251 @@ -1473,10 +1475,12 @@ static void free_module(struct module *m
47252 free_percpu(mod->refptr);
47253 #endif
47254 /* Free lock-classes: */
47255 - lockdep_free_key_range(mod->module_core, mod->core_size);
47256 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
47257 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
47258
47259 /* Finally, free the core (containing the module structure) */
47260 - module_free(mod, mod->module_core);
47261 + module_free_exec(mod, mod->module_core_rx);
47262 + module_free(mod, mod->module_core_rw);
47263
47264 #ifdef CONFIG_MPU
47265 update_protections(current->mm);
47266 @@ -1570,7 +1574,9 @@ static int simplify_symbols(Elf_Shdr *se
47267 strtab + sym[i].st_name, mod);
47268 /* Ok if resolved. */
47269 if (ksym) {
47270 + pax_open_kernel();
47271 sym[i].st_value = ksym->value;
47272 + pax_close_kernel();
47273 break;
47274 }
47275
47276 @@ -1589,7 +1595,9 @@ static int simplify_symbols(Elf_Shdr *se
47277 secbase = (unsigned long)mod_percpu(mod);
47278 else
47279 secbase = sechdrs[sym[i].st_shndx].sh_addr;
47280 + pax_open_kernel();
47281 sym[i].st_value += secbase;
47282 + pax_close_kernel();
47283 break;
47284 }
47285 }
47286 @@ -1650,11 +1658,12 @@ static void layout_sections(struct modul
47287 || s->sh_entsize != ~0UL
47288 || strstarts(secstrings + s->sh_name, ".init"))
47289 continue;
47290 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
47291 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
47292 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
47293 + else
47294 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
47295 DEBUGP("\t%s\n", secstrings + s->sh_name);
47296 }
47297 - if (m == 0)
47298 - mod->core_text_size = mod->core_size;
47299 }
47300
47301 DEBUGP("Init section allocation order:\n");
47302 @@ -1667,12 +1676,13 @@ static void layout_sections(struct modul
47303 || s->sh_entsize != ~0UL
47304 || !strstarts(secstrings + s->sh_name, ".init"))
47305 continue;
47306 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
47307 - | INIT_OFFSET_MASK);
47308 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
47309 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
47310 + else
47311 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
47312 + s->sh_entsize |= INIT_OFFSET_MASK;
47313 DEBUGP("\t%s\n", secstrings + s->sh_name);
47314 }
47315 - if (m == 0)
47316 - mod->init_text_size = mod->init_size;
47317 }
47318 }
47319
47320 @@ -1776,9 +1786,8 @@ static int is_exported(const char *name,
47321
47322 /* As per nm */
47323 static char elf_type(const Elf_Sym *sym,
47324 - Elf_Shdr *sechdrs,
47325 - const char *secstrings,
47326 - struct module *mod)
47327 + const Elf_Shdr *sechdrs,
47328 + const char *secstrings)
47329 {
47330 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
47331 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
47332 @@ -1853,7 +1862,7 @@ static unsigned long layout_symtab(struc
47333
47334 /* Put symbol section at end of init part of module. */
47335 symsect->sh_flags |= SHF_ALLOC;
47336 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
47337 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
47338 symindex) | INIT_OFFSET_MASK;
47339 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
47340
47341 @@ -1870,19 +1879,19 @@ static unsigned long layout_symtab(struc
47342 }
47343
47344 /* Append room for core symbols at end of core part. */
47345 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
47346 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
47347 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
47348 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
47349
47350 /* Put string table section at end of init part of module. */
47351 strsect->sh_flags |= SHF_ALLOC;
47352 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
47353 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
47354 strindex) | INIT_OFFSET_MASK;
47355 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
47356
47357 /* Append room for core symbols' strings at end of core part. */
47358 - *pstroffs = mod->core_size;
47359 + *pstroffs = mod->core_size_rx;
47360 __set_bit(0, strmap);
47361 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
47362 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
47363
47364 return symoffs;
47365 }
47366 @@ -1906,12 +1915,14 @@ static void add_kallsyms(struct module *
47367 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
47368 mod->strtab = (void *)sechdrs[strindex].sh_addr;
47369
47370 + pax_open_kernel();
47371 +
47372 /* Set types up while we still have access to sections. */
47373 for (i = 0; i < mod->num_symtab; i++)
47374 mod->symtab[i].st_info
47375 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
47376 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
47377
47378 - mod->core_symtab = dst = mod->module_core + symoffs;
47379 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
47380 src = mod->symtab;
47381 *dst = *src;
47382 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
47383 @@ -1923,10 +1934,12 @@ static void add_kallsyms(struct module *
47384 }
47385 mod->core_num_syms = ndst;
47386
47387 - mod->core_strtab = s = mod->module_core + stroffs;
47388 + mod->core_strtab = s = mod->module_core_rx + stroffs;
47389 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
47390 if (test_bit(i, strmap))
47391 *++s = mod->strtab[i];
47392 +
47393 + pax_close_kernel();
47394 }
47395 #else
47396 static inline unsigned long layout_symtab(struct module *mod,
47397 @@ -1963,16 +1976,30 @@ static void dynamic_debug_setup(struct _
47398 #endif
47399 }
47400
47401 -static void *module_alloc_update_bounds(unsigned long size)
47402 +static void *module_alloc_update_bounds_rw(unsigned long size)
47403 {
47404 void *ret = module_alloc(size);
47405
47406 if (ret) {
47407 /* Update module bounds. */
47408 - if ((unsigned long)ret < module_addr_min)
47409 - module_addr_min = (unsigned long)ret;
47410 - if ((unsigned long)ret + size > module_addr_max)
47411 - module_addr_max = (unsigned long)ret + size;
47412 + if ((unsigned long)ret < module_addr_min_rw)
47413 + module_addr_min_rw = (unsigned long)ret;
47414 + if ((unsigned long)ret + size > module_addr_max_rw)
47415 + module_addr_max_rw = (unsigned long)ret + size;
47416 + }
47417 + return ret;
47418 +}
47419 +
47420 +static void *module_alloc_update_bounds_rx(unsigned long size)
47421 +{
47422 + void *ret = module_alloc_exec(size);
47423 +
47424 + if (ret) {
47425 + /* Update module bounds. */
47426 + if ((unsigned long)ret < module_addr_min_rx)
47427 + module_addr_min_rx = (unsigned long)ret;
47428 + if ((unsigned long)ret + size > module_addr_max_rx)
47429 + module_addr_max_rx = (unsigned long)ret + size;
47430 }
47431 return ret;
47432 }
47433 @@ -2175,7 +2202,7 @@ static noinline struct module *load_modu
47434 secstrings, &stroffs, strmap);
47435
47436 /* Do the allocs. */
47437 - ptr = module_alloc_update_bounds(mod->core_size);
47438 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
47439 /*
47440 * The pointer to this block is stored in the module structure
47441 * which is inside the block. Just mark it as not being a
47442 @@ -2186,23 +2213,47 @@ static noinline struct module *load_modu
47443 err = -ENOMEM;
47444 goto free_percpu;
47445 }
47446 - memset(ptr, 0, mod->core_size);
47447 - mod->module_core = ptr;
47448 + memset(ptr, 0, mod->core_size_rw);
47449 + mod->module_core_rw = ptr;
47450
47451 - ptr = module_alloc_update_bounds(mod->init_size);
47452 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
47453 /*
47454 * The pointer to this block is stored in the module structure
47455 * which is inside the block. This block doesn't need to be
47456 * scanned as it contains data and code that will be freed
47457 * after the module is initialized.
47458 */
47459 - kmemleak_ignore(ptr);
47460 - if (!ptr && mod->init_size) {
47461 + kmemleak_not_leak(ptr);
47462 + if (!ptr && mod->init_size_rw) {
47463 + err = -ENOMEM;
47464 + goto free_core_rw;
47465 + }
47466 + memset(ptr, 0, mod->init_size_rw);
47467 + mod->module_init_rw = ptr;
47468 +
47469 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
47470 + kmemleak_not_leak(ptr);
47471 + if (!ptr) {
47472 err = -ENOMEM;
47473 - goto free_core;
47474 + goto free_init_rw;
47475 }
47476 - memset(ptr, 0, mod->init_size);
47477 - mod->module_init = ptr;
47478 +
47479 + pax_open_kernel();
47480 + memset(ptr, 0, mod->core_size_rx);
47481 + pax_close_kernel();
47482 + mod->module_core_rx = ptr;
47483 +
47484 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
47485 + kmemleak_not_leak(ptr);
47486 + if (!ptr && mod->init_size_rx) {
47487 + err = -ENOMEM;
47488 + goto free_core_rx;
47489 + }
47490 +
47491 + pax_open_kernel();
47492 + memset(ptr, 0, mod->init_size_rx);
47493 + pax_close_kernel();
47494 + mod->module_init_rx = ptr;
47495
47496 /* Transfer each section which specifies SHF_ALLOC */
47497 DEBUGP("final section addresses:\n");
47498 @@ -2212,17 +2263,41 @@ static noinline struct module *load_modu
47499 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
47500 continue;
47501
47502 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
47503 - dest = mod->module_init
47504 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
47505 - else
47506 - dest = mod->module_core + sechdrs[i].sh_entsize;
47507 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
47508 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
47509 + dest = mod->module_init_rw
47510 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
47511 + else
47512 + dest = mod->module_init_rx
47513 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
47514 + } else {
47515 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
47516 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
47517 + else
47518 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
47519 + }
47520 +
47521 + if (sechdrs[i].sh_type != SHT_NOBITS) {
47522 +
47523 +#ifdef CONFIG_PAX_KERNEXEC
47524 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
47525 + pax_open_kernel();
47526 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
47527 + pax_close_kernel();
47528 + } else
47529 +#endif
47530
47531 - if (sechdrs[i].sh_type != SHT_NOBITS)
47532 - memcpy(dest, (void *)sechdrs[i].sh_addr,
47533 - sechdrs[i].sh_size);
47534 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
47535 + }
47536 /* Update sh_addr to point to copy in image. */
47537 - sechdrs[i].sh_addr = (unsigned long)dest;
47538 +
47539 +#ifdef CONFIG_PAX_KERNEXEC
47540 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
47541 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
47542 + else
47543 +#endif
47544 +
47545 + sechdrs[i].sh_addr = (unsigned long)dest;
47546 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
47547 }
47548 /* Module has been moved. */
47549 @@ -2233,7 +2308,7 @@ static noinline struct module *load_modu
47550 mod->refptr = alloc_percpu(struct module_ref);
47551 if (!mod->refptr) {
47552 err = -ENOMEM;
47553 - goto free_init;
47554 + goto free_init_rx;
47555 }
47556 #endif
47557 /* Now we've moved module, initialize linked lists, etc. */
47558 @@ -2348,8 +2423,8 @@ static noinline struct module *load_modu
47559
47560 /* Now do relocations. */
47561 for (i = 1; i < hdr->e_shnum; i++) {
47562 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
47563 unsigned int info = sechdrs[i].sh_info;
47564 + strtab = (char *)sechdrs[strindex].sh_addr;
47565
47566 /* Not a valid relocation section? */
47567 if (info >= hdr->e_shnum)
47568 @@ -2410,12 +2485,12 @@ static noinline struct module *load_modu
47569 * Do it before processing of module parameters, so the module
47570 * can provide parameter accessor functions of its own.
47571 */
47572 - if (mod->module_init)
47573 - flush_icache_range((unsigned long)mod->module_init,
47574 - (unsigned long)mod->module_init
47575 - + mod->init_size);
47576 - flush_icache_range((unsigned long)mod->module_core,
47577 - (unsigned long)mod->module_core + mod->core_size);
47578 + if (mod->module_init_rx)
47579 + flush_icache_range((unsigned long)mod->module_init_rx,
47580 + (unsigned long)mod->module_init_rx
47581 + + mod->init_size_rx);
47582 + flush_icache_range((unsigned long)mod->module_core_rx,
47583 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
47584
47585 set_fs(old_fs);
47586
47587 @@ -2463,12 +2538,16 @@ static noinline struct module *load_modu
47588 free_unload:
47589 module_unload_free(mod);
47590 #if defined(CONFIG_MODULE_UNLOAD)
47591 + free_init_rx:
47592 free_percpu(mod->refptr);
47593 - free_init:
47594 #endif
47595 - module_free(mod, mod->module_init);
47596 - free_core:
47597 - module_free(mod, mod->module_core);
47598 + module_free_exec(mod, mod->module_init_rx);
47599 + free_core_rx:
47600 + module_free_exec(mod, mod->module_core_rx);
47601 + free_init_rw:
47602 + module_free(mod, mod->module_init_rw);
47603 + free_core_rw:
47604 + module_free(mod, mod->module_core_rw);
47605 /* mod will be freed with core. Don't access it beyond this line! */
47606 free_percpu:
47607 percpu_modfree(mod);
47608 @@ -2569,10 +2648,12 @@ SYSCALL_DEFINE3(init_module, void __user
47609 mod->symtab = mod->core_symtab;
47610 mod->strtab = mod->core_strtab;
47611 #endif
47612 - module_free(mod, mod->module_init);
47613 - mod->module_init = NULL;
47614 - mod->init_size = 0;
47615 - mod->init_text_size = 0;
47616 + module_free(mod, mod->module_init_rw);
47617 + module_free_exec(mod, mod->module_init_rx);
47618 + mod->module_init_rw = NULL;
47619 + mod->module_init_rx = NULL;
47620 + mod->init_size_rw = 0;
47621 + mod->init_size_rx = 0;
47622 mutex_unlock(&module_mutex);
47623
47624 return 0;
47625 @@ -2603,10 +2684,16 @@ static const char *get_ksymbol(struct mo
47626 unsigned long nextval;
47627
47628 /* At worse, next value is at end of module */
47629 - if (within_module_init(addr, mod))
47630 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
47631 + if (within_module_init_rx(addr, mod))
47632 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
47633 + else if (within_module_init_rw(addr, mod))
47634 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
47635 + else if (within_module_core_rx(addr, mod))
47636 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
47637 + else if (within_module_core_rw(addr, mod))
47638 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
47639 else
47640 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
47641 + return NULL;
47642
47643 /* Scan for closest preceeding symbol, and next symbol. (ELF
47644 starts real symbols at 1). */
47645 @@ -2852,7 +2939,7 @@ static int m_show(struct seq_file *m, vo
47646 char buf[8];
47647
47648 seq_printf(m, "%s %u",
47649 - mod->name, mod->init_size + mod->core_size);
47650 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
47651 print_unload_info(m, mod);
47652
47653 /* Informative for users. */
47654 @@ -2861,7 +2948,7 @@ static int m_show(struct seq_file *m, vo
47655 mod->state == MODULE_STATE_COMING ? "Loading":
47656 "Live");
47657 /* Used by oprofile and other similar tools. */
47658 - seq_printf(m, " 0x%p", mod->module_core);
47659 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
47660
47661 /* Taints info */
47662 if (mod->taints)
47663 @@ -2897,7 +2984,17 @@ static const struct file_operations proc
47664
47665 static int __init proc_modules_init(void)
47666 {
47667 +#ifndef CONFIG_GRKERNSEC_HIDESYM
47668 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47669 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
47670 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47671 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
47672 +#else
47673 proc_create("modules", 0, NULL, &proc_modules_operations);
47674 +#endif
47675 +#else
47676 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
47677 +#endif
47678 return 0;
47679 }
47680 module_init(proc_modules_init);
47681 @@ -2956,12 +3053,12 @@ struct module *__module_address(unsigned
47682 {
47683 struct module *mod;
47684
47685 - if (addr < module_addr_min || addr > module_addr_max)
47686 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
47687 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
47688 return NULL;
47689
47690 list_for_each_entry_rcu(mod, &modules, list)
47691 - if (within_module_core(addr, mod)
47692 - || within_module_init(addr, mod))
47693 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
47694 return mod;
47695 return NULL;
47696 }
47697 @@ -2995,11 +3092,20 @@ bool is_module_text_address(unsigned lon
47698 */
47699 struct module *__module_text_address(unsigned long addr)
47700 {
47701 - struct module *mod = __module_address(addr);
47702 + struct module *mod;
47703 +
47704 +#ifdef CONFIG_X86_32
47705 + addr = ktla_ktva(addr);
47706 +#endif
47707 +
47708 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
47709 + return NULL;
47710 +
47711 + mod = __module_address(addr);
47712 +
47713 if (mod) {
47714 /* Make sure it's within the text section. */
47715 - if (!within(addr, mod->module_init, mod->init_text_size)
47716 - && !within(addr, mod->module_core, mod->core_text_size))
47717 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
47718 mod = NULL;
47719 }
47720 return mod;
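
The kernel/module.c changes above split module memory into a writable (RW) region and a read-only/executable (RX) region: each ELF section is routed by its flags, and allocation bounds are tracked separately per region. Below is a minimal userspace sketch of that routing and bookkeeping, not part of the patch; plain malloc() stands in for module_alloc()/module_alloc_exec().

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-region bounds, mirroring module_addr_{min,max}_{rw,rx} in the hunk. */
struct region_bounds {
	uintptr_t min, max;
};

/* A section goes to the RW region if it is writable or not allocated at all;
 * everything else (read-only and/or executable) goes to the RX region. */
static int section_is_rw(Elf64_Xword sh_flags)
{
	return (sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC);
}

/* Stand-in for module_alloc_update_bounds_rw()/_rx(): allocate and widen
 * the matching region's [min, max) interval. */
static void *alloc_update_bounds(struct region_bounds *b, size_t size)
{
	void *p = malloc(size);

	if (p) {
		if ((uintptr_t)p < b->min)
			b->min = (uintptr_t)p;
		if ((uintptr_t)p + size > b->max)
			b->max = (uintptr_t)p + size;
	}
	return p;
}

int main(void)
{
	struct region_bounds rw = { UINTPTR_MAX, 0 }, rx = { UINTPTR_MAX, 0 };
	Elf64_Xword text = SHF_ALLOC | SHF_EXECINSTR, data = SHF_ALLOC | SHF_WRITE;

	alloc_update_bounds(section_is_rw(data) ? &rw : &rx, 128);
	alloc_update_bounds(section_is_rw(text) ? &rw : &rx, 256);
	printf("rw: [%#lx, %#lx)  rx: [%#lx, %#lx)\n",
	       (unsigned long)rw.min, (unsigned long)rw.max,
	       (unsigned long)rx.min, (unsigned long)rx.max);
	return 0;
}
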
47721 diff -urNp linux-2.6.34.1/kernel/panic.c linux-2.6.34.1/kernel/panic.c
47722 --- linux-2.6.34.1/kernel/panic.c 2010-07-05 14:24:10.000000000 -0400
47723 +++ linux-2.6.34.1/kernel/panic.c 2010-07-07 09:04:57.000000000 -0400
47724 @@ -410,7 +410,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
47725 */
47726 void __stack_chk_fail(void)
47727 {
47728 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
47729 + dump_stack();
47730 + panic("stack-protector: Kernel stack is corrupted in: %pS\n",
47731 __builtin_return_address(0));
47732 }
47733 EXPORT_SYMBOL(__stack_chk_fail);
47734 diff -urNp linux-2.6.34.1/kernel/pid.c linux-2.6.34.1/kernel/pid.c
47735 --- linux-2.6.34.1/kernel/pid.c 2010-07-05 14:24:10.000000000 -0400
47736 +++ linux-2.6.34.1/kernel/pid.c 2010-07-07 09:04:57.000000000 -0400
47737 @@ -33,6 +33,7 @@
47738 #include <linux/rculist.h>
47739 #include <linux/bootmem.h>
47740 #include <linux/hash.h>
47741 +#include <linux/security.h>
47742 #include <linux/pid_namespace.h>
47743 #include <linux/init_task.h>
47744 #include <linux/syscalls.h>
47745 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
47746
47747 int pid_max = PID_MAX_DEFAULT;
47748
47749 -#define RESERVED_PIDS 300
47750 +#define RESERVED_PIDS 500
47751
47752 int pid_max_min = RESERVED_PIDS + 1;
47753 int pid_max_max = PID_MAX_LIMIT;
47754 @@ -382,7 +383,14 @@ EXPORT_SYMBOL(pid_task);
47755 */
47756 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
47757 {
47758 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
47759 + struct task_struct *task;
47760 +
47761 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
47762 +
47763 + if (gr_pid_is_chrooted(task))
47764 + return NULL;
47765 +
47766 + return task;
47767 }
47768
47769 struct task_struct *find_task_by_vpid(pid_t vnr)
47770 diff -urNp linux-2.6.34.1/kernel/posix-cpu-timers.c linux-2.6.34.1/kernel/posix-cpu-timers.c
47771 --- linux-2.6.34.1/kernel/posix-cpu-timers.c 2010-07-05 14:24:10.000000000 -0400
47772 +++ linux-2.6.34.1/kernel/posix-cpu-timers.c 2010-07-07 09:04:57.000000000 -0400
47773 @@ -6,6 +6,7 @@
47774 #include <linux/posix-timers.h>
47775 #include <linux/errno.h>
47776 #include <linux/math64.h>
47777 +#include <linux/security.h>
47778 #include <asm/uaccess.h>
47779 #include <linux/kernel_stat.h>
47780 #include <trace/events/timer.h>
47781 @@ -1036,6 +1037,7 @@ static void check_thread_timers(struct t
47782 unsigned long hard =
47783 ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
47784
47785 + gr_learn_resource(tsk, RLIMIT_RTTIME, tsk->rt.timeout * (USEC_PER_SEC/HZ), 1);
47786 if (hard != RLIM_INFINITY &&
47787 tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
47788 /*
47789 @@ -1205,6 +1207,7 @@ static void check_process_timers(struct
47790 unsigned long hard =
47791 ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
47792 cputime_t x;
47793 + gr_learn_resource(tsk, RLIMIT_CPU, psecs, 0);
47794 if (psecs >= hard) {
47795 /*
47796 * At the hard limit, we just die.
47797 diff -urNp linux-2.6.34.1/kernel/power/hibernate.c linux-2.6.34.1/kernel/power/hibernate.c
47798 --- linux-2.6.34.1/kernel/power/hibernate.c 2010-07-05 14:24:10.000000000 -0400
47799 +++ linux-2.6.34.1/kernel/power/hibernate.c 2010-07-07 09:04:57.000000000 -0400
47800 @@ -50,14 +50,14 @@ enum {
47801
47802 static int hibernation_mode = HIBERNATION_SHUTDOWN;
47803
47804 -static struct platform_hibernation_ops *hibernation_ops;
47805 +static const struct platform_hibernation_ops *hibernation_ops;
47806
47807 /**
47808 * hibernation_set_ops - set the global hibernate operations
47809 * @ops: the hibernation operations to use in subsequent hibernation transitions
47810 */
47811
47812 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
47813 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
47814 {
47815 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
47816 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
47817 diff -urNp linux-2.6.34.1/kernel/power/poweroff.c linux-2.6.34.1/kernel/power/poweroff.c
47818 --- linux-2.6.34.1/kernel/power/poweroff.c 2010-07-05 14:24:10.000000000 -0400
47819 +++ linux-2.6.34.1/kernel/power/poweroff.c 2010-07-07 09:04:57.000000000 -0400
47820 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
47821 .enable_mask = SYSRQ_ENABLE_BOOT,
47822 };
47823
47824 -static int pm_sysrq_init(void)
47825 +static int __init pm_sysrq_init(void)
47826 {
47827 register_sysrq_key('o', &sysrq_poweroff_op);
47828 return 0;
47829 diff -urNp linux-2.6.34.1/kernel/power/process.c linux-2.6.34.1/kernel/power/process.c
47830 --- linux-2.6.34.1/kernel/power/process.c 2010-07-05 14:24:10.000000000 -0400
47831 +++ linux-2.6.34.1/kernel/power/process.c 2010-07-07 09:04:57.000000000 -0400
47832 @@ -38,12 +38,15 @@ static int try_to_freeze_tasks(bool sig_
47833 struct timeval start, end;
47834 u64 elapsed_csecs64;
47835 unsigned int elapsed_csecs;
47836 + bool timedout = false;
47837
47838 do_gettimeofday(&start);
47839
47840 end_time = jiffies + TIMEOUT;
47841 while (true) {
47842 todo = 0;
47843 + if (time_after(jiffies, end_time))
47844 + timedout = true;
47845 read_lock(&tasklist_lock);
47846 do_each_thread(g, p) {
47847 if (frozen(p) || !freezeable(p))
47848 @@ -58,12 +61,16 @@ static int try_to_freeze_tasks(bool sig_
47849 * It is "frozen enough". If the task does wake
47850 * up, it will immediately call try_to_freeze.
47851 */
47852 - if (!task_is_stopped_or_traced(p) &&
47853 - !freezer_should_skip(p))
47854 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
47855 todo++;
47856 + if (timedout) {
47857 + printk(KERN_ERR "Task refusing to freeze:\n");
47858 + sched_show_task(p);
47859 + }
47860 + }
47861 } while_each_thread(g, p);
47862 read_unlock(&tasklist_lock);
47863 - if (!todo || time_after(jiffies, end_time))
47864 + if (!todo || timedout)
47865 break;
47866
47867 /*
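
The try_to_freeze_tasks() hunk above moves the deadline check to the top of the retry loop so the final, timed-out pass can name every task that still refuses to freeze before giving up. A self-contained sketch of that retry-and-report shape follows; the worker table and is_done() helper are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define N_WORKERS	3
#define TIMEOUT_SECS	2

/* Toy stand-in for the per-task "frozen yet?" test. */
static bool worker_done[N_WORKERS] = { true, false, true };

static bool is_done(int i)
{
	return worker_done[i];
}

int main(void)
{
	time_t deadline = time(NULL) + TIMEOUT_SECS;
	bool timedout = false;
	int todo;

	while (true) {
		todo = 0;
		if (time(NULL) > deadline)
			timedout = true;	/* last pass: report, then stop */

		for (int i = 0; i < N_WORKERS; i++) {
			if (is_done(i))
				continue;
			todo++;
			if (timedout)
				fprintf(stderr, "worker %d refusing to finish\n", i);
		}
		if (!todo || timedout)
			break;
	}
	return todo ? 1 : 0;
}
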
47868 diff -urNp linux-2.6.34.1/kernel/power/suspend.c linux-2.6.34.1/kernel/power/suspend.c
47869 --- linux-2.6.34.1/kernel/power/suspend.c 2010-07-05 14:24:10.000000000 -0400
47870 +++ linux-2.6.34.1/kernel/power/suspend.c 2010-07-07 09:04:57.000000000 -0400
47871 @@ -24,13 +24,13 @@ const char *const pm_states[PM_SUSPEND_M
47872 [PM_SUSPEND_MEM] = "mem",
47873 };
47874
47875 -static struct platform_suspend_ops *suspend_ops;
47876 +static const struct platform_suspend_ops *suspend_ops;
47877
47878 /**
47879 * suspend_set_ops - Set the global suspend method table.
47880 * @ops: Pointer to ops structure.
47881 */
47882 -void suspend_set_ops(struct platform_suspend_ops *ops)
47883 +void suspend_set_ops(const struct platform_suspend_ops *ops)
47884 {
47885 mutex_lock(&pm_mutex);
47886 suspend_ops = ops;
47887 diff -urNp linux-2.6.34.1/kernel/printk.c linux-2.6.34.1/kernel/printk.c
47888 --- linux-2.6.34.1/kernel/printk.c 2010-07-05 14:24:10.000000000 -0400
47889 +++ linux-2.6.34.1/kernel/printk.c 2010-07-07 09:04:57.000000000 -0400
47890 @@ -265,6 +265,11 @@ int do_syslog(int type, char __user *buf
47891 char c;
47892 int error = 0;
47893
47894 +#ifdef CONFIG_GRKERNSEC_DMESG
47895 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
47896 + return -EPERM;
47897 +#endif
47898 +
47899 error = security_syslog(type, from_file);
47900 if (error)
47901 return error;
47902 diff -urNp linux-2.6.34.1/kernel/ptrace.c linux-2.6.34.1/kernel/ptrace.c
47903 --- linux-2.6.34.1/kernel/ptrace.c 2010-07-05 14:24:10.000000000 -0400
47904 +++ linux-2.6.34.1/kernel/ptrace.c 2010-07-07 09:04:57.000000000 -0400
47905 @@ -142,7 +142,7 @@ int __ptrace_may_access(struct task_stru
47906 cred->gid != tcred->egid ||
47907 cred->gid != tcred->sgid ||
47908 cred->gid != tcred->gid) &&
47909 - !capable(CAP_SYS_PTRACE)) {
47910 + !capable_nolog(CAP_SYS_PTRACE)) {
47911 rcu_read_unlock();
47912 return -EPERM;
47913 }
47914 @@ -150,7 +150,7 @@ int __ptrace_may_access(struct task_stru
47915 smp_rmb();
47916 if (task->mm)
47917 dumpable = get_dumpable(task->mm);
47918 - if (!dumpable && !capable(CAP_SYS_PTRACE))
47919 + if (!dumpable && !capable_nolog(CAP_SYS_PTRACE))
47920 return -EPERM;
47921
47922 return security_ptrace_access_check(task, mode);
47923 @@ -200,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
47924 goto unlock_tasklist;
47925
47926 task->ptrace = PT_PTRACED;
47927 - if (capable(CAP_SYS_PTRACE))
47928 + if (capable_nolog(CAP_SYS_PTRACE))
47929 task->ptrace |= PT_PTRACE_CAP;
47930
47931 __ptrace_link(task, current);
47932 @@ -363,7 +363,7 @@ int ptrace_readdata(struct task_struct *
47933 break;
47934 return -EIO;
47935 }
47936 - if (copy_to_user(dst, buf, retval))
47937 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
47938 return -EFAULT;
47939 copied += retval;
47940 src += retval;
47941 @@ -574,18 +574,18 @@ int ptrace_request(struct task_struct *c
47942 ret = ptrace_setoptions(child, data);
47943 break;
47944 case PTRACE_GETEVENTMSG:
47945 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
47946 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
47947 break;
47948
47949 case PTRACE_GETSIGINFO:
47950 ret = ptrace_getsiginfo(child, &siginfo);
47951 if (!ret)
47952 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
47953 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
47954 &siginfo);
47955 break;
47956
47957 case PTRACE_SETSIGINFO:
47958 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
47959 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
47960 sizeof siginfo))
47961 ret = -EFAULT;
47962 else
47963 @@ -683,14 +683,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
47964 goto out;
47965 }
47966
47967 + if (gr_handle_ptrace(child, request)) {
47968 + ret = -EPERM;
47969 + goto out_put_task_struct;
47970 + }
47971 +
47972 if (request == PTRACE_ATTACH) {
47973 ret = ptrace_attach(child);
47974 /*
47975 * Some architectures need to do book-keeping after
47976 * a ptrace attach.
47977 */
47978 - if (!ret)
47979 + if (!ret) {
47980 arch_ptrace_attach(child);
47981 + gr_audit_ptrace(child);
47982 + }
47983 goto out_put_task_struct;
47984 }
47985
47986 @@ -715,7 +722,7 @@ int generic_ptrace_peekdata(struct task_
47987 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
47988 if (copied != sizeof(tmp))
47989 return -EIO;
47990 - return put_user(tmp, (unsigned long __user *)data);
47991 + return put_user(tmp, (__force unsigned long __user *)data);
47992 }
47993
47994 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
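
The ptrace_readdata() hunk above refuses to copy out more bytes than the on-stack buffer actually holds, even if a lower layer reports a larger length. A small stand-alone sketch of the same clamp-before-copy rule; copy_out() and its fixed 128-byte buffer are invented for the example.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy the number of bytes a lower layer claims to have produced, but never
 * trust the claimed length beyond the buffer that backs it -- the same idea
 * as the added "retval > sizeof(buf)" guard. */
static int copy_out(char *dst, size_t dst_size,
		    const char buf[static 128], size_t claimed)
{
	if (claimed > 128 || claimed > dst_size)
		return -EINVAL;	/* claimed length cannot be right; refuse */
	memcpy(dst, buf, claimed);
	return 0;
}

int main(void)
{
	char scratch[128] = "hello";
	char out[16];

	printf("ok:  %d\n", copy_out(out, sizeof(out), scratch, 6));
	printf("bad: %d\n", copy_out(out, sizeof(out), scratch, 4096));
	return 0;
}
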
47995 diff -urNp linux-2.6.34.1/kernel/rcutree.c linux-2.6.34.1/kernel/rcutree.c
47996 --- linux-2.6.34.1/kernel/rcutree.c 2010-07-05 14:24:10.000000000 -0400
47997 +++ linux-2.6.34.1/kernel/rcutree.c 2010-07-07 09:04:57.000000000 -0400
47998 @@ -1321,7 +1321,7 @@ __rcu_process_callbacks(struct rcu_state
47999 /*
48000 * Do softirq processing for the current CPU.
48001 */
48002 -static void rcu_process_callbacks(struct softirq_action *unused)
48003 +static void rcu_process_callbacks(void)
48004 {
48005 /*
48006 * Memory references from any prior RCU read-side critical sections
48007 diff -urNp linux-2.6.34.1/kernel/relay.c linux-2.6.34.1/kernel/relay.c
48008 --- linux-2.6.34.1/kernel/relay.c 2010-07-05 14:24:10.000000000 -0400
48009 +++ linux-2.6.34.1/kernel/relay.c 2010-07-07 09:04:57.000000000 -0400
48010 @@ -1293,7 +1293,7 @@ static ssize_t subbuf_splice_actor(struc
48011 return 0;
48012
48013 ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
48014 - if (ret < 0 || ret < total_len)
48015 + if ((int)ret < 0 || ret < total_len)
48016 return ret;
48017
48018 if (read_start + ret == nonpad_end)
48019 diff -urNp linux-2.6.34.1/kernel/resource.c linux-2.6.34.1/kernel/resource.c
48020 --- linux-2.6.34.1/kernel/resource.c 2010-07-05 14:24:10.000000000 -0400
48021 +++ linux-2.6.34.1/kernel/resource.c 2010-07-07 09:04:57.000000000 -0400
48022 @@ -132,8 +132,18 @@ static const struct file_operations proc
48023
48024 static int __init ioresources_init(void)
48025 {
48026 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48027 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48028 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
48029 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
48030 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48031 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
48032 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
48033 +#endif
48034 +#else
48035 proc_create("ioports", 0, NULL, &proc_ioports_operations);
48036 proc_create("iomem", 0, NULL, &proc_iomem_operations);
48037 +#endif
48038 return 0;
48039 }
48040 __initcall(ioresources_init);
48041 diff -urNp linux-2.6.34.1/kernel/sched.c linux-2.6.34.1/kernel/sched.c
48042 --- linux-2.6.34.1/kernel/sched.c 2010-07-05 14:24:10.000000000 -0400
48043 +++ linux-2.6.34.1/kernel/sched.c 2010-07-07 09:04:57.000000000 -0400
48044 @@ -4364,6 +4364,8 @@ int can_nice(const struct task_struct *p
48045 /* convert nice value [19,-20] to rlimit style value [1,40] */
48046 int nice_rlim = 20 - nice;
48047
48048 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
48049 +
48050 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
48051 capable(CAP_SYS_NICE));
48052 }
48053 @@ -4397,7 +4399,8 @@ SYSCALL_DEFINE1(nice, int, increment)
48054 if (nice > 19)
48055 nice = 19;
48056
48057 - if (increment < 0 && !can_nice(current, nice))
48058 + if (increment < 0 && (!can_nice(current, nice) ||
48059 + gr_handle_chroot_nice()))
48060 return -EPERM;
48061
48062 retval = security_task_setnice(current, nice);
48063 @@ -4544,6 +4547,7 @@ recheck:
48064 rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
48065 unlock_task_sighand(p, &flags);
48066
48067 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
48068 /* can't set/change the rt policy */
48069 if (policy != p->policy && !rlim_rtprio)
48070 return -EPERM;
48071 diff -urNp linux-2.6.34.1/kernel/sched_fair.c linux-2.6.34.1/kernel/sched_fair.c
48072 --- linux-2.6.34.1/kernel/sched_fair.c 2010-07-05 14:24:10.000000000 -0400
48073 +++ linux-2.6.34.1/kernel/sched_fair.c 2010-07-07 09:04:57.000000000 -0400
48074 @@ -3432,7 +3432,7 @@ out:
48075 * In CONFIG_NO_HZ case, the idle load balance owner will do the
48076 * rebalancing for all the cpus for whom scheduler ticks are stopped.
48077 */
48078 -static void run_rebalance_domains(struct softirq_action *h)
48079 +static void run_rebalance_domains(void)
48080 {
48081 int this_cpu = smp_processor_id();
48082 struct rq *this_rq = cpu_rq(this_cpu);
48083 diff -urNp linux-2.6.34.1/kernel/signal.c linux-2.6.34.1/kernel/signal.c
48084 --- linux-2.6.34.1/kernel/signal.c 2010-07-05 14:24:10.000000000 -0400
48085 +++ linux-2.6.34.1/kernel/signal.c 2010-07-07 09:04:57.000000000 -0400
48086 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
48087
48088 int print_fatal_signals __read_mostly;
48089
48090 -static void __user *sig_handler(struct task_struct *t, int sig)
48091 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
48092 {
48093 return t->sighand->action[sig - 1].sa.sa_handler;
48094 }
48095
48096 -static int sig_handler_ignored(void __user *handler, int sig)
48097 +static int sig_handler_ignored(__sighandler_t handler, int sig)
48098 {
48099 /* Is it explicitly or implicitly ignored? */
48100 return handler == SIG_IGN ||
48101 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
48102 static int sig_task_ignored(struct task_struct *t, int sig,
48103 int from_ancestor_ns)
48104 {
48105 - void __user *handler;
48106 + __sighandler_t handler;
48107
48108 handler = sig_handler(t, sig);
48109
48110 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
48111 atomic_inc(&user->sigpending);
48112 rcu_read_unlock();
48113
48114 + if (!override_rlimit)
48115 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
48116 +
48117 if (override_rlimit ||
48118 atomic_read(&user->sigpending) <=
48119 task_rlimit(t, RLIMIT_SIGPENDING)) {
48120 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
48121
48122 int unhandled_signal(struct task_struct *tsk, int sig)
48123 {
48124 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
48125 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
48126 if (is_global_init(tsk))
48127 return 1;
48128 if (handler != SIG_IGN && handler != SIG_DFL)
48129 @@ -678,6 +681,9 @@ static int check_kill_permission(int sig
48130 }
48131 }
48132
48133 + if (gr_handle_signal(t, sig))
48134 + return -EPERM;
48135 +
48136 return security_task_kill(t, info, sig, 0);
48137 }
48138
48139 @@ -1025,7 +1031,7 @@ __group_send_sig_info(int sig, struct si
48140 return send_signal(sig, info, p, 1);
48141 }
48142
48143 -static int
48144 +int
48145 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
48146 {
48147 return send_signal(sig, info, t, 0);
48148 @@ -1079,6 +1085,9 @@ force_sig_info(int sig, struct siginfo *
48149 ret = specific_send_sig_info(sig, info, t);
48150 spin_unlock_irqrestore(&t->sighand->siglock, flags);
48151
48152 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
48153 + gr_handle_crash(t, sig);
48154 +
48155 return ret;
48156 }
48157
48158 @@ -1132,8 +1141,11 @@ int group_send_sig_info(int sig, struct
48159 {
48160 int ret = check_kill_permission(sig, info, p);
48161
48162 - if (!ret && sig)
48163 + if (!ret && sig) {
48164 ret = do_send_sig_info(sig, info, p, true);
48165 + if (!ret)
48166 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
48167 + }
48168
48169 return ret;
48170 }
48171 diff -urNp linux-2.6.34.1/kernel/smp.c linux-2.6.34.1/kernel/smp.c
48172 --- linux-2.6.34.1/kernel/smp.c 2010-07-05 14:24:10.000000000 -0400
48173 +++ linux-2.6.34.1/kernel/smp.c 2010-07-07 09:04:57.000000000 -0400
48174 @@ -499,22 +499,22 @@ int smp_call_function(void (*func)(void
48175 }
48176 EXPORT_SYMBOL(smp_call_function);
48177
48178 -void ipi_call_lock(void)
48179 +void ipi_call_lock(void) __acquires(call_function.lock)
48180 {
48181 raw_spin_lock(&call_function.lock);
48182 }
48183
48184 -void ipi_call_unlock(void)
48185 +void ipi_call_unlock(void) __releases(call_function.lock)
48186 {
48187 raw_spin_unlock(&call_function.lock);
48188 }
48189
48190 -void ipi_call_lock_irq(void)
48191 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
48192 {
48193 raw_spin_lock_irq(&call_function.lock);
48194 }
48195
48196 -void ipi_call_unlock_irq(void)
48197 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
48198 {
48199 raw_spin_unlock_irq(&call_function.lock);
48200 }
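
The ipi_call_lock()/ipi_call_unlock() hunks above only add sparse lock-context annotations; the generated code is unchanged. Below is a compile-only sketch of how such annotations are typically wired up, approximating the kernel's compiler.h definitions; the lock type and helpers are invented for the example.

/* Empty under a normal compiler; meaningful when run through sparse. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

struct lock { int held; };
static struct lock call_lock;

static void take(struct lock *l)    { l->held = 1; }
static void release(struct lock *l) { l->held = 0; }

/* Annotated the same way as the patched ipi_call_lock()/ipi_call_unlock():
 * sparse can now warn about paths that acquire without releasing. */
void ipi_call_lock(void) __acquires(call_lock)
{
	take(&call_lock);
}

void ipi_call_unlock(void) __releases(call_lock)
{
	release(&call_lock);
}
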
48201 diff -urNp linux-2.6.34.1/kernel/softirq.c linux-2.6.34.1/kernel/softirq.c
48202 --- linux-2.6.34.1/kernel/softirq.c 2010-07-05 14:24:10.000000000 -0400
48203 +++ linux-2.6.34.1/kernel/softirq.c 2010-07-07 09:04:57.000000000 -0400
48204 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
48205
48206 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
48207
48208 -char *softirq_to_name[NR_SOFTIRQS] = {
48209 +const char * const softirq_to_name[NR_SOFTIRQS] = {
48210 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
48211 "TASKLET", "SCHED", "HRTIMER", "RCU"
48212 };
48213 @@ -190,7 +190,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
48214
48215 asmlinkage void __do_softirq(void)
48216 {
48217 - struct softirq_action *h;
48218 + const struct softirq_action *h;
48219 __u32 pending;
48220 int max_restart = MAX_SOFTIRQ_RESTART;
48221 int cpu;
48222 @@ -216,7 +216,7 @@ restart:
48223 kstat_incr_softirqs_this_cpu(h - softirq_vec);
48224
48225 trace_softirq_entry(h, softirq_vec);
48226 - h->action(h);
48227 + h->action();
48228 trace_softirq_exit(h, softirq_vec);
48229 if (unlikely(prev_count != preempt_count())) {
48230 printk(KERN_ERR "huh, entered softirq %td %s %p"
48231 @@ -340,7 +340,7 @@ void raise_softirq(unsigned int nr)
48232 local_irq_restore(flags);
48233 }
48234
48235 -void open_softirq(int nr, void (*action)(struct softirq_action *))
48236 +void open_softirq(int nr, void (*action)(void))
48237 {
48238 softirq_vec[nr].action = action;
48239 }
48240 @@ -396,7 +396,7 @@ void __tasklet_hi_schedule_first(struct
48241
48242 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
48243
48244 -static void tasklet_action(struct softirq_action *a)
48245 +static void tasklet_action(void)
48246 {
48247 struct tasklet_struct *list;
48248
48249 @@ -431,7 +431,7 @@ static void tasklet_action(struct softir
48250 }
48251 }
48252
48253 -static void tasklet_hi_action(struct softirq_action *a)
48254 +static void tasklet_hi_action(void)
48255 {
48256 struct tasklet_struct *list;
48257
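
The softirq hunks above drop the unused struct softirq_action * parameter, so a handler becomes a plain void (*)(void) and __do_softirq() calls h->action() directly. A minimal runnable sketch of that dispatch-table shape, with made-up handlers:

#include <stdio.h>

#define NR_SOFTIRQS 2

/* After the patch a softirq handler takes no argument at all, so the vector
 * is just a table of void (*)(void) callbacks. */
struct softirq_action {
	void (*action)(void);
};

static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void))
{
	softirq_vec[nr].action = action;
}

static void timer_action(void) { puts("TIMER"); }
static void net_action(void)   { puts("NET");  }

int main(void)
{
	open_softirq(0, timer_action);
	open_softirq(1, net_action);

	/* Dispatch loop mirrors the patched __do_softirq(): h->action(). */
	for (int i = 0; i < NR_SOFTIRQS; i++)
		softirq_vec[i].action();
	return 0;
}
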
48258 diff -urNp linux-2.6.34.1/kernel/sys.c linux-2.6.34.1/kernel/sys.c
48259 --- linux-2.6.34.1/kernel/sys.c 2010-07-05 14:24:10.000000000 -0400
48260 +++ linux-2.6.34.1/kernel/sys.c 2010-07-07 09:04:57.000000000 -0400
48261 @@ -134,6 +134,12 @@ static int set_one_prio(struct task_stru
48262 error = -EACCES;
48263 goto out;
48264 }
48265 +
48266 + if (gr_handle_chroot_setpriority(p, niceval)) {
48267 + error = -EACCES;
48268 + goto out;
48269 + }
48270 +
48271 no_nice = security_task_setnice(p, niceval);
48272 if (no_nice) {
48273 error = no_nice;
48274 @@ -515,6 +521,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
48275 goto error;
48276 }
48277
48278 + if (gr_check_group_change(new->gid, new->egid, -1))
48279 + goto error;
48280 +
48281 if (rgid != (gid_t) -1 ||
48282 (egid != (gid_t) -1 && egid != old->gid))
48283 new->sgid = new->egid;
48284 @@ -548,6 +557,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
48285 goto error;
48286
48287 retval = -EPERM;
48288 +
48289 + if (gr_check_group_change(gid, gid, gid))
48290 + goto error;
48291 +
48292 if (capable(CAP_SETGID))
48293 new->gid = new->egid = new->sgid = new->fsgid = gid;
48294 else if (gid == old->gid || gid == old->sgid)
48295 @@ -632,6 +645,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
48296 goto error;
48297 }
48298
48299 + if (gr_check_user_change(new->uid, new->euid, -1))
48300 + goto error;
48301 +
48302 if (new->uid != old->uid) {
48303 retval = set_user(new);
48304 if (retval < 0)
48305 @@ -680,6 +696,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
48306 goto error;
48307
48308 retval = -EPERM;
48309 +
48310 + if (gr_check_crash_uid(uid))
48311 + goto error;
48312 + if (gr_check_user_change(uid, uid, uid))
48313 + goto error;
48314 +
48315 if (capable(CAP_SETUID)) {
48316 new->suid = new->uid = uid;
48317 if (uid != old->uid) {
48318 @@ -737,6 +759,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
48319 goto error;
48320 }
48321
48322 + if (gr_check_user_change(ruid, euid, -1))
48323 + goto error;
48324 +
48325 if (ruid != (uid_t) -1) {
48326 new->uid = ruid;
48327 if (ruid != old->uid) {
48328 @@ -805,6 +830,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
48329 goto error;
48330 }
48331
48332 + if (gr_check_group_change(rgid, egid, -1))
48333 + goto error;
48334 +
48335 if (rgid != (gid_t) -1)
48336 new->gid = rgid;
48337 if (egid != (gid_t) -1)
48338 @@ -854,6 +882,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
48339 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
48340 goto error;
48341
48342 + if (gr_check_user_change(-1, -1, uid))
48343 + goto error;
48344 +
48345 if (uid == old->uid || uid == old->euid ||
48346 uid == old->suid || uid == old->fsuid ||
48347 capable(CAP_SETUID)) {
48348 @@ -894,6 +925,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
48349 if (gid == old->gid || gid == old->egid ||
48350 gid == old->sgid || gid == old->fsgid ||
48351 capable(CAP_SETGID)) {
48352 + if (gr_check_group_change(-1, -1, gid))
48353 + goto error;
48354 +
48355 if (gid != old_fsgid) {
48356 new->fsgid = gid;
48357 goto change_okay;
48358 @@ -1522,7 +1556,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
48359 error = get_dumpable(me->mm);
48360 break;
48361 case PR_SET_DUMPABLE:
48362 - if (arg2 < 0 || arg2 > 1) {
48363 + if (arg2 > 1) {
48364 error = -EINVAL;
48365 break;
48366 }
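
In the PR_SET_DUMPABLE hunk above, arg2 is an unsigned long syscall argument, so the old arg2 < 0 test could never be true; keeping only arg2 > 1 accepts exactly the same values. A small stand-alone demonstration of why the dropped half was dead code:

#include <stdio.h>

/* prctl() arguments arrive as unsigned long, as in SYSCALL_DEFINE5(prctl, ...). */
static int reject_old(unsigned long arg2)
{
	return (arg2 < 0 || arg2 > 1);	/* "arg2 < 0" is always false here */
}

static int reject_new(unsigned long arg2)
{
	return (arg2 > 1);		/* what the patch keeps */
}

int main(void)
{
	/* A negative value passed by userspace shows up as a huge unsigned
	 * number, so it was already rejected by "> 1"; the "< 0" half never
	 * fired and only drew compiler warnings. */
	unsigned long from_user = (unsigned long)-1;

	printf("old: %d  new: %d\n", reject_old(from_user), reject_new(from_user));
	printf("old(1): %d  new(1): %d\n", reject_old(1), reject_new(1));
	return 0;
}
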
48367 diff -urNp linux-2.6.34.1/kernel/sysctl.c linux-2.6.34.1/kernel/sysctl.c
48368 --- linux-2.6.34.1/kernel/sysctl.c 2010-07-05 14:24:10.000000000 -0400
48369 +++ linux-2.6.34.1/kernel/sysctl.c 2010-07-07 09:04:57.000000000 -0400
48370 @@ -76,6 +76,13 @@
48371
48372
48373 #if defined(CONFIG_SYSCTL)
48374 +#include <linux/grsecurity.h>
48375 +#include <linux/grinternal.h>
48376 +
48377 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
48378 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48379 + const int op);
48380 +extern int gr_handle_chroot_sysctl(const int op);
48381
48382 /* External variables not in a header file. */
48383 extern int sysctl_overcommit_memory;
48384 @@ -162,6 +169,7 @@ static int proc_do_cad_pid(struct ctl_ta
48385 static int proc_taint(struct ctl_table *table, int write,
48386 void __user *buffer, size_t *lenp, loff_t *ppos);
48387 #endif
48388 +extern ctl_table grsecurity_table[];
48389
48390 static struct ctl_table root_table[];
48391 static struct ctl_table_root sysctl_table_root;
48392 @@ -194,6 +202,20 @@ extern struct ctl_table epoll_table[];
48393 int sysctl_legacy_va_layout;
48394 #endif
48395
48396 +#ifdef CONFIG_PAX_SOFTMODE
48397 +static ctl_table pax_table[] = {
48398 + {
48399 + .procname = "softmode",
48400 + .data = &pax_softmode,
48401 + .maxlen = sizeof(unsigned int),
48402 + .mode = 0600,
48403 + .proc_handler = &proc_dointvec,
48404 + },
48405 +
48406 + { }
48407 +};
48408 +#endif
48409 +
48410 /* The default sysctl tables: */
48411
48412 static struct ctl_table root_table[] = {
48413 @@ -241,6 +263,22 @@ static int max_sched_shares_ratelimit =
48414 #endif
48415
48416 static struct ctl_table kern_table[] = {
48417 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
48418 + {
48419 + .procname = "grsecurity",
48420 + .mode = 0500,
48421 + .child = grsecurity_table,
48422 + },
48423 +#endif
48424 +
48425 +#ifdef CONFIG_PAX_SOFTMODE
48426 + {
48427 + .procname = "pax",
48428 + .mode = 0500,
48429 + .child = pax_table,
48430 + },
48431 +#endif
48432 +
48433 {
48434 .procname = "sched_child_runs_first",
48435 .data = &sysctl_sched_child_runs_first,
48436 @@ -1630,6 +1668,16 @@ int sysctl_perm(struct ctl_table_root *r
48437 int error;
48438 int mode;
48439
48440 + if (table->parent != NULL && table->parent->procname != NULL &&
48441 + table->procname != NULL &&
48442 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
48443 + return -EACCES;
48444 + if (gr_handle_chroot_sysctl(op))
48445 + return -EACCES;
48446 + error = gr_handle_sysctl(table, op);
48447 + if (error)
48448 + return error;
48449 +
48450 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
48451 if (error)
48452 return error;
48453 @@ -2138,6 +2186,8 @@ static int __do_proc_dointvec(void *tbl_
48454 len = strlen(buf);
48455 if (len > left)
48456 len = left;
48457 + if (len > sizeof(buf))
48458 + len = sizeof(buf);
48459 if(copy_to_user(s, buf, len))
48460 return -EFAULT;
48461 left -= len;
48462 @@ -2363,6 +2413,8 @@ static int __do_proc_doulongvec_minmax(v
48463 len = strlen(buf);
48464 if (len > left)
48465 len = left;
48466 + if (len > sizeof(buf))
48467 + len = sizeof(buf);
48468 if(copy_to_user(s, buf, len))
48469 return -EFAULT;
48470 left -= len;
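
The sysctl hunks above add one more clamp before copy_to_user(): the length is limited not only by the space the caller has left but also by the size of the kernel-side buffer it came from. A stand-alone sketch of that belt-and-braces clamp; bytes_to_copy() is an invented helper name.

#include <stdio.h>
#include <string.h>

/* Clamp a length to every bound that applies before copying: the space left
 * in the destination AND the size of the source buffer. */
static size_t bytes_to_copy(const char *buf, size_t buf_size, size_t left)
{
	size_t len = strlen(buf);

	if (len > left)
		len = left;
	if (len > buf_size)
		len = buf_size;
	return len;
}

int main(void)
{
	char buf[8];
	char dst[64];

	/* snprintf() reports the length it *wanted* to write; using that
	 * number without a clamp would walk past the 8-byte buffer. */
	int wanted = snprintf(buf, sizeof(buf), "%d", 123456789);
	size_t len = bytes_to_copy(buf, sizeof(buf), sizeof(dst) - 1);

	memcpy(dst, buf, len);
	dst[len] = '\0';
	printf("wanted %d bytes, copied %zu: \"%s\"\n", wanted, len, dst);
	return 0;
}
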
48471 diff -urNp linux-2.6.34.1/kernel/taskstats.c linux-2.6.34.1/kernel/taskstats.c
48472 --- linux-2.6.34.1/kernel/taskstats.c 2010-07-05 14:24:10.000000000 -0400
48473 +++ linux-2.6.34.1/kernel/taskstats.c 2010-07-07 09:04:57.000000000 -0400
48474 @@ -27,9 +27,12 @@
48475 #include <linux/cgroup.h>
48476 #include <linux/fs.h>
48477 #include <linux/file.h>
48478 +#include <linux/grsecurity.h>
48479 #include <net/genetlink.h>
48480 #include <asm/atomic.h>
48481
48482 +extern int gr_is_taskstats_denied(int pid);
48483 +
48484 /*
48485 * Maximum length of a cpumask that can be specified in
48486 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
48487 @@ -432,6 +435,9 @@ static int taskstats_user_cmd(struct sk_
48488 size_t size;
48489 cpumask_var_t mask;
48490
48491 + if (gr_is_taskstats_denied(current->pid))
48492 + return -EACCES;
48493 +
48494 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
48495 return -ENOMEM;
48496
48497 diff -urNp linux-2.6.34.1/kernel/time/tick-broadcast.c linux-2.6.34.1/kernel/time/tick-broadcast.c
48498 --- linux-2.6.34.1/kernel/time/tick-broadcast.c 2010-07-05 14:24:10.000000000 -0400
48499 +++ linux-2.6.34.1/kernel/time/tick-broadcast.c 2010-07-07 09:04:57.000000000 -0400
48500 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
48501 * then clear the broadcast bit.
48502 */
48503 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
48504 - int cpu = smp_processor_id();
48505 + cpu = smp_processor_id();
48506
48507 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
48508 tick_broadcast_clear_oneshot(cpu);
48509 diff -urNp linux-2.6.34.1/kernel/time.c linux-2.6.34.1/kernel/time.c
48510 --- linux-2.6.34.1/kernel/time.c 2010-07-05 14:24:10.000000000 -0400
48511 +++ linux-2.6.34.1/kernel/time.c 2010-07-07 09:04:57.000000000 -0400
48512 @@ -93,6 +93,9 @@ SYSCALL_DEFINE1(stime, time_t __user *,
48513 return err;
48514
48515 do_settimeofday(&tv);
48516 +
48517 + gr_log_timechange();
48518 +
48519 return 0;
48520 }
48521
48522 @@ -201,6 +204,8 @@ SYSCALL_DEFINE2(settimeofday, struct tim
48523 return -EFAULT;
48524 }
48525
48526 + gr_log_timechange();
48527 +
48528 return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
48529 }
48530
48531 @@ -239,7 +244,7 @@ EXPORT_SYMBOL(current_fs_time);
48532 * Avoid unnecessary multiplications/divisions in the
48533 * two most common HZ cases:
48534 */
48535 -unsigned int inline jiffies_to_msecs(const unsigned long j)
48536 +inline unsigned int jiffies_to_msecs(const unsigned long j)
48537 {
48538 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
48539 return (MSEC_PER_SEC / HZ) * j;
48540 @@ -255,7 +260,7 @@ unsigned int inline jiffies_to_msecs(con
48541 }
48542 EXPORT_SYMBOL(jiffies_to_msecs);
48543
48544 -unsigned int inline jiffies_to_usecs(const unsigned long j)
48545 +inline unsigned int jiffies_to_usecs(const unsigned long j)
48546 {
48547 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
48548 return (USEC_PER_SEC / HZ) * j;
48549 diff -urNp linux-2.6.34.1/kernel/timer.c linux-2.6.34.1/kernel/timer.c
48550 --- linux-2.6.34.1/kernel/timer.c 2010-07-05 14:24:10.000000000 -0400
48551 +++ linux-2.6.34.1/kernel/timer.c 2010-07-07 09:04:57.000000000 -0400
48552 @@ -1208,7 +1208,7 @@ void update_process_times(int user_tick)
48553 /*
48554 * This function runs timers and the timer-tq in bottom half context.
48555 */
48556 -static void run_timer_softirq(struct softirq_action *h)
48557 +static void run_timer_softirq(void)
48558 {
48559 struct tvec_base *base = __get_cpu_var(tvec_bases);
48560
48561 diff -urNp linux-2.6.34.1/kernel/trace/Kconfig linux-2.6.34.1/kernel/trace/Kconfig
48562 --- linux-2.6.34.1/kernel/trace/Kconfig 2010-07-05 14:24:10.000000000 -0400
48563 +++ linux-2.6.34.1/kernel/trace/Kconfig 2010-07-07 09:04:57.000000000 -0400
48564 @@ -124,6 +124,7 @@ if FTRACE
48565 config FUNCTION_TRACER
48566 bool "Kernel Function Tracer"
48567 depends on HAVE_FUNCTION_TRACER
48568 + depends on !PAX_KERNEXEC
48569 select FRAME_POINTER
48570 select KALLSYMS
48571 select GENERIC_TRACER
48572 @@ -353,6 +354,7 @@ config PROFILE_KSYM_TRACER
48573 config STACK_TRACER
48574 bool "Trace max stack"
48575 depends on HAVE_FUNCTION_TRACER
48576 + depends on !PAX_KERNEXEC
48577 select FUNCTION_TRACER
48578 select STACKTRACE
48579 select KALLSYMS
48580 diff -urNp linux-2.6.34.1/kernel/trace/ftrace.c linux-2.6.34.1/kernel/trace/ftrace.c
48581 --- linux-2.6.34.1/kernel/trace/ftrace.c 2010-07-05 14:24:10.000000000 -0400
48582 +++ linux-2.6.34.1/kernel/trace/ftrace.c 2010-07-07 09:04:57.000000000 -0400
48583 @@ -1079,13 +1079,18 @@ ftrace_code_disable(struct module *mod,
48584
48585 ip = rec->ip;
48586
48587 + ret = ftrace_arch_code_modify_prepare();
48588 + FTRACE_WARN_ON(ret);
48589 + if (ret)
48590 + return 0;
48591 +
48592 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
48593 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
48594 if (ret) {
48595 ftrace_bug(ret, ip);
48596 rec->flags |= FTRACE_FL_FAILED;
48597 - return 0;
48598 }
48599 - return 1;
48600 + return ret ? 0 : 1;
48601 }
48602
48603 /*
48604 diff -urNp linux-2.6.34.1/kernel/trace/ring_buffer.c linux-2.6.34.1/kernel/trace/ring_buffer.c
48605 --- linux-2.6.34.1/kernel/trace/ring_buffer.c 2010-07-05 14:24:10.000000000 -0400
48606 +++ linux-2.6.34.1/kernel/trace/ring_buffer.c 2010-07-07 09:04:57.000000000 -0400
48607 @@ -621,7 +621,7 @@ static struct list_head *rb_list_head(st
48608 * the reader page). But if the next page is a header page,
48609 * its flags will be non zero.
48610 */
48611 -static int inline
48612 +static inline int
48613 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
48614 struct buffer_page *page, struct list_head *list)
48615 {
48616 diff -urNp linux-2.6.34.1/kernel/trace/trace.c linux-2.6.34.1/kernel/trace/trace.c
48617 --- linux-2.6.34.1/kernel/trace/trace.c 2010-07-05 14:24:10.000000000 -0400
48618 +++ linux-2.6.34.1/kernel/trace/trace.c 2010-07-07 09:04:57.000000000 -0400
48619 @@ -3918,10 +3918,9 @@ static const struct file_operations trac
48620 };
48621 #endif
48622
48623 -static struct dentry *d_tracer;
48624 -
48625 struct dentry *tracing_init_dentry(void)
48626 {
48627 + static struct dentry *d_tracer;
48628 static int once;
48629
48630 if (d_tracer)
48631 @@ -3941,10 +3940,9 @@ struct dentry *tracing_init_dentry(void)
48632 return d_tracer;
48633 }
48634
48635 -static struct dentry *d_percpu;
48636 -
48637 struct dentry *tracing_dentry_percpu(void)
48638 {
48639 + static struct dentry *d_percpu;
48640 static int once;
48641 struct dentry *d_tracer;
48642
48643 diff -urNp linux-2.6.34.1/kernel/trace/trace_output.c linux-2.6.34.1/kernel/trace/trace_output.c
48644 --- linux-2.6.34.1/kernel/trace/trace_output.c 2010-07-05 14:24:10.000000000 -0400
48645 +++ linux-2.6.34.1/kernel/trace/trace_output.c 2010-07-07 09:04:57.000000000 -0400
48646 @@ -280,7 +280,7 @@ int trace_seq_path(struct trace_seq *s,
48647
48648 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
48649 if (!IS_ERR(p)) {
48650 - p = mangle_path(s->buffer + s->len, p, "\n");
48651 + p = mangle_path(s->buffer + s->len, p, "\n\\");
48652 if (p) {
48653 s->len = p - s->buffer;
48654 return 1;
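
The trace_seq_path() hunk above adds the backslash to the set of characters mangle_path() escapes, so a literal backslash in a path can no longer masquerade as an escape sequence. A rough stand-alone re-creation of that idea; mangle() below is a simplified invention, not the kernel's mangle_path().

#include <stdio.h>
#include <string.h>

/* Any character found in "esc" is rewritten as \ooo.  Including '\\' in the
 * escape set means the escaping itself stays unambiguous. */
static void mangle(const char *in, const char *esc, char *out, size_t out_size)
{
	size_t n = 0;

	for (; *in && n + 4 < out_size; in++) {
		if (strchr(esc, *in))
			n += (size_t)snprintf(out + n, out_size - n,
					      "\\%03o", (unsigned char)*in);
		else
			out[n++] = *in;
	}
	out[n] = '\0';
}

int main(void)
{
	char buf[128];

	mangle("/tmp/odd\\name\n", "\n\\", buf, sizeof(buf));
	printf("%s\n", buf);	/* prints /tmp/odd\134name\012 */
	return 0;
}
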
48655 diff -urNp linux-2.6.34.1/kernel/trace/trace_stack.c linux-2.6.34.1/kernel/trace/trace_stack.c
48656 --- linux-2.6.34.1/kernel/trace/trace_stack.c 2010-07-05 14:24:10.000000000 -0400
48657 +++ linux-2.6.34.1/kernel/trace/trace_stack.c 2010-07-07 09:04:57.000000000 -0400
48658 @@ -50,7 +50,7 @@ static inline void check_stack(void)
48659 return;
48660
48661 /* we do not handle interrupt stacks yet */
48662 - if (!object_is_on_stack(&this_size))
48663 + if (!object_starts_on_stack(&this_size))
48664 return;
48665
48666 local_irq_save(flags);
48667 diff -urNp linux-2.6.34.1/lib/Kconfig.debug linux-2.6.34.1/lib/Kconfig.debug
48668 --- linux-2.6.34.1/lib/Kconfig.debug 2010-07-05 14:24:10.000000000 -0400
48669 +++ linux-2.6.34.1/lib/Kconfig.debug 2010-07-07 09:04:57.000000000 -0400
48670 @@ -946,7 +946,7 @@ config LATENCYTOP
48671 select STACKTRACE
48672 select SCHEDSTATS
48673 select SCHED_DEBUG
48674 - depends on HAVE_LATENCYTOP_SUPPORT
48675 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
48676 help
48677 Enable this option if you want to use the LatencyTOP tool
48678 to find out which userspace is blocking on what kernel operations.
48679 diff -urNp linux-2.6.34.1/lib/bug.c linux-2.6.34.1/lib/bug.c
48680 --- linux-2.6.34.1/lib/bug.c 2010-07-05 14:24:10.000000000 -0400
48681 +++ linux-2.6.34.1/lib/bug.c 2010-07-07 09:04:57.000000000 -0400
48682 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
48683 return BUG_TRAP_TYPE_NONE;
48684
48685 bug = find_bug(bugaddr);
48686 + if (!bug)
48687 + return BUG_TRAP_TYPE_NONE;
48688
48689 printk(KERN_EMERG "------------[ cut here ]------------\n");
48690
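
The report_bug() hunk above simply bails out when find_bug() has no entry for the trapping address instead of continuing with a NULL pointer. The same guard in a trivial stand-alone form; the lookup table is invented.

#include <stddef.h>
#include <stdio.h>

struct bug_entry { unsigned long addr; const char *file; };

static const struct bug_entry table[] = { { 0x1000, "foo.c" } };

static const struct bug_entry *find_bug(unsigned long addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;	/* unknown trap address */
}

static int report_bug(unsigned long addr)
{
	const struct bug_entry *bug = find_bug(addr);

	if (!bug)		/* the guard the patch adds */
		return 0;	/* "not ours", instead of dereferencing NULL */
	printf("BUG at %s\n", bug->file);
	return 1;
}

int main(void)
{
	report_bug(0x1000);
	report_bug(0x2000);
	return 0;
}
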
48691 diff -urNp linux-2.6.34.1/lib/debugobjects.c linux-2.6.34.1/lib/debugobjects.c
48692 --- linux-2.6.34.1/lib/debugobjects.c 2010-07-05 14:24:10.000000000 -0400
48693 +++ linux-2.6.34.1/lib/debugobjects.c 2010-07-07 09:04:57.000000000 -0400
48694 @@ -278,7 +278,7 @@ static void debug_object_is_on_stack(voi
48695 if (limit > 4)
48696 return;
48697
48698 - is_on_stack = object_is_on_stack(addr);
48699 + is_on_stack = object_starts_on_stack(addr);
48700 if (is_on_stack == onstack)
48701 return;
48702
48703 diff -urNp linux-2.6.34.1/lib/dma-debug.c linux-2.6.34.1/lib/dma-debug.c
48704 --- linux-2.6.34.1/lib/dma-debug.c 2010-07-05 14:24:10.000000000 -0400
48705 +++ linux-2.6.34.1/lib/dma-debug.c 2010-07-07 09:04:57.000000000 -0400
48706 @@ -861,7 +861,7 @@ out:
48707
48708 static void check_for_stack(struct device *dev, void *addr)
48709 {
48710 - if (object_is_on_stack(addr))
48711 + if (object_starts_on_stack(addr))
48712 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
48713 "stack [addr=%p]\n", addr);
48714 }
48715 diff -urNp linux-2.6.34.1/lib/inflate.c linux-2.6.34.1/lib/inflate.c
48716 --- linux-2.6.34.1/lib/inflate.c 2010-07-05 14:24:10.000000000 -0400
48717 +++ linux-2.6.34.1/lib/inflate.c 2010-07-07 09:04:57.000000000 -0400
48718 @@ -267,7 +267,7 @@ static void free(void *where)
48719 malloc_ptr = free_mem_ptr;
48720 }
48721 #else
48722 -#define malloc(a) kmalloc(a, GFP_KERNEL)
48723 +#define malloc(a) kmalloc((a), GFP_KERNEL)
48724 #define free(a) kfree(a)
48725 #endif
48726
48727 diff -urNp linux-2.6.34.1/lib/parser.c linux-2.6.34.1/lib/parser.c
48728 --- linux-2.6.34.1/lib/parser.c 2010-07-05 14:24:10.000000000 -0400
48729 +++ linux-2.6.34.1/lib/parser.c 2010-07-07 09:04:57.000000000 -0400
48730 @@ -129,7 +129,7 @@ static int match_number(substring_t *s,
48731 char *buf;
48732 int ret;
48733
48734 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
48735 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
48736 if (!buf)
48737 return -ENOMEM;
48738 memcpy(buf, s->from, s->to - s->from);
48739 diff -urNp linux-2.6.34.1/lib/radix-tree.c linux-2.6.34.1/lib/radix-tree.c
48740 --- linux-2.6.34.1/lib/radix-tree.c 2010-07-05 14:24:10.000000000 -0400
48741 +++ linux-2.6.34.1/lib/radix-tree.c 2010-07-07 09:04:57.000000000 -0400
48742 @@ -80,7 +80,7 @@ struct radix_tree_preload {
48743 int nr;
48744 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
48745 };
48746 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
48747 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
48748
48749 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
48750 {
48751 diff -urNp linux-2.6.34.1/lib/random32.c linux-2.6.34.1/lib/random32.c
48752 --- linux-2.6.34.1/lib/random32.c 2010-07-05 14:24:10.000000000 -0400
48753 +++ linux-2.6.34.1/lib/random32.c 2010-07-07 09:04:57.000000000 -0400
48754 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
48755 */
48756 static inline u32 __seed(u32 x, u32 m)
48757 {
48758 - return (x < m) ? x + m : x;
48759 + return (x <= m) ? x + m + 1 : x;
48760 }
48761
48762 /**
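
The __seed() hunk above makes the helper return a value strictly greater than the minimum m: the tausworthe generator's seed constraints (s1 > 1, s2 > 7, s3 > 15) are violated if a seed lands exactly on the minimum, which the old x < m test still allowed (e.g. x == 0 or x == m). A quick stand-alone check of both versions:

#include <stdio.h>

typedef unsigned int u32;

/* Old helper: can still hand back exactly m (x == 0 or x == m), which is not
 * strictly above the required minimum. */
static u32 seed_old(u32 x, u32 m)
{
	return (x < m) ? x + m : x;
}

/* Patched helper: anything at or below the minimum is pushed strictly above
 * it, so the constraint holds for every possible input. */
static u32 seed_new(u32 x, u32 m)
{
	return (x <= m) ? x + m + 1 : x;
}

int main(void)
{
	for (u32 x = 0; x <= 3; x++)
		printf("x=%u  old=%u  new=%u  (need > 1)\n",
		       x, seed_old(x, 1), seed_new(x, 1));
	return 0;
}
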
48763 diff -urNp linux-2.6.34.1/localversion-grsec linux-2.6.34.1/localversion-grsec
48764 --- linux-2.6.34.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
48765 +++ linux-2.6.34.1/localversion-grsec 2010-07-07 09:04:57.000000000 -0400
48766 @@ -0,0 +1 @@
48767 +-grsec
48768 diff -urNp linux-2.6.34.1/mm/Kconfig linux-2.6.34.1/mm/Kconfig
48769 --- linux-2.6.34.1/mm/Kconfig 2010-07-05 14:24:10.000000000 -0400
48770 +++ linux-2.6.34.1/mm/Kconfig 2010-07-07 09:04:57.000000000 -0400
48771 @@ -226,7 +226,7 @@ config KSM
48772 config DEFAULT_MMAP_MIN_ADDR
48773 int "Low address space to protect from user allocation"
48774 depends on MMU
48775 - default 4096
48776 + default 65536
48777 help
48778 This is the portion of low virtual memory which should be protected
48779 from userspace allocation. Keeping a user from writing to low pages
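
The Kconfig change above raises the default protected low-address range from 4 KiB to 64 KiB, widening the guard against NULL-pointer-dereference exploits that rely on mapping page zero and its neighbours. A small userspace probe of the resulting policy on a running system; it only reads the sysctl and attempts one low fixed mapping.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long min_addr = 0;
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

	if (f) {
		if (fscanf(f, "%lu", &min_addr) != 1)
			min_addr = 0;
		fclose(f);
	}
	printf("vm.mmap_min_addr = %lu\n", min_addr);

	/* Pinning a mapping at page 0x1000 should fail with EPERM whenever
	 * mmap_min_addr lies above that address. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap(0x1000) rejected: %s\n", strerror(errno));
	else
		printf("mmap(0x1000) allowed\n");
	return 0;
}
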
48780 diff -urNp linux-2.6.34.1/mm/filemap.c linux-2.6.34.1/mm/filemap.c
48781 --- linux-2.6.34.1/mm/filemap.c 2010-07-05 14:24:10.000000000 -0400
48782 +++ linux-2.6.34.1/mm/filemap.c 2010-07-07 09:04:57.000000000 -0400
48783 @@ -1607,7 +1607,7 @@ int generic_file_mmap(struct file * file
48784 struct address_space *mapping = file->f_mapping;
48785
48786 if (!mapping->a_ops->readpage)
48787 - return -ENOEXEC;
48788 + return -ENODEV;
48789 file_accessed(file);
48790 vma->vm_ops = &generic_file_vm_ops;
48791 vma->vm_flags |= VM_CAN_NONLINEAR;
48792 @@ -2003,6 +2003,7 @@ inline int generic_write_checks(struct f
48793 *pos = i_size_read(inode);
48794
48795 if (limit != RLIM_INFINITY) {
48796 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
48797 if (*pos >= limit) {
48798 send_sig(SIGXFSZ, current, 0);
48799 return -EFBIG;
48800 diff -urNp linux-2.6.34.1/mm/fremap.c linux-2.6.34.1/mm/fremap.c
48801 --- linux-2.6.34.1/mm/fremap.c 2010-07-05 14:24:10.000000000 -0400
48802 +++ linux-2.6.34.1/mm/fremap.c 2010-07-07 09:04:57.000000000 -0400
48803 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
48804 retry:
48805 vma = find_vma(mm, start);
48806
48807 +#ifdef CONFIG_PAX_SEGMEXEC
48808 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
48809 + goto out;
48810 +#endif
48811 +
48812 /*
48813 * Make sure the vma is shared, that it supports prefaulting,
48814 * and that the remapped range is valid and fully within
48815 diff -urNp linux-2.6.34.1/mm/highmem.c linux-2.6.34.1/mm/highmem.c
48816 --- linux-2.6.34.1/mm/highmem.c 2010-07-05 14:24:10.000000000 -0400
48817 +++ linux-2.6.34.1/mm/highmem.c 2010-07-07 09:04:57.000000000 -0400
48818 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
48819 * So no dangers, even with speculative execution.
48820 */
48821 page = pte_page(pkmap_page_table[i]);
48822 + pax_open_kernel();
48823 pte_clear(&init_mm, (unsigned long)page_address(page),
48824 &pkmap_page_table[i]);
48825 -
48826 + pax_close_kernel();
48827 set_page_address(page, NULL);
48828 need_flush = 1;
48829 }
48830 @@ -177,9 +178,11 @@ start:
48831 }
48832 }
48833 vaddr = PKMAP_ADDR(last_pkmap_nr);
48834 +
48835 + pax_open_kernel();
48836 set_pte_at(&init_mm, vaddr,
48837 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
48838 -
48839 + pax_close_kernel();
48840 pkmap_count[last_pkmap_nr] = 1;
48841 set_page_address(page, (void *)vaddr);
48842
48843 diff -urNp linux-2.6.34.1/mm/hugetlb.c linux-2.6.34.1/mm/hugetlb.c
48844 --- linux-2.6.34.1/mm/hugetlb.c 2010-07-05 14:24:10.000000000 -0400
48845 +++ linux-2.6.34.1/mm/hugetlb.c 2010-07-07 09:04:57.000000000 -0400
48846 @@ -2268,6 +2268,26 @@ static int unmap_ref_private(struct mm_s
48847 return 1;
48848 }
48849
48850 +#ifdef CONFIG_PAX_SEGMEXEC
48851 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
48852 +{
48853 + struct mm_struct *mm = vma->vm_mm;
48854 + struct vm_area_struct *vma_m;
48855 + unsigned long address_m;
48856 + pte_t *ptep_m;
48857 +
48858 + vma_m = pax_find_mirror_vma(vma);
48859 + if (!vma_m)
48860 + return;
48861 +
48862 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
48863 + address_m = address + SEGMEXEC_TASK_SIZE;
48864 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
48865 + get_page(page_m);
48866 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
48867 +}
48868 +#endif
48869 +
48870 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
48871 unsigned long address, pte_t *ptep, pte_t pte,
48872 struct page *pagecache_page)
48873 @@ -2348,6 +2368,11 @@ retry_avoidcopy:
48874 huge_ptep_clear_flush(vma, address, ptep);
48875 set_huge_pte_at(mm, address, ptep,
48876 make_huge_pte(vma, new_page, 1));
48877 +
48878 +#ifdef CONFIG_PAX_SEGMEXEC
48879 + pax_mirror_huge_pte(vma, address, new_page);
48880 +#endif
48881 +
48882 /* Make the old page be freed below */
48883 new_page = old_page;
48884 }
48885 @@ -2479,6 +2504,10 @@ retry:
48886 && (vma->vm_flags & VM_SHARED)));
48887 set_huge_pte_at(mm, address, ptep, new_pte);
48888
48889 +#ifdef CONFIG_PAX_SEGMEXEC
48890 + pax_mirror_huge_pte(vma, address, page);
48891 +#endif
48892 +
48893 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
48894 /* Optimization, do the COW without a second fault */
48895 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
48896 @@ -2507,6 +2536,28 @@ int hugetlb_fault(struct mm_struct *mm,
48897 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
48898 struct hstate *h = hstate_vma(vma);
48899
48900 +#ifdef CONFIG_PAX_SEGMEXEC
48901 + struct vm_area_struct *vma_m;
48902 +
48903 + vma_m = pax_find_mirror_vma(vma);
48904 + if (vma_m) {
48905 + unsigned long address_m;
48906 +
48907 + if (vma->vm_start > vma_m->vm_start) {
48908 + address_m = address;
48909 + address -= SEGMEXEC_TASK_SIZE;
48910 + vma = vma_m;
48911 + h = hstate_vma(vma);
48912 + } else
48913 + address_m = address + SEGMEXEC_TASK_SIZE;
48914 +
48915 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
48916 + return VM_FAULT_OOM;
48917 + address_m &= HPAGE_MASK;
48918 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
48919 + }
48920 +#endif
48921 +
48922 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
48923 if (!ptep)
48924 return VM_FAULT_OOM;
48925 diff -urNp linux-2.6.34.1/mm/maccess.c linux-2.6.34.1/mm/maccess.c
48926 --- linux-2.6.34.1/mm/maccess.c 2010-07-05 14:24:10.000000000 -0400
48927 +++ linux-2.6.34.1/mm/maccess.c 2010-07-07 09:04:57.000000000 -0400
48928 @@ -15,10 +15,10 @@
48929 * happens, handle that and return -EFAULT.
48930 */
48931
48932 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
48933 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
48934 __attribute__((alias("__probe_kernel_read")));
48935
48936 -long __probe_kernel_read(void *dst, void *src, size_t size)
48937 +long __probe_kernel_read(void *dst, const void *src, size_t size)
48938 {
48939 long ret;
48940 mm_segment_t old_fs = get_fs();
48941 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
48942 * Safely write to address @dst from the buffer at @src. If a kernel fault
48943 * happens, handle that and return -EFAULT.
48944 */
48945 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
48946 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
48947 __attribute__((alias("__probe_kernel_write")));
48948
48949 -long __probe_kernel_write(void *dst, void *src, size_t size)
48950 +long __probe_kernel_write(void *dst, const void *src, size_t size)
48951 {
48952 long ret;
48953 mm_segment_t old_fs = get_fs();
48954 diff -urNp linux-2.6.34.1/mm/madvise.c linux-2.6.34.1/mm/madvise.c
48955 --- linux-2.6.34.1/mm/madvise.c 2010-07-05 14:24:10.000000000 -0400
48956 +++ linux-2.6.34.1/mm/madvise.c 2010-07-07 09:04:57.000000000 -0400
48957 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
48958 pgoff_t pgoff;
48959 unsigned long new_flags = vma->vm_flags;
48960
48961 +#ifdef CONFIG_PAX_SEGMEXEC
48962 + struct vm_area_struct *vma_m;
48963 +#endif
48964 +
48965 switch (behavior) {
48966 case MADV_NORMAL:
48967 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
48968 @@ -104,6 +108,13 @@ success:
48969 /*
48970 * vm_flags is protected by the mmap_sem held in write mode.
48971 */
48972 +
48973 +#ifdef CONFIG_PAX_SEGMEXEC
48974 + vma_m = pax_find_mirror_vma(vma);
48975 + if (vma_m)
48976 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
48977 +#endif
48978 +
48979 vma->vm_flags = new_flags;
48980
48981 out:
48982 @@ -162,6 +173,11 @@ static long madvise_dontneed(struct vm_a
48983 struct vm_area_struct ** prev,
48984 unsigned long start, unsigned long end)
48985 {
48986 +
48987 +#ifdef CONFIG_PAX_SEGMEXEC
48988 + struct vm_area_struct *vma_m;
48989 +#endif
48990 +
48991 *prev = vma;
48992 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
48993 return -EINVAL;
48994 @@ -174,6 +190,21 @@ static long madvise_dontneed(struct vm_a
48995 zap_page_range(vma, start, end - start, &details);
48996 } else
48997 zap_page_range(vma, start, end - start, NULL);
48998 +
48999 +#ifdef CONFIG_PAX_SEGMEXEC
49000 + vma_m = pax_find_mirror_vma(vma);
49001 + if (vma_m) {
49002 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
49003 + struct zap_details details = {
49004 + .nonlinear_vma = vma_m,
49005 + .last_index = ULONG_MAX,
49006 + };
49007 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
49008 + } else
49009 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
49010 + }
49011 +#endif
49012 +
49013 return 0;
49014 }
49015
49016 @@ -366,6 +397,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
49017 if (end < start)
49018 goto out;
49019
49020 +#ifdef CONFIG_PAX_SEGMEXEC
49021 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49022 + if (end > SEGMEXEC_TASK_SIZE)
49023 + goto out;
49024 + } else
49025 +#endif
49026 +
49027 + if (end > TASK_SIZE)
49028 + goto out;
49029 +
49030 error = 0;
49031 if (end == start)
49032 goto out;
49033 diff -urNp linux-2.6.34.1/mm/memory-failure.c linux-2.6.34.1/mm/memory-failure.c
49034 --- linux-2.6.34.1/mm/memory-failure.c 2010-07-05 14:24:10.000000000 -0400
49035 +++ linux-2.6.34.1/mm/memory-failure.c 2010-07-07 09:04:58.000000000 -0400
49036 @@ -51,7 +51,7 @@ int sysctl_memory_failure_early_kill __r
49037
49038 int sysctl_memory_failure_recovery __read_mostly = 1;
49039
49040 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
49041 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
49042
49043 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
49044
49045 @@ -939,7 +939,7 @@ int __memory_failure(unsigned long pfn,
49046 return 0;
49047 }
49048
49049 - atomic_long_add(1, &mce_bad_pages);
49050 + atomic_long_add_unchecked(1, &mce_bad_pages);
49051
49052 /*
49053 * We need/can do nothing about count=0 pages.
49054 @@ -1003,7 +1003,7 @@ int __memory_failure(unsigned long pfn,
49055 }
49056 if (hwpoison_filter(p)) {
49057 if (TestClearPageHWPoison(p))
49058 - atomic_long_dec(&mce_bad_pages);
49059 + atomic_long_dec_unchecked(&mce_bad_pages);
49060 unlock_page(p);
49061 put_page(p);
49062 return 0;
49063 @@ -1096,7 +1096,7 @@ int unpoison_memory(unsigned long pfn)
49064
49065 if (!get_page_unless_zero(page)) {
49066 if (TestClearPageHWPoison(p))
49067 - atomic_long_dec(&mce_bad_pages);
49068 + atomic_long_dec_unchecked(&mce_bad_pages);
49069 pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
49070 return 0;
49071 }
49072 @@ -1110,7 +1110,7 @@ int unpoison_memory(unsigned long pfn)
49073 */
49074 if (TestClearPageHWPoison(p)) {
49075 pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
49076 - atomic_long_dec(&mce_bad_pages);
49077 + atomic_long_dec_unchecked(&mce_bad_pages);
49078 freeit = 1;
49079 }
49080 unlock_page(page);
49081 @@ -1291,7 +1291,7 @@ int soft_offline_page(struct page *page,
49082 return ret;
49083
49084 done:
49085 - atomic_long_add(1, &mce_bad_pages);
49086 + atomic_long_add_unchecked(1, &mce_bad_pages);
49087 SetPageHWPoison(page);
49088 /* keep elevated page count for bad page */
49089 return ret;
49090 diff -urNp linux-2.6.34.1/mm/memory.c linux-2.6.34.1/mm/memory.c
49091 --- linux-2.6.34.1/mm/memory.c 2010-07-05 14:24:10.000000000 -0400
49092 +++ linux-2.6.34.1/mm/memory.c 2010-07-07 09:04:58.000000000 -0400
49093 @@ -48,6 +48,7 @@
49094 #include <linux/ksm.h>
49095 #include <linux/rmap.h>
49096 #include <linux/module.h>
49097 +#include <linux/security.h>
49098 #include <linux/delayacct.h>
49099 #include <linux/init.h>
49100 #include <linux/writeback.h>
49101 @@ -259,8 +260,12 @@ static inline void free_pmd_range(struct
49102 return;
49103
49104 pmd = pmd_offset(pud, start);
49105 +
49106 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
49107 pud_clear(pud);
49108 pmd_free_tlb(tlb, pmd, start);
49109 +#endif
49110 +
49111 }
49112
49113 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
49114 @@ -292,8 +297,12 @@ static inline void free_pud_range(struct
49115 return;
49116
49117 pud = pud_offset(pgd, start);
49118 +
49119 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
49120 pgd_clear(pgd);
49121 pud_free_tlb(tlb, pud, start);
49122 +#endif
49123 +
49124 }
49125
49126 /*
49127 @@ -1354,10 +1363,10 @@ int __get_user_pages(struct task_struct
49128 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
49129 i = 0;
49130
49131 - do {
49132 + while (nr_pages) {
49133 struct vm_area_struct *vma;
49134
49135 - vma = find_extend_vma(mm, start);
49136 + vma = find_vma(mm, start);
49137 if (!vma && in_gate_area(tsk, start)) {
49138 unsigned long pg = start & PAGE_MASK;
49139 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
49140 @@ -1399,7 +1408,7 @@ int __get_user_pages(struct task_struct
49141 continue;
49142 }
49143
49144 - if (!vma ||
49145 + if (!vma || start < vma->vm_start ||
49146 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
49147 !(vm_flags & vma->vm_flags))
49148 return i ? : -EFAULT;
49149 @@ -1474,7 +1483,7 @@ int __get_user_pages(struct task_struct
49150 start += PAGE_SIZE;
49151 nr_pages--;
49152 } while (nr_pages && start < vma->vm_end);
49153 - } while (nr_pages);
49154 + }
49155 return i;
49156 }
49157
49158 @@ -2070,6 +2079,186 @@ static inline void cow_user_page(struct
49159 copy_user_highpage(dst, src, va, vma);
49160 }
49161
49162 +#ifdef CONFIG_PAX_SEGMEXEC
49163 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
49164 +{
49165 + struct mm_struct *mm = vma->vm_mm;
49166 + spinlock_t *ptl;
49167 + pte_t *pte, entry;
49168 +
49169 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
49170 + entry = *pte;
49171 + if (!pte_present(entry)) {
49172 + if (!pte_none(entry)) {
49173 + BUG_ON(pte_file(entry));
49174 + free_swap_and_cache(pte_to_swp_entry(entry));
49175 + pte_clear_not_present_full(mm, address, pte, 0);
49176 + }
49177 + } else {
49178 + struct page *page;
49179 +
49180 + flush_cache_page(vma, address, pte_pfn(entry));
49181 + entry = ptep_clear_flush(vma, address, pte);
49182 + BUG_ON(pte_dirty(entry));
49183 + page = vm_normal_page(vma, address, entry);
49184 + if (page) {
49185 + update_hiwater_rss(mm);
49186 + if (PageAnon(page))
49187 + dec_mm_counter_fast(mm, MM_ANONPAGES);
49188 + else
49189 + dec_mm_counter_fast(mm, MM_FILEPAGES);
49190 + page_remove_rmap(page);
49191 + page_cache_release(page);
49192 + }
49193 + }
49194 + pte_unmap_unlock(pte, ptl);
49195 +}
49196 +
49197 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
49198 + *
49199 + * the ptl of the lower mapped page is held on entry and is not released on exit
49200 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
49201 + */
49202 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
49203 +{
49204 + struct mm_struct *mm = vma->vm_mm;
49205 + unsigned long address_m;
49206 + spinlock_t *ptl_m;
49207 + struct vm_area_struct *vma_m;
49208 + pmd_t *pmd_m;
49209 + pte_t *pte_m, entry_m;
49210 +
49211 + BUG_ON(!page_m || !PageAnon(page_m));
49212 +
49213 + vma_m = pax_find_mirror_vma(vma);
49214 + if (!vma_m)
49215 + return;
49216 +
49217 + BUG_ON(!PageLocked(page_m));
49218 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
49219 + address_m = address + SEGMEXEC_TASK_SIZE;
49220 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
49221 + pte_m = pte_offset_map_nested(pmd_m, address_m);
49222 + ptl_m = pte_lockptr(mm, pmd_m);
49223 + if (ptl != ptl_m) {
49224 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
49225 + if (!pte_none(*pte_m))
49226 + goto out;
49227 + }
49228 +
49229 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
49230 + page_cache_get(page_m);
49231 + page_add_anon_rmap(page_m, vma_m, address_m);
49232 + inc_mm_counter_fast(mm, MM_ANONPAGES);
49233 + set_pte_at(mm, address_m, pte_m, entry_m);
49234 + update_mmu_cache(vma_m, address_m, entry_m);
49235 +out:
49236 + if (ptl != ptl_m)
49237 + spin_unlock(ptl_m);
49238 + pte_unmap_nested(pte_m);
49239 + unlock_page(page_m);
49240 +}
49241 +
49242 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
49243 +{
49244 + struct mm_struct *mm = vma->vm_mm;
49245 + unsigned long address_m;
49246 + spinlock_t *ptl_m;
49247 + struct vm_area_struct *vma_m;
49248 + pmd_t *pmd_m;
49249 + pte_t *pte_m, entry_m;
49250 +
49251 + BUG_ON(!page_m || PageAnon(page_m));
49252 +
49253 + vma_m = pax_find_mirror_vma(vma);
49254 + if (!vma_m)
49255 + return;
49256 +
49257 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
49258 + address_m = address + SEGMEXEC_TASK_SIZE;
49259 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
49260 + pte_m = pte_offset_map_nested(pmd_m, address_m);
49261 + ptl_m = pte_lockptr(mm, pmd_m);
49262 + if (ptl != ptl_m) {
49263 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
49264 + if (!pte_none(*pte_m))
49265 + goto out;
49266 + }
49267 +
49268 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
49269 + page_cache_get(page_m);
49270 + page_add_file_rmap(page_m);
49271 + inc_mm_counter_fast(mm, MM_FILEPAGES);
49272 + set_pte_at(mm, address_m, pte_m, entry_m);
49273 + update_mmu_cache(vma_m, address_m, entry_m);
49274 +out:
49275 + if (ptl != ptl_m)
49276 + spin_unlock(ptl_m);
49277 + pte_unmap_nested(pte_m);
49278 +}
49279 +
49280 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
49281 +{
49282 + struct mm_struct *mm = vma->vm_mm;
49283 + unsigned long address_m;
49284 + spinlock_t *ptl_m;
49285 + struct vm_area_struct *vma_m;
49286 + pmd_t *pmd_m;
49287 + pte_t *pte_m, entry_m;
49288 +
49289 + vma_m = pax_find_mirror_vma(vma);
49290 + if (!vma_m)
49291 + return;
49292 +
49293 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
49294 + address_m = address + SEGMEXEC_TASK_SIZE;
49295 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
49296 + pte_m = pte_offset_map_nested(pmd_m, address_m);
49297 + ptl_m = pte_lockptr(mm, pmd_m);
49298 + if (ptl != ptl_m) {
49299 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
49300 + if (!pte_none(*pte_m))
49301 + goto out;
49302 + }
49303 +
49304 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
49305 + set_pte_at(mm, address_m, pte_m, entry_m);
49306 +out:
49307 + if (ptl != ptl_m)
49308 + spin_unlock(ptl_m);
49309 + pte_unmap_nested(pte_m);
49310 +}
49311 +
49312 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
49313 +{
49314 + struct page *page_m;
49315 + pte_t entry;
49316 +
49317 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
49318 + goto out;
49319 +
49320 + entry = *pte;
49321 + page_m = vm_normal_page(vma, address, entry);
49322 + if (!page_m)
49323 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
49324 + else if (PageAnon(page_m)) {
49325 + if (pax_find_mirror_vma(vma)) {
49326 + pte_unmap_unlock(pte, ptl);
49327 + lock_page(page_m);
49328 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
49329 + if (pte_same(entry, *pte))
49330 + pax_mirror_anon_pte(vma, address, page_m, ptl);
49331 + else
49332 + unlock_page(page_m);
49333 + }
49334 + } else
49335 + pax_mirror_file_pte(vma, address, page_m, ptl);
49336 +
49337 +out:
49338 + pte_unmap_unlock(pte, ptl);
49339 +}
49340 +#endif
49341 +
49342 /*
49343 * This routine handles present pages, when users try to write
49344 * to a shared page. It is done by copying the page to a new address
49345 @@ -2256,6 +2445,12 @@ gotten:
49346 */
49347 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49348 if (likely(pte_same(*page_table, orig_pte))) {
49349 +
49350 +#ifdef CONFIG_PAX_SEGMEXEC
49351 + if (pax_find_mirror_vma(vma))
49352 + BUG_ON(!trylock_page(new_page));
49353 +#endif
49354 +
49355 if (old_page) {
49356 if (!PageAnon(old_page)) {
49357 dec_mm_counter_fast(mm, MM_FILEPAGES);
49358 @@ -2307,6 +2502,10 @@ gotten:
49359 page_remove_rmap(old_page);
49360 }
49361
49362 +#ifdef CONFIG_PAX_SEGMEXEC
49363 + pax_mirror_anon_pte(vma, address, new_page, ptl);
49364 +#endif
49365 +
49366 /* Free the old page.. */
49367 new_page = old_page;
49368 ret |= VM_FAULT_WRITE;
49369 @@ -2715,6 +2914,11 @@ static int do_swap_page(struct mm_struct
49370 swap_free(entry);
49371 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
49372 try_to_free_swap(page);
49373 +
49374 +#ifdef CONFIG_PAX_SEGMEXEC
49375 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
49376 +#endif
49377 +
49378 unlock_page(page);
49379
49380 if (flags & FAULT_FLAG_WRITE) {
49381 @@ -2726,6 +2930,11 @@ static int do_swap_page(struct mm_struct
49382
49383 /* No need to invalidate - it was non-present before */
49384 update_mmu_cache(vma, address, page_table);
49385 +
49386 +#ifdef CONFIG_PAX_SEGMEXEC
49387 + pax_mirror_anon_pte(vma, address, page, ptl);
49388 +#endif
49389 +
49390 unlock:
49391 pte_unmap_unlock(page_table, ptl);
49392 out:
49393 @@ -2749,7 +2958,7 @@ static int do_anonymous_page(struct mm_s
49394 unsigned long address, pte_t *page_table, pmd_t *pmd,
49395 unsigned int flags)
49396 {
49397 - struct page *page;
49398 + struct page *page = NULL;
49399 spinlock_t *ptl;
49400 pte_t entry;
49401
49402 @@ -2784,6 +2993,11 @@ static int do_anonymous_page(struct mm_s
49403 if (!pte_none(*page_table))
49404 goto release;
49405
49406 +#ifdef CONFIG_PAX_SEGMEXEC
49407 + if (pax_find_mirror_vma(vma))
49408 + BUG_ON(!trylock_page(page));
49409 +#endif
49410 +
49411 inc_mm_counter_fast(mm, MM_ANONPAGES);
49412 page_add_new_anon_rmap(page, vma, address);
49413 setpte:
49414 @@ -2791,6 +3005,12 @@ setpte:
49415
49416 /* No need to invalidate - it was non-present before */
49417 update_mmu_cache(vma, address, page_table);
49418 +
49419 +#ifdef CONFIG_PAX_SEGMEXEC
49420 + if (page)
49421 + pax_mirror_anon_pte(vma, address, page, ptl);
49422 +#endif
49423 +
49424 unlock:
49425 pte_unmap_unlock(page_table, ptl);
49426 return 0;
49427 @@ -2933,6 +3153,12 @@ static int __do_fault(struct mm_struct *
49428 */
49429 /* Only go through if we didn't race with anybody else... */
49430 if (likely(pte_same(*page_table, orig_pte))) {
49431 +
49432 +#ifdef CONFIG_PAX_SEGMEXEC
49433 + if (anon && pax_find_mirror_vma(vma))
49434 + BUG_ON(!trylock_page(page));
49435 +#endif
49436 +
49437 flush_icache_page(vma, page);
49438 entry = mk_pte(page, vma->vm_page_prot);
49439 if (flags & FAULT_FLAG_WRITE)
49440 @@ -2952,6 +3178,14 @@ static int __do_fault(struct mm_struct *
49441
49442 /* no need to invalidate: a not-present page won't be cached */
49443 update_mmu_cache(vma, address, page_table);
49444 +
49445 +#ifdef CONFIG_PAX_SEGMEXEC
49446 + if (anon)
49447 + pax_mirror_anon_pte(vma, address, page, ptl);
49448 + else
49449 + pax_mirror_file_pte(vma, address, page, ptl);
49450 +#endif
49451 +
49452 } else {
49453 if (charged)
49454 mem_cgroup_uncharge_page(page);
49455 @@ -3099,6 +3333,12 @@ static inline int handle_pte_fault(struc
49456 if (flags & FAULT_FLAG_WRITE)
49457 flush_tlb_page(vma, address);
49458 }
49459 +
49460 +#ifdef CONFIG_PAX_SEGMEXEC
49461 + pax_mirror_pte(vma, address, pte, pmd, ptl);
49462 + return 0;
49463 +#endif
49464 +
49465 unlock:
49466 pte_unmap_unlock(pte, ptl);
49467 return 0;
49468 @@ -3115,6 +3355,10 @@ int handle_mm_fault(struct mm_struct *mm
49469 pmd_t *pmd;
49470 pte_t *pte;
49471
49472 +#ifdef CONFIG_PAX_SEGMEXEC
49473 + struct vm_area_struct *vma_m;
49474 +#endif
49475 +
49476 __set_current_state(TASK_RUNNING);
49477
49478 count_vm_event(PGFAULT);
49479 @@ -3125,6 +3369,34 @@ int handle_mm_fault(struct mm_struct *mm
49480 if (unlikely(is_vm_hugetlb_page(vma)))
49481 return hugetlb_fault(mm, vma, address, flags);
49482
49483 +#ifdef CONFIG_PAX_SEGMEXEC
49484 + vma_m = pax_find_mirror_vma(vma);
49485 + if (vma_m) {
49486 + unsigned long address_m;
49487 + pgd_t *pgd_m;
49488 + pud_t *pud_m;
49489 + pmd_t *pmd_m;
49490 +
49491 + if (vma->vm_start > vma_m->vm_start) {
49492 + address_m = address;
49493 + address -= SEGMEXEC_TASK_SIZE;
49494 + vma = vma_m;
49495 + } else
49496 + address_m = address + SEGMEXEC_TASK_SIZE;
49497 +
49498 + pgd_m = pgd_offset(mm, address_m);
49499 + pud_m = pud_alloc(mm, pgd_m, address_m);
49500 + if (!pud_m)
49501 + return VM_FAULT_OOM;
49502 + pmd_m = pmd_alloc(mm, pud_m, address_m);
49503 + if (!pmd_m)
49504 + return VM_FAULT_OOM;
49505 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
49506 + return VM_FAULT_OOM;
49507 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
49508 + }
49509 +#endif
49510 +
49511 pgd = pgd_offset(mm, address);
49512 pud = pud_alloc(mm, pgd, address);
49513 if (!pud)
49514 @@ -3222,7 +3494,7 @@ static int __init gate_vma_init(void)
49515 gate_vma.vm_start = FIXADDR_USER_START;
49516 gate_vma.vm_end = FIXADDR_USER_END;
49517 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
49518 - gate_vma.vm_page_prot = __P101;
49519 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
49520 /*
49521 * Make sure the vDSO gets into every core dump.
49522 * Dumping its contents makes post-mortem fully interpretable later
49523 diff -urNp linux-2.6.34.1/mm/mempolicy.c linux-2.6.34.1/mm/mempolicy.c
49524 --- linux-2.6.34.1/mm/mempolicy.c 2010-07-05 14:24:10.000000000 -0400
49525 +++ linux-2.6.34.1/mm/mempolicy.c 2010-07-07 09:04:58.000000000 -0400
49526 @@ -573,6 +573,10 @@ static int mbind_range(struct mm_struct
49527 unsigned long vmstart;
49528 unsigned long vmend;
49529
49530 +#ifdef CONFIG_PAX_SEGMEXEC
49531 + struct vm_area_struct *vma_m;
49532 +#endif
49533 +
49534 vma = find_vma_prev(mm, start, &prev);
49535 if (!vma || vma->vm_start > start)
49536 return -EFAULT;
49537 @@ -603,6 +607,16 @@ static int mbind_range(struct mm_struct
49538 err = policy_vma(vma, new_pol);
49539 if (err)
49540 goto out;
49541 +
49542 +#ifdef CONFIG_PAX_SEGMEXEC
49543 + vma_m = pax_find_mirror_vma(vma);
49544 + if (vma_m) {
49545 + err = policy_vma(vma_m, new_pol);
49546 + if (err)
49547 + goto out;
49548 + }
49549 +#endif
49550 +
49551 }
49552
49553 out:
49554 @@ -1029,6 +1043,17 @@ static long do_mbind(unsigned long start
49555
49556 if (end < start)
49557 return -EINVAL;
49558 +
49559 +#ifdef CONFIG_PAX_SEGMEXEC
49560 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
49561 + if (end > SEGMEXEC_TASK_SIZE)
49562 + return -EINVAL;
49563 + } else
49564 +#endif
49565 +
49566 + if (end > TASK_SIZE)
49567 + return -EINVAL;
49568 +
49569 if (end == start)
49570 return 0;
49571
49572 @@ -1234,6 +1259,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
49573 if (!mm)
49574 return -EINVAL;
49575
49576 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49577 + if (mm != current->mm &&
49578 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
49579 + err = -EPERM;
49580 + goto out;
49581 + }
49582 +#endif
49583 +
49584 /*
49585 * Check if this process has the right to modify the specified
49586 * process. The right exists if the process has administrative
49587 @@ -1243,8 +1276,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
49588 rcu_read_lock();
49589 tcred = __task_cred(task);
49590 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
49591 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
49592 - !capable(CAP_SYS_NICE)) {
49593 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
49594 rcu_read_unlock();
49595 err = -EPERM;
49596 goto out;
49597 @@ -2472,7 +2504,7 @@ int show_numa_map(struct seq_file *m, vo
49598
49599 if (file) {
49600 seq_printf(m, " file=");
49601 - seq_path(m, &file->f_path, "\n\t= ");
49602 + seq_path(m, &file->f_path, "\n\t\\= ");
49603 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49604 seq_printf(m, " heap");
49605 } else if (vma->vm_start <= mm->start_stack &&
49606 diff -urNp linux-2.6.34.1/mm/migrate.c linux-2.6.34.1/mm/migrate.c
49607 --- linux-2.6.34.1/mm/migrate.c 2010-07-05 14:24:10.000000000 -0400
49608 +++ linux-2.6.34.1/mm/migrate.c 2010-07-07 09:04:58.000000000 -0400
49609 @@ -1056,6 +1056,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
49610 if (!mm)
49611 return -EINVAL;
49612
49613 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49614 + if (mm != current->mm &&
49615 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
49616 + err = -EPERM;
49617 + goto out;
49618 + }
49619 +#endif
49620 +
49621 /*
49622 * Check if this process has the right to modify the specified
49623 * process. The right exists if the process has administrative
49624 @@ -1065,8 +1073,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
49625 rcu_read_lock();
49626 tcred = __task_cred(task);
49627 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
49628 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
49629 - !capable(CAP_SYS_NICE)) {
49630 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
49631 rcu_read_unlock();
49632 err = -EPERM;
49633 goto out;
49634 diff -urNp linux-2.6.34.1/mm/mlock.c linux-2.6.34.1/mm/mlock.c
49635 --- linux-2.6.34.1/mm/mlock.c 2010-07-05 14:24:10.000000000 -0400
49636 +++ linux-2.6.34.1/mm/mlock.c 2010-07-07 09:04:58.000000000 -0400
49637 @@ -13,6 +13,7 @@
49638 #include <linux/pagemap.h>
49639 #include <linux/mempolicy.h>
49640 #include <linux/syscalls.h>
49641 +#include <linux/security.h>
49642 #include <linux/sched.h>
49643 #include <linux/module.h>
49644 #include <linux/rmap.h>
49645 @@ -432,6 +433,17 @@ static int do_mlock(unsigned long start,
49646 return -EINVAL;
49647 if (end == start)
49648 return 0;
49649 +
49650 +#ifdef CONFIG_PAX_SEGMEXEC
49651 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49652 + if (end > SEGMEXEC_TASK_SIZE)
49653 + return -EINVAL;
49654 + } else
49655 +#endif
49656 +
49657 + if (end > TASK_SIZE)
49658 + return -EINVAL;
49659 +
49660 vma = find_vma_prev(current->mm, start, &prev);
49661 if (!vma || vma->vm_start > start)
49662 return -ENOMEM;
49663 @@ -491,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
49664 lock_limit >>= PAGE_SHIFT;
49665
49666 /* check against resource limits */
49667 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
49668 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
49669 error = do_mlock(start, len, 1);
49670 up_write(&current->mm->mmap_sem);
49671 @@ -512,10 +525,10 @@ SYSCALL_DEFINE2(munlock, unsigned long,
49672 static int do_mlockall(int flags)
49673 {
49674 struct vm_area_struct * vma, * prev = NULL;
49675 - unsigned int def_flags = 0;
49676 + unsigned int def_flags = current->mm->def_flags & ~VM_LOCKED;
49677
49678 if (flags & MCL_FUTURE)
49679 - def_flags = VM_LOCKED;
49680 + def_flags |= VM_LOCKED;
49681 current->mm->def_flags = def_flags;
49682 if (flags == MCL_FUTURE)
49683 goto out;
49684 @@ -523,6 +536,12 @@ static int do_mlockall(int flags)
49685 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
49686 unsigned int newflags;
49687
49688 +#ifdef CONFIG_PAX_SEGMEXEC
49689 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
49690 + break;
49691 +#endif
49692 +
49693 + BUG_ON(vma->vm_end > TASK_SIZE);
49694 newflags = vma->vm_flags | VM_LOCKED;
49695 if (!(flags & MCL_CURRENT))
49696 newflags &= ~VM_LOCKED;
49697 @@ -554,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
49698 lock_limit >>= PAGE_SHIFT;
49699
49700 ret = -ENOMEM;
49701 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
49702 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
49703 capable(CAP_IPC_LOCK))
49704 ret = do_mlockall(flags);
49705 diff -urNp linux-2.6.34.1/mm/mmap.c linux-2.6.34.1/mm/mmap.c
49706 --- linux-2.6.34.1/mm/mmap.c 2010-07-05 14:24:10.000000000 -0400
49707 +++ linux-2.6.34.1/mm/mmap.c 2010-07-07 09:04:58.000000000 -0400
49708 @@ -44,6 +44,16 @@
49709 #define arch_rebalance_pgtables(addr, len) (addr)
49710 #endif
49711
49712 +static inline void verify_mm_writelocked(struct mm_struct *mm)
49713 +{
49714 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
49715 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
49716 + up_read(&mm->mmap_sem);
49717 + BUG();
49718 + }
49719 +#endif
49720 +}
49721 +
49722 static void unmap_region(struct mm_struct *mm,
49723 struct vm_area_struct *vma, struct vm_area_struct *prev,
49724 unsigned long start, unsigned long end);
49725 @@ -69,16 +79,25 @@ static void unmap_region(struct mm_struc
49726 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
49727 *
49728 */
49729 -pgprot_t protection_map[16] = {
49730 +pgprot_t protection_map[16] __read_only = {
49731 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
49732 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
49733 };
49734
49735 pgprot_t vm_get_page_prot(unsigned long vm_flags)
49736 {
49737 - return __pgprot(pgprot_val(protection_map[vm_flags &
49738 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
49739 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
49740 pgprot_val(arch_vm_get_page_prot(vm_flags)));
49741 +
49742 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
49743 + if (!(__supported_pte_mask & _PAGE_NX) &&
49744 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
49745 + (vm_flags & (VM_READ | VM_WRITE)))
49746 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
49747 +#endif
49748 +
49749 + return prot;
49750 }
49751 EXPORT_SYMBOL(vm_get_page_prot);
49752
49753 @@ -230,6 +249,7 @@ static struct vm_area_struct *remove_vma
49754 struct vm_area_struct *next = vma->vm_next;
49755
49756 might_sleep();
49757 + BUG_ON(vma->vm_mirror);
49758 if (vma->vm_ops && vma->vm_ops->close)
49759 vma->vm_ops->close(vma);
49760 if (vma->vm_file) {
49761 @@ -266,6 +286,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
49762 * not page aligned -Ram Gupta
49763 */
49764 rlim = rlimit(RLIMIT_DATA);
49765 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
49766 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
49767 (mm->end_data - mm->start_data) > rlim)
49768 goto out;
49769 @@ -685,6 +706,12 @@ static int
49770 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
49771 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
49772 {
49773 +
49774 +#ifdef CONFIG_PAX_SEGMEXEC
49775 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
49776 + return 0;
49777 +#endif
49778 +
49779 if (is_mergeable_vma(vma, file, vm_flags) &&
49780 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
49781 if (vma->vm_pgoff == vm_pgoff)
49782 @@ -704,6 +731,12 @@ static int
49783 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
49784 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
49785 {
49786 +
49787 +#ifdef CONFIG_PAX_SEGMEXEC
49788 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
49789 + return 0;
49790 +#endif
49791 +
49792 if (is_mergeable_vma(vma, file, vm_flags) &&
49793 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
49794 pgoff_t vm_pglen;
49795 @@ -746,13 +779,20 @@ can_vma_merge_after(struct vm_area_struc
49796 struct vm_area_struct *vma_merge(struct mm_struct *mm,
49797 struct vm_area_struct *prev, unsigned long addr,
49798 unsigned long end, unsigned long vm_flags,
49799 - struct anon_vma *anon_vma, struct file *file,
49800 + struct anon_vma *anon_vma, struct file *file,
49801 pgoff_t pgoff, struct mempolicy *policy)
49802 {
49803 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
49804 struct vm_area_struct *area, *next;
49805 int err;
49806
49807 +#ifdef CONFIG_PAX_SEGMEXEC
49808 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
49809 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
49810 +
49811 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
49812 +#endif
49813 +
49814 /*
49815 * We later require that vma->vm_flags == vm_flags,
49816 * so this tests vma->vm_flags & VM_SPECIAL, too.
49817 @@ -768,6 +808,15 @@ struct vm_area_struct *vma_merge(struct
49818 if (next && next->vm_end == end) /* cases 6, 7, 8 */
49819 next = next->vm_next;
49820
49821 +#ifdef CONFIG_PAX_SEGMEXEC
49822 + if (prev)
49823 + prev_m = pax_find_mirror_vma(prev);
49824 + if (area)
49825 + area_m = pax_find_mirror_vma(area);
49826 + if (next)
49827 + next_m = pax_find_mirror_vma(next);
49828 +#endif
49829 +
49830 /*
49831 * Can it merge with the predecessor?
49832 */
49833 @@ -787,9 +836,24 @@ struct vm_area_struct *vma_merge(struct
49834 /* cases 1, 6 */
49835 err = vma_adjust(prev, prev->vm_start,
49836 next->vm_end, prev->vm_pgoff, NULL);
49837 - } else /* cases 2, 5, 7 */
49838 +
49839 +#ifdef CONFIG_PAX_SEGMEXEC
49840 + if (!err && prev_m)
49841 + err = vma_adjust(prev_m, prev_m->vm_start,
49842 + next_m->vm_end, prev_m->vm_pgoff, NULL);
49843 +#endif
49844 +
49845 + } else { /* cases 2, 5, 7 */
49846 err = vma_adjust(prev, prev->vm_start,
49847 end, prev->vm_pgoff, NULL);
49848 +
49849 +#ifdef CONFIG_PAX_SEGMEXEC
49850 + if (!err && prev_m)
49851 + err = vma_adjust(prev_m, prev_m->vm_start,
49852 + end_m, prev_m->vm_pgoff, NULL);
49853 +#endif
49854 +
49855 + }
49856 if (err)
49857 return NULL;
49858 return prev;
49859 @@ -802,12 +866,27 @@ struct vm_area_struct *vma_merge(struct
49860 mpol_equal(policy, vma_policy(next)) &&
49861 can_vma_merge_before(next, vm_flags,
49862 anon_vma, file, pgoff+pglen)) {
49863 - if (prev && addr < prev->vm_end) /* case 4 */
49864 + if (prev && addr < prev->vm_end) { /* case 4 */
49865 err = vma_adjust(prev, prev->vm_start,
49866 addr, prev->vm_pgoff, NULL);
49867 - else /* cases 3, 8 */
49868 +
49869 +#ifdef CONFIG_PAX_SEGMEXEC
49870 + if (!err && prev_m)
49871 + err = vma_adjust(prev_m, prev_m->vm_start,
49872 + addr_m, prev_m->vm_pgoff, NULL);
49873 +#endif
49874 +
49875 + } else { /* cases 3, 8 */
49876 err = vma_adjust(area, addr, next->vm_end,
49877 next->vm_pgoff - pglen, NULL);
49878 +
49879 +#ifdef CONFIG_PAX_SEGMEXEC
49880 + if (!err && area_m)
49881 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
49882 + next_m->vm_pgoff - pglen, NULL);
49883 +#endif
49884 +
49885 + }
49886 if (err)
49887 return NULL;
49888 return area;
49889 @@ -922,14 +1001,11 @@ none:
49890 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
49891 struct file *file, long pages)
49892 {
49893 - const unsigned long stack_flags
49894 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
49895 -
49896 if (file) {
49897 mm->shared_vm += pages;
49898 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
49899 mm->exec_vm += pages;
49900 - } else if (flags & stack_flags)
49901 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
49902 mm->stack_vm += pages;
49903 if (flags & (VM_RESERVED|VM_IO))
49904 mm->reserved_vm += pages;
49905 @@ -956,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
49906 * (the exception is when the underlying filesystem is noexec
49907 * mounted, in which case we dont add PROT_EXEC.)
49908 */
49909 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
49910 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
49911 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
49912 prot |= PROT_EXEC;
49913
49914 @@ -982,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
49915 /* Obtain the address to map to. we verify (or select) it and ensure
49916 * that it represents a valid section of the address space.
49917 */
49918 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
49919 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
49920 if (addr & ~PAGE_MASK)
49921 return addr;
49922
49923 @@ -993,6 +1069,26 @@ unsigned long do_mmap_pgoff(struct file
49924 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
49925 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
49926
49927 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49928 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49929 +
49930 +#ifdef CONFIG_PAX_MPROTECT
49931 + if (mm->pax_flags & MF_PAX_MPROTECT) {
49932 + if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC)
49933 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49934 + else
49935 + vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
49936 + }
49937 +#endif
49938 +
49939 + }
49940 +#endif
49941 +
49942 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
49943 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
49944 + vm_flags &= ~VM_PAGEEXEC;
49945 +#endif
49946 +
49947 if (flags & MAP_LOCKED)
49948 if (!can_do_mlock())
49949 return -EPERM;
49950 @@ -1004,6 +1100,7 @@ unsigned long do_mmap_pgoff(struct file
49951 locked += mm->locked_vm;
49952 lock_limit = rlimit(RLIMIT_MEMLOCK);
49953 lock_limit >>= PAGE_SHIFT;
49954 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
49955 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
49956 return -EAGAIN;
49957 }
49958 @@ -1074,6 +1171,9 @@ unsigned long do_mmap_pgoff(struct file
49959 if (error)
49960 return error;
49961
49962 + if (!gr_acl_handle_mmap(file, prot))
49963 + return -EACCES;
49964 +
49965 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
49966 }
49967 EXPORT_SYMBOL(do_mmap_pgoff);
49968 @@ -1150,10 +1250,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
49969 */
49970 int vma_wants_writenotify(struct vm_area_struct *vma)
49971 {
49972 - unsigned int vm_flags = vma->vm_flags;
49973 + unsigned long vm_flags = vma->vm_flags;
49974
49975 /* If it was private or non-writable, the write bit is already clear */
49976 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
49977 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
49978 return 0;
49979
49980 /* The backer wishes to know when pages are first written to? */
49981 @@ -1202,14 +1302,24 @@ unsigned long mmap_region(struct file *f
49982 unsigned long charged = 0;
49983 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
49984
49985 +#ifdef CONFIG_PAX_SEGMEXEC
49986 + struct vm_area_struct *vma_m = NULL;
49987 +#endif
49988 +
49989 + /*
49990 + * mm->mmap_sem is required to protect against another thread
49991 + * changing the mappings in case we sleep.
49992 + */
49993 + verify_mm_writelocked(mm);
49994 +
49995 /* Clear old maps */
49996 error = -ENOMEM;
49997 -munmap_back:
49998 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
49999 if (vma && vma->vm_start < addr + len) {
50000 if (do_munmap(mm, addr, len))
50001 return -ENOMEM;
50002 - goto munmap_back;
50003 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
50004 + BUG_ON(vma && vma->vm_start < addr + len);
50005 }
50006
50007 /* Check against address space limit. */
50008 @@ -1258,6 +1368,16 @@ munmap_back:
50009 goto unacct_error;
50010 }
50011
50012 +#ifdef CONFIG_PAX_SEGMEXEC
50013 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
50014 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
50015 + if (!vma_m) {
50016 + error = -ENOMEM;
50017 + goto free_vma;
50018 + }
50019 + }
50020 +#endif
50021 +
50022 vma->vm_mm = mm;
50023 vma->vm_start = addr;
50024 vma->vm_end = addr + len;
50025 @@ -1281,6 +1401,19 @@ munmap_back:
50026 error = file->f_op->mmap(file, vma);
50027 if (error)
50028 goto unmap_and_free_vma;
50029 +
50030 +#ifdef CONFIG_PAX_SEGMEXEC
50031 + if (vma_m && (vm_flags & VM_EXECUTABLE))
50032 + added_exe_file_vma(mm);
50033 +#endif
50034 +
50035 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
50036 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
50037 + vma->vm_flags |= VM_PAGEEXEC;
50038 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50039 + }
50040 +#endif
50041 +
50042 if (vm_flags & VM_EXECUTABLE)
50043 added_exe_file_vma(mm);
50044
50045 @@ -1316,6 +1449,11 @@ munmap_back:
50046 vma_link(mm, vma, prev, rb_link, rb_parent);
50047 file = vma->vm_file;
50048
50049 +#ifdef CONFIG_PAX_SEGMEXEC
50050 + if (vma_m)
50051 + BUG_ON(pax_mirror_vma(vma_m, vma));
50052 +#endif
50053 +
50054 /* Once vma denies write, undo our temporary denial count */
50055 if (correct_wcount)
50056 atomic_inc(&inode->i_writecount);
50057 @@ -1324,6 +1462,7 @@ out:
50058
50059 mm->total_vm += len >> PAGE_SHIFT;
50060 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
50061 + track_exec_limit(mm, addr, addr + len, vm_flags);
50062 if (vm_flags & VM_LOCKED) {
50063 if (!mlock_vma_pages_range(vma, addr, addr + len))
50064 mm->locked_vm += (len >> PAGE_SHIFT);
50065 @@ -1341,6 +1480,12 @@ unmap_and_free_vma:
50066 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
50067 charged = 0;
50068 free_vma:
50069 +
50070 +#ifdef CONFIG_PAX_SEGMEXEC
50071 + if (vma_m)
50072 + kmem_cache_free(vm_area_cachep, vma_m);
50073 +#endif
50074 +
50075 kmem_cache_free(vm_area_cachep, vma);
50076 unacct_error:
50077 if (charged)
50078 @@ -1374,6 +1519,10 @@ arch_get_unmapped_area(struct file *filp
50079 if (flags & MAP_FIXED)
50080 return addr;
50081
50082 +#ifdef CONFIG_PAX_RANDMMAP
50083 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
50084 +#endif
50085 +
50086 if (addr) {
50087 addr = PAGE_ALIGN(addr);
50088 vma = find_vma(mm, addr);
50089 @@ -1382,10 +1531,10 @@ arch_get_unmapped_area(struct file *filp
50090 return addr;
50091 }
50092 if (len > mm->cached_hole_size) {
50093 - start_addr = addr = mm->free_area_cache;
50094 + start_addr = addr = mm->free_area_cache;
50095 } else {
50096 - start_addr = addr = TASK_UNMAPPED_BASE;
50097 - mm->cached_hole_size = 0;
50098 + start_addr = addr = mm->mmap_base;
50099 + mm->cached_hole_size = 0;
50100 }
50101
50102 full_search:
50103 @@ -1396,9 +1545,8 @@ full_search:
50104 * Start a new search - just in case we missed
50105 * some holes.
50106 */
50107 - if (start_addr != TASK_UNMAPPED_BASE) {
50108 - addr = TASK_UNMAPPED_BASE;
50109 - start_addr = addr;
50110 + if (start_addr != mm->mmap_base) {
50111 + start_addr = addr = mm->mmap_base;
50112 mm->cached_hole_size = 0;
50113 goto full_search;
50114 }
50115 @@ -1420,10 +1568,16 @@ full_search:
50116
50117 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
50118 {
50119 +
50120 +#ifdef CONFIG_PAX_SEGMEXEC
50121 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
50122 + return;
50123 +#endif
50124 +
50125 /*
50126 * Is this a new hole at the lowest possible address?
50127 */
50128 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
50129 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
50130 mm->free_area_cache = addr;
50131 mm->cached_hole_size = ~0UL;
50132 }
50133 @@ -1441,7 +1595,7 @@ arch_get_unmapped_area_topdown(struct fi
50134 {
50135 struct vm_area_struct *vma;
50136 struct mm_struct *mm = current->mm;
50137 - unsigned long addr = addr0;
50138 + unsigned long base = mm->mmap_base, addr = addr0;
50139
50140 /* requested length too big for entire address space */
50141 if (len > TASK_SIZE)
50142 @@ -1450,6 +1604,10 @@ arch_get_unmapped_area_topdown(struct fi
50143 if (flags & MAP_FIXED)
50144 return addr;
50145
50146 +#ifdef CONFIG_PAX_RANDMMAP
50147 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
50148 +#endif
50149 +
50150 /* requesting a specific address */
50151 if (addr) {
50152 addr = PAGE_ALIGN(addr);
50153 @@ -1507,13 +1665,21 @@ bottomup:
50154 * can happen with large stack limits and large mmap()
50155 * allocations.
50156 */
50157 + mm->mmap_base = TASK_UNMAPPED_BASE;
50158 +
50159 +#ifdef CONFIG_PAX_RANDMMAP
50160 + if (mm->pax_flags & MF_PAX_RANDMMAP)
50161 + mm->mmap_base += mm->delta_mmap;
50162 +#endif
50163 +
50164 + mm->free_area_cache = mm->mmap_base;
50165 mm->cached_hole_size = ~0UL;
50166 - mm->free_area_cache = TASK_UNMAPPED_BASE;
50167 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
50168 /*
50169 * Restore the topdown base:
50170 */
50171 - mm->free_area_cache = mm->mmap_base;
50172 + mm->mmap_base = base;
50173 + mm->free_area_cache = base;
50174 mm->cached_hole_size = ~0UL;
50175
50176 return addr;
50177 @@ -1522,6 +1688,12 @@ bottomup:
50178
50179 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
50180 {
50181 +
50182 +#ifdef CONFIG_PAX_SEGMEXEC
50183 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
50184 + return;
50185 +#endif
50186 +
50187 /*
50188 * Is this a new hole at the highest possible address?
50189 */
50190 @@ -1529,8 +1701,10 @@ void arch_unmap_area_topdown(struct mm_s
50191 mm->free_area_cache = addr;
50192
50193 /* dont allow allocations above current base */
50194 - if (mm->free_area_cache > mm->mmap_base)
50195 + if (mm->free_area_cache > mm->mmap_base) {
50196 mm->free_area_cache = mm->mmap_base;
50197 + mm->cached_hole_size = ~0UL;
50198 + }
50199 }
50200
50201 unsigned long
50202 @@ -1638,6 +1812,27 @@ out:
50203 return prev ? prev->vm_next : vma;
50204 }
50205
50206 +#ifdef CONFIG_PAX_SEGMEXEC
50207 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
50208 +{
50209 + struct vm_area_struct *vma_m;
50210 +
50211 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
50212 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
50213 + BUG_ON(vma->vm_mirror);
50214 + return NULL;
50215 + }
50216 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
50217 + vma_m = vma->vm_mirror;
50218 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
50219 + BUG_ON(vma->vm_file != vma_m->vm_file);
50220 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
50221 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
50222 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
50223 + return vma_m;
50224 +}
50225 +#endif
50226 +
50227 /*
50228 * Verify that the stack growth is acceptable and
50229 * update accounting. This is shared with both the
50230 @@ -1654,6 +1849,7 @@ static int acct_stack_growth(struct vm_a
50231 return -ENOMEM;
50232
50233 /* Stack limit test */
50234 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
50235 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
50236 return -ENOMEM;
50237
50238 @@ -1664,6 +1860,7 @@ static int acct_stack_growth(struct vm_a
50239 locked = mm->locked_vm + grow;
50240 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
50241 limit >>= PAGE_SHIFT;
50242 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
50243 if (locked > limit && !capable(CAP_IPC_LOCK))
50244 return -ENOMEM;
50245 }
50246 @@ -1699,35 +1896,40 @@ static
50247 #endif
50248 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
50249 {
50250 - int error;
50251 + int error, locknext;
50252
50253 if (!(vma->vm_flags & VM_GROWSUP))
50254 return -EFAULT;
50255
50256 + /* Also guard against wrapping around to address 0. */
50257 + if (address < PAGE_ALIGN(address+1))
50258 + address = PAGE_ALIGN(address+1);
50259 + else
50260 + return -ENOMEM;
50261 +
50262 /*
50263 * We must make sure the anon_vma is allocated
50264 * so that the anon_vma locking is not a noop.
50265 */
50266 if (unlikely(anon_vma_prepare(vma)))
50267 return -ENOMEM;
50268 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
50269 + if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
50270 + return -ENOMEM;
50271 anon_vma_lock(vma);
50272 + if (locknext)
50273 + anon_vma_lock(vma->vm_next);
50274
50275 /*
50276 * vma->vm_start/vm_end cannot change under us because the caller
50277 * is required to hold the mmap_sem in read mode. We need the
50278 - * anon_vma lock to serialize against concurrent expand_stacks.
50279 - * Also guard against wrapping around to address 0.
50280 + * anon_vma locks to serialize against concurrent expand_stacks
50281 + * and expand_upwards.
50282 */
50283 - if (address < PAGE_ALIGN(address+4))
50284 - address = PAGE_ALIGN(address+4);
50285 - else {
50286 - anon_vma_unlock(vma);
50287 - return -ENOMEM;
50288 - }
50289 error = 0;
50290
50291 /* Somebody else might have raced and expanded it already */
50292 - if (address > vma->vm_end) {
50293 + if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
50294 unsigned long size, grow;
50295
50296 size = address - vma->vm_start;
50297 @@ -1737,6 +1939,8 @@ int expand_upwards(struct vm_area_struct
50298 if (!error)
50299 vma->vm_end = address;
50300 }
50301 + if (locknext)
50302 + anon_vma_unlock(vma->vm_next);
50303 anon_vma_unlock(vma);
50304 return error;
50305 }
50306 @@ -1748,7 +1952,8 @@ int expand_upwards(struct vm_area_struct
50307 static int expand_downwards(struct vm_area_struct *vma,
50308 unsigned long address)
50309 {
50310 - int error;
50311 + int error, lockprev = 0;
50312 + struct vm_area_struct *prev = NULL;
50313
50314 /*
50315 * We must make sure the anon_vma is allocated
50316 @@ -1762,6 +1967,15 @@ static int expand_downwards(struct vm_ar
50317 if (error)
50318 return error;
50319
50320 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
50321 + find_vma_prev(vma->vm_mm, address, &prev);
50322 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
50323 +#endif
50324 + if (lockprev && unlikely(anon_vma_prepare(prev)))
50325 + return -ENOMEM;
50326 + if (lockprev)
50327 + anon_vma_lock(prev);
50328 +
50329 anon_vma_lock(vma);
50330
50331 /*
50332 @@ -1771,9 +1985,15 @@ static int expand_downwards(struct vm_ar
50333 */
50334
50335 /* Somebody else might have raced and expanded it already */
50336 - if (address < vma->vm_start) {
50337 + if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
50338 unsigned long size, grow;
50339
50340 +#ifdef CONFIG_PAX_SEGMEXEC
50341 + struct vm_area_struct *vma_m;
50342 +
50343 + vma_m = pax_find_mirror_vma(vma);
50344 +#endif
50345 +
50346 size = vma->vm_end - address;
50347 grow = (vma->vm_start - address) >> PAGE_SHIFT;
50348
50349 @@ -1781,9 +2001,20 @@ static int expand_downwards(struct vm_ar
50350 if (!error) {
50351 vma->vm_start = address;
50352 vma->vm_pgoff -= grow;
50353 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
50354 +
50355 +#ifdef CONFIG_PAX_SEGMEXEC
50356 + if (vma_m) {
50357 + vma_m->vm_start -= grow << PAGE_SHIFT;
50358 + vma_m->vm_pgoff -= grow;
50359 + }
50360 +#endif
50361 +
50362 }
50363 }
50364 anon_vma_unlock(vma);
50365 + if (lockprev)
50366 + anon_vma_unlock(prev);
50367 return error;
50368 }
50369
50370 @@ -1857,6 +2088,13 @@ static void remove_vma_list(struct mm_st
50371 do {
50372 long nrpages = vma_pages(vma);
50373
50374 +#ifdef CONFIG_PAX_SEGMEXEC
50375 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
50376 + vma = remove_vma(vma);
50377 + continue;
50378 + }
50379 +#endif
50380 +
50381 mm->total_vm -= nrpages;
50382 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
50383 vma = remove_vma(vma);
50384 @@ -1901,6 +2139,16 @@ detach_vmas_to_be_unmapped(struct mm_str
50385
50386 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
50387 do {
50388 +
50389 +#ifdef CONFIG_PAX_SEGMEXEC
50390 + if (vma->vm_mirror) {
50391 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
50392 + vma->vm_mirror->vm_mirror = NULL;
50393 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
50394 + vma->vm_mirror = NULL;
50395 + }
50396 +#endif
50397 +
50398 rb_erase(&vma->vm_rb, &mm->mm_rb);
50399 mm->map_count--;
50400 tail_vma = vma;
50401 @@ -1927,14 +2175,33 @@ static int __split_vma(struct mm_struct
50402 struct vm_area_struct *new;
50403 int err = -ENOMEM;
50404
50405 +#ifdef CONFIG_PAX_SEGMEXEC
50406 + struct vm_area_struct *vma_m, *new_m = NULL;
50407 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
50408 +#endif
50409 +
50410 if (is_vm_hugetlb_page(vma) && (addr &
50411 ~(huge_page_mask(hstate_vma(vma)))))
50412 return -EINVAL;
50413
50414 +#ifdef CONFIG_PAX_SEGMEXEC
50415 + vma_m = pax_find_mirror_vma(vma);
50416 +#endif
50417 +
50418 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
50419 if (!new)
50420 goto out_err;
50421
50422 +#ifdef CONFIG_PAX_SEGMEXEC
50423 + if (vma_m) {
50424 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
50425 + if (!new_m) {
50426 + kmem_cache_free(vm_area_cachep, new);
50427 + goto out_err;
50428 + }
50429 + }
50430 +#endif
50431 +
50432 /* most fields are the same, copy all, and then fixup */
50433 *new = *vma;
50434
50435 @@ -1947,6 +2214,22 @@ static int __split_vma(struct mm_struct
50436 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
50437 }
50438
50439 +#ifdef CONFIG_PAX_SEGMEXEC
50440 + if (vma_m) {
50441 + *new_m = *vma_m;
50442 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
50443 + new_m->vm_mirror = new;
50444 + new->vm_mirror = new_m;
50445 +
50446 + if (new_below)
50447 + new_m->vm_end = addr_m;
50448 + else {
50449 + new_m->vm_start = addr_m;
50450 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
50451 + }
50452 + }
50453 +#endif
50454 +
50455 pol = mpol_dup(vma_policy(vma));
50456 if (IS_ERR(pol)) {
50457 err = PTR_ERR(pol);
50458 @@ -1972,6 +2255,42 @@ static int __split_vma(struct mm_struct
50459 else
50460 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
50461
50462 +#ifdef CONFIG_PAX_SEGMEXEC
50463 + if (!err && vma_m) {
50464 + if (anon_vma_clone(new_m, vma_m))
50465 + goto out_free_mpol;
50466 +
50467 + mpol_get(pol);
50468 + vma_set_policy(new_m, pol);
50469 +
50470 + if (new_m->vm_file) {
50471 + get_file(new_m->vm_file);
50472 + if (vma_m->vm_flags & VM_EXECUTABLE)
50473 + added_exe_file_vma(mm);
50474 + }
50475 +
50476 + if (new_m->vm_ops && new_m->vm_ops->open)
50477 + new_m->vm_ops->open(new_m);
50478 +
50479 + if (new_below)
50480 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
50481 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
50482 + else
50483 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
50484 +
50485 + if (err) {
50486 + if (new_m->vm_ops && new_m->vm_ops->close)
50487 + new_m->vm_ops->close(new_m);
50488 + if (new_m->vm_file) {
50489 + if (vma_m->vm_flags & VM_EXECUTABLE)
50490 + removed_exe_file_vma(mm);
50491 + fput(new_m->vm_file);
50492 + }
50493 + mpol_put(pol);
50494 + }
50495 + }
50496 +#endif
50497 +
50498 /* Success. */
50499 if (!err)
50500 return 0;
50501 @@ -1987,6 +2306,15 @@ static int __split_vma(struct mm_struct
50502 out_free_mpol:
50503 mpol_put(pol);
50504 out_free_vma:
50505 +
50506 +#ifdef CONFIG_PAX_SEGMEXEC
50507 + if (new_m) {
50508 + unlink_anon_vmas(new_m);
50509 + kmem_cache_free(vm_area_cachep, new_m);
50510 + }
50511 +#endif
50512 +
50513 + unlink_anon_vmas(new);
50514 kmem_cache_free(vm_area_cachep, new);
50515 out_err:
50516 return err;
50517 @@ -1999,6 +2327,15 @@ static int __split_vma(struct mm_struct
50518 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
50519 unsigned long addr, int new_below)
50520 {
50521 +
50522 +#ifdef CONFIG_PAX_SEGMEXEC
50523 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
50524 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
50525 + if (mm->map_count >= sysctl_max_map_count-1)
50526 + return -ENOMEM;
50527 + } else
50528 +#endif
50529 +
50530 if (mm->map_count >= sysctl_max_map_count)
50531 return -ENOMEM;
50532
50533 @@ -2010,11 +2347,30 @@ int split_vma(struct mm_struct *mm, stru
50534 * work. This now handles partial unmappings.
50535 * Jeremy Fitzhardinge <jeremy@goop.org>
50536 */
50537 +#ifdef CONFIG_PAX_SEGMEXEC
50538 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
50539 {
50540 + int ret = __do_munmap(mm, start, len);
50541 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
50542 + return ret;
50543 +
50544 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
50545 +}
50546 +
50547 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
50548 +#else
50549 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
50550 +#endif
50551 +{
50552 unsigned long end;
50553 struct vm_area_struct *vma, *prev, *last;
50554
50555 + /*
50556 + * mm->mmap_sem is required to protect against another thread
50557 + * changing the mappings in case we sleep.
50558 + */
50559 + verify_mm_writelocked(mm);
50560 +
50561 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
50562 return -EINVAL;
50563
50564 @@ -2088,6 +2444,8 @@ int do_munmap(struct mm_struct *mm, unsi
50565 /* Fix up all other VM information */
50566 remove_vma_list(mm, vma);
50567
50568 + track_exec_limit(mm, start, end, 0UL);
50569 +
50570 return 0;
50571 }
50572
50573 @@ -2100,22 +2458,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
50574
50575 profile_munmap(addr);
50576
50577 +#ifdef CONFIG_PAX_SEGMEXEC
50578 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
50579 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
50580 + return -EINVAL;
50581 +#endif
50582 +
50583 down_write(&mm->mmap_sem);
50584 ret = do_munmap(mm, addr, len);
50585 up_write(&mm->mmap_sem);
50586 return ret;
50587 }
50588
50589 -static inline void verify_mm_writelocked(struct mm_struct *mm)
50590 -{
50591 -#ifdef CONFIG_DEBUG_VM
50592 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
50593 - WARN_ON(1);
50594 - up_read(&mm->mmap_sem);
50595 - }
50596 -#endif
50597 -}
50598 -
50599 /*
50600 * this is really a simplified "do_mmap". it only handles
50601 * anonymous maps. eventually we may be able to do some
50602 @@ -2129,6 +2483,7 @@ unsigned long do_brk(unsigned long addr,
50603 struct rb_node ** rb_link, * rb_parent;
50604 pgoff_t pgoff = addr >> PAGE_SHIFT;
50605 int error;
50606 + unsigned long charged;
50607
50608 len = PAGE_ALIGN(len);
50609 if (!len)
50610 @@ -2140,16 +2495,30 @@ unsigned long do_brk(unsigned long addr,
50611
50612 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
50613
50614 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50615 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50616 + flags &= ~VM_EXEC;
50617 +
50618 +#ifdef CONFIG_PAX_MPROTECT
50619 + if (mm->pax_flags & MF_PAX_MPROTECT)
50620 + flags &= ~VM_MAYEXEC;
50621 +#endif
50622 +
50623 + }
50624 +#endif
50625 +
50626 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
50627 if (error & ~PAGE_MASK)
50628 return error;
50629
50630 + charged = len >> PAGE_SHIFT;
50631 +
50632 /*
50633 * mlock MCL_FUTURE?
50634 */
50635 if (mm->def_flags & VM_LOCKED) {
50636 unsigned long locked, lock_limit;
50637 - locked = len >> PAGE_SHIFT;
50638 + locked = charged;
50639 locked += mm->locked_vm;
50640 lock_limit = rlimit(RLIMIT_MEMLOCK);
50641 lock_limit >>= PAGE_SHIFT;
50642 @@ -2166,22 +2535,22 @@ unsigned long do_brk(unsigned long addr,
50643 /*
50644 * Clear old maps. this also does some error checking for us
50645 */
50646 - munmap_back:
50647 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
50648 if (vma && vma->vm_start < addr + len) {
50649 if (do_munmap(mm, addr, len))
50650 return -ENOMEM;
50651 - goto munmap_back;
50652 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
50653 + BUG_ON(vma && vma->vm_start < addr + len);
50654 }
50655
50656 /* Check against address space limits *after* clearing old maps... */
50657 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
50658 + if (!may_expand_vm(mm, charged))
50659 return -ENOMEM;
50660
50661 if (mm->map_count > sysctl_max_map_count)
50662 return -ENOMEM;
50663
50664 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
50665 + if (security_vm_enough_memory(charged))
50666 return -ENOMEM;
50667
50668 /* Can we just expand an old private anonymous mapping? */
50669 @@ -2195,7 +2564,7 @@ unsigned long do_brk(unsigned long addr,
50670 */
50671 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
50672 if (!vma) {
50673 - vm_unacct_memory(len >> PAGE_SHIFT);
50674 + vm_unacct_memory(charged);
50675 return -ENOMEM;
50676 }
50677
50678 @@ -2208,11 +2577,12 @@ unsigned long do_brk(unsigned long addr,
50679 vma->vm_page_prot = vm_get_page_prot(flags);
50680 vma_link(mm, vma, prev, rb_link, rb_parent);
50681 out:
50682 - mm->total_vm += len >> PAGE_SHIFT;
50683 + mm->total_vm += charged;
50684 if (flags & VM_LOCKED) {
50685 if (!mlock_vma_pages_range(vma, addr, addr + len))
50686 - mm->locked_vm += (len >> PAGE_SHIFT);
50687 + mm->locked_vm += charged;
50688 }
50689 + track_exec_limit(mm, addr, addr + len, flags);
50690 return addr;
50691 }
50692
50693 @@ -2259,8 +2629,10 @@ void exit_mmap(struct mm_struct *mm)
50694 * Walk the list again, actually closing and freeing it,
50695 * with preemption enabled, without holding any MM locks.
50696 */
50697 - while (vma)
50698 + while (vma) {
50699 + vma->vm_mirror = NULL;
50700 vma = remove_vma(vma);
50701 + }
50702
50703 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
50704 }
50705 @@ -2274,6 +2646,10 @@ int insert_vm_struct(struct mm_struct *
50706 struct vm_area_struct * __vma, * prev;
50707 struct rb_node ** rb_link, * rb_parent;
50708
50709 +#ifdef CONFIG_PAX_SEGMEXEC
50710 + struct vm_area_struct *vma_m = NULL;
50711 +#endif
50712 +
50713 /*
50714 * The vm_pgoff of a purely anonymous vma should be irrelevant
50715 * until its first write fault, when page's anon_vma and index
50716 @@ -2296,7 +2672,22 @@ int insert_vm_struct(struct mm_struct *
50717 if ((vma->vm_flags & VM_ACCOUNT) &&
50718 security_vm_enough_memory_mm(mm, vma_pages(vma)))
50719 return -ENOMEM;
50720 +
50721 +#ifdef CONFIG_PAX_SEGMEXEC
50722 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
50723 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
50724 + if (!vma_m)
50725 + return -ENOMEM;
50726 + }
50727 +#endif
50728 +
50729 vma_link(mm, vma, prev, rb_link, rb_parent);
50730 +
50731 +#ifdef CONFIG_PAX_SEGMEXEC
50732 + if (vma_m)
50733 + BUG_ON(pax_mirror_vma(vma_m, vma));
50734 +#endif
50735 +
50736 return 0;
50737 }
50738
50739 @@ -2314,6 +2705,8 @@ struct vm_area_struct *copy_vma(struct v
50740 struct rb_node **rb_link, *rb_parent;
50741 struct mempolicy *pol;
50742
50743 + BUG_ON(vma->vm_mirror);
50744 +
50745 /*
50746 * If anonymous vma has not yet been faulted, update new pgoff
50747 * to match new location, to increase its chance of merging.
50748 @@ -2363,6 +2756,39 @@ struct vm_area_struct *copy_vma(struct v
50749 kmem_cache_free(vm_area_cachep, new_vma);
50750 return NULL;
50751 }
50752 +
50753 +#ifdef CONFIG_PAX_SEGMEXEC
50754 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
50755 +{
50756 + struct vm_area_struct *prev_m;
50757 + struct rb_node **rb_link_m, *rb_parent_m;
50758 + struct mempolicy *pol_m;
50759 +
50760 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
50761 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
50762 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
50763 + *vma_m = *vma;
50764 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
50765 + if (anon_vma_clone(vma_m, vma))
50766 + return -ENOMEM;
50767 + pol_m = vma_policy(vma_m);
50768 + mpol_get(pol_m);
50769 + vma_set_policy(vma_m, pol_m);
50770 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
50771 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
50772 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
50773 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
50774 + if (vma_m->vm_file)
50775 + get_file(vma_m->vm_file);
50776 + if (vma_m->vm_ops && vma_m->vm_ops->open)
50777 + vma_m->vm_ops->open(vma_m);
50778 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
50779 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
50780 + vma_m->vm_mirror = vma;
50781 + vma->vm_mirror = vma_m;
50782 + return 0;
50783 +}
50784 +#endif
50785
50786 /*
50787 * Return true if the calling process may expand its vm space by the passed
50788 @@ -2374,7 +2800,7 @@ int may_expand_vm(struct mm_struct *mm,
50789 unsigned long lim;
50790
50791 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
50792 -
50793 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
50794 if (cur + npages > lim)
50795 return 0;
50796 return 1;
50797 @@ -2444,6 +2870,15 @@ int install_special_mapping(struct mm_st
50798 vma->vm_start = addr;
50799 vma->vm_end = addr + len;
50800
50801 +#ifdef CONFIG_PAX_MPROTECT
50802 + if (mm->pax_flags & MF_PAX_MPROTECT) {
50803 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
50804 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50805 + else
50806 + vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
50807 + }
50808 +#endif
50809 +
50810 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
50811 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50812
50813 diff -urNp linux-2.6.34.1/mm/mprotect.c linux-2.6.34.1/mm/mprotect.c
50814 --- linux-2.6.34.1/mm/mprotect.c 2010-07-05 14:24:10.000000000 -0400
50815 +++ linux-2.6.34.1/mm/mprotect.c 2010-07-07 09:04:58.000000000 -0400
50816 @@ -23,10 +23,16 @@
50817 #include <linux/mmu_notifier.h>
50818 #include <linux/migrate.h>
50819 #include <linux/perf_event.h>
50820 +
50821 +#ifdef CONFIG_PAX_MPROTECT
50822 +#include <linux/elf.h>
50823 +#endif
50824 +
50825 #include <asm/uaccess.h>
50826 #include <asm/pgtable.h>
50827 #include <asm/cacheflush.h>
50828 #include <asm/tlbflush.h>
50829 +#include <asm/mmu_context.h>
50830
50831 #ifndef pgprot_modify
50832 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
50833 @@ -131,6 +137,48 @@ static void change_protection(struct vm_
50834 flush_tlb_range(vma, start, end);
50835 }
50836
50837 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
50838 +/* called while holding the mmap semaphore for writing, except during stack expansion */
50839 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
50840 +{
50841 + unsigned long oldlimit, newlimit = 0UL;
50842 +
50843 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
50844 + return;
50845 +
50846 + spin_lock(&mm->page_table_lock);
50847 + oldlimit = mm->context.user_cs_limit;
50848 + if ((prot & VM_EXEC) && oldlimit < end)
50849 + /* USER_CS limit moved up */
50850 + newlimit = end;
50851 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
50852 + /* USER_CS limit moved down */
50853 + newlimit = start;
50854 +
50855 + if (newlimit) {
50856 + mm->context.user_cs_limit = newlimit;
50857 +
50858 +#ifdef CONFIG_SMP
50859 + wmb();
50860 + cpus_clear(mm->context.cpu_user_cs_mask);
50861 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
50862 +#endif
50863 +
50864 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
50865 + }
50866 + spin_unlock(&mm->page_table_lock);
50867 + if (newlimit == end) {
50868 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
50869 +
50870 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
50871 + if (is_vm_hugetlb_page(vma))
50872 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
50873 + else
50874 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
50875 + }
50876 +}
50877 +#endif
50878 +
50879 int
50880 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
50881 unsigned long start, unsigned long end, unsigned long newflags)
50882 @@ -143,6 +191,14 @@ mprotect_fixup(struct vm_area_struct *vm
50883 int error;
50884 int dirty_accountable = 0;
50885
50886 +#ifdef CONFIG_PAX_SEGMEXEC
50887 + struct vm_area_struct *vma_m = NULL;
50888 + unsigned long start_m, end_m;
50889 +
50890 + start_m = start + SEGMEXEC_TASK_SIZE;
50891 + end_m = end + SEGMEXEC_TASK_SIZE;
50892 +#endif
50893 +
50894 if (newflags == oldflags) {
50895 *pprev = vma;
50896 return 0;
50897 @@ -164,6 +220,42 @@ mprotect_fixup(struct vm_area_struct *vm
50898 }
50899 }
50900
50901 +#ifdef CONFIG_PAX_SEGMEXEC
50902 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
50903 + if (start != vma->vm_start) {
50904 + error = split_vma(mm, vma, start, 1);
50905 + if (error)
50906 + goto fail;
50907 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
50908 + *pprev = (*pprev)->vm_next;
50909 + }
50910 +
50911 + if (end != vma->vm_end) {
50912 + error = split_vma(mm, vma, end, 0);
50913 + if (error)
50914 + goto fail;
50915 + }
50916 +
50917 + if (pax_find_mirror_vma(vma)) {
50918 + error = __do_munmap(mm, start_m, end_m - start_m);
50919 + if (error)
50920 + goto fail;
50921 + } else {
50922 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
50923 + if (!vma_m) {
50924 + error = -ENOMEM;
50925 + goto fail;
50926 + }
50927 + vma->vm_flags = newflags;
50928 + error = pax_mirror_vma(vma_m, vma);
50929 + if (error) {
50930 + vma->vm_flags = oldflags;
50931 + goto fail;
50932 + }
50933 + }
50934 + }
50935 +#endif
50936 +
50937 /*
50938 * First try to merge with previous and/or next vma.
50939 */
50940 @@ -194,9 +286,21 @@ success:
50941 * vm_flags and vm_page_prot are protected by the mmap_sem
50942 * held in write mode.
50943 */
50944 +
50945 +#ifdef CONFIG_PAX_SEGMEXEC
50946 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
50947 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
50948 +#endif
50949 +
50950 vma->vm_flags = newflags;
50951 +
50952 +#ifdef CONFIG_PAX_MPROTECT
50953 + if (mm->binfmt && mm->binfmt->handle_mprotect)
50954 + mm->binfmt->handle_mprotect(vma, newflags);
50955 +#endif
50956 +
50957 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
50958 - vm_get_page_prot(newflags));
50959 + vm_get_page_prot(vma->vm_flags));
50960
50961 if (vma_wants_writenotify(vma)) {
50962 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
50963 @@ -237,6 +341,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
50964 end = start + len;
50965 if (end <= start)
50966 return -ENOMEM;
50967 +
50968 +#ifdef CONFIG_PAX_SEGMEXEC
50969 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
50970 + if (end > SEGMEXEC_TASK_SIZE)
50971 + return -EINVAL;
50972 + } else
50973 +#endif
50974 +
50975 + if (end > TASK_SIZE)
50976 + return -EINVAL;
50977 +
50978 if (!arch_validate_prot(prot))
50979 return -EINVAL;
50980
50981 @@ -244,7 +359,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
50982 /*
50983 * Does the application expect PROT_READ to imply PROT_EXEC:
50984 */
50985 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
50986 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
50987 prot |= PROT_EXEC;
50988
50989 vm_flags = calc_vm_prot_bits(prot);
50990 @@ -276,6 +391,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
50991 if (start > vma->vm_start)
50992 prev = vma;
50993
50994 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
50995 + error = -EACCES;
50996 + goto out;
50997 + }
50998 +
50999 +#ifdef CONFIG_PAX_MPROTECT
51000 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
51001 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
51002 +#endif
51003 +
51004 for (nstart = start ; ; ) {
51005 unsigned long newflags;
51006
51007 @@ -300,6 +425,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
51008 if (error)
51009 goto out;
51010 perf_event_mmap(vma);
51011 +
51012 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
51013 +
51014 nstart = tmp;
51015
51016 if (nstart < prev->vm_end)
51017 diff -urNp linux-2.6.34.1/mm/mremap.c linux-2.6.34.1/mm/mremap.c
51018 --- linux-2.6.34.1/mm/mremap.c 2010-07-05 14:24:10.000000000 -0400
51019 +++ linux-2.6.34.1/mm/mremap.c 2010-07-07 09:04:58.000000000 -0400
51020 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
51021 continue;
51022 pte = ptep_clear_flush(vma, old_addr, old_pte);
51023 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
51024 +
51025 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51026 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
51027 + pte = pte_exprotect(pte);
51028 +#endif
51029 +
51030 set_pte_at(mm, new_addr, new_pte, pte);
51031 }
51032
51033 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
51034 if (is_vm_hugetlb_page(vma))
51035 goto Einval;
51036
51037 +#ifdef CONFIG_PAX_SEGMEXEC
51038 + if (pax_find_mirror_vma(vma))
51039 + goto Einval;
51040 +#endif
51041 +
51042 /* We can't remap across vm area boundaries */
51043 if (old_len > vma->vm_end - addr)
51044 goto Efault;
51045 @@ -321,20 +332,23 @@ static unsigned long mremap_to(unsigned
51046 unsigned long ret = -EINVAL;
51047 unsigned long charged = 0;
51048 unsigned long map_flags;
51049 + unsigned long pax_task_size = TASK_SIZE;
51050
51051 if (new_addr & ~PAGE_MASK)
51052 goto out;
51053
51054 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
51055 +#ifdef CONFIG_PAX_SEGMEXEC
51056 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
51057 + pax_task_size = SEGMEXEC_TASK_SIZE;
51058 +#endif
51059 +
51060 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
51061 goto out;
51062
51063 /* Check if the location we're moving into overlaps the
51064 * old location at all, and fail if it does.
51065 */
51066 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
51067 - goto out;
51068 -
51069 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
51070 + if (addr + old_len > new_addr && new_addr + new_len > addr)
51071 goto out;
51072
51073 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
51074 @@ -406,6 +420,7 @@ unsigned long do_mremap(unsigned long ad
51075 struct vm_area_struct *vma;
51076 unsigned long ret = -EINVAL;
51077 unsigned long charged = 0;
51078 + unsigned long pax_task_size = TASK_SIZE;
51079
51080 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
51081 goto out;
51082 @@ -424,6 +439,15 @@ unsigned long do_mremap(unsigned long ad
51083 if (!new_len)
51084 goto out;
51085
51086 +#ifdef CONFIG_PAX_SEGMEXEC
51087 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
51088 + pax_task_size = SEGMEXEC_TASK_SIZE;
51089 +#endif
51090 +
51091 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
51092 + old_len > pax_task_size || addr > pax_task_size-old_len)
51093 + goto out;
51094 +
51095 if (flags & MREMAP_FIXED) {
51096 if (flags & MREMAP_MAYMOVE)
51097 ret = mremap_to(addr, old_len, new_addr, new_len);
51098 @@ -473,6 +497,7 @@ unsigned long do_mremap(unsigned long ad
51099 addr + new_len);
51100 }
51101 ret = addr;
51102 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
51103 goto out;
51104 }
51105 }
51106 @@ -499,7 +524,13 @@ unsigned long do_mremap(unsigned long ad
51107 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
51108 if (ret)
51109 goto out;
51110 +
51111 + map_flags = vma->vm_flags;
51112 ret = move_vma(vma, addr, old_len, new_len, new_addr);
51113 + if (!(ret & ~PAGE_MASK)) {
51114 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
51115 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
51116 + }
51117 }
51118 out:
51119 if (ret & ~PAGE_MASK)
51120 diff -urNp linux-2.6.34.1/mm/nommu.c linux-2.6.34.1/mm/nommu.c
51121 --- linux-2.6.34.1/mm/nommu.c 2010-07-05 14:24:10.000000000 -0400
51122 +++ linux-2.6.34.1/mm/nommu.c 2010-07-07 09:04:58.000000000 -0400
51123 @@ -759,15 +759,6 @@ struct vm_area_struct *find_vma(struct m
51124 EXPORT_SYMBOL(find_vma);
51125
51126 /*
51127 - * find a VMA
51128 - * - we don't extend stack VMAs under NOMMU conditions
51129 - */
51130 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
51131 -{
51132 - return find_vma(mm, addr);
51133 -}
51134 -
51135 -/*
51136 * expand a stack to a given address
51137 * - not supported under NOMMU conditions
51138 */
51139 @@ -1484,6 +1475,7 @@ int split_vma(struct mm_struct *mm, stru
51140
51141 /* most fields are the same, copy all, and then fixup */
51142 *new = *vma;
51143 + INIT_LIST_HEAD(&new->anon_vma_chain);
51144 *region = *vma->vm_region;
51145 new->vm_region = region;
51146
51147 diff -urNp linux-2.6.34.1/mm/page_alloc.c linux-2.6.34.1/mm/page_alloc.c
51148 --- linux-2.6.34.1/mm/page_alloc.c 2010-07-05 14:24:10.000000000 -0400
51149 +++ linux-2.6.34.1/mm/page_alloc.c 2010-07-07 09:04:58.000000000 -0400
51150 @@ -606,6 +606,10 @@ static void __free_pages_ok(struct page
51151 int bad = 0;
51152 int wasMlocked = __TestClearPageMlocked(page);
51153
51154 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
51155 + unsigned long index = 1UL << order;
51156 +#endif
51157 +
51158 trace_mm_page_free_direct(page, order);
51159 kmemcheck_free_shadow(page, order);
51160
51161 @@ -619,6 +623,12 @@ static void __free_pages_ok(struct page
51162 debug_check_no_obj_freed(page_address(page),
51163 PAGE_SIZE << order);
51164 }
51165 +
51166 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
51167 + for (; index; --index)
51168 + sanitize_highpage(page + index - 1);
51169 +#endif
51170 +
51171 arch_free_page(page, order);
51172 kernel_map_pages(page, 1 << order, 0);
51173
51174 @@ -722,8 +732,10 @@ static int prep_new_page(struct page *pa
51175 arch_alloc_page(page, order);
51176 kernel_map_pages(page, 1 << order, 1);
51177
51178 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
51179 if (gfp_flags & __GFP_ZERO)
51180 prep_zero_page(page, order, gfp_flags);
51181 +#endif
51182
51183 if (order && (gfp_flags & __GFP_COMP))
51184 prep_compound_page(page, order);
51185 @@ -1119,6 +1131,11 @@ void free_hot_cold_page(struct page *pag
51186 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
51187 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
51188 }
51189 +
51190 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
51191 + sanitize_highpage(page);
51192 +#endif
51193 +
51194 arch_free_page(page, 0);
51195 kernel_map_pages(page, 1, 0);
51196
51197 @@ -3746,7 +3763,7 @@ static void __init setup_usemap(struct p
51198 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
51199 }
51200 #else
51201 -static void inline setup_usemap(struct pglist_data *pgdat,
51202 +static inline void setup_usemap(struct pglist_data *pgdat,
51203 struct zone *zone, unsigned long zonesize) {}
51204 #endif /* CONFIG_SPARSEMEM */
51205
51206 diff -urNp linux-2.6.34.1/mm/percpu.c linux-2.6.34.1/mm/percpu.c
51207 --- linux-2.6.34.1/mm/percpu.c 2010-07-05 14:24:10.000000000 -0400
51208 +++ linux-2.6.34.1/mm/percpu.c 2010-07-07 09:04:58.000000000 -0400
51209 @@ -116,7 +116,7 @@ static unsigned int pcpu_first_unit_cpu
51210 static unsigned int pcpu_last_unit_cpu __read_mostly;
51211
51212 /* the address of the first chunk which starts with the kernel static area */
51213 -void *pcpu_base_addr __read_mostly;
51214 +void *pcpu_base_addr __read_only;
51215 EXPORT_SYMBOL_GPL(pcpu_base_addr);
51216
51217 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
51218 diff -urNp linux-2.6.34.1/mm/rmap.c linux-2.6.34.1/mm/rmap.c
51219 --- linux-2.6.34.1/mm/rmap.c 2010-07-05 14:24:10.000000000 -0400
51220 +++ linux-2.6.34.1/mm/rmap.c 2010-07-07 09:04:58.000000000 -0400
51221 @@ -116,15 +116,29 @@ int anon_vma_prepare(struct vm_area_stru
51222 struct anon_vma *anon_vma = vma->anon_vma;
51223 struct anon_vma_chain *avc;
51224
51225 +#ifdef CONFIG_PAX_SEGMEXEC
51226 + struct anon_vma_chain *avc_m = NULL;
51227 +#endif
51228 +
51229 might_sleep();
51230 if (unlikely(!anon_vma)) {
51231 struct mm_struct *mm = vma->vm_mm;
51232 struct anon_vma *allocated;
51233
51234 +#ifdef CONFIG_PAX_SEGMEXEC
51235 + struct vm_area_struct *vma_m;
51236 +#endif
51237 +
51238 avc = anon_vma_chain_alloc();
51239 if (!avc)
51240 goto out_enomem;
51241
51242 +#ifdef CONFIG_PAX_SEGMEXEC
51243 + avc_m = anon_vma_chain_alloc();
51244 + if (!avc_m)
51245 + goto out_enomem_free_avc;
51246 +#endif
51247 +
51248 anon_vma = find_mergeable_anon_vma(vma);
51249 allocated = NULL;
51250 if (!anon_vma) {
51251 @@ -143,6 +157,20 @@ int anon_vma_prepare(struct vm_area_stru
51252 avc->vma = vma;
51253 list_add(&avc->same_vma, &vma->anon_vma_chain);
51254 list_add(&avc->same_anon_vma, &anon_vma->head);
51255 +
51256 +#ifdef CONFIG_PAX_SEGMEXEC
51257 + vma_m = pax_find_mirror_vma(vma);
51258 + if (vma_m) {
51259 + BUG_ON(vma_m->anon_vma);
51260 + vma_m->anon_vma = anon_vma;
51261 + avc_m->anon_vma = anon_vma;
51262 + avc_m->vma = vma;
51263 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
51264 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
51265 + avc_m = NULL;
51266 + }
51267 +#endif
51268 +
51269 allocated = NULL;
51270 avc = NULL;
51271 }
51272 @@ -151,12 +179,24 @@ int anon_vma_prepare(struct vm_area_stru
51273
51274 if (unlikely(allocated))
51275 anon_vma_free(allocated);
51276 +
51277 +#ifdef CONFIG_PAX_SEGMEXEC
51278 + if (unlikely(avc_m))
51279 + anon_vma_chain_free(avc_m);
51280 +#endif
51281 +
51282 if (unlikely(avc))
51283 anon_vma_chain_free(avc);
51284 }
51285 return 0;
51286
51287 out_enomem_free_avc:
51288 +
51289 +#ifdef CONFIG_PAX_SEGMEXEC
51290 + if (avc_m)
51291 + anon_vma_chain_free(avc_m);
51292 +#endif
51293 +
51294 anon_vma_chain_free(avc);
51295 out_enomem:
51296 return -ENOMEM;
51297 diff -urNp linux-2.6.34.1/mm/shmem.c linux-2.6.34.1/mm/shmem.c
51298 --- linux-2.6.34.1/mm/shmem.c 2010-07-05 14:24:10.000000000 -0400
51299 +++ linux-2.6.34.1/mm/shmem.c 2010-07-07 09:04:58.000000000 -0400
51300 @@ -30,7 +30,7 @@
51301 #include <linux/module.h>
51302 #include <linux/swap.h>
51303
51304 -static struct vfsmount *shm_mnt;
51305 +struct vfsmount *shm_mnt;
51306
51307 #ifdef CONFIG_SHMEM
51308 /*
51309 diff -urNp linux-2.6.34.1/mm/slab.c linux-2.6.34.1/mm/slab.c
51310 --- linux-2.6.34.1/mm/slab.c 2010-07-05 14:24:10.000000000 -0400
51311 +++ linux-2.6.34.1/mm/slab.c 2010-07-07 09:04:58.000000000 -0400
51312 @@ -308,7 +308,7 @@ struct kmem_list3 {
51313 * Need this for bootstrapping a per node allocator.
51314 */
51315 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
51316 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
51317 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
51318 #define CACHE_CACHE 0
51319 #define SIZE_AC MAX_NUMNODES
51320 #define SIZE_L3 (2 * MAX_NUMNODES)
51321 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
51322 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
51323 */
51324 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
51325 - const struct slab *slab, void *obj)
51326 + const struct slab *slab, const void *obj)
51327 {
51328 u32 offset = (obj - slab->s_mem);
51329 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
51330 @@ -584,14 +584,14 @@ struct cache_names {
51331 static struct cache_names __initdata cache_names[] = {
51332 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
51333 #include <linux/kmalloc_sizes.h>
51334 - {NULL,}
51335 + {NULL, NULL}
51336 #undef CACHE
51337 };
51338
51339 static struct arraycache_init initarray_cache __initdata =
51340 - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
51341 + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
51342 static struct arraycache_init initarray_generic =
51343 - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
51344 + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
51345
51346 /* internal cache of cache description objs */
51347 static struct kmem_cache cache_cache = {
51348 @@ -4483,15 +4483,66 @@ static const struct file_operations proc
51349
51350 static int __init slab_proc_init(void)
51351 {
51352 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
51353 + mode_t gr_mode = S_IRUGO;
51354 +
51355 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
51356 + gr_mode = S_IRUSR;
51357 +#endif
51358 +
51359 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
51360 #ifdef CONFIG_DEBUG_SLAB_LEAK
51361 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
51362 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
51363 #endif
51364 return 0;
51365 }
51366 module_init(slab_proc_init);
51367 #endif
51368
51369 +void check_object_size(const void *ptr, unsigned long n, bool to)
51370 +{
51371 +
51372 +#ifdef CONFIG_PAX_USERCOPY
51373 + struct kmem_cache *cachep;
51374 + struct slab *slabp;
51375 + struct page *page;
51376 + unsigned int objnr;
51377 + unsigned long offset;
51378 +
51379 + if (!n)
51380 + return;
51381 +
51382 + if (ZERO_OR_NULL_PTR(ptr))
51383 + goto report;
51384 +
51385 + if (!virt_addr_valid(ptr))
51386 + return;
51387 +
51388 + page = virt_to_head_page(ptr);
51389 +
51390 + if (!PageSlab(page)) {
51391 + if (object_is_on_stack(ptr, n) == -1)
51392 + goto report;
51393 + return;
51394 + }
51395 +
51396 + cachep = page_get_cache(page);
51397 + slabp = page_get_slab(page);
51398 + objnr = obj_to_index(cachep, slabp, ptr);
51399 + BUG_ON(objnr >= cachep->num);
51400 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
51401 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
51402 + return;
51403 +
51404 +report:
51405 + if (to)
51406 + pax_report_leak_to_user(ptr, n);
51407 + else
51408 + pax_report_overflow_from_user(ptr, n);
51409 +#endif
51410 +
51411 +}
51412 +EXPORT_SYMBOL(check_object_size);
51413 +
51414 /**
51415 * ksize - get the actual amount of memory allocated for a given object
51416 * @objp: Pointer to the object
51417 diff -urNp linux-2.6.34.1/mm/slob.c linux-2.6.34.1/mm/slob.c
51418 --- linux-2.6.34.1/mm/slob.c 2010-07-05 14:24:10.000000000 -0400
51419 +++ linux-2.6.34.1/mm/slob.c 2010-07-07 09:04:58.000000000 -0400
51420 @@ -29,7 +29,7 @@
51421 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
51422 * alloc_pages() directly, allocating compound pages so the page order
51423 * does not have to be separately tracked, and also stores the exact
51424 - * allocation size in page->private so that it can be used to accurately
51425 + * allocation size in slob_page->size so that it can be used to accurately
51426 * provide ksize(). These objects are detected in kfree() because slob_page()
51427 * is false for them.
51428 *
51429 @@ -58,6 +58,7 @@
51430 */
51431
51432 #include <linux/kernel.h>
51433 +#include <linux/sched.h>
51434 #include <linux/slab.h>
51435 #include <linux/mm.h>
51436 #include <linux/swap.h> /* struct reclaim_state */
51437 @@ -100,7 +101,8 @@ struct slob_page {
51438 unsigned long flags; /* mandatory */
51439 atomic_t _count; /* mandatory */
51440 slobidx_t units; /* free units left in page */
51441 - unsigned long pad[2];
51442 + unsigned long pad[1];
51443 + unsigned long size; /* size when >=PAGE_SIZE */
51444 slob_t *free; /* first free slob_t in page */
51445 struct list_head list; /* linked list of free pages */
51446 };
51447 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
51448 */
51449 static inline int is_slob_page(struct slob_page *sp)
51450 {
51451 - return PageSlab((struct page *)sp);
51452 + return PageSlab((struct page *)sp) && !sp->size;
51453 }
51454
51455 static inline void set_slob_page(struct slob_page *sp)
51456 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
51457
51458 static inline struct slob_page *slob_page(const void *addr)
51459 {
51460 - return (struct slob_page *)virt_to_page(addr);
51461 + return (struct slob_page *)virt_to_head_page(addr);
51462 }
51463
51464 /*
51465 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
51466 /*
51467 * Return the size of a slob block.
51468 */
51469 -static slobidx_t slob_units(slob_t *s)
51470 +static slobidx_t slob_units(const slob_t *s)
51471 {
51472 if (s->units > 0)
51473 return s->units;
51474 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
51475 /*
51476 * Return the next free slob block pointer after this one.
51477 */
51478 -static slob_t *slob_next(slob_t *s)
51479 +static slob_t *slob_next(const slob_t *s)
51480 {
51481 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
51482 slobidx_t next;
51483 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
51484 /*
51485 * Returns true if s is the last free block in its page.
51486 */
51487 -static int slob_last(slob_t *s)
51488 +static int slob_last(const slob_t *s)
51489 {
51490 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
51491 }
51492 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
51493 if (!page)
51494 return NULL;
51495
51496 + set_slob_page(page);
51497 return page_address(page);
51498 }
51499
51500 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
51501 if (!b)
51502 return NULL;
51503 sp = slob_page(b);
51504 - set_slob_page(sp);
51505
51506 spin_lock_irqsave(&slob_lock, flags);
51507 sp->units = SLOB_UNITS(PAGE_SIZE);
51508 sp->free = b;
51509 + sp->size = 0;
51510 INIT_LIST_HEAD(&sp->list);
51511 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
51512 set_slob_page_free(sp, slob_list);
51513 @@ -475,10 +478,9 @@ out:
51514 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
51515 #endif
51516
51517 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
51518 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
51519 {
51520 - unsigned int *m;
51521 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
51522 + slob_t *m;
51523 void *ret;
51524
51525 lockdep_trace_alloc(gfp);
51526 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
51527
51528 if (!m)
51529 return NULL;
51530 - *m = size;
51531 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
51532 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
51533 + m[0].units = size;
51534 + m[1].units = align;
51535 ret = (void *)m + align;
51536
51537 trace_kmalloc_node(_RET_IP_, ret,
51538 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
51539
51540 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
51541 if (ret) {
51542 - struct page *page;
51543 - page = virt_to_page(ret);
51544 - page->private = size;
51545 + struct slob_page *sp;
51546 + sp = slob_page(ret);
51547 + sp->size = size;
51548 }
51549
51550 trace_kmalloc_node(_RET_IP_, ret,
51551 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
51552 kmemleak_alloc(ret, size, 1, gfp);
51553 return ret;
51554 }
51555 +
51556 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
51557 +{
51558 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
51559 +
51560 + return __kmalloc_node_align(size, gfp, node, align);
51561 +}
51562 EXPORT_SYMBOL(__kmalloc_node);
51563
51564 void kfree(const void *block)
51565 @@ -528,13 +540,84 @@ void kfree(const void *block)
51566 sp = slob_page(block);
51567 if (is_slob_page(sp)) {
51568 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
51569 - unsigned int *m = (unsigned int *)(block - align);
51570 - slob_free(m, *m + align);
51571 - } else
51572 + slob_t *m = (slob_t *)(block - align);
51573 + slob_free(m, m[0].units + align);
51574 + } else {
51575 + clear_slob_page(sp);
51576 + free_slob_page(sp);
51577 + sp->size = 0;
51578 put_page(&sp->page);
51579 + }
51580 }
51581 EXPORT_SYMBOL(kfree);
51582
51583 +void check_object_size(const void *ptr, unsigned long n, bool to)
51584 +{
51585 +
51586 +#ifdef CONFIG_PAX_USERCOPY
51587 + struct slob_page *sp;
51588 + const slob_t *free;
51589 + const void *base;
51590 +
51591 + if (!n)
51592 + return;
51593 +
51594 + if (ZERO_OR_NULL_PTR(ptr))
51595 + goto report;
51596 +
51597 + if (!virt_addr_valid(ptr))
51598 + return;
51599 +
51600 + sp = slob_page(ptr);
51601 + if (!PageSlab((struct page*)sp)) {
51602 + if (object_is_on_stack(ptr, n) == -1)
51603 + goto report;
51604 + return;
51605 + }
51606 +
51607 + if (sp->size) {
51608 + base = page_address(&sp->page);
51609 + if (base <= ptr && n <= sp->size - (ptr - base))
51610 + return;
51611 + goto report;
51612 + }
51613 +
51614 + /* some tricky double walking to find the chunk */
51615 + base = (void *)((unsigned long)ptr & PAGE_MASK);
51616 + free = sp->free;
51617 +
51618 + while (!slob_last(free) && (void *)free <= ptr) {
51619 + base = free + slob_units(free);
51620 + free = slob_next(free);
51621 + }
51622 +
51623 + while (base < (void *)free) {
51624 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
51625 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
51626 + int offset;
51627 +
51628 + if (ptr < base + align)
51629 + goto report;
51630 +
51631 + offset = ptr - base - align;
51632 + if (offset < m) {
51633 + if (n <= m - offset)
51634 + return;
51635 + goto report;
51636 + }
51637 + base += size;
51638 + }
51639 +
51640 +report:
51641 + if (to)
51642 + pax_report_leak_to_user(ptr, n);
51643 + else
51644 + pax_report_overflow_from_user(ptr, n);
51645 +#endif
51646 +
51647 +}
51648 +EXPORT_SYMBOL(check_object_size);
51649 +
51650 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
51651 size_t ksize(const void *block)
51652 {
51653 @@ -547,10 +630,10 @@ size_t ksize(const void *block)
51654 sp = slob_page(block);
51655 if (is_slob_page(sp)) {
51656 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
51657 - unsigned int *m = (unsigned int *)(block - align);
51658 - return SLOB_UNITS(*m) * SLOB_UNIT;
51659 + slob_t *m = (slob_t *)(block - align);
51660 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
51661 } else
51662 - return sp->page.private;
51663 + return sp->size;
51664 }
51665 EXPORT_SYMBOL(ksize);
51666
51667 @@ -605,17 +688,25 @@ void *kmem_cache_alloc_node(struct kmem_
51668 {
51669 void *b;
51670
51671 +#ifdef CONFIG_PAX_USERCOPY
51672 + b = __kmalloc_node_align(c->size, flags, node, c->align);
51673 +#else
51674 if (c->size < PAGE_SIZE) {
51675 b = slob_alloc(c->size, flags, c->align, node);
51676 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
51677 SLOB_UNITS(c->size) * SLOB_UNIT,
51678 flags, node);
51679 } else {
51680 + struct slob_page *sp;
51681 +
51682 b = slob_new_pages(flags, get_order(c->size), node);
51683 + sp = slob_page(b);
51684 + sp->size = c->size;
51685 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
51686 PAGE_SIZE << get_order(c->size),
51687 flags, node);
51688 }
51689 +#endif
51690
51691 if (c->ctor)
51692 c->ctor(b);
51693 @@ -627,10 +718,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
51694
51695 static void __kmem_cache_free(void *b, int size)
51696 {
51697 - if (size < PAGE_SIZE)
51698 + struct slob_page *sp = slob_page(b);
51699 +
51700 + if (is_slob_page(sp))
51701 slob_free(b, size);
51702 - else
51703 + else {
51704 + clear_slob_page(sp);
51705 + free_slob_page(sp);
51706 + sp->size = 0;
51707 slob_free_pages(b, get_order(size));
51708 + }
51709 }
51710
51711 static void kmem_rcu_free(struct rcu_head *head)
51712 @@ -643,15 +740,24 @@ static void kmem_rcu_free(struct rcu_hea
51713
51714 void kmem_cache_free(struct kmem_cache *c, void *b)
51715 {
51716 + int size = c->size;
51717 +
51718 +#ifdef CONFIG_PAX_USERCOPY
51719 + if (size + c->align < PAGE_SIZE) {
51720 + size += c->align;
51721 + b -= c->align;
51722 + }
51723 +#endif
51724 +
51725 kmemleak_free_recursive(b, c->flags);
51726 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
51727 struct slob_rcu *slob_rcu;
51728 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
51729 + slob_rcu = b + (size - sizeof(struct slob_rcu));
51730 INIT_RCU_HEAD(&slob_rcu->head);
51731 - slob_rcu->size = c->size;
51732 + slob_rcu->size = size;
51733 call_rcu(&slob_rcu->head, kmem_rcu_free);
51734 } else {
51735 - __kmem_cache_free(b, c->size);
51736 + __kmem_cache_free(b, size);
51737 }
51738
51739 trace_kmem_cache_free(_RET_IP_, b);
51740 diff -urNp linux-2.6.34.1/mm/slub.c linux-2.6.34.1/mm/slub.c
51741 --- linux-2.6.34.1/mm/slub.c 2010-07-05 14:24:10.000000000 -0400
51742 +++ linux-2.6.34.1/mm/slub.c 2010-07-07 09:04:58.000000000 -0400
51743 @@ -1877,6 +1877,8 @@ void kmem_cache_free(struct kmem_cache *
51744
51745 page = virt_to_head_page(x);
51746
51747 + BUG_ON(!PageSlab(page));
51748 +
51749 slab_free(s, page, x, _RET_IP_);
51750
51751 trace_kmem_cache_free(_RET_IP_, x);
51752 @@ -1921,7 +1923,7 @@ static int slub_min_objects;
51753 * Merge control. If this is set then no merging of slab caches will occur.
51754 * (Could be removed. This was introduced to pacify the merge skeptics.)
51755 */
51756 -static int slub_nomerge;
51757 +static int slub_nomerge = 1;
51758
51759 /*
51760 * Calculate the order of allocation given an slab object size.
51761 @@ -2348,7 +2350,7 @@ static int kmem_cache_open(struct kmem_c
51762 * list to avoid pounding the page allocator excessively.
51763 */
51764 set_min_partial(s, ilog2(s->size));
51765 - s->refcount = 1;
51766 + atomic_set(&s->refcount, 1);
51767 #ifdef CONFIG_NUMA
51768 s->remote_node_defrag_ratio = 1000;
51769 #endif
51770 @@ -2488,8 +2490,7 @@ static inline int kmem_cache_close(struc
51771 void kmem_cache_destroy(struct kmem_cache *s)
51772 {
51773 down_write(&slub_lock);
51774 - s->refcount--;
51775 - if (!s->refcount) {
51776 + if (atomic_dec_and_test(&s->refcount)) {
51777 list_del(&s->list);
51778 up_write(&slub_lock);
51779 if (kmem_cache_close(s)) {
51780 @@ -2781,6 +2782,46 @@ void *__kmalloc_node(size_t size, gfp_t
51781 EXPORT_SYMBOL(__kmalloc_node);
51782 #endif
51783
51784 +void check_object_size(const void *ptr, unsigned long n, bool to)
51785 +{
51786 +
51787 +#ifdef CONFIG_PAX_USERCOPY
51788 + struct page *page;
51789 + struct kmem_cache *s;
51790 + unsigned long offset;
51791 +
51792 + if (!n)
51793 + return;
51794 +
51795 + if (ZERO_OR_NULL_PTR(ptr))
51796 + goto report;
51797 +
51798 + if (!virt_addr_valid(ptr))
51799 + return;
51800 +
51801 + page = get_object_page(ptr);
51802 +
51803 + if (!page) {
51804 + if (object_is_on_stack(ptr, n) == -1)
51805 + goto report;
51806 + return;
51807 + }
51808 +
51809 + s = page->slab;
51810 + offset = (ptr - page_address(page)) % s->size;
51811 + if (offset <= s->objsize && n <= s->objsize - offset)
51812 + return;
51813 +
51814 +report:
51815 + if (to)
51816 + pax_report_leak_to_user(ptr, n);
51817 + else
51818 + pax_report_overflow_from_user(ptr, n);
51819 +#endif
51820 +
51821 +}
51822 +EXPORT_SYMBOL(check_object_size);
51823 +
51824 size_t ksize(const void *object)
51825 {
51826 struct page *page;
51827 @@ -3050,7 +3091,7 @@ void __init kmem_cache_init(void)
51828 */
51829 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
51830 sizeof(struct kmem_cache_node), GFP_NOWAIT);
51831 - kmalloc_caches[0].refcount = -1;
51832 + atomic_set(&kmalloc_caches[0].refcount, -1);
51833 caches++;
51834
51835 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
51836 @@ -3159,7 +3200,7 @@ static int slab_unmergeable(struct kmem_
51837 /*
51838 * We may have set a slab to be unmergeable during bootstrap.
51839 */
51840 - if (s->refcount < 0)
51841 + if (atomic_read(&s->refcount) < 0)
51842 return 1;
51843
51844 return 0;
51845 @@ -3217,7 +3258,7 @@ struct kmem_cache *kmem_cache_create(con
51846 down_write(&slub_lock);
51847 s = find_mergeable(size, align, flags, name, ctor);
51848 if (s) {
51849 - s->refcount++;
51850 + atomic_inc(&s->refcount);
51851 /*
51852 * Adjust the object sizes so that we clear
51853 * the complete object on kzalloc.
51854 @@ -3228,7 +3269,7 @@ struct kmem_cache *kmem_cache_create(con
51855
51856 if (sysfs_slab_alias(s, name)) {
51857 down_write(&slub_lock);
51858 - s->refcount--;
51859 + atomic_dec(&s->refcount);
51860 up_write(&slub_lock);
51861 goto err;
51862 }
51863 @@ -3943,7 +3984,7 @@ SLAB_ATTR_RO(ctor);
51864
51865 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
51866 {
51867 - return sprintf(buf, "%d\n", s->refcount - 1);
51868 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
51869 }
51870 SLAB_ATTR_RO(aliases);
51871
51872 @@ -4664,7 +4705,13 @@ static const struct file_operations proc
51873
51874 static int __init slab_proc_init(void)
51875 {
51876 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
51877 + mode_t gr_mode = S_IRUGO;
51878 +
51879 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
51880 + gr_mode = S_IRUSR;
51881 +#endif
51882 +
51883 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
51884 return 0;
51885 }
51886 module_init(slab_proc_init);
51887 diff -urNp linux-2.6.34.1/mm/util.c linux-2.6.34.1/mm/util.c
51888 --- linux-2.6.34.1/mm/util.c 2010-07-05 14:24:10.000000000 -0400
51889 +++ linux-2.6.34.1/mm/util.c 2010-07-07 09:04:58.000000000 -0400
51890 @@ -245,6 +245,12 @@ EXPORT_SYMBOL(strndup_user);
51891 void arch_pick_mmap_layout(struct mm_struct *mm)
51892 {
51893 mm->mmap_base = TASK_UNMAPPED_BASE;
51894 +
51895 +#ifdef CONFIG_PAX_RANDMMAP
51896 + if (mm->pax_flags & MF_PAX_RANDMMAP)
51897 + mm->mmap_base += mm->delta_mmap;
51898 +#endif
51899 +
51900 mm->get_unmapped_area = arch_get_unmapped_area;
51901 mm->unmap_area = arch_unmap_area;
51902 }
51903 diff -urNp linux-2.6.34.1/mm/vmalloc.c linux-2.6.34.1/mm/vmalloc.c
51904 --- linux-2.6.34.1/mm/vmalloc.c 2010-07-05 14:24:10.000000000 -0400
51905 +++ linux-2.6.34.1/mm/vmalloc.c 2010-07-07 09:04:58.000000000 -0400
51906 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
51907
51908 pte = pte_offset_kernel(pmd, addr);
51909 do {
51910 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
51911 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
51912 +
51913 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51914 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
51915 + BUG_ON(!pte_exec(*pte));
51916 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
51917 + continue;
51918 + }
51919 +#endif
51920 +
51921 + {
51922 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
51923 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
51924 + }
51925 } while (pte++, addr += PAGE_SIZE, addr != end);
51926 }
51927
51928 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
51929 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
51930 {
51931 pte_t *pte;
51932 + int ret = -ENOMEM;
51933
51934 /*
51935 * nr is a running index into the array which helps higher level
51936 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
51937 pte = pte_alloc_kernel(pmd, addr);
51938 if (!pte)
51939 return -ENOMEM;
51940 +
51941 + pax_open_kernel();
51942 do {
51943 struct page *page = pages[*nr];
51944
51945 - if (WARN_ON(!pte_none(*pte)))
51946 - return -EBUSY;
51947 - if (WARN_ON(!page))
51948 - return -ENOMEM;
51949 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51950 + if (!(pgprot_val(prot) & _PAGE_NX))
51951 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
51952 + else
51953 +#endif
51954 +
51955 + if (WARN_ON(!pte_none(*pte))) {
51956 + ret = -EBUSY;
51957 + goto out;
51958 + }
51959 + if (WARN_ON(!page)) {
51960 + ret = -ENOMEM;
51961 + goto out;
51962 + }
51963 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
51964 (*nr)++;
51965 } while (pte++, addr += PAGE_SIZE, addr != end);
51966 - return 0;
51967 + ret = 0;
51968 +out:
51969 + pax_close_kernel();
51970 + return ret;
51971 }
51972
51973 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
51974 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
51975 * and fall back on vmalloc() if that fails. Others
51976 * just put it in the vmalloc space.
51977 */
51978 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
51979 +#ifdef CONFIG_MODULES
51980 +#ifdef MODULES_VADDR
51981 unsigned long addr = (unsigned long)x;
51982 if (addr >= MODULES_VADDR && addr < MODULES_END)
51983 return 1;
51984 #endif
51985 +
51986 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51987 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
51988 + return 1;
51989 +#endif
51990 +
51991 +#endif
51992 +
51993 return is_vmalloc_addr(x);
51994 }
51995
51996 @@ -292,13 +328,13 @@ static void __insert_vmap_area(struct vm
51997 struct rb_node *tmp;
51998
51999 while (*p) {
52000 - struct vmap_area *tmp;
52001 + struct vmap_area *varea;
52002
52003 parent = *p;
52004 - tmp = rb_entry(parent, struct vmap_area, rb_node);
52005 - if (va->va_start < tmp->va_end)
52006 + varea = rb_entry(parent, struct vmap_area, rb_node);
52007 + if (va->va_start < varea->va_end)
52008 p = &(*p)->rb_left;
52009 - else if (va->va_end > tmp->va_start)
52010 + else if (va->va_end > varea->va_start)
52011 p = &(*p)->rb_right;
52012 else
52013 BUG();
52014 @@ -1224,6 +1260,16 @@ static struct vm_struct *__get_vm_area_n
52015 struct vm_struct *area;
52016
52017 BUG_ON(in_interrupt());
52018 +
52019 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
52020 + if (flags & VM_KERNEXEC) {
52021 + if (start != VMALLOC_START || end != VMALLOC_END)
52022 + return NULL;
52023 + start = (unsigned long)MODULES_EXEC_VADDR;
52024 + end = (unsigned long)MODULES_EXEC_END;
52025 + }
52026 +#endif
52027 +
52028 if (flags & VM_IOREMAP) {
52029 int bit = fls(size);
52030
52031 @@ -1449,6 +1495,11 @@ void *vmap(struct page **pages, unsigned
52032 if (count > totalram_pages)
52033 return NULL;
52034
52035 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
52036 + if (!(pgprot_val(prot) & _PAGE_NX))
52037 + flags |= VM_KERNEXEC;
52038 +#endif
52039 +
52040 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
52041 __builtin_return_address(0));
52042 if (!area)
52043 @@ -1558,6 +1609,13 @@ static void *__vmalloc_node(unsigned lon
52044 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
52045 return NULL;
52046
52047 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
52048 + if (!(pgprot_val(prot) & _PAGE_NX))
52049 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
52050 + node, gfp_mask, caller);
52051 + else
52052 +#endif
52053 +
52054 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
52055 VMALLOC_END, node, gfp_mask, caller);
52056
52057 @@ -1576,6 +1634,7 @@ static void *__vmalloc_node(unsigned lon
52058 return addr;
52059 }
52060
52061 +#undef __vmalloc
52062 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
52063 {
52064 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
52065 @@ -1592,6 +1651,7 @@ EXPORT_SYMBOL(__vmalloc);
52066 * For tight control over page level allocator and protection flags
52067 * use __vmalloc() instead.
52068 */
52069 +#undef vmalloc
52070 void *vmalloc(unsigned long size)
52071 {
52072 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
52073 @@ -1606,6 +1666,7 @@ EXPORT_SYMBOL(vmalloc);
52074 * The resulting memory area is zeroed so it can be mapped to userspace
52075 * without leaking data.
52076 */
52077 +#undef vmalloc_user
52078 void *vmalloc_user(unsigned long size)
52079 {
52080 struct vm_struct *area;
52081 @@ -1633,6 +1694,7 @@ EXPORT_SYMBOL(vmalloc_user);
52082 * For tight control over page level allocator and protection flags
52083 * use __vmalloc() instead.
52084 */
52085 +#undef vmalloc_node
52086 void *vmalloc_node(unsigned long size, int node)
52087 {
52088 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
52089 @@ -1655,10 +1717,10 @@ EXPORT_SYMBOL(vmalloc_node);
52090 * For tight control over page level allocator and protection flags
52091 * use __vmalloc() instead.
52092 */
52093 -
52094 +#undef vmalloc_exec
52095 void *vmalloc_exec(unsigned long size)
52096 {
52097 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
52098 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
52099 -1, __builtin_return_address(0));
52100 }
52101
52102 @@ -1677,6 +1739,7 @@ void *vmalloc_exec(unsigned long size)
52103 * Allocate enough 32bit PA addressable pages to cover @size from the
52104 * page level allocator and map them into contiguous kernel virtual space.
52105 */
52106 +#undef vmalloc_32
52107 void *vmalloc_32(unsigned long size)
52108 {
52109 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
52110 @@ -1691,6 +1754,7 @@ EXPORT_SYMBOL(vmalloc_32);
52111 * The resulting memory area is 32bit addressable and zeroed so it can be
52112 * mapped to userspace without leaking data.
52113 */
52114 +#undef vmalloc_32_user
52115 void *vmalloc_32_user(unsigned long size)
52116 {
52117 struct vm_struct *area;
52118 diff -urNp linux-2.6.34.1/net/8021q/vlan.c linux-2.6.34.1/net/8021q/vlan.c
52119 --- linux-2.6.34.1/net/8021q/vlan.c 2010-07-05 14:24:10.000000000 -0400
52120 +++ linux-2.6.34.1/net/8021q/vlan.c 2010-07-07 09:04:58.000000000 -0400
52121 @@ -614,8 +614,7 @@ static int vlan_ioctl_handler(struct net
52122 err = -EPERM;
52123 if (!capable(CAP_NET_ADMIN))
52124 break;
52125 - if ((args.u.name_type >= 0) &&
52126 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
52127 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
52128 struct vlan_net *vn;
52129
52130 vn = net_generic(net, vlan_net_id);
52131 diff -urNp linux-2.6.34.1/net/atm/atm_misc.c linux-2.6.34.1/net/atm/atm_misc.c
52132 --- linux-2.6.34.1/net/atm/atm_misc.c 2010-07-05 14:24:10.000000000 -0400
52133 +++ linux-2.6.34.1/net/atm/atm_misc.c 2010-07-07 09:04:58.000000000 -0400
52134 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
52135 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
52136 return 1;
52137 atm_return(vcc, truesize);
52138 - atomic_inc(&vcc->stats->rx_drop);
52139 + atomic_inc_unchecked(&vcc->stats->rx_drop);
52140 return 0;
52141 }
52142 EXPORT_SYMBOL(atm_charge);
52143 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
52144 }
52145 }
52146 atm_return(vcc, guess);
52147 - atomic_inc(&vcc->stats->rx_drop);
52148 + atomic_inc_unchecked(&vcc->stats->rx_drop);
52149 return NULL;
52150 }
52151 EXPORT_SYMBOL(atm_alloc_charge);
52152 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
52153
52154 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
52155 {
52156 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
52157 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
52158 __SONET_ITEMS
52159 #undef __HANDLE_ITEM
52160 }
52161 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
52162
52163 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
52164 {
52165 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
52166 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
52167 __SONET_ITEMS
52168 #undef __HANDLE_ITEM
52169 }
52170 diff -urNp linux-2.6.34.1/net/atm/proc.c linux-2.6.34.1/net/atm/proc.c
52171 --- linux-2.6.34.1/net/atm/proc.c 2010-07-05 14:24:10.000000000 -0400
52172 +++ linux-2.6.34.1/net/atm/proc.c 2010-07-07 09:04:58.000000000 -0400
52173 @@ -44,9 +44,9 @@ static void add_stats(struct seq_file *s
52174 const struct k_atm_aal_stats *stats)
52175 {
52176 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
52177 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
52178 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
52179 - atomic_read(&stats->rx_drop));
52180 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
52181 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
52182 + atomic_read_unchecked(&stats->rx_drop));
52183 }
52184
52185 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
52186 diff -urNp linux-2.6.34.1/net/atm/resources.c linux-2.6.34.1/net/atm/resources.c
52187 --- linux-2.6.34.1/net/atm/resources.c 2010-07-05 14:24:10.000000000 -0400
52188 +++ linux-2.6.34.1/net/atm/resources.c 2010-07-07 09:04:58.000000000 -0400
52189 @@ -159,7 +159,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
52190 static void copy_aal_stats(struct k_atm_aal_stats *from,
52191 struct atm_aal_stats *to)
52192 {
52193 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
52194 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
52195 __AAL_STAT_ITEMS
52196 #undef __HANDLE_ITEM
52197 }
52198 @@ -167,7 +167,7 @@ static void copy_aal_stats(struct k_atm_
52199 static void subtract_aal_stats(struct k_atm_aal_stats *from,
52200 struct atm_aal_stats *to)
52201 {
52202 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
52203 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
52204 __AAL_STAT_ITEMS
52205 #undef __HANDLE_ITEM
52206 }
52207 diff -urNp linux-2.6.34.1/net/bridge/br_stp_if.c linux-2.6.34.1/net/bridge/br_stp_if.c
52208 --- linux-2.6.34.1/net/bridge/br_stp_if.c 2010-07-05 14:24:10.000000000 -0400
52209 +++ linux-2.6.34.1/net/bridge/br_stp_if.c 2010-07-07 09:04:58.000000000 -0400
52210 @@ -147,7 +147,7 @@ static void br_stp_stop(struct net_bridg
52211 char *envp[] = { NULL };
52212
52213 if (br->stp_enabled == BR_USER_STP) {
52214 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
52215 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
52216 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
52217 br->dev->name, r);
52218
52219 diff -urNp linux-2.6.34.1/net/bridge/netfilter/ebtables.c linux-2.6.34.1/net/bridge/netfilter/ebtables.c
52220 --- linux-2.6.34.1/net/bridge/netfilter/ebtables.c 2010-07-05 14:24:10.000000000 -0400
52221 +++ linux-2.6.34.1/net/bridge/netfilter/ebtables.c 2010-07-07 09:04:58.000000000 -0400
52222 @@ -1515,7 +1515,7 @@ static int do_ebt_get_ctl(struct sock *s
52223 tmp.valid_hooks = t->table->valid_hooks;
52224 }
52225 mutex_unlock(&ebt_mutex);
52226 - if (copy_to_user(user, &tmp, *len) != 0){
52227 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
52228 BUGPRINT("c2u Didn't work\n");
52229 ret = -EFAULT;
52230 break;
52231 diff -urNp linux-2.6.34.1/net/core/dev.c linux-2.6.34.1/net/core/dev.c
52232 --- linux-2.6.34.1/net/core/dev.c 2010-07-05 14:24:10.000000000 -0400
52233 +++ linux-2.6.34.1/net/core/dev.c 2010-07-07 09:04:58.000000000 -0400
52234 @@ -2249,7 +2249,7 @@ int netif_rx_ni(struct sk_buff *skb)
52235 }
52236 EXPORT_SYMBOL(netif_rx_ni);
52237
52238 -static void net_tx_action(struct softirq_action *h)
52239 +static void net_tx_action(void)
52240 {
52241 struct softnet_data *sd = &__get_cpu_var(softnet_data);
52242
52243 @@ -3019,7 +3019,7 @@ void netif_napi_del(struct napi_struct *
52244 EXPORT_SYMBOL(netif_napi_del);
52245
52246
52247 -static void net_rx_action(struct softirq_action *h)
52248 +static void net_rx_action(void)
52249 {
52250 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
52251 unsigned long time_limit = jiffies + 2;
52252 diff -urNp linux-2.6.34.1/net/core/flow.c linux-2.6.34.1/net/core/flow.c
52253 --- linux-2.6.34.1/net/core/flow.c 2010-07-05 14:24:10.000000000 -0400
52254 +++ linux-2.6.34.1/net/core/flow.c 2010-07-07 09:04:58.000000000 -0400
52255 @@ -39,7 +39,7 @@ atomic_t flow_cache_genid = ATOMIC_INIT(
52256
52257 static u32 flow_hash_shift;
52258 #define flow_hash_size (1 << flow_hash_shift)
52259 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
52260 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
52261
52262 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
52263
52264 @@ -52,7 +52,7 @@ struct flow_percpu_info {
52265 u32 hash_rnd;
52266 int count;
52267 };
52268 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
52269 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
52270
52271 #define flow_hash_rnd_recalc(cpu) \
52272 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
52273 @@ -69,7 +69,7 @@ struct flow_flush_info {
52274 atomic_t cpuleft;
52275 struct completion completion;
52276 };
52277 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
52278 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
52279
52280 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
52281
52282 diff -urNp linux-2.6.34.1/net/core/sock.c linux-2.6.34.1/net/core/sock.c
52283 --- linux-2.6.34.1/net/core/sock.c 2010-07-05 14:24:10.000000000 -0400
52284 +++ linux-2.6.34.1/net/core/sock.c 2010-07-07 09:04:58.000000000 -0400
52285 @@ -900,7 +900,7 @@ int sock_getsockopt(struct socket *sock,
52286 return -ENOTCONN;
52287 if (lv < len)
52288 return -EINVAL;
52289 - if (copy_to_user(optval, address, len))
52290 + if (len > sizeof(address) || copy_to_user(optval, address, len))
52291 return -EFAULT;
52292 goto lenout;
52293 }
52294 @@ -933,7 +933,7 @@ int sock_getsockopt(struct socket *sock,
52295
52296 if (len > lv)
52297 len = lv;
52298 - if (copy_to_user(optval, &v, len))
52299 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
52300 return -EFAULT;
52301 lenout:
52302 if (put_user(len, optlen))
52303 diff -urNp linux-2.6.34.1/net/dccp/ccids/ccid3.c linux-2.6.34.1/net/dccp/ccids/ccid3.c
52304 --- linux-2.6.34.1/net/dccp/ccids/ccid3.c 2010-07-05 14:24:10.000000000 -0400
52305 +++ linux-2.6.34.1/net/dccp/ccids/ccid3.c 2010-07-07 09:04:58.000000000 -0400
52306 @@ -41,7 +41,7 @@
52307 static int ccid3_debug;
52308 #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
52309 #else
52310 -#define ccid3_pr_debug(format, a...)
52311 +#define ccid3_pr_debug(format, a...) do {} while (0)
52312 #endif
52313
52314 /*
52315 diff -urNp linux-2.6.34.1/net/dccp/dccp.h linux-2.6.34.1/net/dccp/dccp.h
52316 --- linux-2.6.34.1/net/dccp/dccp.h 2010-07-05 14:24:10.000000000 -0400
52317 +++ linux-2.6.34.1/net/dccp/dccp.h 2010-07-07 09:04:58.000000000 -0400
52318 @@ -44,9 +44,9 @@ extern int dccp_debug;
52319 #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
52320 #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
52321 #else
52322 -#define dccp_pr_debug(format, a...)
52323 -#define dccp_pr_debug_cat(format, a...)
52324 -#define dccp_debug(format, a...)
52325 +#define dccp_pr_debug(format, a...) do {} while (0)
52326 +#define dccp_pr_debug_cat(format, a...) do {} while (0)
52327 +#define dccp_debug(format, a...) do {} while (0)
52328 #endif
52329
52330 extern struct inet_hashinfo dccp_hashinfo;
52331 diff -urNp linux-2.6.34.1/net/decnet/sysctl_net_decnet.c linux-2.6.34.1/net/decnet/sysctl_net_decnet.c
52332 --- linux-2.6.34.1/net/decnet/sysctl_net_decnet.c 2010-07-05 14:24:10.000000000 -0400
52333 +++ linux-2.6.34.1/net/decnet/sysctl_net_decnet.c 2010-07-07 09:04:58.000000000 -0400
52334 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
52335
52336 if (len > *lenp) len = *lenp;
52337
52338 - if (copy_to_user(buffer, addr, len))
52339 + if (len > sizeof(addr) || copy_to_user(buffer, addr, len))
52340 return -EFAULT;
52341
52342 *lenp = len;
52343 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
52344
52345 if (len > *lenp) len = *lenp;
52346
52347 - if (copy_to_user(buffer, devname, len))
52348 + if (len > sizeof(devname) || copy_to_user(buffer, devname, len))
52349 return -EFAULT;
52350
52351 *lenp = len;
52352 diff -urNp linux-2.6.34.1/net/ipv4/inet_hashtables.c linux-2.6.34.1/net/ipv4/inet_hashtables.c
52353 --- linux-2.6.34.1/net/ipv4/inet_hashtables.c 2010-07-05 14:24:10.000000000 -0400
52354 +++ linux-2.6.34.1/net/ipv4/inet_hashtables.c 2010-07-07 09:04:58.000000000 -0400
52355 @@ -18,11 +18,14 @@
52356 #include <linux/sched.h>
52357 #include <linux/slab.h>
52358 #include <linux/wait.h>
52359 +#include <linux/security.h>
52360
52361 #include <net/inet_connection_sock.h>
52362 #include <net/inet_hashtables.h>
52363 #include <net/ip.h>
52364
52365 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
52366 +
52367 /*
52368 * Allocate and initialize a new local port bind bucket.
52369 * The bindhash mutex for snum's hash chain must be held here.
52370 @@ -506,6 +509,8 @@ ok:
52371 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
52372 spin_unlock(&head->lock);
52373
52374 + gr_update_task_in_ip_table(current, inet_sk(sk));
52375 +
52376 if (tw) {
52377 inet_twsk_deschedule(tw, death_row);
52378 while (twrefcnt) {
52379 diff -urNp linux-2.6.34.1/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.34.1/net/ipv4/netfilter/nf_nat_snmp_basic.c
52380 --- linux-2.6.34.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-07-05 14:24:10.000000000 -0400
52381 +++ linux-2.6.34.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-07-07 09:04:58.000000000 -0400
52382 @@ -398,7 +398,7 @@ static unsigned char asn1_octets_decode(
52383
52384 *len = 0;
52385
52386 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
52387 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
52388 if (*octets == NULL) {
52389 if (net_ratelimit())
52390 printk("OOM in bsalg (%d)\n", __LINE__);
52391 diff -urNp linux-2.6.34.1/net/ipv4/tcp_ipv4.c linux-2.6.34.1/net/ipv4/tcp_ipv4.c
52392 --- linux-2.6.34.1/net/ipv4/tcp_ipv4.c 2010-07-05 14:24:10.000000000 -0400
52393 +++ linux-2.6.34.1/net/ipv4/tcp_ipv4.c 2010-07-07 09:04:58.000000000 -0400
52394 @@ -85,6 +85,9 @@
52395 int sysctl_tcp_tw_reuse __read_mostly;
52396 int sysctl_tcp_low_latency __read_mostly;
52397
52398 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52399 +extern int grsec_enable_blackhole;
52400 +#endif
52401
52402 #ifdef CONFIG_TCP_MD5SIG
52403 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
52404 @@ -1654,12 +1657,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
52405 TCP_SKB_CB(skb)->sacked = 0;
52406
52407 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
52408 - if (!sk)
52409 + if (!sk) {
52410 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52411 + ret = 1;
52412 +#endif
52413 goto no_tcp_socket;
52414 -
52415 + }
52416 process:
52417 - if (sk->sk_state == TCP_TIME_WAIT)
52418 + if (sk->sk_state == TCP_TIME_WAIT) {
52419 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52420 + ret = 2;
52421 +#endif
52422 goto do_time_wait;
52423 + }
52424
52425 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
52426 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
52427 @@ -1709,6 +1719,10 @@ no_tcp_socket:
52428 bad_packet:
52429 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
52430 } else {
52431 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52432 + if (!grsec_enable_blackhole || (ret == 1 &&
52433 + (skb->dev->flags & IFF_LOOPBACK)))
52434 +#endif
52435 tcp_v4_send_reset(NULL, skb);
52436 }
52437
52438 diff -urNp linux-2.6.34.1/net/ipv4/tcp_minisocks.c linux-2.6.34.1/net/ipv4/tcp_minisocks.c
52439 --- linux-2.6.34.1/net/ipv4/tcp_minisocks.c 2010-07-05 14:24:10.000000000 -0400
52440 +++ linux-2.6.34.1/net/ipv4/tcp_minisocks.c 2010-07-07 09:04:58.000000000 -0400
52441 @@ -27,6 +27,10 @@
52442 #include <net/inet_common.h>
52443 #include <net/xfrm.h>
52444
52445 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52446 +extern int grsec_enable_blackhole;
52447 +#endif
52448 +
52449 int sysctl_tcp_syncookies __read_mostly = 1;
52450 EXPORT_SYMBOL(sysctl_tcp_syncookies);
52451
52452 @@ -699,6 +703,10 @@ listen_overflow:
52453
52454 embryonic_reset:
52455 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
52456 +
52457 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52458 + if (!grsec_enable_blackhole)
52459 +#endif
52460 if (!(flg & TCP_FLAG_RST))
52461 req->rsk_ops->send_reset(sk, skb);
52462
52463 diff -urNp linux-2.6.34.1/net/ipv4/tcp_probe.c linux-2.6.34.1/net/ipv4/tcp_probe.c
52464 --- linux-2.6.34.1/net/ipv4/tcp_probe.c 2010-07-05 14:24:10.000000000 -0400
52465 +++ linux-2.6.34.1/net/ipv4/tcp_probe.c 2010-07-07 09:04:58.000000000 -0400
52466 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
52467 if (cnt + width >= len)
52468 break;
52469
52470 - if (copy_to_user(buf + cnt, tbuf, width))
52471 + if (width > sizeof(tbuf) || copy_to_user(buf + cnt, tbuf, width))
52472 return -EFAULT;
52473 cnt += width;
52474 }
52475 diff -urNp linux-2.6.34.1/net/ipv4/tcp_timer.c linux-2.6.34.1/net/ipv4/tcp_timer.c
52476 --- linux-2.6.34.1/net/ipv4/tcp_timer.c 2010-07-05 14:24:10.000000000 -0400
52477 +++ linux-2.6.34.1/net/ipv4/tcp_timer.c 2010-07-07 09:04:58.000000000 -0400
52478 @@ -22,6 +22,10 @@
52479 #include <linux/gfp.h>
52480 #include <net/tcp.h>
52481
52482 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52483 +extern int grsec_lastack_retries;
52484 +#endif
52485 +
52486 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
52487 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
52488 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
52489 @@ -195,6 +199,13 @@ static int tcp_write_timeout(struct sock
52490 }
52491 }
52492
52493 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52494 + if ((sk->sk_state == TCP_LAST_ACK) &&
52495 + (grsec_lastack_retries > 0) &&
52496 + (grsec_lastack_retries < retry_until))
52497 + retry_until = grsec_lastack_retries;
52498 +#endif
52499 +
52500 if (retransmits_timed_out(sk, retry_until)) {
52501 /* Has it gone just too far? */
52502 tcp_write_err(sk);
52503 diff -urNp linux-2.6.34.1/net/ipv4/udp.c linux-2.6.34.1/net/ipv4/udp.c
52504 --- linux-2.6.34.1/net/ipv4/udp.c 2010-07-05 14:24:10.000000000 -0400
52505 +++ linux-2.6.34.1/net/ipv4/udp.c 2010-07-07 09:04:58.000000000 -0400
52506 @@ -86,6 +86,7 @@
52507 #include <linux/types.h>
52508 #include <linux/fcntl.h>
52509 #include <linux/module.h>
52510 +#include <linux/security.h>
52511 #include <linux/socket.h>
52512 #include <linux/sockios.h>
52513 #include <linux/igmp.h>
52514 @@ -107,6 +108,10 @@
52515 #include <net/xfrm.h>
52516 #include "udp_impl.h"
52517
52518 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52519 +extern int grsec_enable_blackhole;
52520 +#endif
52521 +
52522 struct udp_table udp_table __read_mostly;
52523 EXPORT_SYMBOL(udp_table);
52524
52525 @@ -563,6 +568,9 @@ found:
52526 return s;
52527 }
52528
52529 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
52530 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
52531 +
52532 /*
52533 * This routine is called by the ICMP module when it gets some
52534 * sort of error condition. If err < 0 then the socket should
52535 @@ -831,9 +839,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
52536 dport = usin->sin_port;
52537 if (dport == 0)
52538 return -EINVAL;
52539 +
52540 + err = gr_search_udp_sendmsg(sk, usin);
52541 + if (err)
52542 + return err;
52543 } else {
52544 if (sk->sk_state != TCP_ESTABLISHED)
52545 return -EDESTADDRREQ;
52546 +
52547 + err = gr_search_udp_sendmsg(sk, NULL);
52548 + if (err)
52549 + return err;
52550 +
52551 daddr = inet->inet_daddr;
52552 dport = inet->inet_dport;
52553 /* Open fast path for connected socket.
52554 @@ -1138,6 +1155,10 @@ try_again:
52555 if (!skb)
52556 goto out;
52557
52558 + err = gr_search_udp_recvmsg(sk, skb);
52559 + if (err)
52560 + goto out_free;
52561 +
52562 ulen = skb->len - sizeof(struct udphdr);
52563 if (len > ulen)
52564 len = ulen;
52565 @@ -1570,6 +1591,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
52566 goto csum_error;
52567
52568 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
52569 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52570 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
52571 +#endif
52572 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
52573
52574 /*
52575 diff -urNp linux-2.6.34.1/net/ipv6/exthdrs.c linux-2.6.34.1/net/ipv6/exthdrs.c
52576 --- linux-2.6.34.1/net/ipv6/exthdrs.c 2010-07-05 14:24:10.000000000 -0400
52577 +++ linux-2.6.34.1/net/ipv6/exthdrs.c 2010-07-07 09:04:58.000000000 -0400
52578 @@ -636,7 +636,7 @@ static struct tlvtype_proc tlvprochopopt
52579 .type = IPV6_TLV_JUMBO,
52580 .func = ipv6_hop_jumbo,
52581 },
52582 - { -1, }
52583 + { -1, NULL }
52584 };
52585
52586 int ipv6_parse_hopopts(struct sk_buff *skb)
52587 diff -urNp linux-2.6.34.1/net/ipv6/raw.c linux-2.6.34.1/net/ipv6/raw.c
52588 --- linux-2.6.34.1/net/ipv6/raw.c 2010-07-05 14:24:10.000000000 -0400
52589 +++ linux-2.6.34.1/net/ipv6/raw.c 2010-07-07 09:04:58.000000000 -0400
52590 @@ -598,7 +598,7 @@ out:
52591 return err;
52592 }
52593
52594 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
52595 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
52596 struct flowi *fl, struct rt6_info *rt,
52597 unsigned int flags)
52598 {
52599 diff -urNp linux-2.6.34.1/net/ipv6/tcp_ipv6.c linux-2.6.34.1/net/ipv6/tcp_ipv6.c
52600 --- linux-2.6.34.1/net/ipv6/tcp_ipv6.c 2010-07-05 14:24:10.000000000 -0400
52601 +++ linux-2.6.34.1/net/ipv6/tcp_ipv6.c 2010-07-07 09:04:58.000000000 -0400
52602 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
52603 }
52604 #endif
52605
52606 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52607 +extern int grsec_enable_blackhole;
52608 +#endif
52609 +
52610 static void tcp_v6_hash(struct sock *sk)
52611 {
52612 if (sk->sk_state != TCP_CLOSE) {
52613 @@ -1711,12 +1715,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
52614 TCP_SKB_CB(skb)->sacked = 0;
52615
52616 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
52617 - if (!sk)
52618 + if (!sk) {
52619 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52620 + ret = 1;
52621 +#endif
52622 goto no_tcp_socket;
52623 + }
52624
52625 process:
52626 - if (sk->sk_state == TCP_TIME_WAIT)
52627 + if (sk->sk_state == TCP_TIME_WAIT) {
52628 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52629 + ret = 2;
52630 +#endif
52631 goto do_time_wait;
52632 + }
52633
52634 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
52635 goto discard_and_relse;
52636 @@ -1759,6 +1771,10 @@ no_tcp_socket:
52637 bad_packet:
52638 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
52639 } else {
52640 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52641 + if (!grsec_enable_blackhole || (ret == 1 &&
52642 + (skb->dev->flags & IFF_LOOPBACK)))
52643 +#endif
52644 tcp_v6_send_reset(NULL, skb);
52645 }
52646
52647 diff -urNp linux-2.6.34.1/net/ipv6/udp.c linux-2.6.34.1/net/ipv6/udp.c
52648 --- linux-2.6.34.1/net/ipv6/udp.c 2010-07-05 14:24:10.000000000 -0400
52649 +++ linux-2.6.34.1/net/ipv6/udp.c 2010-07-07 09:04:58.000000000 -0400
52650 @@ -50,6 +50,10 @@
52651 #include <linux/seq_file.h>
52652 #include "udp_impl.h"
52653
52654 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52655 +extern int grsec_enable_blackhole;
52656 +#endif
52657 +
52658 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
52659 {
52660 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
52661 @@ -748,6 +752,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
52662 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
52663 proto == IPPROTO_UDPLITE);
52664
52665 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52666 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
52667 +#endif
52668 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
52669
52670 kfree_skb(skb);
52671 diff -urNp linux-2.6.34.1/net/irda/ircomm/ircomm_tty.c linux-2.6.34.1/net/irda/ircomm/ircomm_tty.c
52672 --- linux-2.6.34.1/net/irda/ircomm/ircomm_tty.c 2010-07-05 14:24:10.000000000 -0400
52673 +++ linux-2.6.34.1/net/irda/ircomm/ircomm_tty.c 2010-07-07 09:04:58.000000000 -0400
52674 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
52675 add_wait_queue(&self->open_wait, &wait);
52676
52677 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
52678 - __FILE__,__LINE__, tty->driver->name, self->open_count );
52679 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
52680
52681 /* As far as I can see, we protect open_count - Jean II */
52682 spin_lock_irqsave(&self->spinlock, flags);
52683 if (!tty_hung_up_p(filp)) {
52684 extra_count = 1;
52685 - self->open_count--;
52686 + atomic_dec(&self->open_count);
52687 }
52688 spin_unlock_irqrestore(&self->spinlock, flags);
52689 - self->blocked_open++;
52690 + atomic_inc(&self->blocked_open);
52691
52692 while (1) {
52693 if (tty->termios->c_cflag & CBAUD) {
52694 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
52695 }
52696
52697 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
52698 - __FILE__,__LINE__, tty->driver->name, self->open_count );
52699 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
52700
52701 schedule();
52702 }
52703 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
52704 if (extra_count) {
52705 /* ++ is not atomic, so this should be protected - Jean II */
52706 spin_lock_irqsave(&self->spinlock, flags);
52707 - self->open_count++;
52708 + atomic_inc(&self->open_count);
52709 spin_unlock_irqrestore(&self->spinlock, flags);
52710 }
52711 - self->blocked_open--;
52712 + atomic_dec(&self->blocked_open);
52713
52714 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
52715 - __FILE__,__LINE__, tty->driver->name, self->open_count);
52716 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count));
52717
52718 if (!retval)
52719 self->flags |= ASYNC_NORMAL_ACTIVE;
52720 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
52721 }
52722 /* ++ is not atomic, so this should be protected - Jean II */
52723 spin_lock_irqsave(&self->spinlock, flags);
52724 - self->open_count++;
52725 + atomic_inc(&self->open_count);
52726
52727 tty->driver_data = self;
52728 self->tty = tty;
52729 spin_unlock_irqrestore(&self->spinlock, flags);
52730
52731 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
52732 - self->line, self->open_count);
52733 + self->line, atomic_read(&self->open_count));
52734
52735 /* Not really used by us, but lets do it anyway */
52736 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
52737 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
52738 return;
52739 }
52740
52741 - if ((tty->count == 1) && (self->open_count != 1)) {
52742 + if ((tty->count == 1) && (atomic_read(&self->open_count) != 1)) {
52743 /*
52744 * Uh, oh. tty->count is 1, which means that the tty
52745 * structure will be freed. state->count should always
52746 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
52747 */
52748 IRDA_DEBUG(0, "%s(), bad serial port count; "
52749 "tty->count is 1, state->count is %d\n", __func__ ,
52750 - self->open_count);
52751 - self->open_count = 1;
52752 + atomic_read(&self->open_count));
52753 + atomic_set(&self->open_count, 1);
52754 }
52755
52756 - if (--self->open_count < 0) {
52757 + if (atomic_dec_return(&self->open_count) < 0) {
52758 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
52759 - __func__, self->line, self->open_count);
52760 - self->open_count = 0;
52761 + __func__, self->line, atomic_read(&self->open_count));
52762 + atomic_set(&self->open_count, 0);
52763 }
52764 - if (self->open_count) {
52765 + if (atomic_read(&self->open_count)) {
52766 spin_unlock_irqrestore(&self->spinlock, flags);
52767
52768 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
52769 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
52770 tty->closing = 0;
52771 self->tty = NULL;
52772
52773 - if (self->blocked_open) {
52774 + if (atomic_read(&self->blocked_open)) {
52775 if (self->close_delay)
52776 schedule_timeout_interruptible(self->close_delay);
52777 wake_up_interruptible(&self->open_wait);
52778 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
52779 spin_lock_irqsave(&self->spinlock, flags);
52780 self->flags &= ~ASYNC_NORMAL_ACTIVE;
52781 self->tty = NULL;
52782 - self->open_count = 0;
52783 + atomic_set(&self->open_count, 0);
52784 spin_unlock_irqrestore(&self->spinlock, flags);
52785
52786 wake_up_interruptible(&self->open_wait);
52787 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
52788 seq_putc(m, '\n');
52789
52790 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
52791 - seq_printf(m, "Open count: %d\n", self->open_count);
52792 + seq_printf(m, "Open count: %d\n", atomic_read(&self->open_count));
52793 seq_printf(m, "Max data size: %d\n", self->max_data_size);
52794 seq_printf(m, "Max header size: %d\n", self->max_header_size);
52795
52796 diff -urNp linux-2.6.34.1/net/mac80211/ieee80211_i.h linux-2.6.34.1/net/mac80211/ieee80211_i.h
52797 --- linux-2.6.34.1/net/mac80211/ieee80211_i.h 2010-07-05 14:24:10.000000000 -0400
52798 +++ linux-2.6.34.1/net/mac80211/ieee80211_i.h 2010-07-07 09:04:58.000000000 -0400
52799 @@ -630,7 +630,7 @@ struct ieee80211_local {
52800 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
52801 spinlock_t queue_stop_reason_lock;
52802
52803 - int open_count;
52804 + atomic_t open_count;
52805 int monitors, cooked_mntrs;
52806 /* number of interfaces with corresponding FIF_ flags */
52807 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
52808 diff -urNp linux-2.6.34.1/net/mac80211/iface.c linux-2.6.34.1/net/mac80211/iface.c
52809 --- linux-2.6.34.1/net/mac80211/iface.c 2010-07-05 14:24:10.000000000 -0400
52810 +++ linux-2.6.34.1/net/mac80211/iface.c 2010-07-07 09:04:58.000000000 -0400
52811 @@ -183,7 +183,7 @@ static int ieee80211_open(struct net_dev
52812 break;
52813 }
52814
52815 - if (local->open_count == 0) {
52816 + if (atomic_read(&local->open_count) == 0) {
52817 res = drv_start(local);
52818 if (res)
52819 goto err_del_bss;
52820 @@ -215,7 +215,7 @@ static int ieee80211_open(struct net_dev
52821 * Validate the MAC address for this device.
52822 */
52823 if (!is_valid_ether_addr(dev->dev_addr)) {
52824 - if (!local->open_count)
52825 + if (!atomic_read(&local->open_count))
52826 drv_stop(local);
52827 return -EADDRNOTAVAIL;
52828 }
52829 @@ -308,7 +308,7 @@ static int ieee80211_open(struct net_dev
52830
52831 hw_reconf_flags |= __ieee80211_recalc_idle(local);
52832
52833 - local->open_count++;
52834 + atomic_inc(&local->open_count);
52835 if (hw_reconf_flags) {
52836 ieee80211_hw_config(local, hw_reconf_flags);
52837 /*
52838 @@ -336,7 +336,7 @@ static int ieee80211_open(struct net_dev
52839 err_del_interface:
52840 drv_remove_interface(local, &sdata->vif);
52841 err_stop:
52842 - if (!local->open_count)
52843 + if (!atomic_read(&local->open_count))
52844 drv_stop(local);
52845 err_del_bss:
52846 sdata->bss = NULL;
52847 @@ -440,7 +440,7 @@ static int ieee80211_stop(struct net_dev
52848 WARN_ON(!list_empty(&sdata->u.ap.vlans));
52849 }
52850
52851 - local->open_count--;
52852 + atomic_dec(&local->open_count);
52853
52854 switch (sdata->vif.type) {
52855 case NL80211_IFTYPE_AP_VLAN:
52856 @@ -543,7 +543,7 @@ static int ieee80211_stop(struct net_dev
52857
52858 ieee80211_recalc_ps(local, -1);
52859
52860 - if (local->open_count == 0) {
52861 + if (atomic_read(&local->open_count) == 0) {
52862 ieee80211_clear_tx_pending(local);
52863 ieee80211_stop_device(local);
52864
52865 diff -urNp linux-2.6.34.1/net/mac80211/main.c linux-2.6.34.1/net/mac80211/main.c
52866 --- linux-2.6.34.1/net/mac80211/main.c 2010-07-05 14:24:10.000000000 -0400
52867 +++ linux-2.6.34.1/net/mac80211/main.c 2010-07-07 09:04:58.000000000 -0400
52868 @@ -148,7 +148,7 @@ int ieee80211_hw_config(struct ieee80211
52869 local->hw.conf.power_level = power;
52870 }
52871
52872 - if (changed && local->open_count) {
52873 + if (changed && atomic_read(&local->open_count)) {
52874 ret = drv_config(local, changed);
52875 /*
52876 * Goal:
52877 diff -urNp linux-2.6.34.1/net/mac80211/pm.c linux-2.6.34.1/net/mac80211/pm.c
52878 --- linux-2.6.34.1/net/mac80211/pm.c 2010-07-05 14:24:10.000000000 -0400
52879 +++ linux-2.6.34.1/net/mac80211/pm.c 2010-07-07 09:04:58.000000000 -0400
52880 @@ -101,7 +101,7 @@ int __ieee80211_suspend(struct ieee80211
52881 }
52882
52883 /* stop hardware - this must stop RX */
52884 - if (local->open_count)
52885 + if (atomic_read(&local->open_count))
52886 ieee80211_stop_device(local);
52887
52888 local->suspended = true;
52889 diff -urNp linux-2.6.34.1/net/mac80211/rate.c linux-2.6.34.1/net/mac80211/rate.c
52890 --- linux-2.6.34.1/net/mac80211/rate.c 2010-07-05 14:24:10.000000000 -0400
52891 +++ linux-2.6.34.1/net/mac80211/rate.c 2010-07-07 09:04:58.000000000 -0400
52892 @@ -355,7 +355,7 @@ int ieee80211_init_rate_ctrl_alg(struct
52893
52894 ASSERT_RTNL();
52895
52896 - if (local->open_count)
52897 + if (atomic_read(&local->open_count))
52898 return -EBUSY;
52899
52900 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
52901 diff -urNp linux-2.6.34.1/net/mac80211/rc80211_pid_debugfs.c linux-2.6.34.1/net/mac80211/rc80211_pid_debugfs.c
52902 --- linux-2.6.34.1/net/mac80211/rc80211_pid_debugfs.c 2010-07-05 14:24:10.000000000 -0400
52903 +++ linux-2.6.34.1/net/mac80211/rc80211_pid_debugfs.c 2010-07-07 09:04:58.000000000 -0400
52904 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
52905
52906 spin_unlock_irqrestore(&events->lock, status);
52907
52908 - if (copy_to_user(buf, pb, p))
52909 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
52910 return -EFAULT;
52911
52912 return p;
52913 diff -urNp linux-2.6.34.1/net/mac80211/tx.c linux-2.6.34.1/net/mac80211/tx.c
52914 --- linux-2.6.34.1/net/mac80211/tx.c 2010-07-05 14:24:10.000000000 -0400
52915 +++ linux-2.6.34.1/net/mac80211/tx.c 2010-07-07 09:04:58.000000000 -0400
52916 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
52917 return cpu_to_le16(dur);
52918 }
52919
52920 -static int inline is_ieee80211_device(struct ieee80211_local *local,
52921 +static inline int is_ieee80211_device(struct ieee80211_local *local,
52922 struct net_device *dev)
52923 {
52924 return local == wdev_priv(dev->ieee80211_ptr);
52925 diff -urNp linux-2.6.34.1/net/mac80211/util.c linux-2.6.34.1/net/mac80211/util.c
52926 --- linux-2.6.34.1/net/mac80211/util.c 2010-07-05 14:24:10.000000000 -0400
52927 +++ linux-2.6.34.1/net/mac80211/util.c 2010-07-07 09:04:58.000000000 -0400
52928 @@ -1088,7 +1088,7 @@ int ieee80211_reconfig(struct ieee80211_
52929 local->resuming = true;
52930
52931 /* restart hardware */
52932 - if (local->open_count) {
52933 + if (atomic_read(&local->open_count)) {
52934 /*
52935 * Upon resume hardware can sometimes be goofy due to
52936 * various platform / driver / bus issues, so restarting
52937 diff -urNp linux-2.6.34.1/net/packet/af_packet.c linux-2.6.34.1/net/packet/af_packet.c
52938 --- linux-2.6.34.1/net/packet/af_packet.c 2010-07-05 14:24:10.000000000 -0400
52939 +++ linux-2.6.34.1/net/packet/af_packet.c 2010-07-07 09:04:58.000000000 -0400
52940 @@ -2034,7 +2034,7 @@ static int packet_getsockopt(struct sock
52941 case PACKET_HDRLEN:
52942 if (len > sizeof(int))
52943 len = sizeof(int);
52944 - if (copy_from_user(&val, optval, len))
52945 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
52946 return -EFAULT;
52947 switch (val) {
52948 case TPACKET_V1:
52949 @@ -2066,7 +2066,7 @@ static int packet_getsockopt(struct sock
52950
52951 if (put_user(len, optlen))
52952 return -EFAULT;
52953 - if (copy_to_user(optval, data, len))
52954 + if (len > sizeof(st) || copy_to_user(optval, data, len))
52955 return -EFAULT;
52956 return 0;
52957 }
52958 diff -urNp linux-2.6.34.1/net/sctp/socket.c linux-2.6.34.1/net/sctp/socket.c
52959 --- linux-2.6.34.1/net/sctp/socket.c 2010-07-05 14:24:10.000000000 -0400
52960 +++ linux-2.6.34.1/net/sctp/socket.c 2010-07-07 09:04:58.000000000 -0400
52961 @@ -1483,7 +1483,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc
52962 struct sctp_sndrcvinfo *sinfo;
52963 struct sctp_initmsg *sinit;
52964 sctp_assoc_t associd = 0;
52965 - sctp_cmsgs_t cmsgs = { NULL };
52966 + sctp_cmsgs_t cmsgs = { NULL, NULL };
52967 int err;
52968 sctp_scope_t scope;
52969 long timeo;
52970 @@ -4390,7 +4390,7 @@ static int sctp_getsockopt_peer_addrs(st
52971 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
52972 if (space_left < addrlen)
52973 return -ENOMEM;
52974 - if (copy_to_user(to, &temp, addrlen))
52975 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
52976 return -EFAULT;
52977 to += addrlen;
52978 cnt++;
52979 @@ -5482,7 +5482,6 @@ pp_found:
52980 */
52981 int reuse = sk->sk_reuse;
52982 struct sock *sk2;
52983 - struct hlist_node *node;
52984
52985 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
52986 if (pp->fastreuse && sk->sk_reuse &&
52987 diff -urNp linux-2.6.34.1/net/socket.c linux-2.6.34.1/net/socket.c
52988 --- linux-2.6.34.1/net/socket.c 2010-07-05 14:24:10.000000000 -0400
52989 +++ linux-2.6.34.1/net/socket.c 2010-07-07 09:04:58.000000000 -0400
52990 @@ -88,6 +88,7 @@
52991 #include <linux/nsproxy.h>
52992 #include <linux/magic.h>
52993 #include <linux/slab.h>
52994 +#include <linux/in.h>
52995
52996 #include <asm/uaccess.h>
52997 #include <asm/unistd.h>
52998 @@ -104,6 +105,8 @@
52999 #include <linux/sockios.h>
53000 #include <linux/atalk.h>
53001
53002 +#include <linux/grsock.h>
53003 +
53004 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
53005 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
53006 unsigned long nr_segs, loff_t pos);
53007 @@ -305,7 +308,7 @@ static int sockfs_get_sb(struct file_sys
53008 mnt);
53009 }
53010
53011 -static struct vfsmount *sock_mnt __read_mostly;
53012 +struct vfsmount *sock_mnt __read_mostly;
53013
53014 static struct file_system_type sock_fs_type = {
53015 .name = "sockfs",
53016 @@ -1311,6 +1314,16 @@ SYSCALL_DEFINE3(socket, int, family, int
53017 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
53018 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
53019
53020 + if(!gr_search_socket(family, type, protocol)) {
53021 + retval = -EACCES;
53022 + goto out;
53023 + }
53024 +
53025 + if (gr_handle_sock_all(family, type, protocol)) {
53026 + retval = -EACCES;
53027 + goto out;
53028 + }
53029 +
53030 retval = sock_create(family, type, protocol, &sock);
53031 if (retval < 0)
53032 goto out;
53033 @@ -1423,6 +1436,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
53034 if (sock) {
53035 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
53036 if (err >= 0) {
53037 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
53038 + err = -EACCES;
53039 + goto error;
53040 + }
53041 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
53042 + if (err)
53043 + goto error;
53044 +
53045 err = security_socket_bind(sock,
53046 (struct sockaddr *)&address,
53047 addrlen);
53048 @@ -1431,6 +1452,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
53049 (struct sockaddr *)
53050 &address, addrlen);
53051 }
53052 +error:
53053 fput_light(sock->file, fput_needed);
53054 }
53055 return err;
53056 @@ -1454,10 +1476,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
53057 if ((unsigned)backlog > somaxconn)
53058 backlog = somaxconn;
53059
53060 + if (gr_handle_sock_server_other(sock->sk)) {
53061 + err = -EPERM;
53062 + goto error;
53063 + }
53064 +
53065 + err = gr_search_listen(sock);
53066 + if (err)
53067 + goto error;
53068 +
53069 err = security_socket_listen(sock, backlog);
53070 if (!err)
53071 err = sock->ops->listen(sock, backlog);
53072
53073 +error:
53074 fput_light(sock->file, fput_needed);
53075 }
53076 return err;
53077 @@ -1500,6 +1532,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
53078 newsock->type = sock->type;
53079 newsock->ops = sock->ops;
53080
53081 + if (gr_handle_sock_server_other(sock->sk)) {
53082 + err = -EPERM;
53083 + sock_release(newsock);
53084 + goto out_put;
53085 + }
53086 +
53087 + err = gr_search_accept(sock);
53088 + if (err) {
53089 + sock_release(newsock);
53090 + goto out_put;
53091 + }
53092 +
53093 /*
53094 * We don't need try_module_get here, as the listening socket (sock)
53095 * has the protocol module (sock->ops->owner) held.
53096 @@ -1538,6 +1582,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
53097 fd_install(newfd, newfile);
53098 err = newfd;
53099
53100 + gr_attach_curr_ip(newsock->sk);
53101 +
53102 out_put:
53103 fput_light(sock->file, fput_needed);
53104 out:
53105 @@ -1570,6 +1616,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
53106 int, addrlen)
53107 {
53108 struct socket *sock;
53109 + struct sockaddr *sck;
53110 struct sockaddr_storage address;
53111 int err, fput_needed;
53112
53113 @@ -1580,6 +1627,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
53114 if (err < 0)
53115 goto out_put;
53116
53117 + sck = (struct sockaddr *)&address;
53118 +
53119 + if (gr_handle_sock_client(sck)) {
53120 + err = -EACCES;
53121 + goto out_put;
53122 + }
53123 +
53124 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
53125 + if (err)
53126 + goto out_put;
53127 +
53128 err =
53129 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
53130 if (err)
53131 diff -urNp linux-2.6.34.1/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.34.1/net/sunrpc/xprtrdma/svc_rdma.c
53132 --- linux-2.6.34.1/net/sunrpc/xprtrdma/svc_rdma.c 2010-07-05 14:24:10.000000000 -0400
53133 +++ linux-2.6.34.1/net/sunrpc/xprtrdma/svc_rdma.c 2010-07-07 09:04:58.000000000 -0400
53134 @@ -106,7 +106,7 @@ static int read_reset_stat(ctl_table *ta
53135 len -= *ppos;
53136 if (len > *lenp)
53137 len = *lenp;
53138 - if (len && copy_to_user(buffer, str_buf, len))
53139 + if (len > sizeof(str_buf) || (len && copy_to_user(buffer, str_buf, len)))
53140 return -EFAULT;
53141 *lenp = len;
53142 *ppos += len;
53143 diff -urNp linux-2.6.34.1/net/sysctl_net.c linux-2.6.34.1/net/sysctl_net.c
53144 --- linux-2.6.34.1/net/sysctl_net.c 2010-07-05 14:24:10.000000000 -0400
53145 +++ linux-2.6.34.1/net/sysctl_net.c 2010-07-07 09:04:58.000000000 -0400
53146 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
53147 struct ctl_table *table)
53148 {
53149 /* Allow network administrator to have same access as root. */
53150 - if (capable(CAP_NET_ADMIN)) {
53151 + if (capable_nolog(CAP_NET_ADMIN)) {
53152 int mode = (table->mode >> 6) & 7;
53153 return (mode << 6) | (mode << 3) | mode;
53154 }
53155 diff -urNp linux-2.6.34.1/net/tipc/socket.c linux-2.6.34.1/net/tipc/socket.c
53156 --- linux-2.6.34.1/net/tipc/socket.c 2010-07-05 14:24:10.000000000 -0400
53157 +++ linux-2.6.34.1/net/tipc/socket.c 2010-07-07 09:04:58.000000000 -0400
53158 @@ -1451,8 +1451,9 @@ static int connect(struct socket *sock,
53159 } else {
53160 if (res == 0)
53161 res = -ETIMEDOUT;
53162 - else
53163 - ; /* leave "res" unchanged */
53164 + else {
53165 + /* leave "res" unchanged */
53166 + }
53167 sock->state = SS_DISCONNECTING;
53168 }
53169
53170 diff -urNp linux-2.6.34.1/net/unix/af_unix.c linux-2.6.34.1/net/unix/af_unix.c
53171 --- linux-2.6.34.1/net/unix/af_unix.c 2010-07-05 14:24:10.000000000 -0400
53172 +++ linux-2.6.34.1/net/unix/af_unix.c 2010-07-07 09:04:59.000000000 -0400
53173 @@ -735,6 +735,12 @@ static struct sock *unix_find_other(stru
53174 err = -ECONNREFUSED;
53175 if (!S_ISSOCK(inode->i_mode))
53176 goto put_fail;
53177 +
53178 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
53179 + err = -EACCES;
53180 + goto put_fail;
53181 + }
53182 +
53183 u = unix_find_socket_byinode(net, inode);
53184 if (!u)
53185 goto put_fail;
53186 @@ -755,6 +761,13 @@ static struct sock *unix_find_other(stru
53187 if (u) {
53188 struct dentry *dentry;
53189 dentry = unix_sk(u)->dentry;
53190 +
53191 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
53192 + err = -EPERM;
53193 + sock_put(u);
53194 + goto fail;
53195 + }
53196 +
53197 if (dentry)
53198 touch_atime(unix_sk(u)->mnt, dentry);
53199 } else
53200 @@ -840,11 +853,18 @@ static int unix_bind(struct socket *sock
53201 err = security_path_mknod(&nd.path, dentry, mode, 0);
53202 if (err)
53203 goto out_mknod_drop_write;
53204 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
53205 + err = -EACCES;
53206 + goto out_mknod_drop_write;
53207 + }
53208 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
53209 out_mknod_drop_write:
53210 mnt_drop_write(nd.path.mnt);
53211 if (err)
53212 goto out_mknod_dput;
53213 +
53214 + gr_handle_create(dentry, nd.path.mnt);
53215 +
53216 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
53217 dput(nd.path.dentry);
53218 nd.path.dentry = dentry;
53219 @@ -862,6 +882,10 @@ out_mknod_drop_write:
53220 goto out_unlock;
53221 }
53222
53223 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53224 + sk->sk_peercred.pid = current->pid;
53225 +#endif
53226 +
53227 list = &unix_socket_table[addr->hash];
53228 } else {
53229 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
53230 diff -urNp linux-2.6.34.1/net/wireless/wext-core.c linux-2.6.34.1/net/wireless/wext-core.c
53231 --- linux-2.6.34.1/net/wireless/wext-core.c 2010-07-05 14:24:10.000000000 -0400
53232 +++ linux-2.6.34.1/net/wireless/wext-core.c 2010-07-07 09:04:59.000000000 -0400
53233 @@ -744,8 +744,7 @@ static int ioctl_standard_iw_point(struc
53234 */
53235
53236 /* Support for very large requests */
53237 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
53238 - (user_length > descr->max_tokens)) {
53239 + if (user_length > descr->max_tokens) {
53240 /* Allow userspace to GET more than max so
53241 * we can support any size GET requests.
53242 * There is still a limit : -ENOMEM.
53243 diff -urNp linux-2.6.34.1/net/xfrm/xfrm_policy.c linux-2.6.34.1/net/xfrm/xfrm_policy.c
53244 --- linux-2.6.34.1/net/xfrm/xfrm_policy.c 2010-07-05 14:24:10.000000000 -0400
53245 +++ linux-2.6.34.1/net/xfrm/xfrm_policy.c 2010-07-07 09:04:59.000000000 -0400
53246 @@ -1510,7 +1510,7 @@ free_dst:
53247 goto out;
53248 }
53249
53250 -static int inline
53251 +static inline int
53252 xfrm_dst_alloc_copy(void **target, void *src, int size)
53253 {
53254 if (!*target) {
53255 @@ -1522,7 +1522,7 @@ xfrm_dst_alloc_copy(void **target, void
53256 return 0;
53257 }
53258
53259 -static int inline
53260 +static inline int
53261 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
53262 {
53263 #ifdef CONFIG_XFRM_SUB_POLICY
53264 @@ -1534,7 +1534,7 @@ xfrm_dst_update_parent(struct dst_entry
53265 #endif
53266 }
53267
53268 -static int inline
53269 +static inline int
53270 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
53271 {
53272 #ifdef CONFIG_XFRM_SUB_POLICY
53273 diff -urNp linux-2.6.34.1/scripts/basic/fixdep.c linux-2.6.34.1/scripts/basic/fixdep.c
53274 --- linux-2.6.34.1/scripts/basic/fixdep.c 2010-07-05 14:24:10.000000000 -0400
53275 +++ linux-2.6.34.1/scripts/basic/fixdep.c 2010-07-07 09:04:59.000000000 -0400
53276 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
53277
53278 static void parse_config_file(char *map, size_t len)
53279 {
53280 - int *end = (int *) (map + len);
53281 + unsigned int *end = (unsigned int *) (map + len);
53282 /* start at +1, so that p can never be < map */
53283 - int *m = (int *) map + 1;
53284 + unsigned int *m = (unsigned int *) map + 1;
53285 char *p, *q;
53286
53287 for (; m < end; m++) {
53288 @@ -371,7 +371,7 @@ static void print_deps(void)
53289 static void traps(void)
53290 {
53291 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
53292 - int *p = (int *)test;
53293 + unsigned int *p = (unsigned int *)test;
53294
53295 if (*p != INT_CONF) {
53296 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
53297 diff -urNp linux-2.6.34.1/scripts/kallsyms.c linux-2.6.34.1/scripts/kallsyms.c
53298 --- linux-2.6.34.1/scripts/kallsyms.c 2010-07-05 14:24:10.000000000 -0400
53299 +++ linux-2.6.34.1/scripts/kallsyms.c 2010-07-07 09:04:59.000000000 -0400
53300 @@ -43,10 +43,10 @@ struct text_range {
53301
53302 static unsigned long long _text;
53303 static struct text_range text_ranges[] = {
53304 - { "_stext", "_etext" },
53305 - { "_sinittext", "_einittext" },
53306 - { "_stext_l1", "_etext_l1" }, /* Blackfin on-chip L1 inst SRAM */
53307 - { "_stext_l2", "_etext_l2" }, /* Blackfin on-chip L2 SRAM */
53308 + { "_stext", "_etext", 0, 0 },
53309 + { "_sinittext", "_einittext", 0, 0 },
53310 + { "_stext_l1", "_etext_l1", 0, 0 }, /* Blackfin on-chip L1 inst SRAM */
53311 + { "_stext_l2", "_etext_l2", 0, 0 }, /* Blackfin on-chip L2 SRAM */
53312 };
53313 #define text_range_text (&text_ranges[0])
53314 #define text_range_inittext (&text_ranges[1])
53315 diff -urNp linux-2.6.34.1/scripts/mod/file2alias.c linux-2.6.34.1/scripts/mod/file2alias.c
53316 --- linux-2.6.34.1/scripts/mod/file2alias.c 2010-07-05 14:24:10.000000000 -0400
53317 +++ linux-2.6.34.1/scripts/mod/file2alias.c 2010-07-07 09:04:59.000000000 -0400
53318 @@ -72,7 +72,7 @@ static void device_id_check(const char *
53319 unsigned long size, unsigned long id_size,
53320 void *symval)
53321 {
53322 - int i;
53323 + unsigned int i;
53324
53325 if (size % id_size || size < id_size) {
53326 if (cross_build != 0)
53327 @@ -102,7 +102,7 @@ static void device_id_check(const char *
53328 /* USB is special because the bcdDevice can be matched against a numeric range */
53329 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
53330 static void do_usb_entry(struct usb_device_id *id,
53331 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
53332 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
53333 unsigned char range_lo, unsigned char range_hi,
53334 unsigned char max, struct module *mod)
53335 {
53336 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
53337 for (i = 0; i < count; i++) {
53338 const char *id = (char *)devs[i].id;
53339 char acpi_id[sizeof(devs[0].id)];
53340 - int j;
53341 + unsigned int j;
53342
53343 buf_printf(&mod->dev_table_buf,
53344 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
53345 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
53346
53347 for (j = 0; j < PNP_MAX_DEVICES; j++) {
53348 const char *id = (char *)card->devs[j].id;
53349 - int i2, j2;
53350 + unsigned int i2, j2;
53351 int dup = 0;
53352
53353 if (!id[0])
53354 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
53355 /* add an individual alias for every device entry */
53356 if (!dup) {
53357 char acpi_id[sizeof(card->devs[0].id)];
53358 - int k;
53359 + unsigned int k;
53360
53361 buf_printf(&mod->dev_table_buf,
53362 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
53363 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
53364 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
53365 char *alias)
53366 {
53367 - int i, j;
53368 + unsigned int i, j;
53369
53370 sprintf(alias, "dmi*");
53371
53372 diff -urNp linux-2.6.34.1/scripts/mod/modpost.c linux-2.6.34.1/scripts/mod/modpost.c
53373 --- linux-2.6.34.1/scripts/mod/modpost.c 2010-07-05 14:24:10.000000000 -0400
53374 +++ linux-2.6.34.1/scripts/mod/modpost.c 2010-07-07 09:04:59.000000000 -0400
53375 @@ -842,6 +842,7 @@ enum mismatch {
53376 INIT_TO_EXIT,
53377 EXIT_TO_INIT,
53378 EXPORT_TO_INIT_EXIT,
53379 + DATA_TO_TEXT
53380 };
53381
53382 struct sectioncheck {
53383 @@ -927,6 +928,12 @@ const struct sectioncheck sectioncheck[]
53384 .fromsec = { "__ksymtab*", NULL },
53385 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
53386 .mismatch = EXPORT_TO_INIT_EXIT
53387 +},
53388 +/* Do not reference code from writable data */
53389 +{
53390 + .fromsec = { DATA_SECTIONS, NULL },
53391 + .tosec = { TEXT_SECTIONS, NULL },
53392 + .mismatch = DATA_TO_TEXT
53393 }
53394 };
53395
53396 @@ -1031,10 +1038,10 @@ static Elf_Sym *find_elf_symbol(struct e
53397 continue;
53398 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
53399 continue;
53400 - if (sym->st_value == addr)
53401 - return sym;
53402 /* Find a symbol nearby - addr are maybe negative */
53403 d = sym->st_value - addr;
53404 + if (d == 0)
53405 + return sym;
53406 if (d < 0)
53407 d = addr - sym->st_value;
53408 if (d < distance) {
53409 @@ -1275,6 +1282,14 @@ static void report_sec_mismatch(const ch
53410 "Fix this by removing the %sannotation of %s "
53411 "or drop the export.\n",
53412 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
53413 + case DATA_TO_TEXT:
53414 +/*
53415 + fprintf(stderr,
53416 + "The variable %s references\n"
53417 + "the %s %s%s%s\n",
53418 + fromsym, to, sec2annotation(tosec), tosym, to_p);
53419 +*/
53420 + break;
53421 case NO_MISMATCH:
53422 /* To get warnings on missing members */
53423 break;
53424 @@ -1600,7 +1615,7 @@ void __attribute__((format(printf, 2, 3)
53425 va_end(ap);
53426 }
53427
53428 -void buf_write(struct buffer *buf, const char *s, int len)
53429 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
53430 {
53431 if (buf->size - buf->pos < len) {
53432 buf->size += len + SZ;
53433 @@ -1812,7 +1827,7 @@ static void write_if_changed(struct buff
53434 if (fstat(fileno(file), &st) < 0)
53435 goto close_write;
53436
53437 - if (st.st_size != b->pos)
53438 + if (st.st_size != (off_t)b->pos)
53439 goto close_write;
53440
53441 tmp = NOFAIL(malloc(b->pos));
53442 diff -urNp linux-2.6.34.1/scripts/mod/modpost.h linux-2.6.34.1/scripts/mod/modpost.h
53443 --- linux-2.6.34.1/scripts/mod/modpost.h 2010-07-05 14:24:10.000000000 -0400
53444 +++ linux-2.6.34.1/scripts/mod/modpost.h 2010-07-07 09:04:59.000000000 -0400
53445 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
53446
53447 struct buffer {
53448 char *p;
53449 - int pos;
53450 - int size;
53451 + unsigned int pos;
53452 + unsigned int size;
53453 };
53454
53455 void __attribute__((format(printf, 2, 3)))
53456 buf_printf(struct buffer *buf, const char *fmt, ...);
53457
53458 void
53459 -buf_write(struct buffer *buf, const char *s, int len);
53460 +buf_write(struct buffer *buf, const char *s, unsigned int len);
53461
53462 struct module {
53463 struct module *next;
53464 diff -urNp linux-2.6.34.1/scripts/mod/sumversion.c linux-2.6.34.1/scripts/mod/sumversion.c
53465 --- linux-2.6.34.1/scripts/mod/sumversion.c 2010-07-05 14:24:10.000000000 -0400
53466 +++ linux-2.6.34.1/scripts/mod/sumversion.c 2010-07-07 09:04:59.000000000 -0400
53467 @@ -455,7 +455,7 @@ static void write_version(const char *fi
53468 goto out;
53469 }
53470
53471 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
53472 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
53473 warn("writing sum in %s failed: %s\n",
53474 filename, strerror(errno));
53475 goto out;
53476 diff -urNp linux-2.6.34.1/scripts/pnmtologo.c linux-2.6.34.1/scripts/pnmtologo.c
53477 --- linux-2.6.34.1/scripts/pnmtologo.c 2010-07-05 14:24:10.000000000 -0400
53478 +++ linux-2.6.34.1/scripts/pnmtologo.c 2010-07-07 09:04:59.000000000 -0400
53479 @@ -237,14 +237,14 @@ static void write_header(void)
53480 fprintf(out, " * Linux logo %s\n", logoname);
53481 fputs(" */\n\n", out);
53482 fputs("#include <linux/linux_logo.h>\n\n", out);
53483 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
53484 + fprintf(out, "static unsigned char %s_data[] = {\n",
53485 logoname);
53486 }
53487
53488 static void write_footer(void)
53489 {
53490 fputs("\n};\n\n", out);
53491 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
53492 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
53493 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
53494 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
53495 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
53496 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
53497 fputs("\n};\n\n", out);
53498
53499 /* write logo clut */
53500 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
53501 + fprintf(out, "static unsigned char %s_clut[] = {\n",
53502 logoname);
53503 write_hex_cnt = 0;
53504 for (i = 0; i < logo_clutsize; i++) {
53505 diff -urNp linux-2.6.34.1/security/Kconfig linux-2.6.34.1/security/Kconfig
53506 --- linux-2.6.34.1/security/Kconfig 2010-07-05 14:24:10.000000000 -0400
53507 +++ linux-2.6.34.1/security/Kconfig 2010-07-07 09:04:59.000000000 -0400
53508 @@ -4,6 +4,499 @@
53509
53510 menu "Security options"
53511
53512 +source grsecurity/Kconfig
53513 +
53514 +menu "PaX"
53515 +
53516 + config PAX_PER_CPU_PGD
53517 + bool
53518 +
53519 + config TASK_SIZE_MAX_SHIFT
53520 + int
53521 + depends on X86_64
53522 + default 47 if !PAX_PER_CPU_PGD
53523 + default 42 if PAX_PER_CPU_PGD
53524 +
53525 +config PAX
53526 + bool "Enable various PaX features"
53527 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
53528 + help
53529 + This allows you to enable various PaX features. PaX adds
53530 + intrusion prevention mechanisms to the kernel that reduce
53531 + the risks posed by exploitable memory corruption bugs.
53532 +
53533 +menu "PaX Control"
53534 + depends on PAX
53535 +
53536 +config PAX_SOFTMODE
53537 + bool 'Support soft mode'
53538 + select PAX_PT_PAX_FLAGS
53539 + help
53540 + Enabling this option will allow you to run PaX in soft mode, that
53541 + is, PaX features will not be enforced by default, only on executables
53542 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
53543 + is the only way to mark executables for soft mode use.
53544 +
53545 + Soft mode can be activated by using the "pax_softmode=1" kernel command
53546 + line option on boot. Furthermore you can control various PaX features
53547 + at runtime via the entries in /proc/sys/kernel/pax.
53548 +
53549 +config PAX_EI_PAX
53550 + bool 'Use legacy ELF header marking'
53551 + help
53552 + Enabling this option will allow you to control PaX features on
53553 + a per executable basis via the 'chpax' utility available at
53554 + http://pax.grsecurity.net/. The control flags will be read from
53555 + an otherwise reserved part of the ELF header. This marking has
53556 + numerous drawbacks (no support for soft-mode, toolchain does not
53557 + know about the non-standard use of the ELF header) therefore it
53558 + has been deprecated in favour of PT_PAX_FLAGS support.
53559 +
53560 + If you have applications not marked by the PT_PAX_FLAGS ELF
53561 + program header then you MUST enable this option otherwise they
53562 + will not get any protection.
53563 +
53564 + Note that if you enable PT_PAX_FLAGS marking support as well,
53565 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
53566 +
53567 +config PAX_PT_PAX_FLAGS
53568 + bool 'Use ELF program header marking'
53569 + help
53570 + Enabling this option will allow you to control PaX features on
53571 + a per executable basis via the 'paxctl' utility available at
53572 + http://pax.grsecurity.net/. The control flags will be read from
53573 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
53574 + has the benefits of supporting both soft mode and being fully
53575 + integrated into the toolchain (the binutils patch is available
53576 + from http://pax.grsecurity.net).
53577 +
53578 + If you have applications not marked by the PT_PAX_FLAGS ELF
53579 + program header then you MUST enable the EI_PAX marking support
53580 + otherwise they will not get any protection.
53581 +
53582 + Note that if you enable the legacy EI_PAX marking support as well,
53583 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
53584 +
53585 +choice
53586 + prompt 'MAC system integration'
53587 + default PAX_HAVE_ACL_FLAGS
53588 + help
53589 + Mandatory Access Control systems have the option of controlling
53590 + PaX flags on a per executable basis, choose the method supported
53591 + by your particular system.
53592 +
53593 + - "none": if your MAC system does not interact with PaX,
53594 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
53595 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
53596 +
53597 + NOTE: this option is for developers/integrators only.
53598 +
53599 + config PAX_NO_ACL_FLAGS
53600 + bool 'none'
53601 +
53602 + config PAX_HAVE_ACL_FLAGS
53603 + bool 'direct'
53604 +
53605 + config PAX_HOOK_ACL_FLAGS
53606 + bool 'hook'
53607 +endchoice
53608 +
53609 +endmenu
53610 +
53611 +menu "Non-executable pages"
53612 + depends on PAX
53613 +
53614 +config PAX_NOEXEC
53615 + bool "Enforce non-executable pages"
53616 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
53617 + help
53618 + By design some architectures do not allow for protecting memory
53619 + pages against execution or even if they do, Linux does not make
53620 + use of this feature. In practice this means that if a page is
53621 + readable (such as the stack or heap) it is also executable.
53622 +
53623 + There is a well known exploit technique that makes use of this
53624 + fact and a common programming mistake where an attacker can
53625 + introduce code of his choice somewhere in the attacked program's
53626 + memory (typically the stack or the heap) and then execute it.
53627 +
53628 + If the attacked program was running with different (typically
53629 + higher) privileges than those of the attacker, then he can elevate
53630 + his own privilege level (e.g. get a root shell, write to files to
53631 + which he does not have write access, etc).
53632 +
53633 + Enabling this option will let you choose from various features
53634 + that prevent the injection and execution of 'foreign' code in
53635 + a program.
53636 +
53637 + This will also break programs that rely on the old behaviour and
53638 + expect that memory dynamically allocated via the malloc() family
53639 + of functions is executable (which it is not). Notable examples
53640 + are the XFree86 4.x server, the Java runtime and Wine.
53641 +
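
To make the "readable implies executable" problem concrete, here is a minimal userland sketch that tries to run a single "ret" instruction out of a malloc()'d buffer; the 0xc3 opcode is an x86-specific assumption. On a kernel without non-executable page enforcement it prints its message, while under PAGEEXEC/SEGMEXEC (or hardware NX enforcement of the heap) the indirect call is expected to be killed with SIGSEGV.

/* Illustration only: attempt to execute code placed on the heap.
 * The 0xc3 byte is the x86 "ret" opcode (an assumption of this sketch);
 * other architectures need different machine code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    static const unsigned char ret_insn[] = { 0xc3 };
    unsigned char *heap = malloc(sizeof ret_insn);
    if (!heap)
        return 1;
    memcpy(heap, ret_insn, sizeof ret_insn);

    /* casting a data pointer to a function pointer is not portable C,
     * but it is the whole point of this demonstration */
    void (*fn)(void) = (void (*)(void))heap;
    fn();               /* faults here when the heap is non-executable */

    puts("the heap was executable");
    free(heap);
    return 0;
}
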
53642 +config PAX_PAGEEXEC
53643 + bool "Paging based non-executable pages"
53644 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
53645 + select S390_SWITCH_AMODE if S390
53646 + select S390_EXEC_PROTECT if S390
53647 + help
53648 + This implementation is based on the paging feature of the CPU.
53649 + On i386 without hardware non-executable bit support there is a
53650 + variable but usually low performance impact; on Intel's P4 core
53651 + based CPUs, however, it is very high, so you should not enable
53652 + this for kernels meant to be used on such CPUs.
53653 +
53654 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
53655 + with hardware non-executable bit support there is no performance
53656 + impact, on ppc the impact is negligible.
53657 +
53658 + Note that several architectures require various emulations due to
53659 + badly designed userland ABIs; this causes a performance impact
53660 + that will disappear as soon as userland is fixed. For example, ppc
53661 + userland MUST have been built with secure-plt by a recent toolchain.
53662 +
53663 +config PAX_SEGMEXEC
53664 + bool "Segmentation based non-executable pages"
53665 + depends on PAX_NOEXEC && X86_32
53666 + help
53667 + This implementation is based on the segmentation feature of the
53668 + CPU and has a very small performance impact; however, applications
53669 + will be limited to a 1.5 GB address space instead of the normal
53670 + 3 GB.
53671 +
53672 +config PAX_EMUTRAMP
53673 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
53674 + default y if PARISC
53675 + help
53676 + There are some programs and libraries that for one reason or
53677 + another attempt to execute special small code snippets from
53678 + non-executable memory pages. Most notable examples are the
53679 + signal handler return code generated by the kernel itself and
53680 + the GCC trampolines.
53681 +
53682 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
53683 + such programs will no longer work under your kernel.
53684 +
53685 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
53686 + utilities to enable trampoline emulation for the affected programs
53687 + yet still have the protection provided by the non-executable pages.
53688 +
53689 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
53690 + your system will not even boot.
53691 +
53692 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
53693 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
53694 + for the affected files.
53695 +
53696 + NOTE: enabling this feature *may* open up a loophole in the
53697 + protection provided by non-executable pages that an attacker
53698 + could abuse. Therefore the best solution is to not have any
53699 + files on your system that would require this option. This can
53700 + be achieved by not using libc5 (which relies on the kernel
53701 + signal handler return code) and not using or rewriting programs
53702 + that make use of the nested function implementation of GCC.
53703 + Skilled users can just fix GCC itself so that it implements
53704 + nested function calls in a way that does not interfere with PaX.
53705 +
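
For readers wondering what a "GCC trampoline" is, the snippet below is a typical trigger: taking the address of a nested function (a GNU C extension, not ISO C) that uses a variable of its enclosing function makes GCC emit a small code stub on the stack at run time. This is a generic illustration of the language feature, not code taken from this patch.

/* GNU C nested function: taking its address forces GCC to build an
 * executable trampoline on the stack at run time. */
#include <stdio.h>

static int apply(int (*op)(int), int value)
{
    return op(value);
}

int main(void)
{
    int offset = 42;

    /* nested function (GNU extension) capturing 'offset' */
    int add_offset(int x)
    {
        return x + offset;
    }

    /* passing &add_offset is what requires the stack trampoline */
    printf("%d\n", apply(add_offset, 8));   /* prints 50 */
    return 0;
}
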
53706 +config PAX_EMUSIGRT
53707 + bool "Automatically emulate sigreturn trampolines"
53708 + depends on PAX_EMUTRAMP && PARISC
53709 + default y
53710 + help
53711 + Enabling this option will have the kernel automatically detect
53712 + and emulate signal return trampolines executing on the stack
53713 + that would otherwise lead to task termination.
53714 +
53715 + This solution is intended as a temporary one for users with
53716 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
53717 + Modula-3 runtime, etc.) or executables linked against them: basically
53718 + everything that does not specify its own SA_RESTORER function in
53719 + normal executable memory, as glibc 2.1+ does.
53720 +
53721 + On parisc you MUST enable this option, otherwise your system will
53722 + not even boot.
53723 +
53724 + NOTE: this feature cannot be disabled on a per executable basis
53725 + and since it *does* open up a loophole in the protection provided
53726 + by non-executable pages, the best solution is to not have any
53727 + files on your system that would require this option.
53728 +
53729 +config PAX_MPROTECT
53730 + bool "Restrict mprotect()"
53731 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
53732 + help
53733 + Enabling this option will prevent programs from
53734 + - changing the executable status of memory pages that were
53735 + not originally created as executable,
53736 + - making read-only executable pages writable again,
53737 + - creating executable pages from anonymous memory.
53738 +
53739 + You should say Y here to complete the protection provided by
53740 + the enforcement of non-executable pages.
53741 +
53742 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
53743 + this feature on a per file basis.
53744 +
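
As a quick way to observe the mprotect() restriction from userland, the hedged sketch below maps an anonymous read-write page and then asks for PROT_EXEC to be added. On a stock kernel the call succeeds; under PAX_MPROTECT it is expected to fail (often with EACCES, though the exact errno is an assumption here), since the page was not originally created executable.

/* Probe whether read-write anonymous memory may later be made executable. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    if (mprotect(p, page, PROT_READ | PROT_WRITE | PROT_EXEC) == 0)
        puts("mprotect(PROT_EXEC) allowed - no MPROTECT-style restriction");
    else
        printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));

    munmap(p, page);
    return 0;
}
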
53745 +config PAX_NOELFRELOCS
53746 + bool "Disallow ELF text relocations"
53747 + depends on PAX_MPROTECT && !PAX_ETEXECRELOCS && (IA64 || PPC || X86)
53748 + help
53749 + Non-executable pages and mprotect() restrictions are effective
53750 + in preventing the introduction of new executable code into an
53751 + attacked task's address space. Only two avenues remain for this
53752 + kind of attack: if the attacker can execute already existing code
53753 + in the attacked task, he can either have it create and mmap() a
53754 + file containing his code, or have it mmap() an already existing
53755 + ELF library that does not contain position independent code and
53756 + use mprotect() on it to make it writable and copy his code there.
53757 + While protecting against the former approach is beyond PaX, the
53758 + latter can be prevented by having only PIC ELF libraries on one's
53759 + system (which do not need to relocate their code). If you are sure
53760 + this is your case, then enable this option; otherwise be careful,
53761 + as you may not even be able to boot or log on to your system (for
53762 + example, some PAM modules are erroneously compiled as non-PIC by
53763 + default).
53764 +
53765 + NOTE: if you are using dynamic ELF executables (as suggested
53766 + when using ASLR) then you must have made sure that you linked
53767 + your files using the PIC version of crt1 (the et_dyn.tar.gz package
53768 + referenced there has already been updated to support this).
53769 +
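
One practical way to verify the "only PIC libraries" assumption is to check shared objects for text relocation markers in their dynamic section (readelf -d reports this as TEXTREL). The sketch below does the same for 64-bit ELF files; it is a simplified illustration that ignores 32-bit objects and unusual layouts.

/* Report whether a 64-bit ELF object needs text relocations
 * (DT_TEXTREL tag, or DF_TEXTREL set in DT_FLAGS). */
#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    if (argc != 2)
        return fprintf(stderr, "usage: %s <elf64-file>\n", argv[0]), 1;
    FILE *f = fopen(argv[1], "rb");
    if (!f)
        return perror("fopen"), 1;

    Elf64_Ehdr eh;
    if (fread(&eh, sizeof eh, 1, f) != 1)
        return 1;

    int textrel = 0;
    for (unsigned i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;
        if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize),
                  SEEK_SET) != 0 || fread(&ph, sizeof ph, 1, f) != 1)
            break;
        if (ph.p_type != PT_DYNAMIC)
            continue;
        /* scan the dynamic entries for text relocation markers */
        for (Elf64_Off off = 0; off < ph.p_filesz; off += sizeof(Elf64_Dyn)) {
            Elf64_Dyn d;
            if (fseek(f, (long)(ph.p_offset + off), SEEK_SET) != 0 ||
                fread(&d, sizeof d, 1, f) != 1 || d.d_tag == DT_NULL)
                break;
            if (d.d_tag == DT_TEXTREL ||
                (d.d_tag == DT_FLAGS && (d.d_un.d_val & DF_TEXTREL)))
                textrel = 1;
        }
    }
    printf("%s: %s\n", argv[1], textrel ? "TEXTREL present" : "no TEXTREL");
    fclose(f);
    return 0;
}
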
53770 +config PAX_ETEXECRELOCS
53771 + bool "Allow ELF ET_EXEC text relocations"
53772 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
53773 + default y
53774 + help
53775 + On some architectures there are incorrectly created applications
53776 + that require text relocations and would not work without enabling
53777 + this option. If you are an alpha, ia64 or parisc user, you should
53778 + enable this option, and disable it once you have made sure that
53779 + none of your applications need it.
53780 +
53781 +config PAX_EMUPLT
53782 + bool "Automatically emulate ELF PLT"
53783 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
53784 + default y
53785 + help
53786 + Enabling this option will have the kernel automatically detect
53787 + and emulate the Procedure Linkage Table entries in ELF files.
53788 + On some architectures such entries are placed in writable memory,
53789 + which becomes non-executable under PaX, leading to task termination.
53790 + Therefore it is mandatory that you enable this option on alpha,
53791 + parisc, sparc and sparc64, otherwise your system would not even boot.
53792 +
53793 + NOTE: this feature *does* open up a loophole in the protection
53794 + provided by the non-executable pages, therefore the proper
53795 + solution is to modify the toolchain to produce a PLT that does
53796 + not need to be writable.
53797 +
53798 +config PAX_DLRESOLVE
53799 + bool 'Emulate old glibc resolver stub'
53800 + depends on PAX_EMUPLT && SPARC
53801 + default n
53802 + help
53803 + This option is needed if userland has an old glibc (before 2.4)
53804 + that puts a 'save' instruction into the runtime generated resolver
53805 + stub that needs special emulation.
53806 +
53807 +config PAX_KERNEXEC
53808 + bool "Enforce non-executable kernel pages"
53809 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
53810 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
53811 + help
53812 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
53813 + that is, enabling this option will make it harder to inject
53814 + and execute 'foreign' code in kernel memory itself.
53815 +
53816 +config PAX_KERNEXEC_MODULE_TEXT
53817 + int "Minimum amount of memory reserved for module code"
53818 + default "4"
53819 + depends on PAX_KERNEXEC && X86_32 && MODULES
53820 + help
53821 + Due to implementation details the kernel must reserve a fixed
53822 + amount of memory for module code at compile time that cannot be
53823 + changed at runtime. Here you can specify the minimum amount
53824 + in MB that will be reserved. Due to the same implementation
53825 + details this size will always be rounded up to the next 2/4 MB
53826 + boundary (depending on PAE), so the actually available memory for
53827 + module code will usually be more than this minimum.
53828 +
53829 + The default 4 MB should be enough for most users but if you have
53830 + an excessive number of modules (e.g., most distribution configs
53831 + compile many drivers as modules) or use huge modules such as
53832 + nvidia's kernel driver, you will need to adjust this amount.
53833 + A good rule of thumb is to look at your currently loaded kernel
53834 + modules and add up their sizes.
53835 +
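
The "add up the sizes of your loaded modules" advice can be automated. A minimal sketch, assuming the conventional /proc/modules line format of "name size refcount ...":

/* Sum the sizes of currently loaded modules from /proc/modules. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/modules", "r");
    if (!f) {
        perror("/proc/modules");
        return 1;
    }

    char line[512];
    unsigned long long total = 0;
    while (fgets(line, sizeof line, f)) {
        char name[128];
        unsigned long long size;
        /* second field is the module's memory footprint in bytes */
        if (sscanf(line, "%127s %llu", name, &size) == 2)
            total += size;
    }
    fclose(f);

    printf("module code loaded: %llu bytes (~%llu MB)\n", total, total >> 20);
    return 0;
}
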
53836 +endmenu
53837 +
53838 +menu "Address Space Layout Randomization"
53839 + depends on PAX
53840 +
53841 +config PAX_ASLR
53842 + bool "Address Space Layout Randomization"
53843 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
53844 + help
53845 + Many if not most exploit techniques rely on the knowledge of
53846 + certain addresses in the attacked program. The following options
53847 + will allow the kernel to apply a certain amount of randomization
53848 + to specific parts of the program thereby forcing an attacker to
53849 + guess them in most cases. Any failed guess will most likely crash
53850 + the attacked program, which allows the kernel to detect such attempts
53851 + and react to them. PaX itself provides no reaction mechanisms;
53852 + instead, you are strongly encouraged to make use of Nergal's
53853 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
53854 + (http://www.grsecurity.net/) built-in crash detection features or
53855 + develop one yourself.
53856 +
53857 + By saying Y here you can choose to randomize the following areas:
53858 + - top of the task's kernel stack
53859 + - top of the task's userland stack
53860 + - base address for mmap() requests that do not specify one
53861 + (this includes all libraries)
53862 + - base address of the main executable
53863 +
53864 + It is strongly recommended to say Y here, as address space layout
53865 + randomization has negligible impact on performance yet provides
53866 + very effective protection.
53867 +
53868 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
53869 + this feature on a per file basis.
53870 +
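
A simple way to see which areas end up randomized is to print a few representative addresses and run the program several times; with ASLR active (and, for the main executable, an ET_DYN/PIE build) the values change between runs. A hedged sketch:

/* Print representative addresses; run repeatedly to observe randomization
 * of the stack, heap, mmap() area and (for PIE builds) the executable. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    int on_stack;
    void *on_heap = malloc(16);
    void *mapped  = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    printf("stack : %p\n", (void *)&on_stack);
    printf("heap  : %p\n", on_heap);
    printf("mmap  : %p\n", mapped);
    printf("main  : %p\n", (void *)main);  /* cast is not strictly portable */

    if (mapped != MAP_FAILED)
        munmap(mapped, 4096);
    free(on_heap);
    return 0;
}
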
53871 +config PAX_RANDKSTACK
53872 + bool "Randomize kernel stack base"
53873 + depends on PAX_ASLR && X86_TSC && X86_32
53874 + help
53875 + By saying Y here the kernel will randomize every task's kernel
53876 + stack on every system call. This will not only force an attacker
53877 + to guess it but also prevent him from making use of possible
53878 + leaked information about it.
53879 +
53880 + Since the kernel stack is a rather scarce resource, randomization
53881 + may cause unexpected stack overflows; therefore you should very
53882 + carefully test your system. Note that once enabled in the kernel
53883 + configuration, this feature cannot be disabled on a per file basis.
53884 +
53885 +config PAX_RANDUSTACK
53886 + bool "Randomize user stack base"
53887 + depends on PAX_ASLR
53888 + help
53889 + By saying Y here the kernel will randomize every task's userland
53890 + stack. The randomization is done in two steps, where the second
53891 + one may shift the top of the stack by a large amount and cause
53892 + problems for programs that want to use lots of memory (more
53893 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
53894 + For this reason the second step can be controlled by 'chpax' or
53895 + 'paxctl' on a per file basis.
53896 +
53897 +config PAX_RANDMMAP
53898 + bool "Randomize mmap() base"
53899 + depends on PAX_ASLR
53900 + help
53901 + By saying Y here the kernel will use a randomized base address for
53902 + mmap() requests that do not specify one themselves. As a result
53903 + all dynamically loaded libraries will appear at random addresses
53904 + and therefore be harder to exploit by a technique where an attacker
53905 + attempts to execute library code for his purposes (e.g. spawn a
53906 + shell from an exploited program that is running at an elevated
53907 + privilege level).
53908 +
53909 + Furthermore, if a program is relinked as a dynamic ELF file, its
53910 + base address will be randomized as well, completing the full
53911 + randomization of the address space layout. Attacking such programs
53912 + becomes a guessing game. You can find an example of doing this at
53913 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
53914 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
53915 +
53916 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
53917 + feature on a per file basis.
53918 +
53919 +endmenu
53920 +
53921 +menu "Miscellaneous hardening features"
53922 +
53923 +config PAX_MEMORY_SANITIZE
53924 + bool "Sanitize all freed memory"
53925 + help
53926 + By saying Y here the kernel will erase memory pages as soon as they
53927 + are freed. This in turn reduces the lifetime of data stored in the
53928 + pages, making it less likely that sensitive information such as
53929 + passwords, cryptographic secrets, etc. stays in memory for too long.
53930 +
53931 + This is especially useful for programs whose runtime is short;
53932 + long-lived processes and the kernel itself also benefit, as long as
53933 + they operate on whole memory pages and ensure timely freeing of pages
53934 + that may hold sensitive information.
53935 +
53936 + The tradeoff is a performance impact: on a single CPU system, kernel
53937 + compilation sees a 3% slowdown; other systems and workloads may vary,
53938 + and you are advised to test this feature on your expected workload
53939 + before deploying it.
53940 +
53941 + Note that this feature does not protect data stored in live pages,
53942 + e.g., process memory swapped to disk may stay there for a long time.
53943 +
53944 +config PAX_MEMORY_UDEREF
53945 + bool "Prevent invalid userland pointer dereference"
53946 + depends on X86 && !UML_X86 && !XEN
53947 + select PAX_PER_CPU_PGD if X86_64
53948 + help
53949 + By saying Y here the kernel will be prevented from dereferencing
53950 + userland pointers in contexts where the kernel expects only kernel
53951 + pointers. This is both a useful runtime debugging feature and a
53952 + security measure that prevents exploiting a class of kernel bugs.
53953 +
53954 + The tradeoff is that some virtualization solutions may experience
53955 + a huge slowdown and therefore you should not enable this feature
53956 + for kernels meant to run in such environments. Whether a given VM
53957 + solution is affected or not is best determined by simply trying it
53958 + out; the performance impact will be obvious right at boot, as this
53959 + mechanism engages very early on. A good rule of thumb is that
53960 + VMs running on CPUs without hardware virtualization support (i.e.,
53961 + the majority of IA-32 CPUs) will likely experience the slowdown.
53962 +
53963 +config PAX_REFCOUNT
53964 + bool "Prevent various kernel object reference counter overflows"
53965 + depends on GRKERNSEC && (X86 || SPARC64)
53966 + help
53967 + By saying Y here the kernel will detect and prevent overflowing
53968 + various (but not all) kinds of object reference counters. Such
53969 + overflows normally occur only as a result of bugs and are often, if
53970 + not always, exploitable.
53971 +
53972 + The tradeoff is that data structures protected by an overflowed
53973 + refcount will never be freed and therefore will leak memory. Note
53974 + that this leak also happens even without this protection, but in
53975 + that case the overflow can eventually trigger the freeing of the
53976 + data structure while it is still being used elsewhere, resulting
53977 + in the exploitable situation that this feature prevents.
53978 +
53979 + Since this has a negligible performance impact, you should enable
53980 + this feature.
53981 +
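
To illustrate why an overflowing reference counter is dangerous, the sketch below wraps a deliberately narrow counter: after enough unbalanced "get" operations the count returns to zero and an ordinary get/put pair frees the object even though another reference still exists. This is a userland caricature of the kernel problem, using an 8-bit counter only so that it wraps quickly.

/* Caricature of a reference counter overflow: an 8-bit count wraps after
 * 256 increments, so the object is "freed" while still referenced. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
    uint8_t refcount;       /* deliberately tiny so it wraps quickly */
    int payload;
};

static void put(struct object *o)
{
    if (--o->refcount == 0) {
        printf("freeing object while another reference still exists\n");
        free(o);
    }
}

int main(void)
{
    struct object *owner_ref = malloc(sizeof *owner_ref);
    if (!owner_ref)
        return 1;
    owner_ref->refcount = 1;      /* the legitimate owner's reference */
    owner_ref->payload = 123;

    /* attacker-driven path takes references without ever releasing them;
     * 255 extra gets wrap the 8-bit counter from 1 back to 0 */
    for (int i = 0; i < 255; i++)
        owner_ref->refcount++;
    printf("refcount after overflow: %u\n", owner_ref->refcount);  /* 0 */

    /* one ordinary get/put pair now frees the object ... */
    owner_ref->refcount++;
    put(owner_ref);

    /* ... while owner_ref still points at it: a dangling pointer. */
    return 0;
}
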
53982 +config PAX_USERCOPY
53983 + bool "Bounds check heap object copies between kernel and userland"
53984 + depends on X86 || PPC || SPARC
53985 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
53986 + help
53987 + By saying Y here the kernel will enforce the size of heap objects
53988 + when they are copied in either direction between the kernel and
53989 + userland, even if only a part of the heap object is copied.
53990 +
53991 + Specifically, this checking prevents information leaking from the
53992 + kernel heap during kernel to userland copies (if the kernel heap
53993 + object is otherwise fully initialized) and prevents kernel heap
53994 + overflows during userland to kernel copies.
53995 +
53996 + Note that the current implementation provides the strictest checks
53997 + for the SLUB allocator.
53998 +
53999 + Since this has a negligible performance impact, you should enable
54000 + this feature.
54001 +endmenu
54002 +
54003 +endmenu
54004 +
54005 config KEYS
54006 bool "Enable access key retention support"
54007 help
54008 @@ -124,7 +617,7 @@ config INTEL_TXT
54009 config LSM_MMAP_MIN_ADDR
54010 int "Low address space for LSM to protect from user allocation"
54011 depends on SECURITY && SECURITY_SELINUX
54012 - default 65536
54013 + default 32768
54014 help
54015 This is the portion of low virtual memory which should be protected
54016 from userspace allocation. Keeping a user from writing to low pages
54017 diff -urNp linux-2.6.34.1/security/commoncap.c linux-2.6.34.1/security/commoncap.c
54018 --- linux-2.6.34.1/security/commoncap.c 2010-07-05 14:24:10.000000000 -0400
54019 +++ linux-2.6.34.1/security/commoncap.c 2010-07-07 09:04:59.000000000 -0400
54020 @@ -28,6 +28,7 @@
54021 #include <linux/prctl.h>
54022 #include <linux/securebits.h>
54023 #include <linux/syslog.h>
54024 +#include <net/sock.h>
54025
54026 /*
54027 * If a non-root user executes a setuid-root binary in
54028 @@ -51,9 +52,11 @@ static void warn_setuid_and_fcaps_mixed(
54029 }
54030 }
54031
54032 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
54033 +
54034 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
54035 {
54036 - NETLINK_CB(skb).eff_cap = current_cap();
54037 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
54038 return 0;
54039 }
54040
54041 diff -urNp linux-2.6.34.1/security/integrity/ima/ima.h linux-2.6.34.1/security/integrity/ima/ima.h
54042 --- linux-2.6.34.1/security/integrity/ima/ima.h 2010-07-05 14:24:10.000000000 -0400
54043 +++ linux-2.6.34.1/security/integrity/ima/ima.h 2010-07-07 09:04:59.000000000 -0400
54044 @@ -83,8 +83,8 @@ void ima_add_violation(struct inode *ino
54045 extern spinlock_t ima_queue_lock;
54046
54047 struct ima_h_table {
54048 - atomic_long_t len; /* number of stored measurements in the list */
54049 - atomic_long_t violations;
54050 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
54051 + atomic_long_unchecked_t violations;
54052 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
54053 };
54054 extern struct ima_h_table ima_htable;
54055 diff -urNp linux-2.6.34.1/security/integrity/ima/ima_api.c linux-2.6.34.1/security/integrity/ima/ima_api.c
54056 --- linux-2.6.34.1/security/integrity/ima/ima_api.c 2010-07-05 14:24:10.000000000 -0400
54057 +++ linux-2.6.34.1/security/integrity/ima/ima_api.c 2010-07-07 09:04:59.000000000 -0400
54058 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
54059 int result;
54060
54061 /* can overflow, only indicator */
54062 - atomic_long_inc(&ima_htable.violations);
54063 + atomic_long_inc_unchecked(&ima_htable.violations);
54064
54065 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
54066 if (!entry) {
54067 diff -urNp linux-2.6.34.1/security/integrity/ima/ima_fs.c linux-2.6.34.1/security/integrity/ima/ima_fs.c
54068 --- linux-2.6.34.1/security/integrity/ima/ima_fs.c 2010-07-05 14:24:10.000000000 -0400
54069 +++ linux-2.6.34.1/security/integrity/ima/ima_fs.c 2010-07-07 09:04:59.000000000 -0400
54070 @@ -28,12 +28,12 @@
54071 static int valid_policy = 1;
54072 #define TMPBUFLEN 12
54073 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
54074 - loff_t *ppos, atomic_long_t *val)
54075 + loff_t *ppos, atomic_long_unchecked_t *val)
54076 {
54077 char tmpbuf[TMPBUFLEN];
54078 ssize_t len;
54079
54080 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
54081 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
54082 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
54083 }
54084
54085 diff -urNp linux-2.6.34.1/security/integrity/ima/ima_queue.c linux-2.6.34.1/security/integrity/ima/ima_queue.c
54086 --- linux-2.6.34.1/security/integrity/ima/ima_queue.c 2010-07-05 14:24:10.000000000 -0400
54087 +++ linux-2.6.34.1/security/integrity/ima/ima_queue.c 2010-07-07 09:04:59.000000000 -0400
54088 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
54089 INIT_LIST_HEAD(&qe->later);
54090 list_add_tail_rcu(&qe->later, &ima_measurements);
54091
54092 - atomic_long_inc(&ima_htable.len);
54093 + atomic_long_inc_unchecked(&ima_htable.len);
54094 key = ima_hash_key(entry->digest);
54095 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
54096 return 0;
54097 diff -urNp linux-2.6.34.1/security/min_addr.c linux-2.6.34.1/security/min_addr.c
54098 --- linux-2.6.34.1/security/min_addr.c 2010-07-05 14:24:10.000000000 -0400
54099 +++ linux-2.6.34.1/security/min_addr.c 2010-07-07 09:04:59.000000000 -0400
54100 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
54101 */
54102 static void update_mmap_min_addr(void)
54103 {
54104 +#ifndef SPARC
54105 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
54106 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
54107 mmap_min_addr = dac_mmap_min_addr;
54108 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
54109 #else
54110 mmap_min_addr = dac_mmap_min_addr;
54111 #endif
54112 +#endif
54113 }
54114
54115 /*
54116 diff -urNp linux-2.6.34.1/sound/aoa/codecs/onyx.c linux-2.6.34.1/sound/aoa/codecs/onyx.c
54117 --- linux-2.6.34.1/sound/aoa/codecs/onyx.c 2010-07-05 14:24:10.000000000 -0400
54118 +++ linux-2.6.34.1/sound/aoa/codecs/onyx.c 2010-07-07 09:04:59.000000000 -0400
54119 @@ -54,7 +54,7 @@ struct onyx {
54120 spdif_locked:1,
54121 analog_locked:1,
54122 original_mute:2;
54123 - int open_count;
54124 + atomic_t open_count;
54125 struct codec_info *codec_info;
54126
54127 /* mutex serializes concurrent access to the device
54128 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
54129 struct onyx *onyx = cii->codec_data;
54130
54131 mutex_lock(&onyx->mutex);
54132 - onyx->open_count++;
54133 + atomic_inc(&onyx->open_count);
54134 mutex_unlock(&onyx->mutex);
54135
54136 return 0;
54137 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
54138 struct onyx *onyx = cii->codec_data;
54139
54140 mutex_lock(&onyx->mutex);
54141 - onyx->open_count--;
54142 - if (!onyx->open_count)
54143 + if (atomic_dec_and_test(&onyx->open_count))
54144 onyx->spdif_locked = onyx->analog_locked = 0;
54145 mutex_unlock(&onyx->mutex);
54146
54147 diff -urNp linux-2.6.34.1/sound/core/oss/pcm_oss.c linux-2.6.34.1/sound/core/oss/pcm_oss.c
54148 --- linux-2.6.34.1/sound/core/oss/pcm_oss.c 2010-07-05 14:24:10.000000000 -0400
54149 +++ linux-2.6.34.1/sound/core/oss/pcm_oss.c 2010-07-07 09:04:59.000000000 -0400
54150 @@ -2962,8 +2962,8 @@ static void snd_pcm_oss_proc_done(struct
54151 }
54152 }
54153 #else /* !CONFIG_SND_VERBOSE_PROCFS */
54154 -#define snd_pcm_oss_proc_init(pcm)
54155 -#define snd_pcm_oss_proc_done(pcm)
54156 +#define snd_pcm_oss_proc_init(pcm) do {} while (0)
54157 +#define snd_pcm_oss_proc_done(pcm) do {} while (0)
54158 #endif /* CONFIG_SND_VERBOSE_PROCFS */
54159
54160 /*
54161 diff -urNp linux-2.6.34.1/sound/core/seq/seq_lock.h linux-2.6.34.1/sound/core/seq/seq_lock.h
54162 --- linux-2.6.34.1/sound/core/seq/seq_lock.h 2010-07-05 14:24:10.000000000 -0400
54163 +++ linux-2.6.34.1/sound/core/seq/seq_lock.h 2010-07-07 09:04:59.000000000 -0400
54164 @@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo
54165 #else /* SMP || CONFIG_SND_DEBUG */
54166
54167 typedef spinlock_t snd_use_lock_t; /* dummy */
54168 -#define snd_use_lock_init(lockp) /**/
54169 -#define snd_use_lock_use(lockp) /**/
54170 -#define snd_use_lock_free(lockp) /**/
54171 -#define snd_use_lock_sync(lockp) /**/
54172 +#define snd_use_lock_init(lockp) do {} while (0)
54173 +#define snd_use_lock_use(lockp) do {} while (0)
54174 +#define snd_use_lock_free(lockp) do {} while (0)
54175 +#define snd_use_lock_sync(lockp) do {} while (0)
54176
54177 #endif /* SMP || CONFIG_SND_DEBUG */
54178
54179 diff -urNp linux-2.6.34.1/sound/drivers/mts64.c linux-2.6.34.1/sound/drivers/mts64.c
54180 --- linux-2.6.34.1/sound/drivers/mts64.c 2010-07-05 14:24:10.000000000 -0400
54181 +++ linux-2.6.34.1/sound/drivers/mts64.c 2010-07-07 09:04:59.000000000 -0400
54182 @@ -66,7 +66,7 @@ struct mts64 {
54183 struct pardevice *pardev;
54184 int pardev_claimed;
54185
54186 - int open_count;
54187 + atomic_t open_count;
54188 int current_midi_output_port;
54189 int current_midi_input_port;
54190 u8 mode[MTS64_NUM_INPUT_PORTS];
54191 @@ -696,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
54192 {
54193 struct mts64 *mts = substream->rmidi->private_data;
54194
54195 - if (mts->open_count == 0) {
54196 + if (atomic_read(&mts->open_count) == 0) {
54197 /* We don't need a spinlock here, because this is just called
54198 if the device has not been opened before.
54199 So there aren't any IRQs from the device */
54200 @@ -704,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
54201
54202 msleep(50);
54203 }
54204 - ++(mts->open_count);
54205 + atomic_inc(&mts->open_count);
54206
54207 return 0;
54208 }
54209 @@ -714,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
54210 struct mts64 *mts = substream->rmidi->private_data;
54211 unsigned long flags;
54212
54213 - --(mts->open_count);
54214 - if (mts->open_count == 0) {
54215 + if (atomic_dec_return(&mts->open_count) == 0) {
54216 /* We need the spinlock_irqsave here because we can still
54217 have IRQs at this point */
54218 spin_lock_irqsave(&mts->lock, flags);
54219 @@ -724,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
54220
54221 msleep(500);
54222
54223 - } else if (mts->open_count < 0)
54224 - mts->open_count = 0;
54225 + } else if (atomic_read(&mts->open_count) < 0)
54226 + atomic_set(&mts->open_count, 0);
54227
54228 return 0;
54229 }
54230 diff -urNp linux-2.6.34.1/sound/drivers/portman2x4.c linux-2.6.34.1/sound/drivers/portman2x4.c
54231 --- linux-2.6.34.1/sound/drivers/portman2x4.c 2010-07-05 14:24:10.000000000 -0400
54232 +++ linux-2.6.34.1/sound/drivers/portman2x4.c 2010-07-07 09:04:59.000000000 -0400
54233 @@ -84,7 +84,7 @@ struct portman {
54234 struct pardevice *pardev;
54235 int pardev_claimed;
54236
54237 - int open_count;
54238 + atomic_t open_count;
54239 int mode[PORTMAN_NUM_INPUT_PORTS];
54240 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
54241 };
54242 diff -urNp linux-2.6.34.1/sound/oss/sb_audio.c linux-2.6.34.1/sound/oss/sb_audio.c
54243 --- linux-2.6.34.1/sound/oss/sb_audio.c 2010-07-05 14:24:10.000000000 -0400
54244 +++ linux-2.6.34.1/sound/oss/sb_audio.c 2010-07-07 09:04:59.000000000 -0400
54245 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
54246 buf16 = (signed short *)(localbuf + localoffs);
54247 while (c)
54248 {
54249 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
54250 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
54251 if (copy_from_user(lbuf8,
54252 userbuf+useroffs + p,
54253 locallen))
54254 diff -urNp linux-2.6.34.1/sound/pci/ac97/ac97_codec.c linux-2.6.34.1/sound/pci/ac97/ac97_codec.c
54255 --- linux-2.6.34.1/sound/pci/ac97/ac97_codec.c 2010-07-05 14:24:10.000000000 -0400
54256 +++ linux-2.6.34.1/sound/pci/ac97/ac97_codec.c 2010-07-07 09:04:59.000000000 -0400
54257 @@ -1962,7 +1962,7 @@ static int snd_ac97_dev_disconnect(struc
54258 }
54259
54260 /* build_ops to do nothing */
54261 -static struct snd_ac97_build_ops null_build_ops;
54262 +static const struct snd_ac97_build_ops null_build_ops;
54263
54264 #ifdef CONFIG_SND_AC97_POWER_SAVE
54265 static void do_update_power(struct work_struct *work)
54266 diff -urNp linux-2.6.34.1/sound/pci/ac97/ac97_patch.c linux-2.6.34.1/sound/pci/ac97/ac97_patch.c
54267 --- linux-2.6.34.1/sound/pci/ac97/ac97_patch.c 2010-07-05 14:24:10.000000000 -0400
54268 +++ linux-2.6.34.1/sound/pci/ac97/ac97_patch.c 2010-07-07 09:04:59.000000000 -0400
54269 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
54270 return 0;
54271 }
54272
54273 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
54274 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
54275 .build_spdif = patch_yamaha_ymf743_build_spdif,
54276 .build_3d = patch_yamaha_ymf7x3_3d,
54277 };
54278 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
54279 return 0;
54280 }
54281
54282 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
54283 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
54284 .build_3d = patch_yamaha_ymf7x3_3d,
54285 .build_post_spdif = patch_yamaha_ymf753_post_spdif
54286 };
54287 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
54288 return 0;
54289 }
54290
54291 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
54292 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
54293 .build_specific = patch_wolfson_wm9703_specific,
54294 };
54295
54296 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
54297 return 0;
54298 }
54299
54300 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
54301 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
54302 .build_specific = patch_wolfson_wm9704_specific,
54303 };
54304
54305 @@ -677,7 +677,7 @@ static int patch_wolfson_wm9711_specific
54306 return 0;
54307 }
54308
54309 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
54310 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
54311 .build_specific = patch_wolfson_wm9711_specific,
54312 };
54313
54314 @@ -871,7 +871,7 @@ static void patch_wolfson_wm9713_resume
54315 }
54316 #endif
54317
54318 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
54319 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
54320 .build_specific = patch_wolfson_wm9713_specific,
54321 .build_3d = patch_wolfson_wm9713_3d,
54322 #ifdef CONFIG_PM
54323 @@ -976,7 +976,7 @@ static int patch_sigmatel_stac97xx_speci
54324 return 0;
54325 }
54326
54327 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
54328 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
54329 .build_3d = patch_sigmatel_stac9700_3d,
54330 .build_specific = patch_sigmatel_stac97xx_specific
54331 };
54332 @@ -1023,7 +1023,7 @@ static int patch_sigmatel_stac9708_speci
54333 return patch_sigmatel_stac97xx_specific(ac97);
54334 }
54335
54336 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
54337 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
54338 .build_3d = patch_sigmatel_stac9708_3d,
54339 .build_specific = patch_sigmatel_stac9708_specific
54340 };
54341 @@ -1252,7 +1252,7 @@ static int patch_sigmatel_stac9758_speci
54342 return 0;
54343 }
54344
54345 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
54346 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
54347 .build_3d = patch_sigmatel_stac9700_3d,
54348 .build_specific = patch_sigmatel_stac9758_specific
54349 };
54350 @@ -1327,7 +1327,7 @@ static int patch_cirrus_build_spdif(stru
54351 return 0;
54352 }
54353
54354 -static struct snd_ac97_build_ops patch_cirrus_ops = {
54355 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
54356 .build_spdif = patch_cirrus_build_spdif
54357 };
54358
54359 @@ -1384,7 +1384,7 @@ static int patch_conexant_build_spdif(st
54360 return 0;
54361 }
54362
54363 -static struct snd_ac97_build_ops patch_conexant_ops = {
54364 +static const struct snd_ac97_build_ops patch_conexant_ops = {
54365 .build_spdif = patch_conexant_build_spdif
54366 };
54367
54368 @@ -1486,7 +1486,7 @@ static const struct snd_ac97_res_table a
54369 { AC97_VIDEO, 0x9f1f },
54370 { AC97_AUX, 0x9f1f },
54371 { AC97_PCM, 0x9f1f },
54372 - { } /* terminator */
54373 + { 0, 0 } /* terminator */
54374 };
54375
54376 static int patch_ad1819(struct snd_ac97 * ac97)
54377 @@ -1560,7 +1560,7 @@ static void patch_ad1881_chained(struct
54378 }
54379 }
54380
54381 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
54382 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
54383 #ifdef CONFIG_PM
54384 .resume = ad18xx_resume
54385 #endif
54386 @@ -1647,7 +1647,7 @@ static int patch_ad1885_specific(struct
54387 return 0;
54388 }
54389
54390 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
54391 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
54392 .build_specific = &patch_ad1885_specific,
54393 #ifdef CONFIG_PM
54394 .resume = ad18xx_resume
54395 @@ -1674,7 +1674,7 @@ static int patch_ad1886_specific(struct
54396 return 0;
54397 }
54398
54399 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
54400 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
54401 .build_specific = &patch_ad1886_specific,
54402 #ifdef CONFIG_PM
54403 .resume = ad18xx_resume
54404 @@ -1881,7 +1881,7 @@ static int patch_ad1981a_specific(struct
54405 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
54406 }
54407
54408 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
54409 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
54410 .build_post_spdif = patch_ad198x_post_spdif,
54411 .build_specific = patch_ad1981a_specific,
54412 #ifdef CONFIG_PM
54413 @@ -1936,7 +1936,7 @@ static int patch_ad1981b_specific(struct
54414 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
54415 }
54416
54417 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
54418 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
54419 .build_post_spdif = patch_ad198x_post_spdif,
54420 .build_specific = patch_ad1981b_specific,
54421 #ifdef CONFIG_PM
54422 @@ -2075,7 +2075,7 @@ static int patch_ad1888_specific(struct
54423 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
54424 }
54425
54426 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
54427 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
54428 .build_post_spdif = patch_ad198x_post_spdif,
54429 .build_specific = patch_ad1888_specific,
54430 #ifdef CONFIG_PM
54431 @@ -2124,7 +2124,7 @@ static int patch_ad1980_specific(struct
54432 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
54433 }
54434
54435 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
54436 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
54437 .build_post_spdif = patch_ad198x_post_spdif,
54438 .build_specific = patch_ad1980_specific,
54439 #ifdef CONFIG_PM
54440 @@ -2239,7 +2239,7 @@ static int patch_ad1985_specific(struct
54441 ARRAY_SIZE(snd_ac97_ad1985_controls));
54442 }
54443
54444 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
54445 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
54446 .build_post_spdif = patch_ad198x_post_spdif,
54447 .build_specific = patch_ad1985_specific,
54448 #ifdef CONFIG_PM
54449 @@ -2531,7 +2531,7 @@ static int patch_ad1986_specific(struct
54450 ARRAY_SIZE(snd_ac97_ad1985_controls));
54451 }
54452
54453 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
54454 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
54455 .build_post_spdif = patch_ad198x_post_spdif,
54456 .build_specific = patch_ad1986_specific,
54457 #ifdef CONFIG_PM
54458 @@ -2636,7 +2636,7 @@ static int patch_alc650_specific(struct
54459 return 0;
54460 }
54461
54462 -static struct snd_ac97_build_ops patch_alc650_ops = {
54463 +static const struct snd_ac97_build_ops patch_alc650_ops = {
54464 .build_specific = patch_alc650_specific,
54465 .update_jacks = alc650_update_jacks
54466 };
54467 @@ -2788,7 +2788,7 @@ static int patch_alc655_specific(struct
54468 return 0;
54469 }
54470
54471 -static struct snd_ac97_build_ops patch_alc655_ops = {
54472 +static const struct snd_ac97_build_ops patch_alc655_ops = {
54473 .build_specific = patch_alc655_specific,
54474 .update_jacks = alc655_update_jacks
54475 };
54476 @@ -2900,7 +2900,7 @@ static int patch_alc850_specific(struct
54477 return 0;
54478 }
54479
54480 -static struct snd_ac97_build_ops patch_alc850_ops = {
54481 +static const struct snd_ac97_build_ops patch_alc850_ops = {
54482 .build_specific = patch_alc850_specific,
54483 .update_jacks = alc850_update_jacks
54484 };
54485 @@ -2962,7 +2962,7 @@ static int patch_cm9738_specific(struct
54486 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
54487 }
54488
54489 -static struct snd_ac97_build_ops patch_cm9738_ops = {
54490 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
54491 .build_specific = patch_cm9738_specific,
54492 .update_jacks = cm9738_update_jacks
54493 };
54494 @@ -3053,7 +3053,7 @@ static int patch_cm9739_post_spdif(struc
54495 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
54496 }
54497
54498 -static struct snd_ac97_build_ops patch_cm9739_ops = {
54499 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
54500 .build_specific = patch_cm9739_specific,
54501 .build_post_spdif = patch_cm9739_post_spdif,
54502 .update_jacks = cm9739_update_jacks
54503 @@ -3227,7 +3227,7 @@ static int patch_cm9761_specific(struct
54504 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
54505 }
54506
54507 -static struct snd_ac97_build_ops patch_cm9761_ops = {
54508 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
54509 .build_specific = patch_cm9761_specific,
54510 .build_post_spdif = patch_cm9761_post_spdif,
54511 .update_jacks = cm9761_update_jacks
54512 @@ -3323,7 +3323,7 @@ static int patch_cm9780_specific(struct
54513 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
54514 }
54515
54516 -static struct snd_ac97_build_ops patch_cm9780_ops = {
54517 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
54518 .build_specific = patch_cm9780_specific,
54519 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
54520 };
54521 @@ -3443,7 +3443,7 @@ static int patch_vt1616_specific(struct
54522 return 0;
54523 }
54524
54525 -static struct snd_ac97_build_ops patch_vt1616_ops = {
54526 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
54527 .build_specific = patch_vt1616_specific
54528 };
54529
54530 @@ -3797,7 +3797,7 @@ static int patch_it2646_specific(struct
54531 return 0;
54532 }
54533
54534 -static struct snd_ac97_build_ops patch_it2646_ops = {
54535 +static const struct snd_ac97_build_ops patch_it2646_ops = {
54536 .build_specific = patch_it2646_specific,
54537 .update_jacks = it2646_update_jacks
54538 };
54539 @@ -3831,7 +3831,7 @@ static int patch_si3036_specific(struct
54540 return 0;
54541 }
54542
54543 -static struct snd_ac97_build_ops patch_si3036_ops = {
54544 +static const struct snd_ac97_build_ops patch_si3036_ops = {
54545 .build_specific = patch_si3036_specific,
54546 };
54547
54548 @@ -3864,7 +3864,7 @@ static struct snd_ac97_res_table lm4550_
54549 { AC97_AUX, 0x1f1f },
54550 { AC97_PCM, 0x1f1f },
54551 { AC97_REC_GAIN, 0x0f0f },
54552 - { } /* terminator */
54553 + { 0, 0 } /* terminator */
54554 };
54555
54556 static int patch_lm4550(struct snd_ac97 *ac97)
54557 @@ -3898,7 +3898,7 @@ static int patch_ucb1400_specific(struct
54558 return 0;
54559 }
54560
54561 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
54562 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
54563 .build_specific = patch_ucb1400_specific,
54564 };
54565
54566 diff -urNp linux-2.6.34.1/sound/pci/ens1370.c linux-2.6.34.1/sound/pci/ens1370.c
54567 --- linux-2.6.34.1/sound/pci/ens1370.c 2010-07-05 14:24:10.000000000 -0400
54568 +++ linux-2.6.34.1/sound/pci/ens1370.c 2010-07-07 09:04:59.000000000 -0400
54569 @@ -452,7 +452,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_audio
54570 { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
54571 { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
54572 #endif
54573 - { 0, }
54574 + { 0, 0, 0, 0, 0, 0, 0 }
54575 };
54576
54577 MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
54578 diff -urNp linux-2.6.34.1/sound/pci/hda/patch_hdmi.c linux-2.6.34.1/sound/pci/hda/patch_hdmi.c
54579 --- linux-2.6.34.1/sound/pci/hda/patch_hdmi.c 2010-07-05 14:24:10.000000000 -0400
54580 +++ linux-2.6.34.1/sound/pci/hda/patch_hdmi.c 2010-07-07 09:04:59.000000000 -0400
54581 @@ -657,10 +657,10 @@ static void hdmi_non_intrinsic_event(str
54582 cp_ready);
54583
54584 /* TODO */
54585 - if (cp_state)
54586 - ;
54587 - if (cp_ready)
54588 - ;
54589 + if (cp_state) {
54590 + }
54591 + if (cp_ready) {
54592 + }
54593 }
54594
54595
54596 diff -urNp linux-2.6.34.1/sound/pci/intel8x0.c linux-2.6.34.1/sound/pci/intel8x0.c
54597 --- linux-2.6.34.1/sound/pci/intel8x0.c 2010-07-05 14:24:10.000000000 -0400
54598 +++ linux-2.6.34.1/sound/pci/intel8x0.c 2010-07-07 09:04:59.000000000 -0400
54599 @@ -444,7 +444,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel
54600 { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
54601 { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
54602 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
54603 - { 0, }
54604 + { 0, 0, 0, 0, 0, 0, 0 }
54605 };
54606
54607 MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids);
54608 @@ -2129,7 +2129,7 @@ static struct ac97_quirk ac97_quirks[] _
54609 .type = AC97_TUNE_HP_ONLY
54610 },
54611 #endif
54612 - { } /* terminator */
54613 + { 0, 0, 0, 0, NULL, 0 } /* terminator */
54614 };
54615
54616 static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
54617 diff -urNp linux-2.6.34.1/sound/pci/intel8x0m.c linux-2.6.34.1/sound/pci/intel8x0m.c
54618 --- linux-2.6.34.1/sound/pci/intel8x0m.c 2010-07-05 14:24:10.000000000 -0400
54619 +++ linux-2.6.34.1/sound/pci/intel8x0m.c 2010-07-07 09:04:59.000000000 -0400
54620 @@ -239,7 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel
54621 { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
54622 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
54623 #endif
54624 - { 0, }
54625 + { 0, 0, 0, 0, 0, 0, 0 }
54626 };
54627
54628 MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids);
54629 @@ -1264,7 +1264,7 @@ static struct shortname_table {
54630 { 0x5455, "ALi M5455" },
54631 { 0x746d, "AMD AMD8111" },
54632 #endif
54633 - { 0 },
54634 + { 0, NULL },
54635 };
54636
54637 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
54638 diff -urNp linux-2.6.34.1/usr/gen_init_cpio.c linux-2.6.34.1/usr/gen_init_cpio.c
54639 --- linux-2.6.34.1/usr/gen_init_cpio.c 2010-07-05 14:24:10.000000000 -0400
54640 +++ linux-2.6.34.1/usr/gen_init_cpio.c 2010-07-07 09:04:59.000000000 -0400
54641 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
54642 int retval;
54643 int rc = -1;
54644 int namesize;
54645 - int i;
54646 + unsigned int i;
54647
54648 mode |= S_IFREG;
54649
54650 @@ -386,9 +386,10 @@ static char *cpio_replace_env(char *new_
54651 *env_var = *expanded = '\0';
54652 strncat(env_var, start + 2, end - start - 2);
54653 strncat(expanded, new_location, start - new_location);
54654 - strncat(expanded, getenv(env_var), PATH_MAX);
54655 - strncat(expanded, end + 1, PATH_MAX);
54656 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
54657 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
54658 strncpy(new_location, expanded, PATH_MAX);
54659 + new_location[PATH_MAX] = 0;
54660 } else
54661 break;
54662 }
54663 diff -urNp linux-2.6.34.1/virt/kvm/kvm_main.c linux-2.6.34.1/virt/kvm/kvm_main.c
54664 --- linux-2.6.34.1/virt/kvm/kvm_main.c 2010-07-05 14:24:10.000000000 -0400
54665 +++ linux-2.6.34.1/virt/kvm/kvm_main.c 2010-07-07 09:04:59.000000000 -0400
54666 @@ -1288,6 +1288,7 @@ static int kvm_vcpu_release(struct inode
54667 return 0;
54668 }
54669
54670 +/* cannot be const */
54671 static struct file_operations kvm_vcpu_fops = {
54672 .release = kvm_vcpu_release,
54673 .unlocked_ioctl = kvm_vcpu_ioctl,
54674 @@ -1744,6 +1745,7 @@ static int kvm_vm_mmap(struct file *file
54675 return 0;
54676 }
54677
54678 +/* cannot be const */
54679 static struct file_operations kvm_vm_fops = {
54680 .release = kvm_vm_release,
54681 .unlocked_ioctl = kvm_vm_ioctl,
54682 @@ -1834,6 +1836,7 @@ out:
54683 return r;
54684 }
54685
54686 +/* cannot be const */
54687 static struct file_operations kvm_chardev_ops = {
54688 .unlocked_ioctl = kvm_dev_ioctl,
54689 .compat_ioctl = kvm_dev_ioctl,
54690 @@ -1843,6 +1846,9 @@ static struct miscdevice kvm_dev = {
54691 KVM_MINOR,
54692 "kvm",
54693 &kvm_chardev_ops,
54694 + {NULL, NULL},
54695 + NULL,
54696 + NULL
54697 };
54698
54699 static void hardware_enable(void *junk)
54700 @@ -2179,7 +2185,7 @@ static void kvm_sched_out(struct preempt
54701 kvm_arch_vcpu_put(vcpu);
54702 }
54703
54704 -int kvm_init(void *opaque, unsigned int vcpu_size,
54705 +int kvm_init(const void *opaque, unsigned int vcpu_size,
54706 struct module *module)
54707 {
54708 int r;