]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.7-201110200052.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.7-201110200052.patch
CommitLineData
eb064459
PK
1diff -urNp linux-3.0.7/Documentation/dontdiff linux-3.0.7/Documentation/dontdiff
2--- linux-3.0.7/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.7/Documentation/dontdiff 2011-10-07 19:07:23.000000000 -0400
4@@ -5,6 +5,7 @@
5 *.cis
6 *.cpio
7 *.csp
8+*.dbg
9 *.dsp
10 *.dvi
11 *.elf
12@@ -48,9 +49,11 @@
13 *.tab.h
14 *.tex
15 *.ver
16+*.vim
17 *.xml
18 *.xz
19 *_MODULES
20+*_reg_safe.h
21 *_vga16.c
22 *~
23 \#*#
24@@ -70,6 +73,7 @@ Kerntypes
25 Module.markers
26 Module.symvers
27 PENDING
28+PERF*
29 SCCS
30 System.map*
31 TAGS
32@@ -98,6 +102,8 @@ bzImage*
33 capability_names.h
34 capflags.c
35 classlist.h*
36+clut_vga16.c
37+common-cmds.h
38 comp*.log
39 compile.h*
40 conf
41@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
42 gconf
43 gconf.glade.h
44 gen-devlist
45+gen-kdb_cmds.c
46 gen_crc32table
47 gen_init_cpio
48 generated
49 genheaders
50 genksyms
51 *_gray256.c
52+hash
53 hpet_example
54 hugepage-mmap
55 hugepage-shm
56@@ -146,7 +154,6 @@ int32.c
57 int4.c
58 int8.c
59 kallsyms
60-kconfig
61 keywords.c
62 ksym.c*
63 ksym.h*
64@@ -154,7 +161,6 @@ kxgettext
65 lkc_defs.h
66 lex.c
67 lex.*.c
68-linux
69 logo_*.c
70 logo_*_clut224.c
71 logo_*_mono.c
72@@ -166,7 +172,6 @@ machtypes.h
73 map
74 map_hugetlb
75 maui_boot.h
76-media
77 mconf
78 miboot*
79 mk_elfconfig
80@@ -174,6 +179,7 @@ mkboot
81 mkbugboot
82 mkcpustr
83 mkdep
84+mkpiggy
85 mkprep
86 mkregtable
87 mktables
88@@ -209,6 +215,7 @@ r300_reg_safe.h
89 r420_reg_safe.h
90 r600_reg_safe.h
91 recordmcount
92+regdb.c
93 relocs
94 rlim_names.h
95 rn50_reg_safe.h
96@@ -219,6 +226,7 @@ setup
97 setup.bin
98 setup.elf
99 sImage
100+slabinfo
101 sm_tbl*
102 split-include
103 syscalltab.h
104@@ -246,7 +254,9 @@ vmlinux
105 vmlinux-*
106 vmlinux.aout
107 vmlinux.bin.all
108+vmlinux.bin.bz2
109 vmlinux.lds
110+vmlinux.relocs
111 vmlinuz
112 voffset.h
113 vsyscall.lds
114@@ -254,6 +264,7 @@ vsyscall_32.lds
115 wanxlfw.inc
116 uImage
117 unifdef
118+utsrelease.h
119 wakeup.bin
120 wakeup.elf
121 wakeup.lds
122diff -urNp linux-3.0.7/Documentation/kernel-parameters.txt linux-3.0.7/Documentation/kernel-parameters.txt
123--- linux-3.0.7/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
124+++ linux-3.0.7/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
125@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
126 the specified number of seconds. This is to be used if
127 your oopses keep scrolling off the screen.
128
129+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
130+ virtualization environments that don't cope well with the
131+ expand down segment used by UDEREF on X86-32 or the frequent
132+ page table updates on X86-64.
133+
134+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
135+
136 pcbit= [HW,ISDN]
137
138 pcd. [PARIDE]
139diff -urNp linux-3.0.7/Makefile linux-3.0.7/Makefile
140--- linux-3.0.7/Makefile 2011-10-17 23:17:08.000000000 -0400
141+++ linux-3.0.7/Makefile 2011-10-17 23:17:19.000000000 -0400
142@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
143
144 HOSTCC = gcc
145 HOSTCXX = g++
146-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
147-HOSTCXXFLAGS = -O2
148+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
149+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
150+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
151
152 # Decide whether to build built-in, modular, or both.
153 # Normally, just do built-in.
154@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
155 KBUILD_CPPFLAGS := -D__KERNEL__
156
157 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
158+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
159 -fno-strict-aliasing -fno-common \
160 -Werror-implicit-function-declaration \
161 -Wno-format-security \
162 -fno-delete-null-pointer-checks
163+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
164 KBUILD_AFLAGS_KERNEL :=
165 KBUILD_CFLAGS_KERNEL :=
166 KBUILD_AFLAGS := -D__ASSEMBLY__
167@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
168 # Rules shared between *config targets and build targets
169
170 # Basic helpers built in scripts/
171-PHONY += scripts_basic
172-scripts_basic:
173+PHONY += scripts_basic gcc-plugins
174+scripts_basic: gcc-plugins
175 $(Q)$(MAKE) $(build)=scripts/basic
176 $(Q)rm -f .tmp_quiet_recordmcount
177
178@@ -564,6 +567,36 @@ else
179 KBUILD_CFLAGS += -O2
180 endif
181
182+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
183+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
184+ifdef CONFIG_PAX_MEMORY_STACKLEAK
185+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
186+endif
187+ifdef CONFIG_KALLOCSTAT_PLUGIN
188+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
189+endif
190+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
191+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
192+endif
193+ifdef CONFIG_CHECKER_PLUGIN
194+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
195+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
196+endif
197+endif
198+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
199+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
200+gcc-plugins:
201+ $(Q)$(MAKE) $(build)=tools/gcc
202+else
203+gcc-plugins:
204+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
205+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
206+else
207+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
208+endif
209+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
210+endif
211+
212 include $(srctree)/arch/$(SRCARCH)/Makefile
213
214 ifneq ($(CONFIG_FRAME_WARN),0)
215@@ -708,7 +741,7 @@ export mod_strip_cmd
216
217
218 ifeq ($(KBUILD_EXTMOD),)
219-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
220+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
221
222 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
223 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
224@@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
225
226 # The actual objects are generated when descending,
227 # make sure no implicit rule kicks in
228+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
229 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
230
231 # Handle descending into subdirectories listed in $(vmlinux-dirs)
232@@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
233 # Error messages still appears in the original language
234
235 PHONY += $(vmlinux-dirs)
236-$(vmlinux-dirs): prepare scripts
237+$(vmlinux-dirs): gcc-plugins prepare scripts
238 $(Q)$(MAKE) $(build)=$@
239
240 # Store (new) KERNELRELASE string in include/config/kernel.release
241@@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
242 $(Q)$(MAKE) $(build)=. missing-syscalls
243
244 # All the preparing..
245+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
246 prepare: prepare0
247
248 # Generate some files
249@@ -1087,6 +1122,7 @@ all: modules
250 # using awk while concatenating to the final file.
251
252 PHONY += modules
253+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
254 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
255 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
256 @$(kecho) ' Building modules, stage 2.';
257@@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
258
259 # Target to prepare building external modules
260 PHONY += modules_prepare
261-modules_prepare: prepare scripts
262+modules_prepare: gcc-plugins prepare scripts
263
264 # Target to install modules
265 PHONY += modules_install
266@@ -1198,7 +1234,7 @@ distclean: mrproper
267 @find $(srctree) $(RCS_FIND_IGNORE) \
268 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
269 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
270- -o -name '.*.rej' -o -size 0 \
271+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
272 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
273 -type f -print | xargs rm -f
274
275@@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
276 $(module-dirs): crmodverdir $(objtree)/Module.symvers
277 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
278
279+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
280 modules: $(module-dirs)
281 @$(kecho) ' Building modules, stage 2.';
282 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
283@@ -1485,17 +1522,19 @@ else
284 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
285 endif
286
287-%.s: %.c prepare scripts FORCE
288+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
289+%.s: %.c gcc-plugins prepare scripts FORCE
290 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
291 %.i: %.c prepare scripts FORCE
292 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
293-%.o: %.c prepare scripts FORCE
294+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
295+%.o: %.c gcc-plugins prepare scripts FORCE
296 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
297 %.lst: %.c prepare scripts FORCE
298 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
299-%.s: %.S prepare scripts FORCE
300+%.s: %.S gcc-plugins prepare scripts FORCE
301 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
302-%.o: %.S prepare scripts FORCE
303+%.o: %.S gcc-plugins prepare scripts FORCE
304 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
305 %.symtypes: %.c prepare scripts FORCE
306 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
307@@ -1505,11 +1544,13 @@ endif
308 $(cmd_crmodverdir)
309 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
310 $(build)=$(build-dir)
311-%/: prepare scripts FORCE
312+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
313+%/: gcc-plugins prepare scripts FORCE
314 $(cmd_crmodverdir)
315 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
316 $(build)=$(build-dir)
317-%.ko: prepare scripts FORCE
318+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
319+%.ko: gcc-plugins prepare scripts FORCE
320 $(cmd_crmodverdir)
321 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
322 $(build)=$(build-dir) $(@:.ko=.o)
323diff -urNp linux-3.0.7/arch/alpha/include/asm/elf.h linux-3.0.7/arch/alpha/include/asm/elf.h
324--- linux-3.0.7/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
325+++ linux-3.0.7/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
326@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
327
328 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
329
330+#ifdef CONFIG_PAX_ASLR
331+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
332+
333+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
334+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
335+#endif
336+
337 /* $0 is set by ld.so to a pointer to a function which might be
338 registered using atexit. This provides a mean for the dynamic
339 linker to call DT_FINI functions for shared libraries that have
340diff -urNp linux-3.0.7/arch/alpha/include/asm/pgtable.h linux-3.0.7/arch/alpha/include/asm/pgtable.h
341--- linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
342+++ linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
343@@ -101,6 +101,17 @@ struct vm_area_struct;
344 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
345 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
346 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
347+
348+#ifdef CONFIG_PAX_PAGEEXEC
349+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
350+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
351+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
352+#else
353+# define PAGE_SHARED_NOEXEC PAGE_SHARED
354+# define PAGE_COPY_NOEXEC PAGE_COPY
355+# define PAGE_READONLY_NOEXEC PAGE_READONLY
356+#endif
357+
358 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
359
360 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
361diff -urNp linux-3.0.7/arch/alpha/kernel/module.c linux-3.0.7/arch/alpha/kernel/module.c
362--- linux-3.0.7/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
363+++ linux-3.0.7/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
364@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
365
366 /* The small sections were sorted to the end of the segment.
367 The following should definitely cover them. */
368- gp = (u64)me->module_core + me->core_size - 0x8000;
369+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
370 got = sechdrs[me->arch.gotsecindex].sh_addr;
371
372 for (i = 0; i < n; i++) {
373diff -urNp linux-3.0.7/arch/alpha/kernel/osf_sys.c linux-3.0.7/arch/alpha/kernel/osf_sys.c
374--- linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
375+++ linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
376@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
377 /* At this point: (!vma || addr < vma->vm_end). */
378 if (limit - len < addr)
379 return -ENOMEM;
380- if (!vma || addr + len <= vma->vm_start)
381+ if (check_heap_stack_gap(vma, addr, len))
382 return addr;
383 addr = vma->vm_end;
384 vma = vma->vm_next;
385@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
386 merely specific addresses, but regions of memory -- perhaps
387 this feature should be incorporated into all ports? */
388
389+#ifdef CONFIG_PAX_RANDMMAP
390+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
391+#endif
392+
393 if (addr) {
394 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
395 if (addr != (unsigned long) -ENOMEM)
396@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
397 }
398
399 /* Next, try allocating at TASK_UNMAPPED_BASE. */
400- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
401- len, limit);
402+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
403+
404 if (addr != (unsigned long) -ENOMEM)
405 return addr;
406
407diff -urNp linux-3.0.7/arch/alpha/mm/fault.c linux-3.0.7/arch/alpha/mm/fault.c
408--- linux-3.0.7/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
409+++ linux-3.0.7/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
410@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
411 __reload_thread(pcb);
412 }
413
414+#ifdef CONFIG_PAX_PAGEEXEC
415+/*
416+ * PaX: decide what to do with offenders (regs->pc = fault address)
417+ *
418+ * returns 1 when task should be killed
419+ * 2 when patched PLT trampoline was detected
420+ * 3 when unpatched PLT trampoline was detected
421+ */
422+static int pax_handle_fetch_fault(struct pt_regs *regs)
423+{
424+
425+#ifdef CONFIG_PAX_EMUPLT
426+ int err;
427+
428+ do { /* PaX: patched PLT emulation #1 */
429+ unsigned int ldah, ldq, jmp;
430+
431+ err = get_user(ldah, (unsigned int *)regs->pc);
432+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
433+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
434+
435+ if (err)
436+ break;
437+
438+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
439+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
440+ jmp == 0x6BFB0000U)
441+ {
442+ unsigned long r27, addr;
443+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
444+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
445+
446+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
447+ err = get_user(r27, (unsigned long *)addr);
448+ if (err)
449+ break;
450+
451+ regs->r27 = r27;
452+ regs->pc = r27;
453+ return 2;
454+ }
455+ } while (0);
456+
457+ do { /* PaX: patched PLT emulation #2 */
458+ unsigned int ldah, lda, br;
459+
460+ err = get_user(ldah, (unsigned int *)regs->pc);
461+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
462+ err |= get_user(br, (unsigned int *)(regs->pc+8));
463+
464+ if (err)
465+ break;
466+
467+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
468+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
469+ (br & 0xFFE00000U) == 0xC3E00000U)
470+ {
471+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
472+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
473+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
474+
475+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
476+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
477+ return 2;
478+ }
479+ } while (0);
480+
481+ do { /* PaX: unpatched PLT emulation */
482+ unsigned int br;
483+
484+ err = get_user(br, (unsigned int *)regs->pc);
485+
486+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
487+ unsigned int br2, ldq, nop, jmp;
488+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
489+
490+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
491+ err = get_user(br2, (unsigned int *)addr);
492+ err |= get_user(ldq, (unsigned int *)(addr+4));
493+ err |= get_user(nop, (unsigned int *)(addr+8));
494+ err |= get_user(jmp, (unsigned int *)(addr+12));
495+ err |= get_user(resolver, (unsigned long *)(addr+16));
496+
497+ if (err)
498+ break;
499+
500+ if (br2 == 0xC3600000U &&
501+ ldq == 0xA77B000CU &&
502+ nop == 0x47FF041FU &&
503+ jmp == 0x6B7B0000U)
504+ {
505+ regs->r28 = regs->pc+4;
506+ regs->r27 = addr+16;
507+ regs->pc = resolver;
508+ return 3;
509+ }
510+ }
511+ } while (0);
512+#endif
513+
514+ return 1;
515+}
516+
517+void pax_report_insns(void *pc, void *sp)
518+{
519+ unsigned long i;
520+
521+ printk(KERN_ERR "PAX: bytes at PC: ");
522+ for (i = 0; i < 5; i++) {
523+ unsigned int c;
524+ if (get_user(c, (unsigned int *)pc+i))
525+ printk(KERN_CONT "???????? ");
526+ else
527+ printk(KERN_CONT "%08x ", c);
528+ }
529+ printk("\n");
530+}
531+#endif
532
533 /*
534 * This routine handles page faults. It determines the address,
535@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
536 good_area:
537 si_code = SEGV_ACCERR;
538 if (cause < 0) {
539- if (!(vma->vm_flags & VM_EXEC))
540+ if (!(vma->vm_flags & VM_EXEC)) {
541+
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
544+ goto bad_area;
545+
546+ up_read(&mm->mmap_sem);
547+ switch (pax_handle_fetch_fault(regs)) {
548+
549+#ifdef CONFIG_PAX_EMUPLT
550+ case 2:
551+ case 3:
552+ return;
553+#endif
554+
555+ }
556+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
557+ do_group_exit(SIGKILL);
558+#else
559 goto bad_area;
560+#endif
561+
562+ }
563 } else if (!cause) {
564 /* Allow reads even for write-only mappings */
565 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
566diff -urNp linux-3.0.7/arch/arm/include/asm/elf.h linux-3.0.7/arch/arm/include/asm/elf.h
567--- linux-3.0.7/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
568+++ linux-3.0.7/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
569@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
570 the loader. We need to make sure that it is out of the way of the program
571 that it will "exec", and that there is sufficient room for the brk. */
572
573-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
574+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
575+
576+#ifdef CONFIG_PAX_ASLR
577+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
578+
579+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
580+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
581+#endif
582
583 /* When the program starts, a1 contains a pointer to a function to be
584 registered with atexit, as per the SVR4 ABI. A value of 0 means we
585@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
586 extern void elf_set_personality(const struct elf32_hdr *);
587 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
588
589-struct mm_struct;
590-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
591-#define arch_randomize_brk arch_randomize_brk
592-
593 extern int vectors_user_mapping(void);
594 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
595 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
596diff -urNp linux-3.0.7/arch/arm/include/asm/kmap_types.h linux-3.0.7/arch/arm/include/asm/kmap_types.h
597--- linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
598+++ linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
599@@ -21,6 +21,7 @@ enum km_type {
600 KM_L1_CACHE,
601 KM_L2_CACHE,
602 KM_KDB,
603+ KM_CLEARPAGE,
604 KM_TYPE_NR
605 };
606
607diff -urNp linux-3.0.7/arch/arm/include/asm/uaccess.h linux-3.0.7/arch/arm/include/asm/uaccess.h
608--- linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
609+++ linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
610@@ -22,6 +22,8 @@
611 #define VERIFY_READ 0
612 #define VERIFY_WRITE 1
613
614+extern void check_object_size(const void *ptr, unsigned long n, bool to);
615+
616 /*
617 * The exception table consists of pairs of addresses: the first is the
618 * address of an instruction that is allowed to fault, and the second is
619@@ -387,8 +389,23 @@ do { \
620
621
622 #ifdef CONFIG_MMU
623-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
624-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
625+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
626+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
627+
628+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
629+{
630+ if (!__builtin_constant_p(n))
631+ check_object_size(to, n, false);
632+ return ___copy_from_user(to, from, n);
633+}
634+
635+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
636+{
637+ if (!__builtin_constant_p(n))
638+ check_object_size(from, n, true);
639+ return ___copy_to_user(to, from, n);
640+}
641+
642 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
643 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
644 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
645@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
646
647 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
648 {
649+ if ((long)n < 0)
650+ return n;
651+
652 if (access_ok(VERIFY_READ, from, n))
653 n = __copy_from_user(to, from, n);
654 else /* security hole - plug it */
655@@ -412,6 +432,9 @@ static inline unsigned long __must_check
656
657 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
658 {
659+ if ((long)n < 0)
660+ return n;
661+
662 if (access_ok(VERIFY_WRITE, to, n))
663 n = __copy_to_user(to, from, n);
664 return n;
665diff -urNp linux-3.0.7/arch/arm/kernel/armksyms.c linux-3.0.7/arch/arm/kernel/armksyms.c
666--- linux-3.0.7/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
667+++ linux-3.0.7/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
668@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
669 #ifdef CONFIG_MMU
670 EXPORT_SYMBOL(copy_page);
671
672-EXPORT_SYMBOL(__copy_from_user);
673-EXPORT_SYMBOL(__copy_to_user);
674+EXPORT_SYMBOL(___copy_from_user);
675+EXPORT_SYMBOL(___copy_to_user);
676 EXPORT_SYMBOL(__clear_user);
677
678 EXPORT_SYMBOL(__get_user_1);
679diff -urNp linux-3.0.7/arch/arm/kernel/process.c linux-3.0.7/arch/arm/kernel/process.c
680--- linux-3.0.7/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
681+++ linux-3.0.7/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
682@@ -28,7 +28,6 @@
683 #include <linux/tick.h>
684 #include <linux/utsname.h>
685 #include <linux/uaccess.h>
686-#include <linux/random.h>
687 #include <linux/hw_breakpoint.h>
688
689 #include <asm/cacheflush.h>
690@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
691 return 0;
692 }
693
694-unsigned long arch_randomize_brk(struct mm_struct *mm)
695-{
696- unsigned long range_end = mm->brk + 0x02000000;
697- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
698-}
699-
700 #ifdef CONFIG_MMU
701 /*
702 * The vectors page is always readable from user space for the
703diff -urNp linux-3.0.7/arch/arm/kernel/traps.c linux-3.0.7/arch/arm/kernel/traps.c
704--- linux-3.0.7/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
705+++ linux-3.0.7/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
706@@ -257,6 +257,8 @@ static int __die(const char *str, int er
707
708 static DEFINE_SPINLOCK(die_lock);
709
710+extern void gr_handle_kernel_exploit(void);
711+
712 /*
713 * This function is protected against re-entrancy.
714 */
715@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
716 panic("Fatal exception in interrupt");
717 if (panic_on_oops)
718 panic("Fatal exception");
719+
720+ gr_handle_kernel_exploit();
721+
722 if (ret != NOTIFY_STOP)
723 do_exit(SIGSEGV);
724 }
725diff -urNp linux-3.0.7/arch/arm/lib/copy_from_user.S linux-3.0.7/arch/arm/lib/copy_from_user.S
726--- linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
728@@ -16,7 +16,7 @@
729 /*
730 * Prototype:
731 *
732- * size_t __copy_from_user(void *to, const void *from, size_t n)
733+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
734 *
735 * Purpose:
736 *
737@@ -84,11 +84,11 @@
738
739 .text
740
741-ENTRY(__copy_from_user)
742+ENTRY(___copy_from_user)
743
744 #include "copy_template.S"
745
746-ENDPROC(__copy_from_user)
747+ENDPROC(___copy_from_user)
748
749 .pushsection .fixup,"ax"
750 .align 0
751diff -urNp linux-3.0.7/arch/arm/lib/copy_to_user.S linux-3.0.7/arch/arm/lib/copy_to_user.S
752--- linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
753+++ linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
754@@ -16,7 +16,7 @@
755 /*
756 * Prototype:
757 *
758- * size_t __copy_to_user(void *to, const void *from, size_t n)
759+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
760 *
761 * Purpose:
762 *
763@@ -88,11 +88,11 @@
764 .text
765
766 ENTRY(__copy_to_user_std)
767-WEAK(__copy_to_user)
768+WEAK(___copy_to_user)
769
770 #include "copy_template.S"
771
772-ENDPROC(__copy_to_user)
773+ENDPROC(___copy_to_user)
774 ENDPROC(__copy_to_user_std)
775
776 .pushsection .fixup,"ax"
777diff -urNp linux-3.0.7/arch/arm/lib/uaccess.S linux-3.0.7/arch/arm/lib/uaccess.S
778--- linux-3.0.7/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
779+++ linux-3.0.7/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
780@@ -20,7 +20,7 @@
781
782 #define PAGE_SHIFT 12
783
784-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
785+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
786 * Purpose : copy a block to user memory from kernel memory
787 * Params : to - user memory
788 * : from - kernel memory
789@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
790 sub r2, r2, ip
791 b .Lc2u_dest_aligned
792
793-ENTRY(__copy_to_user)
794+ENTRY(___copy_to_user)
795 stmfd sp!, {r2, r4 - r7, lr}
796 cmp r2, #4
797 blt .Lc2u_not_enough
798@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
799 ldrgtb r3, [r1], #0
800 USER( T(strgtb) r3, [r0], #1) @ May fault
801 b .Lc2u_finished
802-ENDPROC(__copy_to_user)
803+ENDPROC(___copy_to_user)
804
805 .pushsection .fixup,"ax"
806 .align 0
807 9001: ldmfd sp!, {r0, r4 - r7, pc}
808 .popsection
809
810-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
811+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
812 * Purpose : copy a block from user memory to kernel memory
813 * Params : to - kernel memory
814 * : from - user memory
815@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
816 sub r2, r2, ip
817 b .Lcfu_dest_aligned
818
819-ENTRY(__copy_from_user)
820+ENTRY(___copy_from_user)
821 stmfd sp!, {r0, r2, r4 - r7, lr}
822 cmp r2, #4
823 blt .Lcfu_not_enough
824@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
825 USER( T(ldrgtb) r3, [r1], #1) @ May fault
826 strgtb r3, [r0], #1
827 b .Lcfu_finished
828-ENDPROC(__copy_from_user)
829+ENDPROC(___copy_from_user)
830
831 .pushsection .fixup,"ax"
832 .align 0
833diff -urNp linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c
834--- linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
835+++ linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
836@@ -103,7 +103,7 @@ out:
837 }
838
839 unsigned long
840-__copy_to_user(void __user *to, const void *from, unsigned long n)
841+___copy_to_user(void __user *to, const void *from, unsigned long n)
842 {
843 /*
844 * This test is stubbed out of the main function above to keep
845diff -urNp linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c
846--- linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
847+++ linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
848@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
849 return sprintf(buf, "0x%X\n", mbox_value);
850 }
851
852-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
853+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
854
855 static int mbox_show(struct seq_file *s, void *data)
856 {
857diff -urNp linux-3.0.7/arch/arm/mm/fault.c linux-3.0.7/arch/arm/mm/fault.c
858--- linux-3.0.7/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
859+++ linux-3.0.7/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
860@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
861 }
862 #endif
863
864+#ifdef CONFIG_PAX_PAGEEXEC
865+ if (fsr & FSR_LNX_PF) {
866+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
867+ do_group_exit(SIGKILL);
868+ }
869+#endif
870+
871 tsk->thread.address = addr;
872 tsk->thread.error_code = fsr;
873 tsk->thread.trap_no = 14;
874@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
875 }
876 #endif /* CONFIG_MMU */
877
878+#ifdef CONFIG_PAX_PAGEEXEC
879+void pax_report_insns(void *pc, void *sp)
880+{
881+ long i;
882+
883+ printk(KERN_ERR "PAX: bytes at PC: ");
884+ for (i = 0; i < 20; i++) {
885+ unsigned char c;
886+ if (get_user(c, (__force unsigned char __user *)pc+i))
887+ printk(KERN_CONT "?? ");
888+ else
889+ printk(KERN_CONT "%02x ", c);
890+ }
891+ printk("\n");
892+
893+ printk(KERN_ERR "PAX: bytes at SP-4: ");
894+ for (i = -1; i < 20; i++) {
895+ unsigned long c;
896+ if (get_user(c, (__force unsigned long __user *)sp+i))
897+ printk(KERN_CONT "???????? ");
898+ else
899+ printk(KERN_CONT "%08lx ", c);
900+ }
901+ printk("\n");
902+}
903+#endif
904+
905 /*
906 * First Level Translation Fault Handler
907 *
908diff -urNp linux-3.0.7/arch/arm/mm/mmap.c linux-3.0.7/arch/arm/mm/mmap.c
909--- linux-3.0.7/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
910+++ linux-3.0.7/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
911@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
912 if (len > TASK_SIZE)
913 return -ENOMEM;
914
915+#ifdef CONFIG_PAX_RANDMMAP
916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
917+#endif
918+
919 if (addr) {
920 if (do_align)
921 addr = COLOUR_ALIGN(addr, pgoff);
922@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
923 addr = PAGE_ALIGN(addr);
924
925 vma = find_vma(mm, addr);
926- if (TASK_SIZE - len >= addr &&
927- (!vma || addr + len <= vma->vm_start))
928+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
929 return addr;
930 }
931 if (len > mm->cached_hole_size) {
932- start_addr = addr = mm->free_area_cache;
933+ start_addr = addr = mm->free_area_cache;
934 } else {
935- start_addr = addr = TASK_UNMAPPED_BASE;
936- mm->cached_hole_size = 0;
937+ start_addr = addr = mm->mmap_base;
938+ mm->cached_hole_size = 0;
939 }
940 /* 8 bits of randomness in 20 address space bits */
941 if ((current->flags & PF_RANDOMIZE) &&
942@@ -100,14 +103,14 @@ full_search:
943 * Start a new search - just in case we missed
944 * some holes.
945 */
946- if (start_addr != TASK_UNMAPPED_BASE) {
947- start_addr = addr = TASK_UNMAPPED_BASE;
948+ if (start_addr != mm->mmap_base) {
949+ start_addr = addr = mm->mmap_base;
950 mm->cached_hole_size = 0;
951 goto full_search;
952 }
953 return -ENOMEM;
954 }
955- if (!vma || addr + len <= vma->vm_start) {
956+ if (check_heap_stack_gap(vma, addr, len)) {
957 /*
958 * Remember the place where we stopped the search:
959 */
960diff -urNp linux-3.0.7/arch/avr32/include/asm/elf.h linux-3.0.7/arch/avr32/include/asm/elf.h
961--- linux-3.0.7/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
962+++ linux-3.0.7/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
963@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
964 the loader. We need to make sure that it is out of the way of the program
965 that it will "exec", and that there is sufficient room for the brk. */
966
967-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
968+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
969
970+#ifdef CONFIG_PAX_ASLR
971+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
972+
973+#define PAX_DELTA_MMAP_LEN 15
974+#define PAX_DELTA_STACK_LEN 15
975+#endif
976
977 /* This yields a mask that user programs can use to figure out what
978 instruction set this CPU supports. This could be done in user space,
979diff -urNp linux-3.0.7/arch/avr32/include/asm/kmap_types.h linux-3.0.7/arch/avr32/include/asm/kmap_types.h
980--- linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
981+++ linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
982@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
983 D(11) KM_IRQ1,
984 D(12) KM_SOFTIRQ0,
985 D(13) KM_SOFTIRQ1,
986-D(14) KM_TYPE_NR
987+D(14) KM_CLEARPAGE,
988+D(15) KM_TYPE_NR
989 };
990
991 #undef D
992diff -urNp linux-3.0.7/arch/avr32/mm/fault.c linux-3.0.7/arch/avr32/mm/fault.c
993--- linux-3.0.7/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
994+++ linux-3.0.7/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
995@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
996
997 int exception_trace = 1;
998
999+#ifdef CONFIG_PAX_PAGEEXEC
1000+void pax_report_insns(void *pc, void *sp)
1001+{
1002+ unsigned long i;
1003+
1004+ printk(KERN_ERR "PAX: bytes at PC: ");
1005+ for (i = 0; i < 20; i++) {
1006+ unsigned char c;
1007+ if (get_user(c, (unsigned char *)pc+i))
1008+ printk(KERN_CONT "???????? ");
1009+ else
1010+ printk(KERN_CONT "%02x ", c);
1011+ }
1012+ printk("\n");
1013+}
1014+#endif
1015+
1016 /*
1017 * This routine handles page faults. It determines the address and the
1018 * problem, and then passes it off to one of the appropriate routines.
1019@@ -156,6 +173,16 @@ bad_area:
1020 up_read(&mm->mmap_sem);
1021
1022 if (user_mode(regs)) {
1023+
1024+#ifdef CONFIG_PAX_PAGEEXEC
1025+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1026+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1027+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1028+ do_group_exit(SIGKILL);
1029+ }
1030+ }
1031+#endif
1032+
1033 if (exception_trace && printk_ratelimit())
1034 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1035 "sp %08lx ecr %lu\n",
1036diff -urNp linux-3.0.7/arch/frv/include/asm/kmap_types.h linux-3.0.7/arch/frv/include/asm/kmap_types.h
1037--- linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1038+++ linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1039@@ -23,6 +23,7 @@ enum km_type {
1040 KM_IRQ1,
1041 KM_SOFTIRQ0,
1042 KM_SOFTIRQ1,
1043+ KM_CLEARPAGE,
1044 KM_TYPE_NR
1045 };
1046
1047diff -urNp linux-3.0.7/arch/frv/mm/elf-fdpic.c linux-3.0.7/arch/frv/mm/elf-fdpic.c
1048--- linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
1049+++ linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
1050@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
1051 if (addr) {
1052 addr = PAGE_ALIGN(addr);
1053 vma = find_vma(current->mm, addr);
1054- if (TASK_SIZE - len >= addr &&
1055- (!vma || addr + len <= vma->vm_start))
1056+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1057 goto success;
1058 }
1059
1060@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
1061 for (; vma; vma = vma->vm_next) {
1062 if (addr > limit)
1063 break;
1064- if (addr + len <= vma->vm_start)
1065+ if (check_heap_stack_gap(vma, addr, len))
1066 goto success;
1067 addr = vma->vm_end;
1068 }
1069@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
1070 for (; vma; vma = vma->vm_next) {
1071 if (addr > limit)
1072 break;
1073- if (addr + len <= vma->vm_start)
1074+ if (check_heap_stack_gap(vma, addr, len))
1075 goto success;
1076 addr = vma->vm_end;
1077 }
1078diff -urNp linux-3.0.7/arch/ia64/include/asm/elf.h linux-3.0.7/arch/ia64/include/asm/elf.h
1079--- linux-3.0.7/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1080+++ linux-3.0.7/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1081@@ -42,6 +42,13 @@
1082 */
1083 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1084
1085+#ifdef CONFIG_PAX_ASLR
1086+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1087+
1088+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1089+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1090+#endif
1091+
1092 #define PT_IA_64_UNWIND 0x70000001
1093
1094 /* IA-64 relocations: */
1095diff -urNp linux-3.0.7/arch/ia64/include/asm/pgtable.h linux-3.0.7/arch/ia64/include/asm/pgtable.h
1096--- linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1097+++ linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1098@@ -12,7 +12,7 @@
1099 * David Mosberger-Tang <davidm@hpl.hp.com>
1100 */
1101
1102-
1103+#include <linux/const.h>
1104 #include <asm/mman.h>
1105 #include <asm/page.h>
1106 #include <asm/processor.h>
1107@@ -143,6 +143,17 @@
1108 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1109 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1110 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1111+
1112+#ifdef CONFIG_PAX_PAGEEXEC
1113+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1114+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1115+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1116+#else
1117+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1118+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1119+# define PAGE_COPY_NOEXEC PAGE_COPY
1120+#endif
1121+
1122 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1123 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1124 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1125diff -urNp linux-3.0.7/arch/ia64/include/asm/spinlock.h linux-3.0.7/arch/ia64/include/asm/spinlock.h
1126--- linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
1127+++ linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
1128@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1129 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1130
1131 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1132- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1133+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1134 }
1135
1136 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1137diff -urNp linux-3.0.7/arch/ia64/include/asm/uaccess.h linux-3.0.7/arch/ia64/include/asm/uaccess.h
1138--- linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1139+++ linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1140@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1141 const void *__cu_from = (from); \
1142 long __cu_len = (n); \
1143 \
1144- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1145+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1146 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1147 __cu_len; \
1148 })
1149@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1150 long __cu_len = (n); \
1151 \
1152 __chk_user_ptr(__cu_from); \
1153- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1154+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1155 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1156 __cu_len; \
1157 })
1158diff -urNp linux-3.0.7/arch/ia64/kernel/module.c linux-3.0.7/arch/ia64/kernel/module.c
1159--- linux-3.0.7/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1160+++ linux-3.0.7/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1161@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1162 void
1163 module_free (struct module *mod, void *module_region)
1164 {
1165- if (mod && mod->arch.init_unw_table &&
1166- module_region == mod->module_init) {
1167+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1168 unw_remove_unwind_table(mod->arch.init_unw_table);
1169 mod->arch.init_unw_table = NULL;
1170 }
1171@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1172 }
1173
1174 static inline int
1175+in_init_rx (const struct module *mod, uint64_t addr)
1176+{
1177+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1178+}
1179+
1180+static inline int
1181+in_init_rw (const struct module *mod, uint64_t addr)
1182+{
1183+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1184+}
1185+
1186+static inline int
1187 in_init (const struct module *mod, uint64_t addr)
1188 {
1189- return addr - (uint64_t) mod->module_init < mod->init_size;
1190+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1191+}
1192+
1193+static inline int
1194+in_core_rx (const struct module *mod, uint64_t addr)
1195+{
1196+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1197+}
1198+
1199+static inline int
1200+in_core_rw (const struct module *mod, uint64_t addr)
1201+{
1202+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1203 }
1204
1205 static inline int
1206 in_core (const struct module *mod, uint64_t addr)
1207 {
1208- return addr - (uint64_t) mod->module_core < mod->core_size;
1209+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1210 }
1211
1212 static inline int
1213@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1214 break;
1215
1216 case RV_BDREL:
1217- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1218+ if (in_init_rx(mod, val))
1219+ val -= (uint64_t) mod->module_init_rx;
1220+ else if (in_init_rw(mod, val))
1221+ val -= (uint64_t) mod->module_init_rw;
1222+ else if (in_core_rx(mod, val))
1223+ val -= (uint64_t) mod->module_core_rx;
1224+ else if (in_core_rw(mod, val))
1225+ val -= (uint64_t) mod->module_core_rw;
1226 break;
1227
1228 case RV_LTV:
1229@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1230 * addresses have been selected...
1231 */
1232 uint64_t gp;
1233- if (mod->core_size > MAX_LTOFF)
1234+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1235 /*
1236 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1237 * at the end of the module.
1238 */
1239- gp = mod->core_size - MAX_LTOFF / 2;
1240+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1241 else
1242- gp = mod->core_size / 2;
1243- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1244+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1245+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1246 mod->arch.gp = gp;
1247 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1248 }
1249diff -urNp linux-3.0.7/arch/ia64/kernel/sys_ia64.c linux-3.0.7/arch/ia64/kernel/sys_ia64.c
1250--- linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
1251+++ linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
1252@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1253 if (REGION_NUMBER(addr) == RGN_HPAGE)
1254 addr = 0;
1255 #endif
1256+
1257+#ifdef CONFIG_PAX_RANDMMAP
1258+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1259+ addr = mm->free_area_cache;
1260+ else
1261+#endif
1262+
1263 if (!addr)
1264 addr = mm->free_area_cache;
1265
1266@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1267 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1268 /* At this point: (!vma || addr < vma->vm_end). */
1269 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1270- if (start_addr != TASK_UNMAPPED_BASE) {
1271+ if (start_addr != mm->mmap_base) {
1272 /* Start a new search --- just in case we missed some holes. */
1273- addr = TASK_UNMAPPED_BASE;
1274+ addr = mm->mmap_base;
1275 goto full_search;
1276 }
1277 return -ENOMEM;
1278 }
1279- if (!vma || addr + len <= vma->vm_start) {
1280+ if (check_heap_stack_gap(vma, addr, len)) {
1281 /* Remember the address where we stopped this search: */
1282 mm->free_area_cache = addr + len;
1283 return addr;
1284diff -urNp linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S
1285--- linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
1286+++ linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
1287@@ -199,7 +199,7 @@ SECTIONS {
1288 /* Per-cpu data: */
1289 . = ALIGN(PERCPU_PAGE_SIZE);
1290 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1291- __phys_per_cpu_start = __per_cpu_load;
1292+ __phys_per_cpu_start = per_cpu_load;
1293 /*
1294 * ensure percpu data fits
1295 * into percpu page size
1296diff -urNp linux-3.0.7/arch/ia64/mm/fault.c linux-3.0.7/arch/ia64/mm/fault.c
1297--- linux-3.0.7/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1298+++ linux-3.0.7/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1299@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
1300 return pte_present(pte);
1301 }
1302
1303+#ifdef CONFIG_PAX_PAGEEXEC
1304+void pax_report_insns(void *pc, void *sp)
1305+{
1306+ unsigned long i;
1307+
1308+ printk(KERN_ERR "PAX: bytes at PC: ");
1309+ for (i = 0; i < 8; i++) {
1310+ unsigned int c;
1311+ if (get_user(c, (unsigned int *)pc+i))
1312+ printk(KERN_CONT "???????? ");
1313+ else
1314+ printk(KERN_CONT "%08x ", c);
1315+ }
1316+ printk("\n");
1317+}
1318+#endif
1319+
1320 void __kprobes
1321 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1322 {
1323@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1324 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1325 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1326
1327- if ((vma->vm_flags & mask) != mask)
1328+ if ((vma->vm_flags & mask) != mask) {
1329+
1330+#ifdef CONFIG_PAX_PAGEEXEC
1331+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1332+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1333+ goto bad_area;
1334+
1335+ up_read(&mm->mmap_sem);
1336+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1337+ do_group_exit(SIGKILL);
1338+ }
1339+#endif
1340+
1341 goto bad_area;
1342
1343+ }
1344+
1345 /*
1346 * If for any reason at all we couldn't handle the fault, make
1347 * sure we exit gracefully rather than endlessly redo the
1348diff -urNp linux-3.0.7/arch/ia64/mm/hugetlbpage.c linux-3.0.7/arch/ia64/mm/hugetlbpage.c
1349--- linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1350+++ linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1351@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1352 /* At this point: (!vmm || addr < vmm->vm_end). */
1353 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1354 return -ENOMEM;
1355- if (!vmm || (addr + len) <= vmm->vm_start)
1356+ if (check_heap_stack_gap(vmm, addr, len))
1357 return addr;
1358 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1359 }
1360diff -urNp linux-3.0.7/arch/ia64/mm/init.c linux-3.0.7/arch/ia64/mm/init.c
1361--- linux-3.0.7/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1362+++ linux-3.0.7/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1363@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1364 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1365 vma->vm_end = vma->vm_start + PAGE_SIZE;
1366 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1367+
1368+#ifdef CONFIG_PAX_PAGEEXEC
1369+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1370+ vma->vm_flags &= ~VM_EXEC;
1371+
1372+#ifdef CONFIG_PAX_MPROTECT
1373+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1374+ vma->vm_flags &= ~VM_MAYEXEC;
1375+#endif
1376+
1377+ }
1378+#endif
1379+
1380 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1381 down_write(&current->mm->mmap_sem);
1382 if (insert_vm_struct(current->mm, vma)) {
1383diff -urNp linux-3.0.7/arch/m32r/lib/usercopy.c linux-3.0.7/arch/m32r/lib/usercopy.c
1384--- linux-3.0.7/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1385+++ linux-3.0.7/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1386@@ -14,6 +14,9 @@
1387 unsigned long
1388 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1389 {
1390+ if ((long)n < 0)
1391+ return n;
1392+
1393 prefetch(from);
1394 if (access_ok(VERIFY_WRITE, to, n))
1395 __copy_user(to,from,n);
1396@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1397 unsigned long
1398 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1399 {
1400+ if ((long)n < 0)
1401+ return n;
1402+
1403 prefetchw(to);
1404 if (access_ok(VERIFY_READ, from, n))
1405 __copy_user_zeroing(to,from,n);
1406diff -urNp linux-3.0.7/arch/mips/include/asm/elf.h linux-3.0.7/arch/mips/include/asm/elf.h
1407--- linux-3.0.7/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1408+++ linux-3.0.7/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1409@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1410 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1411 #endif
1412
1413+#ifdef CONFIG_PAX_ASLR
1414+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1415+
1416+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1417+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1418+#endif
1419+
1420 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1421 struct linux_binprm;
1422 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1423 int uses_interp);
1424
1425-struct mm_struct;
1426-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1427-#define arch_randomize_brk arch_randomize_brk
1428-
1429 #endif /* _ASM_ELF_H */
1430diff -urNp linux-3.0.7/arch/mips/include/asm/page.h linux-3.0.7/arch/mips/include/asm/page.h
1431--- linux-3.0.7/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1432+++ linux-3.0.7/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1433@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1434 #ifdef CONFIG_CPU_MIPS32
1435 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1436 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1437- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1438+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1439 #else
1440 typedef struct { unsigned long long pte; } pte_t;
1441 #define pte_val(x) ((x).pte)
1442diff -urNp linux-3.0.7/arch/mips/include/asm/system.h linux-3.0.7/arch/mips/include/asm/system.h
1443--- linux-3.0.7/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1444+++ linux-3.0.7/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1445@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1446 */
1447 #define __ARCH_WANT_UNLOCKED_CTXSW
1448
1449-extern unsigned long arch_align_stack(unsigned long sp);
1450+#define arch_align_stack(x) ((x) & ~0xfUL)
1451
1452 #endif /* _ASM_SYSTEM_H */
1453diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c
1454--- linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1455+++ linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1456@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1457 #undef ELF_ET_DYN_BASE
1458 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1459
1460+#ifdef CONFIG_PAX_ASLR
1461+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1462+
1463+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1464+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1465+#endif
1466+
1467 #include <asm/processor.h>
1468 #include <linux/module.h>
1469 #include <linux/elfcore.h>
1470diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c
1471--- linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1472+++ linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1473@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1474 #undef ELF_ET_DYN_BASE
1475 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1476
1477+#ifdef CONFIG_PAX_ASLR
1478+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1479+
1480+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1481+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1482+#endif
1483+
1484 #include <asm/processor.h>
1485
1486 /*
1487diff -urNp linux-3.0.7/arch/mips/kernel/process.c linux-3.0.7/arch/mips/kernel/process.c
1488--- linux-3.0.7/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1489+++ linux-3.0.7/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1490@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1491 out:
1492 return pc;
1493 }
1494-
1495-/*
1496- * Don't forget that the stack pointer must be aligned on a 8 bytes
1497- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1498- */
1499-unsigned long arch_align_stack(unsigned long sp)
1500-{
1501- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1502- sp -= get_random_int() & ~PAGE_MASK;
1503-
1504- return sp & ALMASK;
1505-}
1506diff -urNp linux-3.0.7/arch/mips/mm/fault.c linux-3.0.7/arch/mips/mm/fault.c
1507--- linux-3.0.7/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1508+++ linux-3.0.7/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1509@@ -28,6 +28,23 @@
1510 #include <asm/highmem.h> /* For VMALLOC_END */
1511 #include <linux/kdebug.h>
1512
1513+#ifdef CONFIG_PAX_PAGEEXEC
1514+void pax_report_insns(void *pc, void *sp)
1515+{
1516+ unsigned long i;
1517+
1518+ printk(KERN_ERR "PAX: bytes at PC: ");
1519+ for (i = 0; i < 5; i++) {
1520+ unsigned int c;
1521+ if (get_user(c, (unsigned int *)pc+i))
1522+ printk(KERN_CONT "???????? ");
1523+ else
1524+ printk(KERN_CONT "%08x ", c);
1525+ }
1526+ printk("\n");
1527+}
1528+#endif
1529+
1530 /*
1531 * This routine handles page faults. It determines the address,
1532 * and the problem, and then passes it off to one of the appropriate
1533diff -urNp linux-3.0.7/arch/mips/mm/mmap.c linux-3.0.7/arch/mips/mm/mmap.c
1534--- linux-3.0.7/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1535+++ linux-3.0.7/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1536@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1537 do_color_align = 0;
1538 if (filp || (flags & MAP_SHARED))
1539 do_color_align = 1;
1540+
1541+#ifdef CONFIG_PAX_RANDMMAP
1542+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1543+#endif
1544+
1545 if (addr) {
1546 if (do_color_align)
1547 addr = COLOUR_ALIGN(addr, pgoff);
1548 else
1549 addr = PAGE_ALIGN(addr);
1550 vmm = find_vma(current->mm, addr);
1551- if (TASK_SIZE - len >= addr &&
1552- (!vmm || addr + len <= vmm->vm_start))
1553+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1554 return addr;
1555 }
1556 addr = current->mm->mmap_base;
1557@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1558 /* At this point: (!vmm || addr < vmm->vm_end). */
1559 if (TASK_SIZE - len < addr)
1560 return -ENOMEM;
1561- if (!vmm || addr + len <= vmm->vm_start)
1562+ if (check_heap_stack_gap(vmm, addr, len))
1563 return addr;
1564 addr = vmm->vm_end;
1565 if (do_color_align)
1566@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1567 mm->get_unmapped_area = arch_get_unmapped_area;
1568 mm->unmap_area = arch_unmap_area;
1569 }
1570-
1571-static inline unsigned long brk_rnd(void)
1572-{
1573- unsigned long rnd = get_random_int();
1574-
1575- rnd = rnd << PAGE_SHIFT;
1576- /* 8MB for 32bit, 256MB for 64bit */
1577- if (TASK_IS_32BIT_ADDR)
1578- rnd = rnd & 0x7ffffful;
1579- else
1580- rnd = rnd & 0xffffffful;
1581-
1582- return rnd;
1583-}
1584-
1585-unsigned long arch_randomize_brk(struct mm_struct *mm)
1586-{
1587- unsigned long base = mm->brk;
1588- unsigned long ret;
1589-
1590- ret = PAGE_ALIGN(base + brk_rnd());
1591-
1592- if (ret < mm->brk)
1593- return mm->brk;
1594-
1595- return ret;
1596-}
1597diff -urNp linux-3.0.7/arch/parisc/include/asm/elf.h linux-3.0.7/arch/parisc/include/asm/elf.h
1598--- linux-3.0.7/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1599+++ linux-3.0.7/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1600@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1601
1602 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1603
1604+#ifdef CONFIG_PAX_ASLR
1605+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1606+
1607+#define PAX_DELTA_MMAP_LEN 16
1608+#define PAX_DELTA_STACK_LEN 16
1609+#endif
1610+
1611 /* This yields a mask that user programs can use to figure out what
1612 instruction set this CPU supports. This could be done in user space,
1613 but it's not easy, and we've already done it here. */
1614diff -urNp linux-3.0.7/arch/parisc/include/asm/pgtable.h linux-3.0.7/arch/parisc/include/asm/pgtable.h
1615--- linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1616+++ linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1617@@ -210,6 +210,17 @@ struct vm_area_struct;
1618 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1619 #define PAGE_COPY PAGE_EXECREAD
1620 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1621+
1622+#ifdef CONFIG_PAX_PAGEEXEC
1623+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1624+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1625+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1626+#else
1627+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1628+# define PAGE_COPY_NOEXEC PAGE_COPY
1629+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1630+#endif
1631+
1632 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1633 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1634 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1635diff -urNp linux-3.0.7/arch/parisc/kernel/module.c linux-3.0.7/arch/parisc/kernel/module.c
1636--- linux-3.0.7/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1637+++ linux-3.0.7/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1638@@ -98,16 +98,38 @@
1639
1640 /* three functions to determine where in the module core
1641 * or init pieces the location is */
1642+static inline int in_init_rx(struct module *me, void *loc)
1643+{
1644+ return (loc >= me->module_init_rx &&
1645+ loc < (me->module_init_rx + me->init_size_rx));
1646+}
1647+
1648+static inline int in_init_rw(struct module *me, void *loc)
1649+{
1650+ return (loc >= me->module_init_rw &&
1651+ loc < (me->module_init_rw + me->init_size_rw));
1652+}
1653+
1654 static inline int in_init(struct module *me, void *loc)
1655 {
1656- return (loc >= me->module_init &&
1657- loc <= (me->module_init + me->init_size));
1658+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1659+}
1660+
1661+static inline int in_core_rx(struct module *me, void *loc)
1662+{
1663+ return (loc >= me->module_core_rx &&
1664+ loc < (me->module_core_rx + me->core_size_rx));
1665+}
1666+
1667+static inline int in_core_rw(struct module *me, void *loc)
1668+{
1669+ return (loc >= me->module_core_rw &&
1670+ loc < (me->module_core_rw + me->core_size_rw));
1671 }
1672
1673 static inline int in_core(struct module *me, void *loc)
1674 {
1675- return (loc >= me->module_core &&
1676- loc <= (me->module_core + me->core_size));
1677+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1678 }
1679
1680 static inline int in_local(struct module *me, void *loc)
1681@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1682 }
1683
1684 /* align things a bit */
1685- me->core_size = ALIGN(me->core_size, 16);
1686- me->arch.got_offset = me->core_size;
1687- me->core_size += gots * sizeof(struct got_entry);
1688-
1689- me->core_size = ALIGN(me->core_size, 16);
1690- me->arch.fdesc_offset = me->core_size;
1691- me->core_size += fdescs * sizeof(Elf_Fdesc);
1692+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1693+ me->arch.got_offset = me->core_size_rw;
1694+ me->core_size_rw += gots * sizeof(struct got_entry);
1695+
1696+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1697+ me->arch.fdesc_offset = me->core_size_rw;
1698+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1699
1700 me->arch.got_max = gots;
1701 me->arch.fdesc_max = fdescs;
1702@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1703
1704 BUG_ON(value == 0);
1705
1706- got = me->module_core + me->arch.got_offset;
1707+ got = me->module_core_rw + me->arch.got_offset;
1708 for (i = 0; got[i].addr; i++)
1709 if (got[i].addr == value)
1710 goto out;
1711@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1712 #ifdef CONFIG_64BIT
1713 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1714 {
1715- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1716+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1717
1718 if (!value) {
1719 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1720@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1721
1722 /* Create new one */
1723 fdesc->addr = value;
1724- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1725+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1726 return (Elf_Addr)fdesc;
1727 }
1728 #endif /* CONFIG_64BIT */
1729@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1730
1731 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1732 end = table + sechdrs[me->arch.unwind_section].sh_size;
1733- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1734+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1735
1736 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1737 me->arch.unwind_section, table, end, gp);
1738diff -urNp linux-3.0.7/arch/parisc/kernel/sys_parisc.c linux-3.0.7/arch/parisc/kernel/sys_parisc.c
1739--- linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1740+++ linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1741@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1742 /* At this point: (!vma || addr < vma->vm_end). */
1743 if (TASK_SIZE - len < addr)
1744 return -ENOMEM;
1745- if (!vma || addr + len <= vma->vm_start)
1746+ if (check_heap_stack_gap(vma, addr, len))
1747 return addr;
1748 addr = vma->vm_end;
1749 }
1750@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1751 /* At this point: (!vma || addr < vma->vm_end). */
1752 if (TASK_SIZE - len < addr)
1753 return -ENOMEM;
1754- if (!vma || addr + len <= vma->vm_start)
1755+ if (check_heap_stack_gap(vma, addr, len))
1756 return addr;
1757 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1758 if (addr < vma->vm_end) /* handle wraparound */
1759@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1760 if (flags & MAP_FIXED)
1761 return addr;
1762 if (!addr)
1763- addr = TASK_UNMAPPED_BASE;
1764+ addr = current->mm->mmap_base;
1765
1766 if (filp) {
1767 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1768diff -urNp linux-3.0.7/arch/parisc/kernel/traps.c linux-3.0.7/arch/parisc/kernel/traps.c
1769--- linux-3.0.7/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1770+++ linux-3.0.7/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1771@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1772
1773 down_read(&current->mm->mmap_sem);
1774 vma = find_vma(current->mm,regs->iaoq[0]);
1775- if (vma && (regs->iaoq[0] >= vma->vm_start)
1776- && (vma->vm_flags & VM_EXEC)) {
1777-
1778+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1779 fault_address = regs->iaoq[0];
1780 fault_space = regs->iasq[0];
1781
1782diff -urNp linux-3.0.7/arch/parisc/mm/fault.c linux-3.0.7/arch/parisc/mm/fault.c
1783--- linux-3.0.7/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1784+++ linux-3.0.7/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1785@@ -15,6 +15,7 @@
1786 #include <linux/sched.h>
1787 #include <linux/interrupt.h>
1788 #include <linux/module.h>
1789+#include <linux/unistd.h>
1790
1791 #include <asm/uaccess.h>
1792 #include <asm/traps.h>
1793@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1794 static unsigned long
1795 parisc_acctyp(unsigned long code, unsigned int inst)
1796 {
1797- if (code == 6 || code == 16)
1798+ if (code == 6 || code == 7 || code == 16)
1799 return VM_EXEC;
1800
1801 switch (inst & 0xf0000000) {
1802@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1803 }
1804 #endif
1805
1806+#ifdef CONFIG_PAX_PAGEEXEC
1807+/*
1808+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1809+ *
1810+ * returns 1 when task should be killed
1811+ * 2 when rt_sigreturn trampoline was detected
1812+ * 3 when unpatched PLT trampoline was detected
1813+ */
1814+static int pax_handle_fetch_fault(struct pt_regs *regs)
1815+{
1816+
1817+#ifdef CONFIG_PAX_EMUPLT
1818+ int err;
1819+
1820+ do { /* PaX: unpatched PLT emulation */
1821+ unsigned int bl, depwi;
1822+
1823+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1824+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1825+
1826+ if (err)
1827+ break;
1828+
1829+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1830+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1831+
1832+ err = get_user(ldw, (unsigned int *)addr);
1833+ err |= get_user(bv, (unsigned int *)(addr+4));
1834+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1835+
1836+ if (err)
1837+ break;
1838+
1839+ if (ldw == 0x0E801096U &&
1840+ bv == 0xEAC0C000U &&
1841+ ldw2 == 0x0E881095U)
1842+ {
1843+ unsigned int resolver, map;
1844+
1845+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1846+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1847+ if (err)
1848+ break;
1849+
1850+ regs->gr[20] = instruction_pointer(regs)+8;
1851+ regs->gr[21] = map;
1852+ regs->gr[22] = resolver;
1853+ regs->iaoq[0] = resolver | 3UL;
1854+ regs->iaoq[1] = regs->iaoq[0] + 4;
1855+ return 3;
1856+ }
1857+ }
1858+ } while (0);
1859+#endif
1860+
1861+#ifdef CONFIG_PAX_EMUTRAMP
1862+
1863+#ifndef CONFIG_PAX_EMUSIGRT
1864+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1865+ return 1;
1866+#endif
1867+
1868+ do { /* PaX: rt_sigreturn emulation */
1869+ unsigned int ldi1, ldi2, bel, nop;
1870+
1871+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1872+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1873+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1874+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1875+
1876+ if (err)
1877+ break;
1878+
1879+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1880+ ldi2 == 0x3414015AU &&
1881+ bel == 0xE4008200U &&
1882+ nop == 0x08000240U)
1883+ {
1884+ regs->gr[25] = (ldi1 & 2) >> 1;
1885+ regs->gr[20] = __NR_rt_sigreturn;
1886+ regs->gr[31] = regs->iaoq[1] + 16;
1887+ regs->sr[0] = regs->iasq[1];
1888+ regs->iaoq[0] = 0x100UL;
1889+ regs->iaoq[1] = regs->iaoq[0] + 4;
1890+ regs->iasq[0] = regs->sr[2];
1891+ regs->iasq[1] = regs->sr[2];
1892+ return 2;
1893+ }
1894+ } while (0);
1895+#endif
1896+
1897+ return 1;
1898+}
1899+
1900+void pax_report_insns(void *pc, void *sp)
1901+{
1902+ unsigned long i;
1903+
1904+ printk(KERN_ERR "PAX: bytes at PC: ");
1905+ for (i = 0; i < 5; i++) {
1906+ unsigned int c;
1907+ if (get_user(c, (unsigned int *)pc+i))
1908+ printk(KERN_CONT "???????? ");
1909+ else
1910+ printk(KERN_CONT "%08x ", c);
1911+ }
1912+ printk("\n");
1913+}
1914+#endif
1915+
1916 int fixup_exception(struct pt_regs *regs)
1917 {
1918 const struct exception_table_entry *fix;
1919@@ -192,8 +303,33 @@ good_area:
1920
1921 acc_type = parisc_acctyp(code,regs->iir);
1922
1923- if ((vma->vm_flags & acc_type) != acc_type)
1924+ if ((vma->vm_flags & acc_type) != acc_type) {
1925+
1926+#ifdef CONFIG_PAX_PAGEEXEC
1927+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1928+ (address & ~3UL) == instruction_pointer(regs))
1929+ {
1930+ up_read(&mm->mmap_sem);
1931+ switch (pax_handle_fetch_fault(regs)) {
1932+
1933+#ifdef CONFIG_PAX_EMUPLT
1934+ case 3:
1935+ return;
1936+#endif
1937+
1938+#ifdef CONFIG_PAX_EMUTRAMP
1939+ case 2:
1940+ return;
1941+#endif
1942+
1943+ }
1944+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1945+ do_group_exit(SIGKILL);
1946+ }
1947+#endif
1948+
1949 goto bad_area;
1950+ }
1951
1952 /*
1953 * If for any reason at all we couldn't handle the fault, make
1954diff -urNp linux-3.0.7/arch/powerpc/include/asm/elf.h linux-3.0.7/arch/powerpc/include/asm/elf.h
1955--- linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1956+++ linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1957@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1958 the loader. We need to make sure that it is out of the way of the program
1959 that it will "exec", and that there is sufficient room for the brk. */
1960
1961-extern unsigned long randomize_et_dyn(unsigned long base);
1962-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1963+#define ELF_ET_DYN_BASE (0x20000000)
1964+
1965+#ifdef CONFIG_PAX_ASLR
1966+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1967+
1968+#ifdef __powerpc64__
1969+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1970+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1971+#else
1972+#define PAX_DELTA_MMAP_LEN 15
1973+#define PAX_DELTA_STACK_LEN 15
1974+#endif
1975+#endif
1976
1977 /*
1978 * Our registers are always unsigned longs, whether we're a 32 bit
1979@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1980 (0x7ff >> (PAGE_SHIFT - 12)) : \
1981 (0x3ffff >> (PAGE_SHIFT - 12)))
1982
1983-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1984-#define arch_randomize_brk arch_randomize_brk
1985-
1986 #endif /* __KERNEL__ */
1987
1988 /*
1989diff -urNp linux-3.0.7/arch/powerpc/include/asm/kmap_types.h linux-3.0.7/arch/powerpc/include/asm/kmap_types.h
1990--- linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1991+++ linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1992@@ -27,6 +27,7 @@ enum km_type {
1993 KM_PPC_SYNC_PAGE,
1994 KM_PPC_SYNC_ICACHE,
1995 KM_KDB,
1996+ KM_CLEARPAGE,
1997 KM_TYPE_NR
1998 };
1999
2000diff -urNp linux-3.0.7/arch/powerpc/include/asm/mman.h linux-3.0.7/arch/powerpc/include/asm/mman.h
2001--- linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
2003@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
2004 }
2005 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2006
2007-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2008+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2009 {
2010 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2011 }
2012diff -urNp linux-3.0.7/arch/powerpc/include/asm/page.h linux-3.0.7/arch/powerpc/include/asm/page.h
2013--- linux-3.0.7/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
2014+++ linux-3.0.7/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
2015@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2016 * and needs to be executable. This means the whole heap ends
2017 * up being executable.
2018 */
2019-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2020- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2021+#define VM_DATA_DEFAULT_FLAGS32 \
2022+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2023+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2024
2025 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2026 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2027@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2028 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2029 #endif
2030
2031+#define ktla_ktva(addr) (addr)
2032+#define ktva_ktla(addr) (addr)
2033+
2034 #ifndef __ASSEMBLY__
2035
2036 #undef STRICT_MM_TYPECHECKS
2037diff -urNp linux-3.0.7/arch/powerpc/include/asm/page_64.h linux-3.0.7/arch/powerpc/include/asm/page_64.h
2038--- linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
2039+++ linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
2040@@ -155,15 +155,18 @@ do { \
2041 * stack by default, so in the absence of a PT_GNU_STACK program header
2042 * we turn execute permission off.
2043 */
2044-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2045- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2046+#define VM_STACK_DEFAULT_FLAGS32 \
2047+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2048+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2049
2050 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2051 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2052
2053+#ifndef CONFIG_PAX_PAGEEXEC
2054 #define VM_STACK_DEFAULT_FLAGS \
2055 (is_32bit_task() ? \
2056 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2057+#endif
2058
2059 #include <asm-generic/getorder.h>
2060
2061diff -urNp linux-3.0.7/arch/powerpc/include/asm/pgtable.h linux-3.0.7/arch/powerpc/include/asm/pgtable.h
2062--- linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
2063+++ linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
2064@@ -2,6 +2,7 @@
2065 #define _ASM_POWERPC_PGTABLE_H
2066 #ifdef __KERNEL__
2067
2068+#include <linux/const.h>
2069 #ifndef __ASSEMBLY__
2070 #include <asm/processor.h> /* For TASK_SIZE */
2071 #include <asm/mmu.h>
2072diff -urNp linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h
2073--- linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
2074+++ linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
2075@@ -21,6 +21,7 @@
2076 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2077 #define _PAGE_USER 0x004 /* usermode access allowed */
2078 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2079+#define _PAGE_EXEC _PAGE_GUARDED
2080 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2081 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2082 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2083diff -urNp linux-3.0.7/arch/powerpc/include/asm/reg.h linux-3.0.7/arch/powerpc/include/asm/reg.h
2084--- linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
2085+++ linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
2086@@ -209,6 +209,7 @@
2087 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2088 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2089 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2090+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2091 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2092 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2093 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2094diff -urNp linux-3.0.7/arch/powerpc/include/asm/system.h linux-3.0.7/arch/powerpc/include/asm/system.h
2095--- linux-3.0.7/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2096+++ linux-3.0.7/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2097@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2098 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2099 #endif
2100
2101-extern unsigned long arch_align_stack(unsigned long sp);
2102+#define arch_align_stack(x) ((x) & ~0xfUL)
2103
2104 /* Used in very early kernel initialization. */
2105 extern unsigned long reloc_offset(void);
2106diff -urNp linux-3.0.7/arch/powerpc/include/asm/uaccess.h linux-3.0.7/arch/powerpc/include/asm/uaccess.h
2107--- linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2108+++ linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2109@@ -13,6 +13,8 @@
2110 #define VERIFY_READ 0
2111 #define VERIFY_WRITE 1
2112
2113+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2114+
2115 /*
2116 * The fs value determines whether argument validity checking should be
2117 * performed or not. If get_fs() == USER_DS, checking is performed, with
2118@@ -327,52 +329,6 @@ do { \
2119 extern unsigned long __copy_tofrom_user(void __user *to,
2120 const void __user *from, unsigned long size);
2121
2122-#ifndef __powerpc64__
2123-
2124-static inline unsigned long copy_from_user(void *to,
2125- const void __user *from, unsigned long n)
2126-{
2127- unsigned long over;
2128-
2129- if (access_ok(VERIFY_READ, from, n))
2130- return __copy_tofrom_user((__force void __user *)to, from, n);
2131- if ((unsigned long)from < TASK_SIZE) {
2132- over = (unsigned long)from + n - TASK_SIZE;
2133- return __copy_tofrom_user((__force void __user *)to, from,
2134- n - over) + over;
2135- }
2136- return n;
2137-}
2138-
2139-static inline unsigned long copy_to_user(void __user *to,
2140- const void *from, unsigned long n)
2141-{
2142- unsigned long over;
2143-
2144- if (access_ok(VERIFY_WRITE, to, n))
2145- return __copy_tofrom_user(to, (__force void __user *)from, n);
2146- if ((unsigned long)to < TASK_SIZE) {
2147- over = (unsigned long)to + n - TASK_SIZE;
2148- return __copy_tofrom_user(to, (__force void __user *)from,
2149- n - over) + over;
2150- }
2151- return n;
2152-}
2153-
2154-#else /* __powerpc64__ */
2155-
2156-#define __copy_in_user(to, from, size) \
2157- __copy_tofrom_user((to), (from), (size))
2158-
2159-extern unsigned long copy_from_user(void *to, const void __user *from,
2160- unsigned long n);
2161-extern unsigned long copy_to_user(void __user *to, const void *from,
2162- unsigned long n);
2163-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2164- unsigned long n);
2165-
2166-#endif /* __powerpc64__ */
2167-
2168 static inline unsigned long __copy_from_user_inatomic(void *to,
2169 const void __user *from, unsigned long n)
2170 {
2171@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2172 if (ret == 0)
2173 return 0;
2174 }
2175+
2176+ if (!__builtin_constant_p(n))
2177+ check_object_size(to, n, false);
2178+
2179 return __copy_tofrom_user((__force void __user *)to, from, n);
2180 }
2181
2182@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2183 if (ret == 0)
2184 return 0;
2185 }
2186+
2187+ if (!__builtin_constant_p(n))
2188+ check_object_size(from, n, true);
2189+
2190 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2191 }
2192
2193@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2194 return __copy_to_user_inatomic(to, from, size);
2195 }
2196
2197+#ifndef __powerpc64__
2198+
2199+static inline unsigned long __must_check copy_from_user(void *to,
2200+ const void __user *from, unsigned long n)
2201+{
2202+ unsigned long over;
2203+
2204+ if ((long)n < 0)
2205+ return n;
2206+
2207+ if (access_ok(VERIFY_READ, from, n)) {
2208+ if (!__builtin_constant_p(n))
2209+ check_object_size(to, n, false);
2210+ return __copy_tofrom_user((__force void __user *)to, from, n);
2211+ }
2212+ if ((unsigned long)from < TASK_SIZE) {
2213+ over = (unsigned long)from + n - TASK_SIZE;
2214+ if (!__builtin_constant_p(n - over))
2215+ check_object_size(to, n - over, false);
2216+ return __copy_tofrom_user((__force void __user *)to, from,
2217+ n - over) + over;
2218+ }
2219+ return n;
2220+}
2221+
2222+static inline unsigned long __must_check copy_to_user(void __user *to,
2223+ const void *from, unsigned long n)
2224+{
2225+ unsigned long over;
2226+
2227+ if ((long)n < 0)
2228+ return n;
2229+
2230+ if (access_ok(VERIFY_WRITE, to, n)) {
2231+ if (!__builtin_constant_p(n))
2232+ check_object_size(from, n, true);
2233+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2234+ }
2235+ if ((unsigned long)to < TASK_SIZE) {
2236+ over = (unsigned long)to + n - TASK_SIZE;
2237+ if (!__builtin_constant_p(n))
2238+ check_object_size(from, n - over, true);
2239+ return __copy_tofrom_user(to, (__force void __user *)from,
2240+ n - over) + over;
2241+ }
2242+ return n;
2243+}
2244+
2245+#else /* __powerpc64__ */
2246+
2247+#define __copy_in_user(to, from, size) \
2248+ __copy_tofrom_user((to), (from), (size))
2249+
2250+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2251+{
2252+ if ((long)n < 0 || n > INT_MAX)
2253+ return n;
2254+
2255+ if (!__builtin_constant_p(n))
2256+ check_object_size(to, n, false);
2257+
2258+ if (likely(access_ok(VERIFY_READ, from, n)))
2259+ n = __copy_from_user(to, from, n);
2260+ else
2261+ memset(to, 0, n);
2262+ return n;
2263+}
2264+
2265+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2266+{
2267+ if ((long)n < 0 || n > INT_MAX)
2268+ return n;
2269+
2270+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2271+ if (!__builtin_constant_p(n))
2272+ check_object_size(from, n, true);
2273+ n = __copy_to_user(to, from, n);
2274+ }
2275+ return n;
2276+}
2277+
2278+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2279+ unsigned long n);
2280+
2281+#endif /* __powerpc64__ */
2282+
2283 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2284
2285 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2286diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S
2287--- linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
2288+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
2289@@ -567,6 +567,7 @@ storage_fault_common:
2290 std r14,_DAR(r1)
2291 std r15,_DSISR(r1)
2292 addi r3,r1,STACK_FRAME_OVERHEAD
2293+ bl .save_nvgprs
2294 mr r4,r14
2295 mr r5,r15
2296 ld r14,PACA_EXGEN+EX_R14(r13)
2297@@ -576,8 +577,7 @@ storage_fault_common:
2298 cmpdi r3,0
2299 bne- 1f
2300 b .ret_from_except_lite
2301-1: bl .save_nvgprs
2302- mr r5,r3
2303+1: mr r5,r3
2304 addi r3,r1,STACK_FRAME_OVERHEAD
2305 ld r4,_DAR(r1)
2306 bl .bad_page_fault
2307diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S
2308--- linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
2309+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
2310@@ -956,10 +956,10 @@ handle_page_fault:
2311 11: ld r4,_DAR(r1)
2312 ld r5,_DSISR(r1)
2313 addi r3,r1,STACK_FRAME_OVERHEAD
2314+ bl .save_nvgprs
2315 bl .do_page_fault
2316 cmpdi r3,0
2317 beq+ 13f
2318- bl .save_nvgprs
2319 mr r5,r3
2320 addi r3,r1,STACK_FRAME_OVERHEAD
2321 lwz r4,_DAR(r1)
2322diff -urNp linux-3.0.7/arch/powerpc/kernel/module.c linux-3.0.7/arch/powerpc/kernel/module.c
2323--- linux-3.0.7/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2324+++ linux-3.0.7/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2325@@ -31,11 +31,24 @@
2326
2327 LIST_HEAD(module_bug_list);
2328
2329+#ifdef CONFIG_PAX_KERNEXEC
2330 void *module_alloc(unsigned long size)
2331 {
2332 if (size == 0)
2333 return NULL;
2334
2335+ return vmalloc(size);
2336+}
2337+
2338+void *module_alloc_exec(unsigned long size)
2339+#else
2340+void *module_alloc(unsigned long size)
2341+#endif
2342+
2343+{
2344+ if (size == 0)
2345+ return NULL;
2346+
2347 return vmalloc_exec(size);
2348 }
2349
2350@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2351 vfree(module_region);
2352 }
2353
2354+#ifdef CONFIG_PAX_KERNEXEC
2355+void module_free_exec(struct module *mod, void *module_region)
2356+{
2357+ module_free(mod, module_region);
2358+}
2359+#endif
2360+
2361 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2362 const Elf_Shdr *sechdrs,
2363 const char *name)
2364diff -urNp linux-3.0.7/arch/powerpc/kernel/module_32.c linux-3.0.7/arch/powerpc/kernel/module_32.c
2365--- linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2366+++ linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2367@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2368 me->arch.core_plt_section = i;
2369 }
2370 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2371- printk("Module doesn't contain .plt or .init.plt sections.\n");
2372+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2373 return -ENOEXEC;
2374 }
2375
2376@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2377
2378 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2379 /* Init, or core PLT? */
2380- if (location >= mod->module_core
2381- && location < mod->module_core + mod->core_size)
2382+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2383+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2384 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2385- else
2386+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2387+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2388 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2389+ else {
2390+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2391+ return ~0UL;
2392+ }
2393
2394 /* Find this entry, or if that fails, the next avail. entry */
2395 while (entry->jump[0]) {
2396diff -urNp linux-3.0.7/arch/powerpc/kernel/process.c linux-3.0.7/arch/powerpc/kernel/process.c
2397--- linux-3.0.7/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2398+++ linux-3.0.7/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2399@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2400 * Lookup NIP late so we have the best change of getting the
2401 * above info out without failing
2402 */
2403- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2404- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2405+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2406+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2407 #endif
2408 show_stack(current, (unsigned long *) regs->gpr[1]);
2409 if (!user_mode(regs))
2410@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2411 newsp = stack[0];
2412 ip = stack[STACK_FRAME_LR_SAVE];
2413 if (!firstframe || ip != lr) {
2414- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2415+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2416 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2417 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2418- printk(" (%pS)",
2419+ printk(" (%pA)",
2420 (void *)current->ret_stack[curr_frame].ret);
2421 curr_frame--;
2422 }
2423@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2424 struct pt_regs *regs = (struct pt_regs *)
2425 (sp + STACK_FRAME_OVERHEAD);
2426 lr = regs->link;
2427- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2428+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2429 regs->trap, (void *)regs->nip, (void *)lr);
2430 firstframe = 1;
2431 }
2432@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2433 }
2434
2435 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2436-
2437-unsigned long arch_align_stack(unsigned long sp)
2438-{
2439- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2440- sp -= get_random_int() & ~PAGE_MASK;
2441- return sp & ~0xf;
2442-}
2443-
2444-static inline unsigned long brk_rnd(void)
2445-{
2446- unsigned long rnd = 0;
2447-
2448- /* 8MB for 32bit, 1GB for 64bit */
2449- if (is_32bit_task())
2450- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2451- else
2452- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2453-
2454- return rnd << PAGE_SHIFT;
2455-}
2456-
2457-unsigned long arch_randomize_brk(struct mm_struct *mm)
2458-{
2459- unsigned long base = mm->brk;
2460- unsigned long ret;
2461-
2462-#ifdef CONFIG_PPC_STD_MMU_64
2463- /*
2464- * If we are using 1TB segments and we are allowed to randomise
2465- * the heap, we can put it above 1TB so it is backed by a 1TB
2466- * segment. Otherwise the heap will be in the bottom 1TB
2467- * which always uses 256MB segments and this may result in a
2468- * performance penalty.
2469- */
2470- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2471- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2472-#endif
2473-
2474- ret = PAGE_ALIGN(base + brk_rnd());
2475-
2476- if (ret < mm->brk)
2477- return mm->brk;
2478-
2479- return ret;
2480-}
2481-
2482-unsigned long randomize_et_dyn(unsigned long base)
2483-{
2484- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2485-
2486- if (ret < base)
2487- return base;
2488-
2489- return ret;
2490-}
2491diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_32.c linux-3.0.7/arch/powerpc/kernel/signal_32.c
2492--- linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2493+++ linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2494@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2495 /* Save user registers on the stack */
2496 frame = &rt_sf->uc.uc_mcontext;
2497 addr = frame;
2498- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2499+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2500 if (save_user_regs(regs, frame, 0, 1))
2501 goto badframe;
2502 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2503diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_64.c linux-3.0.7/arch/powerpc/kernel/signal_64.c
2504--- linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2505+++ linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2506@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2507 current->thread.fpscr.val = 0;
2508
2509 /* Set up to return from userspace. */
2510- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2511+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2512 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2513 } else {
2514 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2515diff -urNp linux-3.0.7/arch/powerpc/kernel/traps.c linux-3.0.7/arch/powerpc/kernel/traps.c
2516--- linux-3.0.7/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2517+++ linux-3.0.7/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2518@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2519 static inline void pmac_backlight_unblank(void) { }
2520 #endif
2521
2522+extern void gr_handle_kernel_exploit(void);
2523+
2524 int die(const char *str, struct pt_regs *regs, long err)
2525 {
2526 static struct {
2527@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2528 if (panic_on_oops)
2529 panic("Fatal exception");
2530
2531+ gr_handle_kernel_exploit();
2532+
2533 oops_exit();
2534 do_exit(err);
2535
2536diff -urNp linux-3.0.7/arch/powerpc/kernel/vdso.c linux-3.0.7/arch/powerpc/kernel/vdso.c
2537--- linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2538+++ linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2539@@ -36,6 +36,7 @@
2540 #include <asm/firmware.h>
2541 #include <asm/vdso.h>
2542 #include <asm/vdso_datapage.h>
2543+#include <asm/mman.h>
2544
2545 #include "setup.h"
2546
2547@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2548 vdso_base = VDSO32_MBASE;
2549 #endif
2550
2551- current->mm->context.vdso_base = 0;
2552+ current->mm->context.vdso_base = ~0UL;
2553
2554 /* vDSO has a problem and was disabled, just don't "enable" it for the
2555 * process
2556@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2557 vdso_base = get_unmapped_area(NULL, vdso_base,
2558 (vdso_pages << PAGE_SHIFT) +
2559 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2560- 0, 0);
2561+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2562 if (IS_ERR_VALUE(vdso_base)) {
2563 rc = vdso_base;
2564 goto fail_mmapsem;
2565diff -urNp linux-3.0.7/arch/powerpc/lib/usercopy_64.c linux-3.0.7/arch/powerpc/lib/usercopy_64.c
2566--- linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2567+++ linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2568@@ -9,22 +9,6 @@
2569 #include <linux/module.h>
2570 #include <asm/uaccess.h>
2571
2572-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2573-{
2574- if (likely(access_ok(VERIFY_READ, from, n)))
2575- n = __copy_from_user(to, from, n);
2576- else
2577- memset(to, 0, n);
2578- return n;
2579-}
2580-
2581-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2582-{
2583- if (likely(access_ok(VERIFY_WRITE, to, n)))
2584- n = __copy_to_user(to, from, n);
2585- return n;
2586-}
2587-
2588 unsigned long copy_in_user(void __user *to, const void __user *from,
2589 unsigned long n)
2590 {
2591@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2592 return n;
2593 }
2594
2595-EXPORT_SYMBOL(copy_from_user);
2596-EXPORT_SYMBOL(copy_to_user);
2597 EXPORT_SYMBOL(copy_in_user);
2598
2599diff -urNp linux-3.0.7/arch/powerpc/mm/fault.c linux-3.0.7/arch/powerpc/mm/fault.c
2600--- linux-3.0.7/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2601+++ linux-3.0.7/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2602@@ -32,6 +32,10 @@
2603 #include <linux/perf_event.h>
2604 #include <linux/magic.h>
2605 #include <linux/ratelimit.h>
2606+#include <linux/slab.h>
2607+#include <linux/pagemap.h>
2608+#include <linux/compiler.h>
2609+#include <linux/unistd.h>
2610
2611 #include <asm/firmware.h>
2612 #include <asm/page.h>
2613@@ -43,6 +47,7 @@
2614 #include <asm/tlbflush.h>
2615 #include <asm/siginfo.h>
2616 #include <mm/mmu_decl.h>
2617+#include <asm/ptrace.h>
2618
2619 #ifdef CONFIG_KPROBES
2620 static inline int notify_page_fault(struct pt_regs *regs)
2621@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2622 }
2623 #endif
2624
2625+#ifdef CONFIG_PAX_PAGEEXEC
2626+/*
2627+ * PaX: decide what to do with offenders (regs->nip = fault address)
2628+ *
2629+ * returns 1 when task should be killed
2630+ */
2631+static int pax_handle_fetch_fault(struct pt_regs *regs)
2632+{
2633+ return 1;
2634+}
2635+
2636+void pax_report_insns(void *pc, void *sp)
2637+{
2638+ unsigned long i;
2639+
2640+ printk(KERN_ERR "PAX: bytes at PC: ");
2641+ for (i = 0; i < 5; i++) {
2642+ unsigned int c;
2643+ if (get_user(c, (unsigned int __user *)pc+i))
2644+ printk(KERN_CONT "???????? ");
2645+ else
2646+ printk(KERN_CONT "%08x ", c);
2647+ }
2648+ printk("\n");
2649+}
2650+#endif
2651+
2652 /*
2653 * Check whether the instruction at regs->nip is a store using
2654 * an update addressing form which will update r1.
2655@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2656 * indicate errors in DSISR but can validly be set in SRR1.
2657 */
2658 if (trap == 0x400)
2659- error_code &= 0x48200000;
2660+ error_code &= 0x58200000;
2661 else
2662 is_write = error_code & DSISR_ISSTORE;
2663 #else
2664@@ -259,7 +291,7 @@ good_area:
2665 * "undefined". Of those that can be set, this is the only
2666 * one which seems bad.
2667 */
2668- if (error_code & 0x10000000)
2669+ if (error_code & DSISR_GUARDED)
2670 /* Guarded storage error. */
2671 goto bad_area;
2672 #endif /* CONFIG_8xx */
2673@@ -274,7 +306,7 @@ good_area:
2674 * processors use the same I/D cache coherency mechanism
2675 * as embedded.
2676 */
2677- if (error_code & DSISR_PROTFAULT)
2678+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2679 goto bad_area;
2680 #endif /* CONFIG_PPC_STD_MMU */
2681
2682@@ -343,6 +375,23 @@ bad_area:
2683 bad_area_nosemaphore:
2684 /* User mode accesses cause a SIGSEGV */
2685 if (user_mode(regs)) {
2686+
2687+#ifdef CONFIG_PAX_PAGEEXEC
2688+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2689+#ifdef CONFIG_PPC_STD_MMU
2690+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2691+#else
2692+ if (is_exec && regs->nip == address) {
2693+#endif
2694+ switch (pax_handle_fetch_fault(regs)) {
2695+ }
2696+
2697+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2698+ do_group_exit(SIGKILL);
2699+ }
2700+ }
2701+#endif
2702+
2703 _exception(SIGSEGV, regs, code, address);
2704 return 0;
2705 }
2706diff -urNp linux-3.0.7/arch/powerpc/mm/mmap_64.c linux-3.0.7/arch/powerpc/mm/mmap_64.c
2707--- linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2708+++ linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2709@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = arch_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.7/arch/powerpc/mm/slice.c linux-3.0.7/arch/powerpc/mm/slice.c
2733--- linux-3.0.7/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.7/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2735@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2736 if ((mm->task_size - len) < addr)
2737 return 0;
2738 vma = find_vma(mm, addr);
2739- return (!vma || (addr + len) <= vma->vm_start);
2740+ return check_heap_stack_gap(vma, addr, len);
2741 }
2742
2743 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2744@@ -256,7 +256,7 @@ full_search:
2745 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2746 continue;
2747 }
2748- if (!vma || addr + len <= vma->vm_start) {
2749+ if (check_heap_stack_gap(vma, addr, len)) {
2750 /*
2751 * Remember the place where we stopped the search:
2752 */
2753@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2754 }
2755 }
2756
2757- addr = mm->mmap_base;
2758- while (addr > len) {
2759+ if (mm->mmap_base < len)
2760+ addr = -ENOMEM;
2761+ else
2762+ addr = mm->mmap_base - len;
2763+
2764+ while (!IS_ERR_VALUE(addr)) {
2765 /* Go down by chunk size */
2766- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2767+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2768
2769 /* Check for hit with different page size */
2770 mask = slice_range_to_mask(addr, len);
2771@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2772 * return with success:
2773 */
2774 vma = find_vma(mm, addr);
2775- if (!vma || (addr + len) <= vma->vm_start) {
2776+ if (check_heap_stack_gap(vma, addr, len)) {
2777 /* remember the address as a hint for next time */
2778 if (use_cache)
2779 mm->free_area_cache = addr;
2780@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2781 mm->cached_hole_size = vma->vm_start - addr;
2782
2783 /* try just below the current vma->vm_start */
2784- addr = vma->vm_start;
2785+ addr = skip_heap_stack_gap(vma, len);
2786 }
2787
2788 /*
2789@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2790 if (fixed && addr > (mm->task_size - len))
2791 return -EINVAL;
2792
2793+#ifdef CONFIG_PAX_RANDMMAP
2794+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2795+ addr = 0;
2796+#endif
2797+
2798 /* If hint, make sure it matches our alignment restrictions */
2799 if (!fixed && addr) {
2800 addr = _ALIGN_UP(addr, 1ul << pshift);
2801diff -urNp linux-3.0.7/arch/s390/include/asm/elf.h linux-3.0.7/arch/s390/include/asm/elf.h
2802--- linux-3.0.7/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2803+++ linux-3.0.7/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2804@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2805 the loader. We need to make sure that it is out of the way of the program
2806 that it will "exec", and that there is sufficient room for the brk. */
2807
2808-extern unsigned long randomize_et_dyn(unsigned long base);
2809-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2810+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2811+
2812+#ifdef CONFIG_PAX_ASLR
2813+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2814+
2815+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2816+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2817+#endif
2818
2819 /* This yields a mask that user programs can use to figure out what
2820 instruction set this CPU supports. */
2821@@ -210,7 +216,4 @@ struct linux_binprm;
2822 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2823 int arch_setup_additional_pages(struct linux_binprm *, int);
2824
2825-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2826-#define arch_randomize_brk arch_randomize_brk
2827-
2828 #endif
2829diff -urNp linux-3.0.7/arch/s390/include/asm/system.h linux-3.0.7/arch/s390/include/asm/system.h
2830--- linux-3.0.7/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2831+++ linux-3.0.7/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2832@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2833 extern void (*_machine_halt)(void);
2834 extern void (*_machine_power_off)(void);
2835
2836-extern unsigned long arch_align_stack(unsigned long sp);
2837+#define arch_align_stack(x) ((x) & ~0xfUL)
2838
2839 static inline int tprot(unsigned long addr)
2840 {
2841diff -urNp linux-3.0.7/arch/s390/include/asm/uaccess.h linux-3.0.7/arch/s390/include/asm/uaccess.h
2842--- linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2843+++ linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2844@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2845 copy_to_user(void __user *to, const void *from, unsigned long n)
2846 {
2847 might_fault();
2848+
2849+ if ((long)n < 0)
2850+ return n;
2851+
2852 if (access_ok(VERIFY_WRITE, to, n))
2853 n = __copy_to_user(to, from, n);
2854 return n;
2855@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2856 static inline unsigned long __must_check
2857 __copy_from_user(void *to, const void __user *from, unsigned long n)
2858 {
2859+ if ((long)n < 0)
2860+ return n;
2861+
2862 if (__builtin_constant_p(n) && (n <= 256))
2863 return uaccess.copy_from_user_small(n, from, to);
2864 else
2865@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2866 unsigned int sz = __compiletime_object_size(to);
2867
2868 might_fault();
2869+
2870+ if ((long)n < 0)
2871+ return n;
2872+
2873 if (unlikely(sz != -1 && sz < n)) {
2874 copy_from_user_overflow();
2875 return n;
2876diff -urNp linux-3.0.7/arch/s390/kernel/module.c linux-3.0.7/arch/s390/kernel/module.c
2877--- linux-3.0.7/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2878+++ linux-3.0.7/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2879@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2880
2881 /* Increase core size by size of got & plt and set start
2882 offsets for got and plt. */
2883- me->core_size = ALIGN(me->core_size, 4);
2884- me->arch.got_offset = me->core_size;
2885- me->core_size += me->arch.got_size;
2886- me->arch.plt_offset = me->core_size;
2887- me->core_size += me->arch.plt_size;
2888+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2889+ me->arch.got_offset = me->core_size_rw;
2890+ me->core_size_rw += me->arch.got_size;
2891+ me->arch.plt_offset = me->core_size_rx;
2892+ me->core_size_rx += me->arch.plt_size;
2893 return 0;
2894 }
2895
2896@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2897 if (info->got_initialized == 0) {
2898 Elf_Addr *gotent;
2899
2900- gotent = me->module_core + me->arch.got_offset +
2901+ gotent = me->module_core_rw + me->arch.got_offset +
2902 info->got_offset;
2903 *gotent = val;
2904 info->got_initialized = 1;
2905@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2906 else if (r_type == R_390_GOTENT ||
2907 r_type == R_390_GOTPLTENT)
2908 *(unsigned int *) loc =
2909- (val + (Elf_Addr) me->module_core - loc) >> 1;
2910+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2911 else if (r_type == R_390_GOT64 ||
2912 r_type == R_390_GOTPLT64)
2913 *(unsigned long *) loc = val;
2914@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2915 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2916 if (info->plt_initialized == 0) {
2917 unsigned int *ip;
2918- ip = me->module_core + me->arch.plt_offset +
2919+ ip = me->module_core_rx + me->arch.plt_offset +
2920 info->plt_offset;
2921 #ifndef CONFIG_64BIT
2922 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2923@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2924 val - loc + 0xffffUL < 0x1ffffeUL) ||
2925 (r_type == R_390_PLT32DBL &&
2926 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2927- val = (Elf_Addr) me->module_core +
2928+ val = (Elf_Addr) me->module_core_rx +
2929 me->arch.plt_offset +
2930 info->plt_offset;
2931 val += rela->r_addend - loc;
2932@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2933 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2934 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2935 val = val + rela->r_addend -
2936- ((Elf_Addr) me->module_core + me->arch.got_offset);
2937+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2938 if (r_type == R_390_GOTOFF16)
2939 *(unsigned short *) loc = val;
2940 else if (r_type == R_390_GOTOFF32)
2941@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2942 break;
2943 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2944 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2945- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2946+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2947 rela->r_addend - loc;
2948 if (r_type == R_390_GOTPC)
2949 *(unsigned int *) loc = val;
2950diff -urNp linux-3.0.7/arch/s390/kernel/process.c linux-3.0.7/arch/s390/kernel/process.c
2951--- linux-3.0.7/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2952+++ linux-3.0.7/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2953@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2954 }
2955 return 0;
2956 }
2957-
2958-unsigned long arch_align_stack(unsigned long sp)
2959-{
2960- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2961- sp -= get_random_int() & ~PAGE_MASK;
2962- return sp & ~0xf;
2963-}
2964-
2965-static inline unsigned long brk_rnd(void)
2966-{
2967- /* 8MB for 32bit, 1GB for 64bit */
2968- if (is_32bit_task())
2969- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2970- else
2971- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2972-}
2973-
2974-unsigned long arch_randomize_brk(struct mm_struct *mm)
2975-{
2976- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2977-
2978- if (ret < mm->brk)
2979- return mm->brk;
2980- return ret;
2981-}
2982-
2983-unsigned long randomize_et_dyn(unsigned long base)
2984-{
2985- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2986-
2987- if (!(current->flags & PF_RANDOMIZE))
2988- return base;
2989- if (ret < base)
2990- return base;
2991- return ret;
2992-}
2993diff -urNp linux-3.0.7/arch/s390/kernel/setup.c linux-3.0.7/arch/s390/kernel/setup.c
2994--- linux-3.0.7/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2995+++ linux-3.0.7/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2996@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2997 }
2998 early_param("mem", early_parse_mem);
2999
3000-unsigned int user_mode = HOME_SPACE_MODE;
3001+unsigned int user_mode = SECONDARY_SPACE_MODE;
3002 EXPORT_SYMBOL_GPL(user_mode);
3003
3004 static int set_amode_and_uaccess(unsigned long user_amode,
3005diff -urNp linux-3.0.7/arch/s390/mm/mmap.c linux-3.0.7/arch/s390/mm/mmap.c
3006--- linux-3.0.7/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
3007+++ linux-3.0.7/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
3008@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
3009 */
3010 if (mmap_is_legacy()) {
3011 mm->mmap_base = TASK_UNMAPPED_BASE;
3012+
3013+#ifdef CONFIG_PAX_RANDMMAP
3014+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3015+ mm->mmap_base += mm->delta_mmap;
3016+#endif
3017+
3018 mm->get_unmapped_area = arch_get_unmapped_area;
3019 mm->unmap_area = arch_unmap_area;
3020 } else {
3021 mm->mmap_base = mmap_base();
3022+
3023+#ifdef CONFIG_PAX_RANDMMAP
3024+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3025+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3026+#endif
3027+
3028 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3029 mm->unmap_area = arch_unmap_area_topdown;
3030 }
3031@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
3032 */
3033 if (mmap_is_legacy()) {
3034 mm->mmap_base = TASK_UNMAPPED_BASE;
3035+
3036+#ifdef CONFIG_PAX_RANDMMAP
3037+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3038+ mm->mmap_base += mm->delta_mmap;
3039+#endif
3040+
3041 mm->get_unmapped_area = s390_get_unmapped_area;
3042 mm->unmap_area = arch_unmap_area;
3043 } else {
3044 mm->mmap_base = mmap_base();
3045+
3046+#ifdef CONFIG_PAX_RANDMMAP
3047+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3048+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3049+#endif
3050+
3051 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3052 mm->unmap_area = arch_unmap_area_topdown;
3053 }
3054diff -urNp linux-3.0.7/arch/score/include/asm/system.h linux-3.0.7/arch/score/include/asm/system.h
3055--- linux-3.0.7/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
3056+++ linux-3.0.7/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
3057@@ -17,7 +17,7 @@ do { \
3058 #define finish_arch_switch(prev) do {} while (0)
3059
3060 typedef void (*vi_handler_t)(void);
3061-extern unsigned long arch_align_stack(unsigned long sp);
3062+#define arch_align_stack(x) (x)
3063
3064 #define mb() barrier()
3065 #define rmb() barrier()
3066diff -urNp linux-3.0.7/arch/score/kernel/process.c linux-3.0.7/arch/score/kernel/process.c
3067--- linux-3.0.7/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
3068+++ linux-3.0.7/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
3069@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3070
3071 return task_pt_regs(task)->cp0_epc;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- return sp;
3077-}
3078diff -urNp linux-3.0.7/arch/sh/mm/mmap.c linux-3.0.7/arch/sh/mm/mmap.c
3079--- linux-3.0.7/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
3080+++ linux-3.0.7/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
3081@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3082 addr = PAGE_ALIGN(addr);
3083
3084 vma = find_vma(mm, addr);
3085- if (TASK_SIZE - len >= addr &&
3086- (!vma || addr + len <= vma->vm_start))
3087+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3088 return addr;
3089 }
3090
3091@@ -106,7 +105,7 @@ full_search:
3092 }
3093 return -ENOMEM;
3094 }
3095- if (likely(!vma || addr + len <= vma->vm_start)) {
3096+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3097 /*
3098 * Remember the place where we stopped the search:
3099 */
3100@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3101 addr = PAGE_ALIGN(addr);
3102
3103 vma = find_vma(mm, addr);
3104- if (TASK_SIZE - len >= addr &&
3105- (!vma || addr + len <= vma->vm_start))
3106+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3107 return addr;
3108 }
3109
3110@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3111 /* make sure it can fit in the remaining address space */
3112 if (likely(addr > len)) {
3113 vma = find_vma(mm, addr-len);
3114- if (!vma || addr <= vma->vm_start) {
3115+ if (check_heap_stack_gap(vma, addr - len, len)) {
3116 /* remember the address as a hint for next time */
3117 return (mm->free_area_cache = addr-len);
3118 }
3119@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3120 if (unlikely(mm->mmap_base < len))
3121 goto bottomup;
3122
3123- addr = mm->mmap_base-len;
3124- if (do_colour_align)
3125- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3126+ addr = mm->mmap_base - len;
3127
3128 do {
3129+ if (do_colour_align)
3130+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3131 /*
3132 * Lookup failure means no vma is above this address,
3133 * else if new region fits below vma->vm_start,
3134 * return with success:
3135 */
3136 vma = find_vma(mm, addr);
3137- if (likely(!vma || addr+len <= vma->vm_start)) {
3138+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3139 /* remember the address as a hint for next time */
3140 return (mm->free_area_cache = addr);
3141 }
3142@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3143 mm->cached_hole_size = vma->vm_start - addr;
3144
3145 /* try just below the current vma->vm_start */
3146- addr = vma->vm_start-len;
3147- if (do_colour_align)
3148- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3149- } while (likely(len < vma->vm_start));
3150+ addr = skip_heap_stack_gap(vma, len);
3151+ } while (!IS_ERR_VALUE(addr));
3152
3153 bottomup:
3154 /*
3155diff -urNp linux-3.0.7/arch/sparc/Makefile linux-3.0.7/arch/sparc/Makefile
3156--- linux-3.0.7/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
3157+++ linux-3.0.7/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
3158@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
3159 # Export what is needed by arch/sparc/boot/Makefile
3160 export VMLINUX_INIT VMLINUX_MAIN
3161 VMLINUX_INIT := $(head-y) $(init-y)
3162-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3163+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3164 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3165 VMLINUX_MAIN += $(drivers-y) $(net-y)
3166
3167diff -urNp linux-3.0.7/arch/sparc/include/asm/atomic_64.h linux-3.0.7/arch/sparc/include/asm/atomic_64.h
3168--- linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
3169+++ linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
3170@@ -14,18 +14,40 @@
3171 #define ATOMIC64_INIT(i) { (i) }
3172
3173 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3174+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3175+{
3176+ return v->counter;
3177+}
3178 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3179+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3180+{
3181+ return v->counter;
3182+}
3183
3184 #define atomic_set(v, i) (((v)->counter) = i)
3185+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3186+{
3187+ v->counter = i;
3188+}
3189 #define atomic64_set(v, i) (((v)->counter) = i)
3190+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3191+{
3192+ v->counter = i;
3193+}
3194
3195 extern void atomic_add(int, atomic_t *);
3196+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3197 extern void atomic64_add(long, atomic64_t *);
3198+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3199 extern void atomic_sub(int, atomic_t *);
3200+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3201 extern void atomic64_sub(long, atomic64_t *);
3202+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3203
3204 extern int atomic_add_ret(int, atomic_t *);
3205+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3206 extern long atomic64_add_ret(long, atomic64_t *);
3207+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3208 extern int atomic_sub_ret(int, atomic_t *);
3209 extern long atomic64_sub_ret(long, atomic64_t *);
3210
3211@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3212 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3213
3214 #define atomic_inc_return(v) atomic_add_ret(1, v)
3215+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3216+{
3217+ return atomic_add_ret_unchecked(1, v);
3218+}
3219 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3220+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3221+{
3222+ return atomic64_add_ret_unchecked(1, v);
3223+}
3224
3225 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3226 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3227
3228 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3229+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3230+{
3231+ return atomic_add_ret_unchecked(i, v);
3232+}
3233 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3234+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3235+{
3236+ return atomic64_add_ret_unchecked(i, v);
3237+}
3238
3239 /*
3240 * atomic_inc_and_test - increment and test
3241@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3242 * other cases.
3243 */
3244 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3245+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3246+{
3247+ return atomic_inc_return_unchecked(v) == 0;
3248+}
3249 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3250
3251 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3252@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3253 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3254
3255 #define atomic_inc(v) atomic_add(1, v)
3256+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3257+{
3258+ atomic_add_unchecked(1, v);
3259+}
3260 #define atomic64_inc(v) atomic64_add(1, v)
3261+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3262+{
3263+ atomic64_add_unchecked(1, v);
3264+}
3265
3266 #define atomic_dec(v) atomic_sub(1, v)
3267+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3268+{
3269+ atomic_sub_unchecked(1, v);
3270+}
3271 #define atomic64_dec(v) atomic64_sub(1, v)
3272+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3273+{
3274+ atomic64_sub_unchecked(1, v);
3275+}
3276
3277 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3278 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3279
3280 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3281+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3282+{
3283+ return cmpxchg(&v->counter, old, new);
3284+}
3285 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3286+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3287+{
3288+ return xchg(&v->counter, new);
3289+}
3290
3291 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3292 {
3293- int c, old;
3294+ int c, old, new;
3295 c = atomic_read(v);
3296 for (;;) {
3297- if (unlikely(c == (u)))
3298+ if (unlikely(c == u))
3299 break;
3300- old = atomic_cmpxchg((v), c, c + (a));
3301+
3302+ asm volatile("addcc %2, %0, %0\n"
3303+
3304+#ifdef CONFIG_PAX_REFCOUNT
3305+ "tvs %%icc, 6\n"
3306+#endif
3307+
3308+ : "=r" (new)
3309+ : "0" (c), "ir" (a)
3310+ : "cc");
3311+
3312+ old = atomic_cmpxchg(v, c, new);
3313 if (likely(old == c))
3314 break;
3315 c = old;
3316 }
3317- return c != (u);
3318+ return c != u;
3319 }
3320
3321 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3322@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3323 #define atomic64_cmpxchg(v, o, n) \
3324 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3325 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3326+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3327+{
3328+ return xchg(&v->counter, new);
3329+}
3330
3331 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3332 {
3333- long c, old;
3334+ long c, old, new;
3335 c = atomic64_read(v);
3336 for (;;) {
3337- if (unlikely(c == (u)))
3338+ if (unlikely(c == u))
3339 break;
3340- old = atomic64_cmpxchg((v), c, c + (a));
3341+
3342+ asm volatile("addcc %2, %0, %0\n"
3343+
3344+#ifdef CONFIG_PAX_REFCOUNT
3345+ "tvs %%xcc, 6\n"
3346+#endif
3347+
3348+ : "=r" (new)
3349+ : "0" (c), "ir" (a)
3350+ : "cc");
3351+
3352+ old = atomic64_cmpxchg(v, c, new);
3353 if (likely(old == c))
3354 break;
3355 c = old;
3356 }
3357- return c != (u);
3358+ return c != u;
3359 }
3360
3361 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3362diff -urNp linux-3.0.7/arch/sparc/include/asm/cache.h linux-3.0.7/arch/sparc/include/asm/cache.h
3363--- linux-3.0.7/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3364+++ linux-3.0.7/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3365@@ -10,7 +10,7 @@
3366 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3367
3368 #define L1_CACHE_SHIFT 5
3369-#define L1_CACHE_BYTES 32
3370+#define L1_CACHE_BYTES 32UL
3371
3372 #ifdef CONFIG_SPARC32
3373 #define SMP_CACHE_BYTES_SHIFT 5
3374diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_32.h linux-3.0.7/arch/sparc/include/asm/elf_32.h
3375--- linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3376+++ linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3377@@ -114,6 +114,13 @@ typedef struct {
3378
3379 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3380
3381+#ifdef CONFIG_PAX_ASLR
3382+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3383+
3384+#define PAX_DELTA_MMAP_LEN 16
3385+#define PAX_DELTA_STACK_LEN 16
3386+#endif
3387+
3388 /* This yields a mask that user programs can use to figure out what
3389 instruction set this cpu supports. This can NOT be done in userspace
3390 on Sparc. */
3391diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_64.h linux-3.0.7/arch/sparc/include/asm/elf_64.h
3392--- linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3393+++ linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3394@@ -180,6 +180,13 @@ typedef struct {
3395 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3396 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3397
3398+#ifdef CONFIG_PAX_ASLR
3399+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3400+
3401+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3402+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3403+#endif
3404+
3405 extern unsigned long sparc64_elf_hwcap;
3406 #define ELF_HWCAP sparc64_elf_hwcap
3407
3408diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtable_32.h linux-3.0.7/arch/sparc/include/asm/pgtable_32.h
3409--- linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3410+++ linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3411@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3412 BTFIXUPDEF_INT(page_none)
3413 BTFIXUPDEF_INT(page_copy)
3414 BTFIXUPDEF_INT(page_readonly)
3415+
3416+#ifdef CONFIG_PAX_PAGEEXEC
3417+BTFIXUPDEF_INT(page_shared_noexec)
3418+BTFIXUPDEF_INT(page_copy_noexec)
3419+BTFIXUPDEF_INT(page_readonly_noexec)
3420+#endif
3421+
3422 BTFIXUPDEF_INT(page_kernel)
3423
3424 #define PMD_SHIFT SUN4C_PMD_SHIFT
3425@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3426 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3427 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3428
3429+#ifdef CONFIG_PAX_PAGEEXEC
3430+extern pgprot_t PAGE_SHARED_NOEXEC;
3431+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3432+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3433+#else
3434+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3435+# define PAGE_COPY_NOEXEC PAGE_COPY
3436+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3437+#endif
3438+
3439 extern unsigned long page_kernel;
3440
3441 #ifdef MODULE
3442diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h
3443--- linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3444+++ linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3445@@ -115,6 +115,13 @@
3446 SRMMU_EXEC | SRMMU_REF)
3447 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3448 SRMMU_EXEC | SRMMU_REF)
3449+
3450+#ifdef CONFIG_PAX_PAGEEXEC
3451+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3452+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3453+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3454+#endif
3455+
3456 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3457 SRMMU_DIRTY | SRMMU_REF)
3458
3459diff -urNp linux-3.0.7/arch/sparc/include/asm/spinlock_64.h linux-3.0.7/arch/sparc/include/asm/spinlock_64.h
3460--- linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:54:53.000000000 -0400
3461+++ linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:55:27.000000000 -0400
3462@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3463
3464 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3465
3466-static void inline arch_read_lock(arch_rwlock_t *lock)
3467+static inline void arch_read_lock(arch_rwlock_t *lock)
3468 {
3469 unsigned long tmp1, tmp2;
3470
3471 __asm__ __volatile__ (
3472 "1: ldsw [%2], %0\n"
3473 " brlz,pn %0, 2f\n"
3474-"4: add %0, 1, %1\n"
3475+"4: addcc %0, 1, %1\n"
3476+
3477+#ifdef CONFIG_PAX_REFCOUNT
3478+" tvs %%icc, 6\n"
3479+#endif
3480+
3481 " cas [%2], %0, %1\n"
3482 " cmp %0, %1\n"
3483 " bne,pn %%icc, 1b\n"
3484@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3485 " .previous"
3486 : "=&r" (tmp1), "=&r" (tmp2)
3487 : "r" (lock)
3488- : "memory");
3489+ : "memory", "cc");
3490 }
3491
3492-static int inline arch_read_trylock(arch_rwlock_t *lock)
3493+static inline int arch_read_trylock(arch_rwlock_t *lock)
3494 {
3495 int tmp1, tmp2;
3496
3497@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3498 "1: ldsw [%2], %0\n"
3499 " brlz,a,pn %0, 2f\n"
3500 " mov 0, %0\n"
3501-" add %0, 1, %1\n"
3502+" addcc %0, 1, %1\n"
3503+
3504+#ifdef CONFIG_PAX_REFCOUNT
3505+" tvs %%icc, 6\n"
3506+#endif
3507+
3508 " cas [%2], %0, %1\n"
3509 " cmp %0, %1\n"
3510 " bne,pn %%icc, 1b\n"
3511@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3512 return tmp1;
3513 }
3514
3515-static void inline arch_read_unlock(arch_rwlock_t *lock)
3516+static inline void arch_read_unlock(arch_rwlock_t *lock)
3517 {
3518 unsigned long tmp1, tmp2;
3519
3520 __asm__ __volatile__(
3521 "1: lduw [%2], %0\n"
3522-" sub %0, 1, %1\n"
3523+" subcc %0, 1, %1\n"
3524+
3525+#ifdef CONFIG_PAX_REFCOUNT
3526+" tvs %%icc, 6\n"
3527+#endif
3528+
3529 " cas [%2], %0, %1\n"
3530 " cmp %0, %1\n"
3531 " bne,pn %%xcc, 1b\n"
3532@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3533 : "memory");
3534 }
3535
3536-static void inline arch_write_lock(arch_rwlock_t *lock)
3537+static inline void arch_write_lock(arch_rwlock_t *lock)
3538 {
3539 unsigned long mask, tmp1, tmp2;
3540
3541@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3542 : "memory");
3543 }
3544
3545-static void inline arch_write_unlock(arch_rwlock_t *lock)
3546+static inline void arch_write_unlock(arch_rwlock_t *lock)
3547 {
3548 __asm__ __volatile__(
3549 " stw %%g0, [%0]"
3550@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3551 : "memory");
3552 }
3553
3554-static int inline arch_write_trylock(arch_rwlock_t *lock)
3555+static inline int arch_write_trylock(arch_rwlock_t *lock)
3556 {
3557 unsigned long mask, tmp1, tmp2, result;
3558
3559diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_32.h linux-3.0.7/arch/sparc/include/asm/thread_info_32.h
3560--- linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3561+++ linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3562@@ -50,6 +50,8 @@ struct thread_info {
3563 unsigned long w_saved;
3564
3565 struct restart_block restart_block;
3566+
3567+ unsigned long lowest_stack;
3568 };
3569
3570 /*
3571diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_64.h linux-3.0.7/arch/sparc/include/asm/thread_info_64.h
3572--- linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3573+++ linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3574@@ -63,6 +63,8 @@ struct thread_info {
3575 struct pt_regs *kern_una_regs;
3576 unsigned int kern_una_insn;
3577
3578+ unsigned long lowest_stack;
3579+
3580 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3581 };
3582
3583diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess.h linux-3.0.7/arch/sparc/include/asm/uaccess.h
3584--- linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3585+++ linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3586@@ -1,5 +1,13 @@
3587 #ifndef ___ASM_SPARC_UACCESS_H
3588 #define ___ASM_SPARC_UACCESS_H
3589+
3590+#ifdef __KERNEL__
3591+#ifndef __ASSEMBLY__
3592+#include <linux/types.h>
3593+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3594+#endif
3595+#endif
3596+
3597 #if defined(__sparc__) && defined(__arch64__)
3598 #include <asm/uaccess_64.h>
3599 #else
3600diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_32.h linux-3.0.7/arch/sparc/include/asm/uaccess_32.h
3601--- linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3602+++ linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3603@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3604
3605 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3606 {
3607- if (n && __access_ok((unsigned long) to, n))
3608+ if ((long)n < 0)
3609+ return n;
3610+
3611+ if (n && __access_ok((unsigned long) to, n)) {
3612+ if (!__builtin_constant_p(n))
3613+ check_object_size(from, n, true);
3614 return __copy_user(to, (__force void __user *) from, n);
3615- else
3616+ } else
3617 return n;
3618 }
3619
3620 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3621 {
3622+ if ((long)n < 0)
3623+ return n;
3624+
3625+ if (!__builtin_constant_p(n))
3626+ check_object_size(from, n, true);
3627+
3628 return __copy_user(to, (__force void __user *) from, n);
3629 }
3630
3631 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3632 {
3633- if (n && __access_ok((unsigned long) from, n))
3634+ if ((long)n < 0)
3635+ return n;
3636+
3637+ if (n && __access_ok((unsigned long) from, n)) {
3638+ if (!__builtin_constant_p(n))
3639+ check_object_size(to, n, false);
3640 return __copy_user((__force void __user *) to, from, n);
3641- else
3642+ } else
3643 return n;
3644 }
3645
3646 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3647 {
3648+ if ((long)n < 0)
3649+ return n;
3650+
3651 return __copy_user((__force void __user *) to, from, n);
3652 }
3653
3654diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_64.h linux-3.0.7/arch/sparc/include/asm/uaccess_64.h
3655--- linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3656+++ linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3657@@ -10,6 +10,7 @@
3658 #include <linux/compiler.h>
3659 #include <linux/string.h>
3660 #include <linux/thread_info.h>
3661+#include <linux/kernel.h>
3662 #include <asm/asi.h>
3663 #include <asm/system.h>
3664 #include <asm/spitfire.h>
3665@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3666 static inline unsigned long __must_check
3667 copy_from_user(void *to, const void __user *from, unsigned long size)
3668 {
3669- unsigned long ret = ___copy_from_user(to, from, size);
3670+ unsigned long ret;
3671
3672+ if ((long)size < 0 || size > INT_MAX)
3673+ return size;
3674+
3675+ if (!__builtin_constant_p(size))
3676+ check_object_size(to, size, false);
3677+
3678+ ret = ___copy_from_user(to, from, size);
3679 if (unlikely(ret))
3680 ret = copy_from_user_fixup(to, from, size);
3681
3682@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3683 static inline unsigned long __must_check
3684 copy_to_user(void __user *to, const void *from, unsigned long size)
3685 {
3686- unsigned long ret = ___copy_to_user(to, from, size);
3687+ unsigned long ret;
3688+
3689+ if ((long)size < 0 || size > INT_MAX)
3690+ return size;
3691+
3692+ if (!__builtin_constant_p(size))
3693+ check_object_size(from, size, true);
3694
3695+ ret = ___copy_to_user(to, from, size);
3696 if (unlikely(ret))
3697 ret = copy_to_user_fixup(to, from, size);
3698 return ret;
3699diff -urNp linux-3.0.7/arch/sparc/kernel/Makefile linux-3.0.7/arch/sparc/kernel/Makefile
3700--- linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:54:53.000000000 -0400
3701+++ linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:55:27.000000000 -0400
3702@@ -3,7 +3,7 @@
3703 #
3704
3705 asflags-y := -ansi
3706-ccflags-y := -Werror
3707+#ccflags-y := -Werror
3708
3709 extra-y := head_$(BITS).o
3710 extra-y += init_task.o
3711diff -urNp linux-3.0.7/arch/sparc/kernel/process_32.c linux-3.0.7/arch/sparc/kernel/process_32.c
3712--- linux-3.0.7/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3713+++ linux-3.0.7/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3714@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3715 rw->ins[4], rw->ins[5],
3716 rw->ins[6],
3717 rw->ins[7]);
3718- printk("%pS\n", (void *) rw->ins[7]);
3719+ printk("%pA\n", (void *) rw->ins[7]);
3720 rw = (struct reg_window32 *) rw->ins[6];
3721 }
3722 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3723@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3724
3725 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3726 r->psr, r->pc, r->npc, r->y, print_tainted());
3727- printk("PC: <%pS>\n", (void *) r->pc);
3728+ printk("PC: <%pA>\n", (void *) r->pc);
3729 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3730 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3731 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3732 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3733 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3734 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3735- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3736+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3737
3738 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3739 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3740@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3741 rw = (struct reg_window32 *) fp;
3742 pc = rw->ins[7];
3743 printk("[%08lx : ", pc);
3744- printk("%pS ] ", (void *) pc);
3745+ printk("%pA ] ", (void *) pc);
3746 fp = rw->ins[6];
3747 } while (++count < 16);
3748 printk("\n");
3749diff -urNp linux-3.0.7/arch/sparc/kernel/process_64.c linux-3.0.7/arch/sparc/kernel/process_64.c
3750--- linux-3.0.7/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3751+++ linux-3.0.7/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3752@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3753 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3754 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3755 if (regs->tstate & TSTATE_PRIV)
3756- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3757+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3758 }
3759
3760 void show_regs(struct pt_regs *regs)
3761 {
3762 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3763 regs->tpc, regs->tnpc, regs->y, print_tainted());
3764- printk("TPC: <%pS>\n", (void *) regs->tpc);
3765+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3766 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3767 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3768 regs->u_regs[3]);
3769@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3770 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3771 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3772 regs->u_regs[15]);
3773- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3774+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3775 show_regwindow(regs);
3776 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3777 }
3778@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3779 ((tp && tp->task) ? tp->task->pid : -1));
3780
3781 if (gp->tstate & TSTATE_PRIV) {
3782- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3783+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3784 (void *) gp->tpc,
3785 (void *) gp->o7,
3786 (void *) gp->i7,
3787diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c
3788--- linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3789+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3790@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3791 if (ARCH_SUN4C && len > 0x20000000)
3792 return -ENOMEM;
3793 if (!addr)
3794- addr = TASK_UNMAPPED_BASE;
3795+ addr = current->mm->mmap_base;
3796
3797 if (flags & MAP_SHARED)
3798 addr = COLOUR_ALIGN(addr);
3799@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3800 }
3801 if (TASK_SIZE - PAGE_SIZE - len < addr)
3802 return -ENOMEM;
3803- if (!vmm || addr + len <= vmm->vm_start)
3804+ if (check_heap_stack_gap(vmm, addr, len))
3805 return addr;
3806 addr = vmm->vm_end;
3807 if (flags & MAP_SHARED)
3808diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c
3809--- linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3811@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3812 /* We do not accept a shared mapping if it would violate
3813 * cache aliasing constraints.
3814 */
3815- if ((flags & MAP_SHARED) &&
3816+ if ((filp || (flags & MAP_SHARED)) &&
3817 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3818 return -EINVAL;
3819 return addr;
3820@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3821 if (filp || (flags & MAP_SHARED))
3822 do_color_align = 1;
3823
3824+#ifdef CONFIG_PAX_RANDMMAP
3825+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3826+#endif
3827+
3828 if (addr) {
3829 if (do_color_align)
3830 addr = COLOUR_ALIGN(addr, pgoff);
3831@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3832 addr = PAGE_ALIGN(addr);
3833
3834 vma = find_vma(mm, addr);
3835- if (task_size - len >= addr &&
3836- (!vma || addr + len <= vma->vm_start))
3837+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3838 return addr;
3839 }
3840
3841 if (len > mm->cached_hole_size) {
3842- start_addr = addr = mm->free_area_cache;
3843+ start_addr = addr = mm->free_area_cache;
3844 } else {
3845- start_addr = addr = TASK_UNMAPPED_BASE;
3846+ start_addr = addr = mm->mmap_base;
3847 mm->cached_hole_size = 0;
3848 }
3849
3850@@ -174,14 +177,14 @@ full_search:
3851 vma = find_vma(mm, VA_EXCLUDE_END);
3852 }
3853 if (unlikely(task_size < addr)) {
3854- if (start_addr != TASK_UNMAPPED_BASE) {
3855- start_addr = addr = TASK_UNMAPPED_BASE;
3856+ if (start_addr != mm->mmap_base) {
3857+ start_addr = addr = mm->mmap_base;
3858 mm->cached_hole_size = 0;
3859 goto full_search;
3860 }
3861 return -ENOMEM;
3862 }
3863- if (likely(!vma || addr + len <= vma->vm_start)) {
3864+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3865 /*
3866 * Remember the place where we stopped the search:
3867 */
3868@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3869 /* We do not accept a shared mapping if it would violate
3870 * cache aliasing constraints.
3871 */
3872- if ((flags & MAP_SHARED) &&
3873+ if ((filp || (flags & MAP_SHARED)) &&
3874 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3875 return -EINVAL;
3876 return addr;
3877@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3878 addr = PAGE_ALIGN(addr);
3879
3880 vma = find_vma(mm, addr);
3881- if (task_size - len >= addr &&
3882- (!vma || addr + len <= vma->vm_start))
3883+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3884 return addr;
3885 }
3886
3887@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3888 /* make sure it can fit in the remaining address space */
3889 if (likely(addr > len)) {
3890 vma = find_vma(mm, addr-len);
3891- if (!vma || addr <= vma->vm_start) {
3892+ if (check_heap_stack_gap(vma, addr - len, len)) {
3893 /* remember the address as a hint for next time */
3894 return (mm->free_area_cache = addr-len);
3895 }
3896@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3897 if (unlikely(mm->mmap_base < len))
3898 goto bottomup;
3899
3900- addr = mm->mmap_base-len;
3901- if (do_color_align)
3902- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3903+ addr = mm->mmap_base - len;
3904
3905 do {
3906+ if (do_color_align)
3907+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3908 /*
3909 * Lookup failure means no vma is above this address,
3910 * else if new region fits below vma->vm_start,
3911 * return with success:
3912 */
3913 vma = find_vma(mm, addr);
3914- if (likely(!vma || addr+len <= vma->vm_start)) {
3915+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3916 /* remember the address as a hint for next time */
3917 return (mm->free_area_cache = addr);
3918 }
3919@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3920 mm->cached_hole_size = vma->vm_start - addr;
3921
3922 /* try just below the current vma->vm_start */
3923- addr = vma->vm_start-len;
3924- if (do_color_align)
3925- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3926- } while (likely(len < vma->vm_start));
3927+ addr = skip_heap_stack_gap(vma, len);
3928+ } while (!IS_ERR_VALUE(addr));
3929
3930 bottomup:
3931 /*
3932@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3933 gap == RLIM_INFINITY ||
3934 sysctl_legacy_va_layout) {
3935 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3936+
3937+#ifdef CONFIG_PAX_RANDMMAP
3938+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3939+ mm->mmap_base += mm->delta_mmap;
3940+#endif
3941+
3942 mm->get_unmapped_area = arch_get_unmapped_area;
3943 mm->unmap_area = arch_unmap_area;
3944 } else {
3945@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3946 gap = (task_size / 6 * 5);
3947
3948 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3949+
3950+#ifdef CONFIG_PAX_RANDMMAP
3951+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3952+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3953+#endif
3954+
3955 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3956 mm->unmap_area = arch_unmap_area_topdown;
3957 }
3958diff -urNp linux-3.0.7/arch/sparc/kernel/traps_32.c linux-3.0.7/arch/sparc/kernel/traps_32.c
3959--- linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3960+++ linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3961@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3962 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3963 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3964
3965+extern void gr_handle_kernel_exploit(void);
3966+
3967 void die_if_kernel(char *str, struct pt_regs *regs)
3968 {
3969 static int die_counter;
3970@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3971 count++ < 30 &&
3972 (((unsigned long) rw) >= PAGE_OFFSET) &&
3973 !(((unsigned long) rw) & 0x7)) {
3974- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3975+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3976 (void *) rw->ins[7]);
3977 rw = (struct reg_window32 *)rw->ins[6];
3978 }
3979 }
3980 printk("Instruction DUMP:");
3981 instruction_dump ((unsigned long *) regs->pc);
3982- if(regs->psr & PSR_PS)
3983+ if(regs->psr & PSR_PS) {
3984+ gr_handle_kernel_exploit();
3985 do_exit(SIGKILL);
3986+ }
3987 do_exit(SIGSEGV);
3988 }
3989
3990diff -urNp linux-3.0.7/arch/sparc/kernel/traps_64.c linux-3.0.7/arch/sparc/kernel/traps_64.c
3991--- linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3992+++ linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3993@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3994 i + 1,
3995 p->trapstack[i].tstate, p->trapstack[i].tpc,
3996 p->trapstack[i].tnpc, p->trapstack[i].tt);
3997- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3998+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3999 }
4000 }
4001
4002@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
4003
4004 lvl -= 0x100;
4005 if (regs->tstate & TSTATE_PRIV) {
4006+
4007+#ifdef CONFIG_PAX_REFCOUNT
4008+ if (lvl == 6)
4009+ pax_report_refcount_overflow(regs);
4010+#endif
4011+
4012 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4013 die_if_kernel(buffer, regs);
4014 }
4015@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
4016 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4017 {
4018 char buffer[32];
4019-
4020+
4021 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4022 0, lvl, SIGTRAP) == NOTIFY_STOP)
4023 return;
4024
4025+#ifdef CONFIG_PAX_REFCOUNT
4026+ if (lvl == 6)
4027+ pax_report_refcount_overflow(regs);
4028+#endif
4029+
4030 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4031
4032 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4033@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
4034 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4035 printk("%s" "ERROR(%d): ",
4036 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4037- printk("TPC<%pS>\n", (void *) regs->tpc);
4038+ printk("TPC<%pA>\n", (void *) regs->tpc);
4039 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4040 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4041 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4042@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
4043 smp_processor_id(),
4044 (type & 0x1) ? 'I' : 'D',
4045 regs->tpc);
4046- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4047+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4048 panic("Irrecoverable Cheetah+ parity error.");
4049 }
4050
4051@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
4052 smp_processor_id(),
4053 (type & 0x1) ? 'I' : 'D',
4054 regs->tpc);
4055- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4056+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4057 }
4058
4059 struct sun4v_error_entry {
4060@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
4061
4062 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4063 regs->tpc, tl);
4064- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4065+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4066 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4067- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4068+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4069 (void *) regs->u_regs[UREG_I7]);
4070 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4071 "pte[%lx] error[%lx]\n",
4072@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
4073
4074 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4075 regs->tpc, tl);
4076- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4077+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4078 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4079- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4080+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4081 (void *) regs->u_regs[UREG_I7]);
4082 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4083 "pte[%lx] error[%lx]\n",
4084@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
4085 fp = (unsigned long)sf->fp + STACK_BIAS;
4086 }
4087
4088- printk(" [%016lx] %pS\n", pc, (void *) pc);
4089+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4090 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4091 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4092 int index = tsk->curr_ret_stack;
4093 if (tsk->ret_stack && index >= graph) {
4094 pc = tsk->ret_stack[index - graph].ret;
4095- printk(" [%016lx] %pS\n", pc, (void *) pc);
4096+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4097 graph++;
4098 }
4099 }
4100@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
4101 return (struct reg_window *) (fp + STACK_BIAS);
4102 }
4103
4104+extern void gr_handle_kernel_exploit(void);
4105+
4106 void die_if_kernel(char *str, struct pt_regs *regs)
4107 {
4108 static int die_counter;
4109@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
4110 while (rw &&
4111 count++ < 30 &&
4112 kstack_valid(tp, (unsigned long) rw)) {
4113- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4114+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4115 (void *) rw->ins[7]);
4116
4117 rw = kernel_stack_up(rw);
4118@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
4119 }
4120 user_instruction_dump ((unsigned int __user *) regs->tpc);
4121 }
4122- if (regs->tstate & TSTATE_PRIV)
4123+ if (regs->tstate & TSTATE_PRIV) {
4124+ gr_handle_kernel_exploit();
4125 do_exit(SIGKILL);
4126+ }
4127 do_exit(SIGSEGV);
4128 }
4129 EXPORT_SYMBOL(die_if_kernel);
4130diff -urNp linux-3.0.7/arch/sparc/kernel/unaligned_64.c linux-3.0.7/arch/sparc/kernel/unaligned_64.c
4131--- linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
4132+++ linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
4133@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
4134 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4135
4136 if (__ratelimit(&ratelimit)) {
4137- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4138+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4139 regs->tpc, (void *) regs->tpc);
4140 }
4141 }
4142diff -urNp linux-3.0.7/arch/sparc/lib/Makefile linux-3.0.7/arch/sparc/lib/Makefile
4143--- linux-3.0.7/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4144+++ linux-3.0.7/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4145@@ -2,7 +2,7 @@
4146 #
4147
4148 asflags-y := -ansi -DST_DIV0=0x02
4149-ccflags-y := -Werror
4150+#ccflags-y := -Werror
4151
4152 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4153 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4154diff -urNp linux-3.0.7/arch/sparc/lib/atomic_64.S linux-3.0.7/arch/sparc/lib/atomic_64.S
4155--- linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
4156+++ linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
4157@@ -18,7 +18,12 @@
4158 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4159 BACKOFF_SETUP(%o2)
4160 1: lduw [%o1], %g1
4161- add %g1, %o0, %g7
4162+ addcc %g1, %o0, %g7
4163+
4164+#ifdef CONFIG_PAX_REFCOUNT
4165+ tvs %icc, 6
4166+#endif
4167+
4168 cas [%o1], %g1, %g7
4169 cmp %g1, %g7
4170 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4171@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4172 2: BACKOFF_SPIN(%o2, %o3, 1b)
4173 .size atomic_add, .-atomic_add
4174
4175+ .globl atomic_add_unchecked
4176+ .type atomic_add_unchecked,#function
4177+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4178+ BACKOFF_SETUP(%o2)
4179+1: lduw [%o1], %g1
4180+ add %g1, %o0, %g7
4181+ cas [%o1], %g1, %g7
4182+ cmp %g1, %g7
4183+ bne,pn %icc, 2f
4184+ nop
4185+ retl
4186+ nop
4187+2: BACKOFF_SPIN(%o2, %o3, 1b)
4188+ .size atomic_add_unchecked, .-atomic_add_unchecked
4189+
4190 .globl atomic_sub
4191 .type atomic_sub,#function
4192 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4193 BACKOFF_SETUP(%o2)
4194 1: lduw [%o1], %g1
4195- sub %g1, %o0, %g7
4196+ subcc %g1, %o0, %g7
4197+
4198+#ifdef CONFIG_PAX_REFCOUNT
4199+ tvs %icc, 6
4200+#endif
4201+
4202 cas [%o1], %g1, %g7
4203 cmp %g1, %g7
4204 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4205@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4206 2: BACKOFF_SPIN(%o2, %o3, 1b)
4207 .size atomic_sub, .-atomic_sub
4208
4209+ .globl atomic_sub_unchecked
4210+ .type atomic_sub_unchecked,#function
4211+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4212+ BACKOFF_SETUP(%o2)
4213+1: lduw [%o1], %g1
4214+ sub %g1, %o0, %g7
4215+ cas [%o1], %g1, %g7
4216+ cmp %g1, %g7
4217+ bne,pn %icc, 2f
4218+ nop
4219+ retl
4220+ nop
4221+2: BACKOFF_SPIN(%o2, %o3, 1b)
4222+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4223+
4224 .globl atomic_add_ret
4225 .type atomic_add_ret,#function
4226 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4227 BACKOFF_SETUP(%o2)
4228 1: lduw [%o1], %g1
4229- add %g1, %o0, %g7
4230+ addcc %g1, %o0, %g7
4231+
4232+#ifdef CONFIG_PAX_REFCOUNT
4233+ tvs %icc, 6
4234+#endif
4235+
4236 cas [%o1], %g1, %g7
4237 cmp %g1, %g7
4238 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4239@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4240 2: BACKOFF_SPIN(%o2, %o3, 1b)
4241 .size atomic_add_ret, .-atomic_add_ret
4242
4243+ .globl atomic_add_ret_unchecked
4244+ .type atomic_add_ret_unchecked,#function
4245+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4246+ BACKOFF_SETUP(%o2)
4247+1: lduw [%o1], %g1
4248+ addcc %g1, %o0, %g7
4249+ cas [%o1], %g1, %g7
4250+ cmp %g1, %g7
4251+ bne,pn %icc, 2f
4252+ add %g7, %o0, %g7
4253+ sra %g7, 0, %o0
4254+ retl
4255+ nop
4256+2: BACKOFF_SPIN(%o2, %o3, 1b)
4257+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4258+
4259 .globl atomic_sub_ret
4260 .type atomic_sub_ret,#function
4261 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4262 BACKOFF_SETUP(%o2)
4263 1: lduw [%o1], %g1
4264- sub %g1, %o0, %g7
4265+ subcc %g1, %o0, %g7
4266+
4267+#ifdef CONFIG_PAX_REFCOUNT
4268+ tvs %icc, 6
4269+#endif
4270+
4271 cas [%o1], %g1, %g7
4272 cmp %g1, %g7
4273 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4274@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4275 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4276 BACKOFF_SETUP(%o2)
4277 1: ldx [%o1], %g1
4278- add %g1, %o0, %g7
4279+ addcc %g1, %o0, %g7
4280+
4281+#ifdef CONFIG_PAX_REFCOUNT
4282+ tvs %xcc, 6
4283+#endif
4284+
4285 casx [%o1], %g1, %g7
4286 cmp %g1, %g7
4287 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4288@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4289 2: BACKOFF_SPIN(%o2, %o3, 1b)
4290 .size atomic64_add, .-atomic64_add
4291
4292+ .globl atomic64_add_unchecked
4293+ .type atomic64_add_unchecked,#function
4294+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4295+ BACKOFF_SETUP(%o2)
4296+1: ldx [%o1], %g1
4297+ addcc %g1, %o0, %g7
4298+ casx [%o1], %g1, %g7
4299+ cmp %g1, %g7
4300+ bne,pn %xcc, 2f
4301+ nop
4302+ retl
4303+ nop
4304+2: BACKOFF_SPIN(%o2, %o3, 1b)
4305+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4306+
4307 .globl atomic64_sub
4308 .type atomic64_sub,#function
4309 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4310 BACKOFF_SETUP(%o2)
4311 1: ldx [%o1], %g1
4312- sub %g1, %o0, %g7
4313+ subcc %g1, %o0, %g7
4314+
4315+#ifdef CONFIG_PAX_REFCOUNT
4316+ tvs %xcc, 6
4317+#endif
4318+
4319 casx [%o1], %g1, %g7
4320 cmp %g1, %g7
4321 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4322@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4323 2: BACKOFF_SPIN(%o2, %o3, 1b)
4324 .size atomic64_sub, .-atomic64_sub
4325
4326+ .globl atomic64_sub_unchecked
4327+ .type atomic64_sub_unchecked,#function
4328+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4329+ BACKOFF_SETUP(%o2)
4330+1: ldx [%o1], %g1
4331+ subcc %g1, %o0, %g7
4332+ casx [%o1], %g1, %g7
4333+ cmp %g1, %g7
4334+ bne,pn %xcc, 2f
4335+ nop
4336+ retl
4337+ nop
4338+2: BACKOFF_SPIN(%o2, %o3, 1b)
4339+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4340+
4341 .globl atomic64_add_ret
4342 .type atomic64_add_ret,#function
4343 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4344 BACKOFF_SETUP(%o2)
4345 1: ldx [%o1], %g1
4346- add %g1, %o0, %g7
4347+ addcc %g1, %o0, %g7
4348+
4349+#ifdef CONFIG_PAX_REFCOUNT
4350+ tvs %xcc, 6
4351+#endif
4352+
4353 casx [%o1], %g1, %g7
4354 cmp %g1, %g7
4355 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4356@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4357 2: BACKOFF_SPIN(%o2, %o3, 1b)
4358 .size atomic64_add_ret, .-atomic64_add_ret
4359
4360+ .globl atomic64_add_ret_unchecked
4361+ .type atomic64_add_ret_unchecked,#function
4362+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4363+ BACKOFF_SETUP(%o2)
4364+1: ldx [%o1], %g1
4365+ addcc %g1, %o0, %g7
4366+ casx [%o1], %g1, %g7
4367+ cmp %g1, %g7
4368+ bne,pn %xcc, 2f
4369+ add %g7, %o0, %g7
4370+ mov %g7, %o0
4371+ retl
4372+ nop
4373+2: BACKOFF_SPIN(%o2, %o3, 1b)
4374+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4375+
4376 .globl atomic64_sub_ret
4377 .type atomic64_sub_ret,#function
4378 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4379 BACKOFF_SETUP(%o2)
4380 1: ldx [%o1], %g1
4381- sub %g1, %o0, %g7
4382+ subcc %g1, %o0, %g7
4383+
4384+#ifdef CONFIG_PAX_REFCOUNT
4385+ tvs %xcc, 6
4386+#endif
4387+
4388 casx [%o1], %g1, %g7
4389 cmp %g1, %g7
4390 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4391diff -urNp linux-3.0.7/arch/sparc/lib/ksyms.c linux-3.0.7/arch/sparc/lib/ksyms.c
4392--- linux-3.0.7/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4393+++ linux-3.0.7/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4394@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4395
4396 /* Atomic counter implementation. */
4397 EXPORT_SYMBOL(atomic_add);
4398+EXPORT_SYMBOL(atomic_add_unchecked);
4399 EXPORT_SYMBOL(atomic_add_ret);
4400+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4401 EXPORT_SYMBOL(atomic_sub);
4402+EXPORT_SYMBOL(atomic_sub_unchecked);
4403 EXPORT_SYMBOL(atomic_sub_ret);
4404 EXPORT_SYMBOL(atomic64_add);
4405+EXPORT_SYMBOL(atomic64_add_unchecked);
4406 EXPORT_SYMBOL(atomic64_add_ret);
4407+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4408 EXPORT_SYMBOL(atomic64_sub);
4409+EXPORT_SYMBOL(atomic64_sub_unchecked);
4410 EXPORT_SYMBOL(atomic64_sub_ret);
4411
4412 /* Atomic bit operations. */
4413diff -urNp linux-3.0.7/arch/sparc/mm/Makefile linux-3.0.7/arch/sparc/mm/Makefile
4414--- linux-3.0.7/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
4415+++ linux-3.0.7/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
4416@@ -2,7 +2,7 @@
4417 #
4418
4419 asflags-y := -ansi
4420-ccflags-y := -Werror
4421+#ccflags-y := -Werror
4422
4423 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
4424 obj-y += fault_$(BITS).o
4425diff -urNp linux-3.0.7/arch/sparc/mm/fault_32.c linux-3.0.7/arch/sparc/mm/fault_32.c
4426--- linux-3.0.7/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4427+++ linux-3.0.7/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4428@@ -22,6 +22,9 @@
4429 #include <linux/interrupt.h>
4430 #include <linux/module.h>
4431 #include <linux/kdebug.h>
4432+#include <linux/slab.h>
4433+#include <linux/pagemap.h>
4434+#include <linux/compiler.h>
4435
4436 #include <asm/system.h>
4437 #include <asm/page.h>
4438@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4439 return safe_compute_effective_address(regs, insn);
4440 }
4441
4442+#ifdef CONFIG_PAX_PAGEEXEC
4443+#ifdef CONFIG_PAX_DLRESOLVE
4444+static void pax_emuplt_close(struct vm_area_struct *vma)
4445+{
4446+ vma->vm_mm->call_dl_resolve = 0UL;
4447+}
4448+
4449+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4450+{
4451+ unsigned int *kaddr;
4452+
4453+ vmf->page = alloc_page(GFP_HIGHUSER);
4454+ if (!vmf->page)
4455+ return VM_FAULT_OOM;
4456+
4457+ kaddr = kmap(vmf->page);
4458+ memset(kaddr, 0, PAGE_SIZE);
4459+ kaddr[0] = 0x9DE3BFA8U; /* save */
4460+ flush_dcache_page(vmf->page);
4461+ kunmap(vmf->page);
4462+ return VM_FAULT_MAJOR;
4463+}
4464+
4465+static const struct vm_operations_struct pax_vm_ops = {
4466+ .close = pax_emuplt_close,
4467+ .fault = pax_emuplt_fault
4468+};
4469+
4470+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4471+{
4472+ int ret;
4473+
4474+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4475+ vma->vm_mm = current->mm;
4476+ vma->vm_start = addr;
4477+ vma->vm_end = addr + PAGE_SIZE;
4478+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4479+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4480+ vma->vm_ops = &pax_vm_ops;
4481+
4482+ ret = insert_vm_struct(current->mm, vma);
4483+ if (ret)
4484+ return ret;
4485+
4486+ ++current->mm->total_vm;
4487+ return 0;
4488+}
4489+#endif
4490+
4491+/*
4492+ * PaX: decide what to do with offenders (regs->pc = fault address)
4493+ *
4494+ * returns 1 when task should be killed
4495+ * 2 when patched PLT trampoline was detected
4496+ * 3 when unpatched PLT trampoline was detected
4497+ */
4498+static int pax_handle_fetch_fault(struct pt_regs *regs)
4499+{
4500+
4501+#ifdef CONFIG_PAX_EMUPLT
4502+ int err;
4503+
4504+ do { /* PaX: patched PLT emulation #1 */
4505+ unsigned int sethi1, sethi2, jmpl;
4506+
4507+ err = get_user(sethi1, (unsigned int *)regs->pc);
4508+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4509+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4510+
4511+ if (err)
4512+ break;
4513+
4514+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4515+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4516+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4517+ {
4518+ unsigned int addr;
4519+
4520+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4521+ addr = regs->u_regs[UREG_G1];
4522+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4523+ regs->pc = addr;
4524+ regs->npc = addr+4;
4525+ return 2;
4526+ }
4527+ } while (0);
4528+
4529+ { /* PaX: patched PLT emulation #2 */
4530+ unsigned int ba;
4531+
4532+ err = get_user(ba, (unsigned int *)regs->pc);
4533+
4534+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4535+ unsigned int addr;
4536+
4537+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4538+ regs->pc = addr;
4539+ regs->npc = addr+4;
4540+ return 2;
4541+ }
4542+ }
4543+
4544+ do { /* PaX: patched PLT emulation #3 */
4545+ unsigned int sethi, jmpl, nop;
4546+
4547+ err = get_user(sethi, (unsigned int *)regs->pc);
4548+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4549+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4550+
4551+ if (err)
4552+ break;
4553+
4554+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4555+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4556+ nop == 0x01000000U)
4557+ {
4558+ unsigned int addr;
4559+
4560+ addr = (sethi & 0x003FFFFFU) << 10;
4561+ regs->u_regs[UREG_G1] = addr;
4562+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4563+ regs->pc = addr;
4564+ regs->npc = addr+4;
4565+ return 2;
4566+ }
4567+ } while (0);
4568+
4569+ do { /* PaX: unpatched PLT emulation step 1 */
4570+ unsigned int sethi, ba, nop;
4571+
4572+ err = get_user(sethi, (unsigned int *)regs->pc);
4573+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4574+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4575+
4576+ if (err)
4577+ break;
4578+
4579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4580+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4581+ nop == 0x01000000U)
4582+ {
4583+ unsigned int addr, save, call;
4584+
4585+ if ((ba & 0xFFC00000U) == 0x30800000U)
4586+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4587+ else
4588+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4589+
4590+ err = get_user(save, (unsigned int *)addr);
4591+ err |= get_user(call, (unsigned int *)(addr+4));
4592+ err |= get_user(nop, (unsigned int *)(addr+8));
4593+ if (err)
4594+ break;
4595+
4596+#ifdef CONFIG_PAX_DLRESOLVE
4597+ if (save == 0x9DE3BFA8U &&
4598+ (call & 0xC0000000U) == 0x40000000U &&
4599+ nop == 0x01000000U)
4600+ {
4601+ struct vm_area_struct *vma;
4602+ unsigned long call_dl_resolve;
4603+
4604+ down_read(&current->mm->mmap_sem);
4605+ call_dl_resolve = current->mm->call_dl_resolve;
4606+ up_read(&current->mm->mmap_sem);
4607+ if (likely(call_dl_resolve))
4608+ goto emulate;
4609+
4610+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4611+
4612+ down_write(&current->mm->mmap_sem);
4613+ if (current->mm->call_dl_resolve) {
4614+ call_dl_resolve = current->mm->call_dl_resolve;
4615+ up_write(&current->mm->mmap_sem);
4616+ if (vma)
4617+ kmem_cache_free(vm_area_cachep, vma);
4618+ goto emulate;
4619+ }
4620+
4621+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4622+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4623+ up_write(&current->mm->mmap_sem);
4624+ if (vma)
4625+ kmem_cache_free(vm_area_cachep, vma);
4626+ return 1;
4627+ }
4628+
4629+ if (pax_insert_vma(vma, call_dl_resolve)) {
4630+ up_write(&current->mm->mmap_sem);
4631+ kmem_cache_free(vm_area_cachep, vma);
4632+ return 1;
4633+ }
4634+
4635+ current->mm->call_dl_resolve = call_dl_resolve;
4636+ up_write(&current->mm->mmap_sem);
4637+
4638+emulate:
4639+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4640+ regs->pc = call_dl_resolve;
4641+ regs->npc = addr+4;
4642+ return 3;
4643+ }
4644+#endif
4645+
4646+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4647+ if ((save & 0xFFC00000U) == 0x05000000U &&
4648+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4652+ regs->u_regs[UREG_G2] = addr + 4;
4653+ addr = (save & 0x003FFFFFU) << 10;
4654+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4655+ regs->pc = addr;
4656+ regs->npc = addr+4;
4657+ return 3;
4658+ }
4659+ }
4660+ } while (0);
4661+
4662+ do { /* PaX: unpatched PLT emulation step 2 */
4663+ unsigned int save, call, nop;
4664+
4665+ err = get_user(save, (unsigned int *)(regs->pc-4));
4666+ err |= get_user(call, (unsigned int *)regs->pc);
4667+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4668+ if (err)
4669+ break;
4670+
4671+ if (save == 0x9DE3BFA8U &&
4672+ (call & 0xC0000000U) == 0x40000000U &&
4673+ nop == 0x01000000U)
4674+ {
4675+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4676+
4677+ regs->u_regs[UREG_RETPC] = regs->pc;
4678+ regs->pc = dl_resolve;
4679+ regs->npc = dl_resolve+4;
4680+ return 3;
4681+ }
4682+ } while (0);
4683+#endif
4684+
4685+ return 1;
4686+}
4687+
4688+void pax_report_insns(void *pc, void *sp)
4689+{
4690+ unsigned long i;
4691+
4692+ printk(KERN_ERR "PAX: bytes at PC: ");
4693+ for (i = 0; i < 8; i++) {
4694+ unsigned int c;
4695+ if (get_user(c, (unsigned int *)pc+i))
4696+ printk(KERN_CONT "???????? ");
4697+ else
4698+ printk(KERN_CONT "%08x ", c);
4699+ }
4700+ printk("\n");
4701+}
4702+#endif
4703+
4704 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4705 int text_fault)
4706 {
4707@@ -281,6 +546,24 @@ good_area:
4708 if(!(vma->vm_flags & VM_WRITE))
4709 goto bad_area;
4710 } else {
4711+
4712+#ifdef CONFIG_PAX_PAGEEXEC
4713+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4714+ up_read(&mm->mmap_sem);
4715+ switch (pax_handle_fetch_fault(regs)) {
4716+
4717+#ifdef CONFIG_PAX_EMUPLT
4718+ case 2:
4719+ case 3:
4720+ return;
4721+#endif
4722+
4723+ }
4724+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4725+ do_group_exit(SIGKILL);
4726+ }
4727+#endif
4728+
4729 /* Allow reads even for write-only mappings */
4730 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4731 goto bad_area;
4732diff -urNp linux-3.0.7/arch/sparc/mm/fault_64.c linux-3.0.7/arch/sparc/mm/fault_64.c
4733--- linux-3.0.7/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4734+++ linux-3.0.7/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4735@@ -21,6 +21,9 @@
4736 #include <linux/kprobes.h>
4737 #include <linux/kdebug.h>
4738 #include <linux/percpu.h>
4739+#include <linux/slab.h>
4740+#include <linux/pagemap.h>
4741+#include <linux/compiler.h>
4742
4743 #include <asm/page.h>
4744 #include <asm/pgtable.h>
4745@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4746 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4747 regs->tpc);
4748 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4749- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4750+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4751 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4752 dump_stack();
4753 unhandled_fault(regs->tpc, current, regs);
4754@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4755 show_regs(regs);
4756 }
4757
4758+#ifdef CONFIG_PAX_PAGEEXEC
4759+#ifdef CONFIG_PAX_DLRESOLVE
4760+static void pax_emuplt_close(struct vm_area_struct *vma)
4761+{
4762+ vma->vm_mm->call_dl_resolve = 0UL;
4763+}
4764+
4765+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4766+{
4767+ unsigned int *kaddr;
4768+
4769+ vmf->page = alloc_page(GFP_HIGHUSER);
4770+ if (!vmf->page)
4771+ return VM_FAULT_OOM;
4772+
4773+ kaddr = kmap(vmf->page);
4774+ memset(kaddr, 0, PAGE_SIZE);
4775+ kaddr[0] = 0x9DE3BFA8U; /* save */
4776+ flush_dcache_page(vmf->page);
4777+ kunmap(vmf->page);
4778+ return VM_FAULT_MAJOR;
4779+}
4780+
4781+static const struct vm_operations_struct pax_vm_ops = {
4782+ .close = pax_emuplt_close,
4783+ .fault = pax_emuplt_fault
4784+};
4785+
4786+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4787+{
4788+ int ret;
4789+
4790+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4791+ vma->vm_mm = current->mm;
4792+ vma->vm_start = addr;
4793+ vma->vm_end = addr + PAGE_SIZE;
4794+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4795+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4796+ vma->vm_ops = &pax_vm_ops;
4797+
4798+ ret = insert_vm_struct(current->mm, vma);
4799+ if (ret)
4800+ return ret;
4801+
4802+ ++current->mm->total_vm;
4803+ return 0;
4804+}
4805+#endif
4806+
4807+/*
4808+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4809+ *
4810+ * returns 1 when task should be killed
4811+ * 2 when patched PLT trampoline was detected
4812+ * 3 when unpatched PLT trampoline was detected
4813+ */
4814+static int pax_handle_fetch_fault(struct pt_regs *regs)
4815+{
4816+
4817+#ifdef CONFIG_PAX_EMUPLT
4818+ int err;
4819+
4820+ do { /* PaX: patched PLT emulation #1 */
4821+ unsigned int sethi1, sethi2, jmpl;
4822+
4823+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4824+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4825+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4826+
4827+ if (err)
4828+ break;
4829+
4830+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4831+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4832+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4833+ {
4834+ unsigned long addr;
4835+
4836+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4837+ addr = regs->u_regs[UREG_G1];
4838+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4839+
4840+ if (test_thread_flag(TIF_32BIT))
4841+ addr &= 0xFFFFFFFFUL;
4842+
4843+ regs->tpc = addr;
4844+ regs->tnpc = addr+4;
4845+ return 2;
4846+ }
4847+ } while (0);
4848+
4849+ { /* PaX: patched PLT emulation #2 */
4850+ unsigned int ba;
4851+
4852+ err = get_user(ba, (unsigned int *)regs->tpc);
4853+
4854+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4855+ unsigned long addr;
4856+
4857+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4858+
4859+ if (test_thread_flag(TIF_32BIT))
4860+ addr &= 0xFFFFFFFFUL;
4861+
4862+ regs->tpc = addr;
4863+ regs->tnpc = addr+4;
4864+ return 2;
4865+ }
4866+ }
4867+
4868+ do { /* PaX: patched PLT emulation #3 */
4869+ unsigned int sethi, jmpl, nop;
4870+
4871+ err = get_user(sethi, (unsigned int *)regs->tpc);
4872+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4873+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4874+
4875+ if (err)
4876+ break;
4877+
4878+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4879+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4880+ nop == 0x01000000U)
4881+ {
4882+ unsigned long addr;
4883+
4884+ addr = (sethi & 0x003FFFFFU) << 10;
4885+ regs->u_regs[UREG_G1] = addr;
4886+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4887+
4888+ if (test_thread_flag(TIF_32BIT))
4889+ addr &= 0xFFFFFFFFUL;
4890+
4891+ regs->tpc = addr;
4892+ regs->tnpc = addr+4;
4893+ return 2;
4894+ }
4895+ } while (0);
4896+
4897+ do { /* PaX: patched PLT emulation #4 */
4898+ unsigned int sethi, mov1, call, mov2;
4899+
4900+ err = get_user(sethi, (unsigned int *)regs->tpc);
4901+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4902+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4903+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4904+
4905+ if (err)
4906+ break;
4907+
4908+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4909+ mov1 == 0x8210000FU &&
4910+ (call & 0xC0000000U) == 0x40000000U &&
4911+ mov2 == 0x9E100001U)
4912+ {
4913+ unsigned long addr;
4914+
4915+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4916+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4917+
4918+ if (test_thread_flag(TIF_32BIT))
4919+ addr &= 0xFFFFFFFFUL;
4920+
4921+ regs->tpc = addr;
4922+ regs->tnpc = addr+4;
4923+ return 2;
4924+ }
4925+ } while (0);
4926+
4927+ do { /* PaX: patched PLT emulation #5 */
4928+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4929+
4930+ err = get_user(sethi, (unsigned int *)regs->tpc);
4931+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4932+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4933+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4934+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4935+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4936+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4937+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4938+
4939+ if (err)
4940+ break;
4941+
4942+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4943+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4944+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4945+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4946+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4947+ sllx == 0x83287020U &&
4948+ jmpl == 0x81C04005U &&
4949+ nop == 0x01000000U)
4950+ {
4951+ unsigned long addr;
4952+
4953+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4954+ regs->u_regs[UREG_G1] <<= 32;
4955+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4956+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4957+ regs->tpc = addr;
4958+ regs->tnpc = addr+4;
4959+ return 2;
4960+ }
4961+ } while (0);
4962+
4963+ do { /* PaX: patched PLT emulation #6 */
4964+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4965+
4966+ err = get_user(sethi, (unsigned int *)regs->tpc);
4967+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4968+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4969+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4970+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4971+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4972+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4973+
4974+ if (err)
4975+ break;
4976+
4977+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4978+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4979+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4980+ sllx == 0x83287020U &&
4981+ (or & 0xFFFFE000U) == 0x8A116000U &&
4982+ jmpl == 0x81C04005U &&
4983+ nop == 0x01000000U)
4984+ {
4985+ unsigned long addr;
4986+
4987+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4988+ regs->u_regs[UREG_G1] <<= 32;
4989+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4990+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ } while (0);
4996+
4997+ do { /* PaX: unpatched PLT emulation step 1 */
4998+ unsigned int sethi, ba, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+ unsigned int save, call;
5013+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5014+
5015+ if ((ba & 0xFFC00000U) == 0x30800000U)
5016+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5017+ else
5018+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5019+
5020+ if (test_thread_flag(TIF_32BIT))
5021+ addr &= 0xFFFFFFFFUL;
5022+
5023+ err = get_user(save, (unsigned int *)addr);
5024+ err |= get_user(call, (unsigned int *)(addr+4));
5025+ err |= get_user(nop, (unsigned int *)(addr+8));
5026+ if (err)
5027+ break;
5028+
5029+#ifdef CONFIG_PAX_DLRESOLVE
5030+ if (save == 0x9DE3BFA8U &&
5031+ (call & 0xC0000000U) == 0x40000000U &&
5032+ nop == 0x01000000U)
5033+ {
5034+ struct vm_area_struct *vma;
5035+ unsigned long call_dl_resolve;
5036+
5037+ down_read(&current->mm->mmap_sem);
5038+ call_dl_resolve = current->mm->call_dl_resolve;
5039+ up_read(&current->mm->mmap_sem);
5040+ if (likely(call_dl_resolve))
5041+ goto emulate;
5042+
5043+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5044+
5045+ down_write(&current->mm->mmap_sem);
5046+ if (current->mm->call_dl_resolve) {
5047+ call_dl_resolve = current->mm->call_dl_resolve;
5048+ up_write(&current->mm->mmap_sem);
5049+ if (vma)
5050+ kmem_cache_free(vm_area_cachep, vma);
5051+ goto emulate;
5052+ }
5053+
5054+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5055+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5056+ up_write(&current->mm->mmap_sem);
5057+ if (vma)
5058+ kmem_cache_free(vm_area_cachep, vma);
5059+ return 1;
5060+ }
5061+
5062+ if (pax_insert_vma(vma, call_dl_resolve)) {
5063+ up_write(&current->mm->mmap_sem);
5064+ kmem_cache_free(vm_area_cachep, vma);
5065+ return 1;
5066+ }
5067+
5068+ current->mm->call_dl_resolve = call_dl_resolve;
5069+ up_write(&current->mm->mmap_sem);
5070+
5071+emulate:
5072+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5073+ regs->tpc = call_dl_resolve;
5074+ regs->tnpc = addr+4;
5075+ return 3;
5076+ }
5077+#endif
5078+
5079+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5080+ if ((save & 0xFFC00000U) == 0x05000000U &&
5081+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5082+ nop == 0x01000000U)
5083+ {
5084+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5085+ regs->u_regs[UREG_G2] = addr + 4;
5086+ addr = (save & 0x003FFFFFU) << 10;
5087+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5088+
5089+ if (test_thread_flag(TIF_32BIT))
5090+ addr &= 0xFFFFFFFFUL;
5091+
5092+ regs->tpc = addr;
5093+ regs->tnpc = addr+4;
5094+ return 3;
5095+ }
5096+
5097+ /* PaX: 64-bit PLT stub */
5098+ err = get_user(sethi1, (unsigned int *)addr);
5099+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5100+ err |= get_user(or1, (unsigned int *)(addr+8));
5101+ err |= get_user(or2, (unsigned int *)(addr+12));
5102+ err |= get_user(sllx, (unsigned int *)(addr+16));
5103+ err |= get_user(add, (unsigned int *)(addr+20));
5104+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5105+ err |= get_user(nop, (unsigned int *)(addr+28));
5106+ if (err)
5107+ break;
5108+
5109+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5110+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5111+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5112+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5113+ sllx == 0x89293020U &&
5114+ add == 0x8A010005U &&
5115+ jmpl == 0x89C14000U &&
5116+ nop == 0x01000000U)
5117+ {
5118+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5119+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5120+ regs->u_regs[UREG_G4] <<= 32;
5121+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5122+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5123+ regs->u_regs[UREG_G4] = addr + 24;
5124+ addr = regs->u_regs[UREG_G5];
5125+ regs->tpc = addr;
5126+ regs->tnpc = addr+4;
5127+ return 3;
5128+ }
5129+ }
5130+ } while (0);
5131+
5132+#ifdef CONFIG_PAX_DLRESOLVE
5133+ do { /* PaX: unpatched PLT emulation step 2 */
5134+ unsigned int save, call, nop;
5135+
5136+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5137+ err |= get_user(call, (unsigned int *)regs->tpc);
5138+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5139+ if (err)
5140+ break;
5141+
5142+ if (save == 0x9DE3BFA8U &&
5143+ (call & 0xC0000000U) == 0x40000000U &&
5144+ nop == 0x01000000U)
5145+ {
5146+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5147+
5148+ if (test_thread_flag(TIF_32BIT))
5149+ dl_resolve &= 0xFFFFFFFFUL;
5150+
5151+ regs->u_regs[UREG_RETPC] = regs->tpc;
5152+ regs->tpc = dl_resolve;
5153+ regs->tnpc = dl_resolve+4;
5154+ return 3;
5155+ }
5156+ } while (0);
5157+#endif
5158+
5159+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5160+ unsigned int sethi, ba, nop;
5161+
5162+ err = get_user(sethi, (unsigned int *)regs->tpc);
5163+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5164+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5165+
5166+ if (err)
5167+ break;
5168+
5169+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5170+ (ba & 0xFFF00000U) == 0x30600000U &&
5171+ nop == 0x01000000U)
5172+ {
5173+ unsigned long addr;
5174+
5175+ addr = (sethi & 0x003FFFFFU) << 10;
5176+ regs->u_regs[UREG_G1] = addr;
5177+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5178+
5179+ if (test_thread_flag(TIF_32BIT))
5180+ addr &= 0xFFFFFFFFUL;
5181+
5182+ regs->tpc = addr;
5183+ regs->tnpc = addr+4;
5184+ return 2;
5185+ }
5186+ } while (0);
5187+
5188+#endif
5189+
5190+ return 1;
5191+}
5192+
5193+void pax_report_insns(void *pc, void *sp)
5194+{
5195+ unsigned long i;
5196+
5197+ printk(KERN_ERR "PAX: bytes at PC: ");
5198+ for (i = 0; i < 8; i++) {
5199+ unsigned int c;
5200+ if (get_user(c, (unsigned int *)pc+i))
5201+ printk(KERN_CONT "???????? ");
5202+ else
5203+ printk(KERN_CONT "%08x ", c);
5204+ }
5205+ printk("\n");
5206+}
5207+#endif
5208+
5209 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5210 {
5211 struct mm_struct *mm = current->mm;
5212@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
5213 if (!vma)
5214 goto bad_area;
5215
5216+#ifdef CONFIG_PAX_PAGEEXEC
5217+ /* PaX: detect ITLB misses on non-exec pages */
5218+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5219+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5220+ {
5221+ if (address != regs->tpc)
5222+ goto good_area;
5223+
5224+ up_read(&mm->mmap_sem);
5225+ switch (pax_handle_fetch_fault(regs)) {
5226+
5227+#ifdef CONFIG_PAX_EMUPLT
5228+ case 2:
5229+ case 3:
5230+ return;
5231+#endif
5232+
5233+ }
5234+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5235+ do_group_exit(SIGKILL);
5236+ }
5237+#endif
5238+
5239 /* Pure DTLB misses do not tell us whether the fault causing
5240 * load/store/atomic was a write or not, it only says that there
5241 * was no match. So in such a case we (carefully) read the
5242diff -urNp linux-3.0.7/arch/sparc/mm/hugetlbpage.c linux-3.0.7/arch/sparc/mm/hugetlbpage.c
5243--- linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
5244+++ linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
5245@@ -68,7 +68,7 @@ full_search:
5246 }
5247 return -ENOMEM;
5248 }
5249- if (likely(!vma || addr + len <= vma->vm_start)) {
5250+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5251 /*
5252 * Remember the place where we stopped the search:
5253 */
5254@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
5255 /* make sure it can fit in the remaining address space */
5256 if (likely(addr > len)) {
5257 vma = find_vma(mm, addr-len);
5258- if (!vma || addr <= vma->vm_start) {
5259+ if (check_heap_stack_gap(vma, addr - len, len)) {
5260 /* remember the address as a hint for next time */
5261 return (mm->free_area_cache = addr-len);
5262 }
5263@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
5264 if (unlikely(mm->mmap_base < len))
5265 goto bottomup;
5266
5267- addr = (mm->mmap_base-len) & HPAGE_MASK;
5268+ addr = mm->mmap_base - len;
5269
5270 do {
5271+ addr &= HPAGE_MASK;
5272 /*
5273 * Lookup failure means no vma is above this address,
5274 * else if new region fits below vma->vm_start,
5275 * return with success:
5276 */
5277 vma = find_vma(mm, addr);
5278- if (likely(!vma || addr+len <= vma->vm_start)) {
5279+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5280 /* remember the address as a hint for next time */
5281 return (mm->free_area_cache = addr);
5282 }
5283@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
5284 mm->cached_hole_size = vma->vm_start - addr;
5285
5286 /* try just below the current vma->vm_start */
5287- addr = (vma->vm_start-len) & HPAGE_MASK;
5288- } while (likely(len < vma->vm_start));
5289+ addr = skip_heap_stack_gap(vma, len);
5290+ } while (!IS_ERR_VALUE(addr));
5291
5292 bottomup:
5293 /*
5294@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
5295 if (addr) {
5296 addr = ALIGN(addr, HPAGE_SIZE);
5297 vma = find_vma(mm, addr);
5298- if (task_size - len >= addr &&
5299- (!vma || addr + len <= vma->vm_start))
5300+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5301 return addr;
5302 }
5303 if (mm->get_unmapped_area == arch_get_unmapped_area)
5304diff -urNp linux-3.0.7/arch/sparc/mm/init_32.c linux-3.0.7/arch/sparc/mm/init_32.c
5305--- linux-3.0.7/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
5306+++ linux-3.0.7/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
5307@@ -316,6 +316,9 @@ extern void device_scan(void);
5308 pgprot_t PAGE_SHARED __read_mostly;
5309 EXPORT_SYMBOL(PAGE_SHARED);
5310
5311+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5312+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5313+
5314 void __init paging_init(void)
5315 {
5316 switch(sparc_cpu_model) {
5317@@ -344,17 +347,17 @@ void __init paging_init(void)
5318
5319 /* Initialize the protection map with non-constant, MMU dependent values. */
5320 protection_map[0] = PAGE_NONE;
5321- protection_map[1] = PAGE_READONLY;
5322- protection_map[2] = PAGE_COPY;
5323- protection_map[3] = PAGE_COPY;
5324+ protection_map[1] = PAGE_READONLY_NOEXEC;
5325+ protection_map[2] = PAGE_COPY_NOEXEC;
5326+ protection_map[3] = PAGE_COPY_NOEXEC;
5327 protection_map[4] = PAGE_READONLY;
5328 protection_map[5] = PAGE_READONLY;
5329 protection_map[6] = PAGE_COPY;
5330 protection_map[7] = PAGE_COPY;
5331 protection_map[8] = PAGE_NONE;
5332- protection_map[9] = PAGE_READONLY;
5333- protection_map[10] = PAGE_SHARED;
5334- protection_map[11] = PAGE_SHARED;
5335+ protection_map[9] = PAGE_READONLY_NOEXEC;
5336+ protection_map[10] = PAGE_SHARED_NOEXEC;
5337+ protection_map[11] = PAGE_SHARED_NOEXEC;
5338 protection_map[12] = PAGE_READONLY;
5339 protection_map[13] = PAGE_READONLY;
5340 protection_map[14] = PAGE_SHARED;
5341diff -urNp linux-3.0.7/arch/sparc/mm/srmmu.c linux-3.0.7/arch/sparc/mm/srmmu.c
5342--- linux-3.0.7/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5343+++ linux-3.0.7/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5344@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5345 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5346 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5347 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5348+
5349+#ifdef CONFIG_PAX_PAGEEXEC
5350+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5351+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5352+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5353+#endif
5354+
5355 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5356 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5357
5358diff -urNp linux-3.0.7/arch/um/include/asm/kmap_types.h linux-3.0.7/arch/um/include/asm/kmap_types.h
5359--- linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5360+++ linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5361@@ -23,6 +23,7 @@ enum km_type {
5362 KM_IRQ1,
5363 KM_SOFTIRQ0,
5364 KM_SOFTIRQ1,
5365+ KM_CLEARPAGE,
5366 KM_TYPE_NR
5367 };
5368
5369diff -urNp linux-3.0.7/arch/um/include/asm/page.h linux-3.0.7/arch/um/include/asm/page.h
5370--- linux-3.0.7/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5371+++ linux-3.0.7/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5372@@ -14,6 +14,9 @@
5373 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5374 #define PAGE_MASK (~(PAGE_SIZE-1))
5375
5376+#define ktla_ktva(addr) (addr)
5377+#define ktva_ktla(addr) (addr)
5378+
5379 #ifndef __ASSEMBLY__
5380
5381 struct page;
5382diff -urNp linux-3.0.7/arch/um/kernel/process.c linux-3.0.7/arch/um/kernel/process.c
5383--- linux-3.0.7/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5384+++ linux-3.0.7/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5385@@ -404,22 +404,6 @@ int singlestepping(void * t)
5386 return 2;
5387 }
5388
5389-/*
5390- * Only x86 and x86_64 have an arch_align_stack().
5391- * All other arches have "#define arch_align_stack(x) (x)"
5392- * in their asm/system.h
5393- * As this is included in UML from asm-um/system-generic.h,
5394- * we can use it to behave as the subarch does.
5395- */
5396-#ifndef arch_align_stack
5397-unsigned long arch_align_stack(unsigned long sp)
5398-{
5399- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5400- sp -= get_random_int() % 8192;
5401- return sp & ~0xf;
5402-}
5403-#endif
5404-
5405 unsigned long get_wchan(struct task_struct *p)
5406 {
5407 unsigned long stack_page, sp, ip;
5408diff -urNp linux-3.0.7/arch/um/sys-i386/syscalls.c linux-3.0.7/arch/um/sys-i386/syscalls.c
5409--- linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5410+++ linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5411@@ -11,6 +11,21 @@
5412 #include "asm/uaccess.h"
5413 #include "asm/unistd.h"
5414
5415+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5416+{
5417+ unsigned long pax_task_size = TASK_SIZE;
5418+
5419+#ifdef CONFIG_PAX_SEGMEXEC
5420+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5421+ pax_task_size = SEGMEXEC_TASK_SIZE;
5422+#endif
5423+
5424+ if (len > pax_task_size || addr > pax_task_size - len)
5425+ return -EINVAL;
5426+
5427+ return 0;
5428+}
5429+
5430 /*
5431 * The prototype on i386 is:
5432 *
5433diff -urNp linux-3.0.7/arch/x86/Kconfig linux-3.0.7/arch/x86/Kconfig
5434--- linux-3.0.7/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
5435+++ linux-3.0.7/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
5436@@ -229,7 +229,7 @@ config X86_HT
5437
5438 config X86_32_LAZY_GS
5439 def_bool y
5440- depends on X86_32 && !CC_STACKPROTECTOR
5441+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5442
5443 config ARCH_HWEIGHT_CFLAGS
5444 string
5445@@ -1018,7 +1018,7 @@ choice
5446
5447 config NOHIGHMEM
5448 bool "off"
5449- depends on !X86_NUMAQ
5450+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5451 ---help---
5452 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5453 However, the address space of 32-bit x86 processors is only 4
5454@@ -1055,7 +1055,7 @@ config NOHIGHMEM
5455
5456 config HIGHMEM4G
5457 bool "4GB"
5458- depends on !X86_NUMAQ
5459+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5460 ---help---
5461 Select this if you have a 32-bit processor and between 1 and 4
5462 gigabytes of physical RAM.
5463@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
5464 hex
5465 default 0xB0000000 if VMSPLIT_3G_OPT
5466 default 0x80000000 if VMSPLIT_2G
5467- default 0x78000000 if VMSPLIT_2G_OPT
5468+ default 0x70000000 if VMSPLIT_2G_OPT
5469 default 0x40000000 if VMSPLIT_1G
5470 default 0xC0000000
5471 depends on X86_32
5472@@ -1483,6 +1483,7 @@ config SECCOMP
5473
5474 config CC_STACKPROTECTOR
5475 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5476+ depends on X86_64 || !PAX_MEMORY_UDEREF
5477 ---help---
5478 This option turns on the -fstack-protector GCC feature. This
5479 feature puts, at the beginning of functions, a canary value on
5480@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
5481 config PHYSICAL_START
5482 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5483 default "0x1000000"
5484+ range 0x400000 0x40000000
5485 ---help---
5486 This gives the physical address where the kernel is loaded.
5487
5488@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
5489 config PHYSICAL_ALIGN
5490 hex "Alignment value to which kernel should be aligned" if X86_32
5491 default "0x1000000"
5492+ range 0x400000 0x1000000 if PAX_KERNEXEC
5493 range 0x2000 0x1000000
5494 ---help---
5495 This value puts the alignment restrictions on physical address
5496@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
5497 Say N if you want to disable CPU hotplug.
5498
5499 config COMPAT_VDSO
5500- def_bool y
5501+ def_bool n
5502 prompt "Compat VDSO support"
5503 depends on X86_32 || IA32_EMULATION
5504+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5505 ---help---
5506 Map the 32-bit VDSO to the predictable old-style address too.
5507
5508diff -urNp linux-3.0.7/arch/x86/Kconfig.cpu linux-3.0.7/arch/x86/Kconfig.cpu
5509--- linux-3.0.7/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
5510+++ linux-3.0.7/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
5511@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
5512
5513 config X86_F00F_BUG
5514 def_bool y
5515- depends on M586MMX || M586TSC || M586 || M486 || M386
5516+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5517
5518 config X86_INVD_BUG
5519 def_bool y
5520@@ -362,7 +362,7 @@ config X86_POPAD_OK
5521
5522 config X86_ALIGNMENT_16
5523 def_bool y
5524- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5525+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5526
5527 config X86_INTEL_USERCOPY
5528 def_bool y
5529@@ -408,7 +408,7 @@ config X86_CMPXCHG64
5530 # generates cmov.
5531 config X86_CMOV
5532 def_bool y
5533- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5534+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5535
5536 config X86_MINIMUM_CPU_FAMILY
5537 int
5538diff -urNp linux-3.0.7/arch/x86/Kconfig.debug linux-3.0.7/arch/x86/Kconfig.debug
5539--- linux-3.0.7/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
5540+++ linux-3.0.7/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
5541@@ -81,7 +81,7 @@ config X86_PTDUMP
5542 config DEBUG_RODATA
5543 bool "Write protect kernel read-only data structures"
5544 default y
5545- depends on DEBUG_KERNEL
5546+ depends on DEBUG_KERNEL && BROKEN
5547 ---help---
5548 Mark the kernel read-only data as write-protected in the pagetables,
5549 in order to catch accidental (and incorrect) writes to such const
5550@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5551
5552 config DEBUG_SET_MODULE_RONX
5553 bool "Set loadable kernel module data as NX and text as RO"
5554- depends on MODULES
5555+ depends on MODULES && BROKEN
5556 ---help---
5557 This option helps catch unintended modifications to loadable
5558 kernel module's text and read-only data. It also prevents execution
5559diff -urNp linux-3.0.7/arch/x86/Makefile linux-3.0.7/arch/x86/Makefile
5560--- linux-3.0.7/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
5561+++ linux-3.0.7/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
5562@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
5563 else
5564 BITS := 64
5565 UTS_MACHINE := x86_64
5566+ biarch := $(call cc-option,-m64)
5567 CHECKFLAGS += -D__x86_64__ -m64
5568
5569 KBUILD_AFLAGS += -m64
5570@@ -195,3 +196,12 @@ define archhelp
5571 echo ' FDARGS="..." arguments for the booted kernel'
5572 echo ' FDINITRD=file initrd for the booted kernel'
5573 endef
5574+
5575+define OLD_LD
5576+
5577+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5578+*** Please upgrade your binutils to 2.18 or newer
5579+endef
5580+
5581+archprepare:
5582+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5583diff -urNp linux-3.0.7/arch/x86/boot/Makefile linux-3.0.7/arch/x86/boot/Makefile
5584--- linux-3.0.7/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5585+++ linux-3.0.7/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5586@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5587 $(call cc-option, -fno-stack-protector) \
5588 $(call cc-option, -mpreferred-stack-boundary=2)
5589 KBUILD_CFLAGS += $(call cc-option, -m32)
5590+ifdef CONSTIFY_PLUGIN
5591+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5592+endif
5593 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5594 GCOV_PROFILE := n
5595
5596diff -urNp linux-3.0.7/arch/x86/boot/bitops.h linux-3.0.7/arch/x86/boot/bitops.h
5597--- linux-3.0.7/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5598+++ linux-3.0.7/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5599@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5600 u8 v;
5601 const u32 *p = (const u32 *)addr;
5602
5603- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5604+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5605 return v;
5606 }
5607
5608@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5609
5610 static inline void set_bit(int nr, void *addr)
5611 {
5612- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5613+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5614 }
5615
5616 #endif /* BOOT_BITOPS_H */
5617diff -urNp linux-3.0.7/arch/x86/boot/boot.h linux-3.0.7/arch/x86/boot/boot.h
5618--- linux-3.0.7/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5619+++ linux-3.0.7/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5620@@ -85,7 +85,7 @@ static inline void io_delay(void)
5621 static inline u16 ds(void)
5622 {
5623 u16 seg;
5624- asm("movw %%ds,%0" : "=rm" (seg));
5625+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5626 return seg;
5627 }
5628
5629@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5630 static inline int memcmp(const void *s1, const void *s2, size_t len)
5631 {
5632 u8 diff;
5633- asm("repe; cmpsb; setnz %0"
5634+ asm volatile("repe; cmpsb; setnz %0"
5635 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5636 return diff;
5637 }
5638diff -urNp linux-3.0.7/arch/x86/boot/compressed/Makefile linux-3.0.7/arch/x86/boot/compressed/Makefile
5639--- linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5640+++ linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5641@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5642 KBUILD_CFLAGS += $(cflags-y)
5643 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5644 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5645+ifdef CONSTIFY_PLUGIN
5646+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5647+endif
5648
5649 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5650 GCOV_PROFILE := n
5651diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_32.S linux-3.0.7/arch/x86/boot/compressed/head_32.S
5652--- linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5653+++ linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5654@@ -76,7 +76,7 @@ ENTRY(startup_32)
5655 notl %eax
5656 andl %eax, %ebx
5657 #else
5658- movl $LOAD_PHYSICAL_ADDR, %ebx
5659+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5660 #endif
5661
5662 /* Target address to relocate to for decompression */
5663@@ -162,7 +162,7 @@ relocated:
5664 * and where it was actually loaded.
5665 */
5666 movl %ebp, %ebx
5667- subl $LOAD_PHYSICAL_ADDR, %ebx
5668+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5669 jz 2f /* Nothing to be done if loaded at compiled addr. */
5670 /*
5671 * Process relocations.
5672@@ -170,8 +170,7 @@ relocated:
5673
5674 1: subl $4, %edi
5675 movl (%edi), %ecx
5676- testl %ecx, %ecx
5677- jz 2f
5678+ jecxz 2f
5679 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5680 jmp 1b
5681 2:
5682diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_64.S linux-3.0.7/arch/x86/boot/compressed/head_64.S
5683--- linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5684+++ linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5685@@ -91,7 +91,7 @@ ENTRY(startup_32)
5686 notl %eax
5687 andl %eax, %ebx
5688 #else
5689- movl $LOAD_PHYSICAL_ADDR, %ebx
5690+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5691 #endif
5692
5693 /* Target address to relocate to for decompression */
5694@@ -233,7 +233,7 @@ ENTRY(startup_64)
5695 notq %rax
5696 andq %rax, %rbp
5697 #else
5698- movq $LOAD_PHYSICAL_ADDR, %rbp
5699+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5700 #endif
5701
5702 /* Target address to relocate to for decompression */
5703diff -urNp linux-3.0.7/arch/x86/boot/compressed/misc.c linux-3.0.7/arch/x86/boot/compressed/misc.c
5704--- linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5705+++ linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5706@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5707 case PT_LOAD:
5708 #ifdef CONFIG_RELOCATABLE
5709 dest = output;
5710- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5711+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5712 #else
5713 dest = (void *)(phdr->p_paddr);
5714 #endif
5715@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5716 error("Destination address too large");
5717 #endif
5718 #ifndef CONFIG_RELOCATABLE
5719- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5720+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5721 error("Wrong destination address");
5722 #endif
5723
5724diff -urNp linux-3.0.7/arch/x86/boot/compressed/relocs.c linux-3.0.7/arch/x86/boot/compressed/relocs.c
5725--- linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5726+++ linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5727@@ -13,8 +13,11 @@
5728
5729 static void die(char *fmt, ...);
5730
5731+#include "../../../../include/generated/autoconf.h"
5732+
5733 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5734 static Elf32_Ehdr ehdr;
5735+static Elf32_Phdr *phdr;
5736 static unsigned long reloc_count, reloc_idx;
5737 static unsigned long *relocs;
5738
5739@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5740 }
5741 }
5742
5743+static void read_phdrs(FILE *fp)
5744+{
5745+ unsigned int i;
5746+
5747+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5748+ if (!phdr) {
5749+ die("Unable to allocate %d program headers\n",
5750+ ehdr.e_phnum);
5751+ }
5752+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5753+ die("Seek to %d failed: %s\n",
5754+ ehdr.e_phoff, strerror(errno));
5755+ }
5756+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5757+ die("Cannot read ELF program headers: %s\n",
5758+ strerror(errno));
5759+ }
5760+ for(i = 0; i < ehdr.e_phnum; i++) {
5761+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5762+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5763+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5764+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5765+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5766+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5767+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5768+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5769+ }
5770+
5771+}
5772+
5773 static void read_shdrs(FILE *fp)
5774 {
5775- int i;
5776+ unsigned int i;
5777 Elf32_Shdr shdr;
5778
5779 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5780@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5781
5782 static void read_strtabs(FILE *fp)
5783 {
5784- int i;
5785+ unsigned int i;
5786 for (i = 0; i < ehdr.e_shnum; i++) {
5787 struct section *sec = &secs[i];
5788 if (sec->shdr.sh_type != SHT_STRTAB) {
5789@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5790
5791 static void read_symtabs(FILE *fp)
5792 {
5793- int i,j;
5794+ unsigned int i,j;
5795 for (i = 0; i < ehdr.e_shnum; i++) {
5796 struct section *sec = &secs[i];
5797 if (sec->shdr.sh_type != SHT_SYMTAB) {
5798@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5799
5800 static void read_relocs(FILE *fp)
5801 {
5802- int i,j;
5803+ unsigned int i,j;
5804+ uint32_t base;
5805+
5806 for (i = 0; i < ehdr.e_shnum; i++) {
5807 struct section *sec = &secs[i];
5808 if (sec->shdr.sh_type != SHT_REL) {
5809@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5810 die("Cannot read symbol table: %s\n",
5811 strerror(errno));
5812 }
5813+ base = 0;
5814+ for (j = 0; j < ehdr.e_phnum; j++) {
5815+ if (phdr[j].p_type != PT_LOAD )
5816+ continue;
5817+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5818+ continue;
5819+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5820+ break;
5821+ }
5822 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5823 Elf32_Rel *rel = &sec->reltab[j];
5824- rel->r_offset = elf32_to_cpu(rel->r_offset);
5825+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5826 rel->r_info = elf32_to_cpu(rel->r_info);
5827 }
5828 }
5829@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5830
5831 static void print_absolute_symbols(void)
5832 {
5833- int i;
5834+ unsigned int i;
5835 printf("Absolute symbols\n");
5836 printf(" Num: Value Size Type Bind Visibility Name\n");
5837 for (i = 0; i < ehdr.e_shnum; i++) {
5838 struct section *sec = &secs[i];
5839 char *sym_strtab;
5840 Elf32_Sym *sh_symtab;
5841- int j;
5842+ unsigned int j;
5843
5844 if (sec->shdr.sh_type != SHT_SYMTAB) {
5845 continue;
5846@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5847
5848 static void print_absolute_relocs(void)
5849 {
5850- int i, printed = 0;
5851+ unsigned int i, printed = 0;
5852
5853 for (i = 0; i < ehdr.e_shnum; i++) {
5854 struct section *sec = &secs[i];
5855 struct section *sec_applies, *sec_symtab;
5856 char *sym_strtab;
5857 Elf32_Sym *sh_symtab;
5858- int j;
5859+ unsigned int j;
5860 if (sec->shdr.sh_type != SHT_REL) {
5861 continue;
5862 }
5863@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5864
5865 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5866 {
5867- int i;
5868+ unsigned int i;
5869 /* Walk through the relocations */
5870 for (i = 0; i < ehdr.e_shnum; i++) {
5871 char *sym_strtab;
5872 Elf32_Sym *sh_symtab;
5873 struct section *sec_applies, *sec_symtab;
5874- int j;
5875+ unsigned int j;
5876 struct section *sec = &secs[i];
5877
5878 if (sec->shdr.sh_type != SHT_REL) {
5879@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5880 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5881 continue;
5882 }
5883+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5884+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5885+ continue;
5886+
5887+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5888+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5889+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5890+ continue;
5891+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5892+ continue;
5893+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5894+ continue;
5895+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5896+ continue;
5897+#endif
5898+
5899 switch (r_type) {
5900 case R_386_NONE:
5901 case R_386_PC32:
5902@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5903
5904 static void emit_relocs(int as_text)
5905 {
5906- int i;
5907+ unsigned int i;
5908 /* Count how many relocations I have and allocate space for them. */
5909 reloc_count = 0;
5910 walk_relocs(count_reloc);
5911@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5912 fname, strerror(errno));
5913 }
5914 read_ehdr(fp);
5915+ read_phdrs(fp);
5916 read_shdrs(fp);
5917 read_strtabs(fp);
5918 read_symtabs(fp);
5919diff -urNp linux-3.0.7/arch/x86/boot/cpucheck.c linux-3.0.7/arch/x86/boot/cpucheck.c
5920--- linux-3.0.7/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5921+++ linux-3.0.7/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5922@@ -74,7 +74,7 @@ static int has_fpu(void)
5923 u16 fcw = -1, fsw = -1;
5924 u32 cr0;
5925
5926- asm("movl %%cr0,%0" : "=r" (cr0));
5927+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5928 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5929 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5930 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5931@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5932 {
5933 u32 f0, f1;
5934
5935- asm("pushfl ; "
5936+ asm volatile("pushfl ; "
5937 "pushfl ; "
5938 "popl %0 ; "
5939 "movl %0,%1 ; "
5940@@ -115,7 +115,7 @@ static void get_flags(void)
5941 set_bit(X86_FEATURE_FPU, cpu.flags);
5942
5943 if (has_eflag(X86_EFLAGS_ID)) {
5944- asm("cpuid"
5945+ asm volatile("cpuid"
5946 : "=a" (max_intel_level),
5947 "=b" (cpu_vendor[0]),
5948 "=d" (cpu_vendor[1]),
5949@@ -124,7 +124,7 @@ static void get_flags(void)
5950
5951 if (max_intel_level >= 0x00000001 &&
5952 max_intel_level <= 0x0000ffff) {
5953- asm("cpuid"
5954+ asm volatile("cpuid"
5955 : "=a" (tfms),
5956 "=c" (cpu.flags[4]),
5957 "=d" (cpu.flags[0])
5958@@ -136,7 +136,7 @@ static void get_flags(void)
5959 cpu.model += ((tfms >> 16) & 0xf) << 4;
5960 }
5961
5962- asm("cpuid"
5963+ asm volatile("cpuid"
5964 : "=a" (max_amd_level)
5965 : "a" (0x80000000)
5966 : "ebx", "ecx", "edx");
5967@@ -144,7 +144,7 @@ static void get_flags(void)
5968 if (max_amd_level >= 0x80000001 &&
5969 max_amd_level <= 0x8000ffff) {
5970 u32 eax = 0x80000001;
5971- asm("cpuid"
5972+ asm volatile("cpuid"
5973 : "+a" (eax),
5974 "=c" (cpu.flags[6]),
5975 "=d" (cpu.flags[1])
5976@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5977 u32 ecx = MSR_K7_HWCR;
5978 u32 eax, edx;
5979
5980- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5981+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5982 eax &= ~(1 << 15);
5983- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5984+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5985
5986 get_flags(); /* Make sure it really did something */
5987 err = check_flags();
5988@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5989 u32 ecx = MSR_VIA_FCR;
5990 u32 eax, edx;
5991
5992- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5993+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5994 eax |= (1<<1)|(1<<7);
5995- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5996+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5997
5998 set_bit(X86_FEATURE_CX8, cpu.flags);
5999 err = check_flags();
6000@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6001 u32 eax, edx;
6002 u32 level = 1;
6003
6004- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6005- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6006- asm("cpuid"
6007+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6008+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6009+ asm volatile("cpuid"
6010 : "+a" (level), "=d" (cpu.flags[0])
6011 : : "ecx", "ebx");
6012- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6013+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6014
6015 err = check_flags();
6016 }
6017diff -urNp linux-3.0.7/arch/x86/boot/header.S linux-3.0.7/arch/x86/boot/header.S
6018--- linux-3.0.7/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
6019+++ linux-3.0.7/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
6020@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6021 # single linked list of
6022 # struct setup_data
6023
6024-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6025+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6026
6027 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6028 #define VO_INIT_SIZE (VO__end - VO__text)
6029diff -urNp linux-3.0.7/arch/x86/boot/memory.c linux-3.0.7/arch/x86/boot/memory.c
6030--- linux-3.0.7/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
6031+++ linux-3.0.7/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
6032@@ -19,7 +19,7 @@
6033
6034 static int detect_memory_e820(void)
6035 {
6036- int count = 0;
6037+ unsigned int count = 0;
6038 struct biosregs ireg, oreg;
6039 struct e820entry *desc = boot_params.e820_map;
6040 static struct e820entry buf; /* static so it is zeroed */
6041diff -urNp linux-3.0.7/arch/x86/boot/video-vesa.c linux-3.0.7/arch/x86/boot/video-vesa.c
6042--- linux-3.0.7/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
6043+++ linux-3.0.7/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
6044@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6045
6046 boot_params.screen_info.vesapm_seg = oreg.es;
6047 boot_params.screen_info.vesapm_off = oreg.di;
6048+ boot_params.screen_info.vesapm_size = oreg.cx;
6049 }
6050
6051 /*
6052diff -urNp linux-3.0.7/arch/x86/boot/video.c linux-3.0.7/arch/x86/boot/video.c
6053--- linux-3.0.7/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
6054+++ linux-3.0.7/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
6055@@ -96,7 +96,7 @@ static void store_mode_params(void)
6056 static unsigned int get_entry(void)
6057 {
6058 char entry_buf[4];
6059- int i, len = 0;
6060+ unsigned int i, len = 0;
6061 int key;
6062 unsigned int v;
6063
6064diff -urNp linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S
6065--- linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
6066+++ linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
6067@@ -8,6 +8,8 @@
6068 * including this sentence is retained in full.
6069 */
6070
6071+#include <asm/alternative-asm.h>
6072+
6073 .extern crypto_ft_tab
6074 .extern crypto_it_tab
6075 .extern crypto_fl_tab
6076@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6077 je B192; \
6078 leaq 32(r9),r9;
6079
6080+#define ret pax_force_retaddr; ret
6081+
6082 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6083 movq r1,r2; \
6084 movq r3,r4; \
6085diff -urNp linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S
6086--- linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
6087+++ linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
6088@@ -1,3 +1,5 @@
6089+#include <asm/alternative-asm.h>
6090+
6091 # enter ECRYPT_encrypt_bytes
6092 .text
6093 .p2align 5
6094@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6095 add %r11,%rsp
6096 mov %rdi,%rax
6097 mov %rsi,%rdx
6098+ pax_force_retaddr
6099 ret
6100 # bytesatleast65:
6101 ._bytesatleast65:
6102@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6103 add %r11,%rsp
6104 mov %rdi,%rax
6105 mov %rsi,%rdx
6106+ pax_force_retaddr
6107 ret
6108 # enter ECRYPT_ivsetup
6109 .text
6110@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6111 add %r11,%rsp
6112 mov %rdi,%rax
6113 mov %rsi,%rdx
6114+ pax_force_retaddr
6115 ret
6116diff -urNp linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S
6117--- linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
6118+++ linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
6119@@ -21,6 +21,7 @@
6120 .text
6121
6122 #include <asm/asm-offsets.h>
6123+#include <asm/alternative-asm.h>
6124
6125 #define a_offset 0
6126 #define b_offset 4
6127@@ -269,6 +270,7 @@ twofish_enc_blk:
6128
6129 popq R1
6130 movq $1,%rax
6131+ pax_force_retaddr
6132 ret
6133
6134 twofish_dec_blk:
6135@@ -321,4 +323,5 @@ twofish_dec_blk:
6136
6137 popq R1
6138 movq $1,%rax
6139+ pax_force_retaddr
6140 ret
6141diff -urNp linux-3.0.7/arch/x86/ia32/ia32_aout.c linux-3.0.7/arch/x86/ia32/ia32_aout.c
6142--- linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
6143+++ linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
6144@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
6145 unsigned long dump_start, dump_size;
6146 struct user32 dump;
6147
6148+ memset(&dump, 0, sizeof(dump));
6149+
6150 fs = get_fs();
6151 set_fs(KERNEL_DS);
6152 has_dumped = 1;
6153diff -urNp linux-3.0.7/arch/x86/ia32/ia32_signal.c linux-3.0.7/arch/x86/ia32/ia32_signal.c
6154--- linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
6155+++ linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
6156@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
6157 }
6158 seg = get_fs();
6159 set_fs(KERNEL_DS);
6160- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6161+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6162 set_fs(seg);
6163 if (ret >= 0 && uoss_ptr) {
6164 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6165@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
6166 */
6167 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6168 size_t frame_size,
6169- void **fpstate)
6170+ void __user **fpstate)
6171 {
6172 unsigned long sp;
6173
6174@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
6175
6176 if (used_math()) {
6177 sp = sp - sig_xstate_ia32_size;
6178- *fpstate = (struct _fpstate_ia32 *) sp;
6179+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6180 if (save_i387_xstate_ia32(*fpstate) < 0)
6181 return (void __user *) -1L;
6182 }
6183@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6184 sp -= frame_size;
6185 /* Align the stack pointer according to the i386 ABI,
6186 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6187- sp = ((sp + 4) & -16ul) - 4;
6188+ sp = ((sp - 12) & -16ul) - 4;
6189 return (void __user *) sp;
6190 }
6191
6192@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6193 * These are actually not used anymore, but left because some
6194 * gdb versions depend on them as a marker.
6195 */
6196- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6197+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6198 } put_user_catch(err);
6199
6200 if (err)
6201@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6202 0xb8,
6203 __NR_ia32_rt_sigreturn,
6204 0x80cd,
6205- 0,
6206+ 0
6207 };
6208
6209 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6210@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6211
6212 if (ka->sa.sa_flags & SA_RESTORER)
6213 restorer = ka->sa.sa_restorer;
6214+ else if (current->mm->context.vdso)
6215+ /* Return stub is in 32bit vsyscall page */
6216+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6217 else
6218- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6219- rt_sigreturn);
6220+ restorer = &frame->retcode;
6221 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6222
6223 /*
6224 * Not actually used anymore, but left because some gdb
6225 * versions need it.
6226 */
6227- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6228+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6229 } put_user_catch(err);
6230
6231 if (err)
6232diff -urNp linux-3.0.7/arch/x86/ia32/ia32entry.S linux-3.0.7/arch/x86/ia32/ia32entry.S
6233--- linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
6234+++ linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-10-11 10:44:33.000000000 -0400
6235@@ -13,7 +13,9 @@
6236 #include <asm/thread_info.h>
6237 #include <asm/segment.h>
6238 #include <asm/irqflags.h>
6239+#include <asm/pgtable.h>
6240 #include <linux/linkage.h>
6241+#include <asm/alternative-asm.h>
6242
6243 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6244 #include <linux/elf-em.h>
6245@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
6246 ENDPROC(native_irq_enable_sysexit)
6247 #endif
6248
6249+ .macro pax_enter_kernel_user
6250+#ifdef CONFIG_PAX_MEMORY_UDEREF
6251+ call pax_enter_kernel_user
6252+#endif
6253+ .endm
6254+
6255+ .macro pax_exit_kernel_user
6256+#ifdef CONFIG_PAX_MEMORY_UDEREF
6257+ call pax_exit_kernel_user
6258+#endif
6259+#ifdef CONFIG_PAX_RANDKSTACK
6260+ pushq %rax
6261+ call pax_randomize_kstack
6262+ popq %rax
6263+#endif
6264+ .endm
6265+
6266+ .macro pax_erase_kstack
6267+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6268+ call pax_erase_kstack
6269+#endif
6270+ .endm
6271+
6272 /*
6273 * 32bit SYSENTER instruction entry.
6274 *
6275@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
6276 CFI_REGISTER rsp,rbp
6277 SWAPGS_UNSAFE_STACK
6278 movq PER_CPU_VAR(kernel_stack), %rsp
6279- addq $(KERNEL_STACK_OFFSET),%rsp
6280+ pax_enter_kernel_user
6281 /*
6282 * No need to follow this irqs on/off section: the syscall
6283 * disabled irqs, here we enable it straight after entry:
6284@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
6285 CFI_REL_OFFSET rsp,0
6286 pushfq_cfi
6287 /*CFI_REL_OFFSET rflags,0*/
6288- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6289+ GET_THREAD_INFO(%r10)
6290+ movl TI_sysenter_return(%r10), %r10d
6291 CFI_REGISTER rip,r10
6292 pushq_cfi $__USER32_CS
6293 /*CFI_REL_OFFSET cs,0*/
6294@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
6295 SAVE_ARGS 0,0,1
6296 /* no need to do an access_ok check here because rbp has been
6297 32bit zero extended */
6298+
6299+#ifdef CONFIG_PAX_MEMORY_UDEREF
6300+ mov $PAX_USER_SHADOW_BASE,%r10
6301+ add %r10,%rbp
6302+#endif
6303+
6304 1: movl (%rbp),%ebp
6305 .section __ex_table,"a"
6306 .quad 1b,ia32_badarg
6307@@ -168,6 +200,8 @@ sysenter_dispatch:
6308 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6309 jnz sysexit_audit
6310 sysexit_from_sys_call:
6311+ pax_exit_kernel_user
6312+ pax_erase_kstack
6313 andl $~TS_COMPAT,TI_status(%r10)
6314 /* clear IF, that popfq doesn't enable interrupts early */
6315 andl $~0x200,EFLAGS-R11(%rsp)
6316@@ -194,6 +228,9 @@ sysexit_from_sys_call:
6317 movl %eax,%esi /* 2nd arg: syscall number */
6318 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6319 call audit_syscall_entry
6320+
6321+ pax_erase_kstack
6322+
6323 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6324 cmpq $(IA32_NR_syscalls-1),%rax
6325 ja ia32_badsys
6326@@ -246,6 +283,9 @@ sysenter_tracesys:
6327 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6328 movq %rsp,%rdi /* &pt_regs -> arg1 */
6329 call syscall_trace_enter
6330+
6331+ pax_erase_kstack
6332+
6333 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6334 RESTORE_REST
6335 cmpq $(IA32_NR_syscalls-1),%rax
6336@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
6337 ENTRY(ia32_cstar_target)
6338 CFI_STARTPROC32 simple
6339 CFI_SIGNAL_FRAME
6340- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6341+ CFI_DEF_CFA rsp,0
6342 CFI_REGISTER rip,rcx
6343 /*CFI_REGISTER rflags,r11*/
6344 SWAPGS_UNSAFE_STACK
6345 movl %esp,%r8d
6346 CFI_REGISTER rsp,r8
6347 movq PER_CPU_VAR(kernel_stack),%rsp
6348+
6349+#ifdef CONFIG_PAX_MEMORY_UDEREF
6350+ pax_enter_kernel_user
6351+#endif
6352+
6353 /*
6354 * No need to follow this irqs on/off section: the syscall
6355 * disabled irqs and here we enable it straight after entry:
6356 */
6357 ENABLE_INTERRUPTS(CLBR_NONE)
6358- SAVE_ARGS 8,1,1
6359+ SAVE_ARGS 8*6,1,1
6360 movl %eax,%eax /* zero extension */
6361 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6362 movq %rcx,RIP-ARGOFFSET(%rsp)
6363@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
6364 /* no need to do an access_ok check here because r8 has been
6365 32bit zero extended */
6366 /* hardware stack frame is complete now */
6367+
6368+#ifdef CONFIG_PAX_MEMORY_UDEREF
6369+ mov $PAX_USER_SHADOW_BASE,%r10
6370+ add %r10,%r8
6371+#endif
6372+
6373 1: movl (%r8),%r9d
6374 .section __ex_table,"a"
6375 .quad 1b,ia32_badarg
6376@@ -327,6 +378,8 @@ cstar_dispatch:
6377 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6378 jnz sysretl_audit
6379 sysretl_from_sys_call:
6380+ pax_exit_kernel_user
6381+ pax_erase_kstack
6382 andl $~TS_COMPAT,TI_status(%r10)
6383 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6384 movl RIP-ARGOFFSET(%rsp),%ecx
6385@@ -364,6 +417,9 @@ cstar_tracesys:
6386 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6387 movq %rsp,%rdi /* &pt_regs -> arg1 */
6388 call syscall_trace_enter
6389+
6390+ pax_erase_kstack
6391+
6392 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6393 RESTORE_REST
6394 xchgl %ebp,%r9d
6395@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
6396 CFI_REL_OFFSET rip,RIP-RIP
6397 PARAVIRT_ADJUST_EXCEPTION_FRAME
6398 SWAPGS
6399+ pax_enter_kernel_user
6400 /*
6401 * No need to follow this irqs on/off section: the syscall
6402 * disabled irqs and here we enable it straight after entry:
6403@@ -441,6 +498,9 @@ ia32_tracesys:
6404 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6405 movq %rsp,%rdi /* &pt_regs -> arg1 */
6406 call syscall_trace_enter
6407+
6408+ pax_erase_kstack
6409+
6410 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6411 RESTORE_REST
6412 cmpq $(IA32_NR_syscalls-1),%rax
6413@@ -455,6 +515,7 @@ ia32_badsys:
6414
6415 quiet_ni_syscall:
6416 movq $-ENOSYS,%rax
6417+ pax_force_retaddr
6418 ret
6419 CFI_ENDPROC
6420
6421diff -urNp linux-3.0.7/arch/x86/ia32/sys_ia32.c linux-3.0.7/arch/x86/ia32/sys_ia32.c
6422--- linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
6423+++ linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
6424@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
6425 */
6426 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6427 {
6428- typeof(ubuf->st_uid) uid = 0;
6429- typeof(ubuf->st_gid) gid = 0;
6430+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
6431+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
6432 SET_UID(uid, stat->uid);
6433 SET_GID(gid, stat->gid);
6434 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6435@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
6436 }
6437 set_fs(KERNEL_DS);
6438 ret = sys_rt_sigprocmask(how,
6439- set ? (sigset_t __user *)&s : NULL,
6440- oset ? (sigset_t __user *)&s : NULL,
6441+ set ? (sigset_t __force_user *)&s : NULL,
6442+ oset ? (sigset_t __force_user *)&s : NULL,
6443 sigsetsize);
6444 set_fs(old_fs);
6445 if (ret)
6446@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
6447 return alarm_setitimer(seconds);
6448 }
6449
6450-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6451+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6452 int options)
6453 {
6454 return compat_sys_wait4(pid, stat_addr, options, NULL);
6455@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
6456 mm_segment_t old_fs = get_fs();
6457
6458 set_fs(KERNEL_DS);
6459- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6460+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6461 set_fs(old_fs);
6462 if (put_compat_timespec(&t, interval))
6463 return -EFAULT;
6464@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6465 mm_segment_t old_fs = get_fs();
6466
6467 set_fs(KERNEL_DS);
6468- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6469+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6470 set_fs(old_fs);
6471 if (!ret) {
6472 switch (_NSIG_WORDS) {
6473@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6474 if (copy_siginfo_from_user32(&info, uinfo))
6475 return -EFAULT;
6476 set_fs(KERNEL_DS);
6477- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6478+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6479 set_fs(old_fs);
6480 return ret;
6481 }
6482@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6483 return -EFAULT;
6484
6485 set_fs(KERNEL_DS);
6486- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6487+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6488 count);
6489 set_fs(old_fs);
6490
6491diff -urNp linux-3.0.7/arch/x86/include/asm/alternative-asm.h linux-3.0.7/arch/x86/include/asm/alternative-asm.h
6492--- linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6493+++ linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6494@@ -15,6 +15,20 @@
6495 .endm
6496 #endif
6497
6498+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6499+ .macro pax_force_retaddr rip=0
6500+ btsq $63,\rip(%rsp)
6501+ .endm
6502+ .macro pax_force_fptr ptr
6503+ btsq $63,\ptr
6504+ .endm
6505+#else
6506+ .macro pax_force_retaddr rip=0
6507+ .endm
6508+ .macro pax_force_fptr ptr
6509+ .endm
6510+#endif
6511+
6512 .macro altinstruction_entry orig alt feature orig_len alt_len
6513 .align 8
6514 .quad \orig
6515diff -urNp linux-3.0.7/arch/x86/include/asm/alternative.h linux-3.0.7/arch/x86/include/asm/alternative.h
6516--- linux-3.0.7/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6517+++ linux-3.0.7/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6518@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6519 ".section .discard,\"aw\",@progbits\n" \
6520 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6521 ".previous\n" \
6522- ".section .altinstr_replacement, \"ax\"\n" \
6523+ ".section .altinstr_replacement, \"a\"\n" \
6524 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6525 ".previous"
6526
6527diff -urNp linux-3.0.7/arch/x86/include/asm/apic.h linux-3.0.7/arch/x86/include/asm/apic.h
6528--- linux-3.0.7/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6529+++ linux-3.0.7/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6530@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6531
6532 #ifdef CONFIG_X86_LOCAL_APIC
6533
6534-extern unsigned int apic_verbosity;
6535+extern int apic_verbosity;
6536 extern int local_apic_timer_c2_ok;
6537
6538 extern int disable_apic;
6539diff -urNp linux-3.0.7/arch/x86/include/asm/apm.h linux-3.0.7/arch/x86/include/asm/apm.h
6540--- linux-3.0.7/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6541+++ linux-3.0.7/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6542@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6543 __asm__ __volatile__(APM_DO_ZERO_SEGS
6544 "pushl %%edi\n\t"
6545 "pushl %%ebp\n\t"
6546- "lcall *%%cs:apm_bios_entry\n\t"
6547+ "lcall *%%ss:apm_bios_entry\n\t"
6548 "setc %%al\n\t"
6549 "popl %%ebp\n\t"
6550 "popl %%edi\n\t"
6551@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6552 __asm__ __volatile__(APM_DO_ZERO_SEGS
6553 "pushl %%edi\n\t"
6554 "pushl %%ebp\n\t"
6555- "lcall *%%cs:apm_bios_entry\n\t"
6556+ "lcall *%%ss:apm_bios_entry\n\t"
6557 "setc %%bl\n\t"
6558 "popl %%ebp\n\t"
6559 "popl %%edi\n\t"
6560diff -urNp linux-3.0.7/arch/x86/include/asm/atomic.h linux-3.0.7/arch/x86/include/asm/atomic.h
6561--- linux-3.0.7/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6562+++ linux-3.0.7/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6563@@ -22,7 +22,18 @@
6564 */
6565 static inline int atomic_read(const atomic_t *v)
6566 {
6567- return (*(volatile int *)&(v)->counter);
6568+ return (*(volatile const int *)&(v)->counter);
6569+}
6570+
6571+/**
6572+ * atomic_read_unchecked - read atomic variable
6573+ * @v: pointer of type atomic_unchecked_t
6574+ *
6575+ * Atomically reads the value of @v.
6576+ */
6577+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6578+{
6579+ return (*(volatile const int *)&(v)->counter);
6580 }
6581
6582 /**
6583@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6584 }
6585
6586 /**
6587+ * atomic_set_unchecked - set atomic variable
6588+ * @v: pointer of type atomic_unchecked_t
6589+ * @i: required value
6590+ *
6591+ * Atomically sets the value of @v to @i.
6592+ */
6593+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6594+{
6595+ v->counter = i;
6596+}
6597+
6598+/**
6599 * atomic_add - add integer to atomic variable
6600 * @i: integer value to add
6601 * @v: pointer of type atomic_t
6602@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6603 */
6604 static inline void atomic_add(int i, atomic_t *v)
6605 {
6606- asm volatile(LOCK_PREFIX "addl %1,%0"
6607+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6608+
6609+#ifdef CONFIG_PAX_REFCOUNT
6610+ "jno 0f\n"
6611+ LOCK_PREFIX "subl %1,%0\n"
6612+ "int $4\n0:\n"
6613+ _ASM_EXTABLE(0b, 0b)
6614+#endif
6615+
6616+ : "+m" (v->counter)
6617+ : "ir" (i));
6618+}
6619+
6620+/**
6621+ * atomic_add_unchecked - add integer to atomic variable
6622+ * @i: integer value to add
6623+ * @v: pointer of type atomic_unchecked_t
6624+ *
6625+ * Atomically adds @i to @v.
6626+ */
6627+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6628+{
6629+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6630 : "+m" (v->counter)
6631 : "ir" (i));
6632 }
6633@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6634 */
6635 static inline void atomic_sub(int i, atomic_t *v)
6636 {
6637- asm volatile(LOCK_PREFIX "subl %1,%0"
6638+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6639+
6640+#ifdef CONFIG_PAX_REFCOUNT
6641+ "jno 0f\n"
6642+ LOCK_PREFIX "addl %1,%0\n"
6643+ "int $4\n0:\n"
6644+ _ASM_EXTABLE(0b, 0b)
6645+#endif
6646+
6647+ : "+m" (v->counter)
6648+ : "ir" (i));
6649+}
6650+
6651+/**
6652+ * atomic_sub_unchecked - subtract integer from atomic variable
6653+ * @i: integer value to subtract
6654+ * @v: pointer of type atomic_unchecked_t
6655+ *
6656+ * Atomically subtracts @i from @v.
6657+ */
6658+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6659+{
6660+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6661 : "+m" (v->counter)
6662 : "ir" (i));
6663 }
6664@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6665 {
6666 unsigned char c;
6667
6668- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6669+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6670+
6671+#ifdef CONFIG_PAX_REFCOUNT
6672+ "jno 0f\n"
6673+ LOCK_PREFIX "addl %2,%0\n"
6674+ "int $4\n0:\n"
6675+ _ASM_EXTABLE(0b, 0b)
6676+#endif
6677+
6678+ "sete %1\n"
6679 : "+m" (v->counter), "=qm" (c)
6680 : "ir" (i) : "memory");
6681 return c;
6682@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6683 */
6684 static inline void atomic_inc(atomic_t *v)
6685 {
6686- asm volatile(LOCK_PREFIX "incl %0"
6687+ asm volatile(LOCK_PREFIX "incl %0\n"
6688+
6689+#ifdef CONFIG_PAX_REFCOUNT
6690+ "jno 0f\n"
6691+ LOCK_PREFIX "decl %0\n"
6692+ "int $4\n0:\n"
6693+ _ASM_EXTABLE(0b, 0b)
6694+#endif
6695+
6696+ : "+m" (v->counter));
6697+}
6698+
6699+/**
6700+ * atomic_inc_unchecked - increment atomic variable
6701+ * @v: pointer of type atomic_unchecked_t
6702+ *
6703+ * Atomically increments @v by 1.
6704+ */
6705+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6706+{
6707+ asm volatile(LOCK_PREFIX "incl %0\n"
6708 : "+m" (v->counter));
6709 }
6710
6711@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6712 */
6713 static inline void atomic_dec(atomic_t *v)
6714 {
6715- asm volatile(LOCK_PREFIX "decl %0"
6716+ asm volatile(LOCK_PREFIX "decl %0\n"
6717+
6718+#ifdef CONFIG_PAX_REFCOUNT
6719+ "jno 0f\n"
6720+ LOCK_PREFIX "incl %0\n"
6721+ "int $4\n0:\n"
6722+ _ASM_EXTABLE(0b, 0b)
6723+#endif
6724+
6725+ : "+m" (v->counter));
6726+}
6727+
6728+/**
6729+ * atomic_dec_unchecked - decrement atomic variable
6730+ * @v: pointer of type atomic_unchecked_t
6731+ *
6732+ * Atomically decrements @v by 1.
6733+ */
6734+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6735+{
6736+ asm volatile(LOCK_PREFIX "decl %0\n"
6737 : "+m" (v->counter));
6738 }
6739
6740@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6741 {
6742 unsigned char c;
6743
6744- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6745+ asm volatile(LOCK_PREFIX "decl %0\n"
6746+
6747+#ifdef CONFIG_PAX_REFCOUNT
6748+ "jno 0f\n"
6749+ LOCK_PREFIX "incl %0\n"
6750+ "int $4\n0:\n"
6751+ _ASM_EXTABLE(0b, 0b)
6752+#endif
6753+
6754+ "sete %1\n"
6755 : "+m" (v->counter), "=qm" (c)
6756 : : "memory");
6757 return c != 0;
6758@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6759 {
6760 unsigned char c;
6761
6762- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6763+ asm volatile(LOCK_PREFIX "incl %0\n"
6764+
6765+#ifdef CONFIG_PAX_REFCOUNT
6766+ "jno 0f\n"
6767+ LOCK_PREFIX "decl %0\n"
6768+ "int $4\n0:\n"
6769+ _ASM_EXTABLE(0b, 0b)
6770+#endif
6771+
6772+ "sete %1\n"
6773+ : "+m" (v->counter), "=qm" (c)
6774+ : : "memory");
6775+ return c != 0;
6776+}
6777+
6778+/**
6779+ * atomic_inc_and_test_unchecked - increment and test
6780+ * @v: pointer of type atomic_unchecked_t
6781+ *
6782+ * Atomically increments @v by 1
6783+ * and returns true if the result is zero, or false for all
6784+ * other cases.
6785+ */
6786+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6787+{
6788+ unsigned char c;
6789+
6790+ asm volatile(LOCK_PREFIX "incl %0\n"
6791+ "sete %1\n"
6792 : "+m" (v->counter), "=qm" (c)
6793 : : "memory");
6794 return c != 0;
6795@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6796 {
6797 unsigned char c;
6798
6799- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6800+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6801+
6802+#ifdef CONFIG_PAX_REFCOUNT
6803+ "jno 0f\n"
6804+ LOCK_PREFIX "subl %2,%0\n"
6805+ "int $4\n0:\n"
6806+ _ASM_EXTABLE(0b, 0b)
6807+#endif
6808+
6809+ "sets %1\n"
6810 : "+m" (v->counter), "=qm" (c)
6811 : "ir" (i) : "memory");
6812 return c;
6813@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6814 #endif
6815 /* Modern 486+ processor */
6816 __i = i;
6817+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6818+
6819+#ifdef CONFIG_PAX_REFCOUNT
6820+ "jno 0f\n"
6821+ "movl %0, %1\n"
6822+ "int $4\n0:\n"
6823+ _ASM_EXTABLE(0b, 0b)
6824+#endif
6825+
6826+ : "+r" (i), "+m" (v->counter)
6827+ : : "memory");
6828+ return i + __i;
6829+
6830+#ifdef CONFIG_M386
6831+no_xadd: /* Legacy 386 processor */
6832+ local_irq_save(flags);
6833+ __i = atomic_read(v);
6834+ atomic_set(v, i + __i);
6835+ local_irq_restore(flags);
6836+ return i + __i;
6837+#endif
6838+}
6839+
6840+/**
6841+ * atomic_add_return_unchecked - add integer and return
6842+ * @v: pointer of type atomic_unchecked_t
6843+ * @i: integer value to add
6844+ *
6845+ * Atomically adds @i to @v and returns @i + @v
6846+ */
6847+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6848+{
6849+ int __i;
6850+#ifdef CONFIG_M386
6851+ unsigned long flags;
6852+ if (unlikely(boot_cpu_data.x86 <= 3))
6853+ goto no_xadd;
6854+#endif
6855+ /* Modern 486+ processor */
6856+ __i = i;
6857 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6858 : "+r" (i), "+m" (v->counter)
6859 : : "memory");
6860@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6861 }
6862
6863 #define atomic_inc_return(v) (atomic_add_return(1, v))
6864+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6865+{
6866+ return atomic_add_return_unchecked(1, v);
6867+}
6868 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6869
6870 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6871@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6872 return cmpxchg(&v->counter, old, new);
6873 }
6874
6875+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6876+{
6877+ return cmpxchg(&v->counter, old, new);
6878+}
6879+
6880 static inline int atomic_xchg(atomic_t *v, int new)
6881 {
6882 return xchg(&v->counter, new);
6883 }
6884
6885+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6886+{
6887+ return xchg(&v->counter, new);
6888+}
6889+
6890 /**
6891 * atomic_add_unless - add unless the number is already a given value
6892 * @v: pointer of type atomic_t
6893@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6894 */
6895 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6896 {
6897- int c, old;
6898+ int c, old, new;
6899 c = atomic_read(v);
6900 for (;;) {
6901- if (unlikely(c == (u)))
6902+ if (unlikely(c == u))
6903 break;
6904- old = atomic_cmpxchg((v), c, c + (a));
6905+
6906+ asm volatile("addl %2,%0\n"
6907+
6908+#ifdef CONFIG_PAX_REFCOUNT
6909+ "jno 0f\n"
6910+ "subl %2,%0\n"
6911+ "int $4\n0:\n"
6912+ _ASM_EXTABLE(0b, 0b)
6913+#endif
6914+
6915+ : "=r" (new)
6916+ : "0" (c), "ir" (a));
6917+
6918+ old = atomic_cmpxchg(v, c, new);
6919 if (likely(old == c))
6920 break;
6921 c = old;
6922 }
6923- return c != (u);
6924+ return c != u;
6925 }
6926
6927 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6928
6929+/**
6930+ * atomic_inc_not_zero_hint - increment if not null
6931+ * @v: pointer of type atomic_t
6932+ * @hint: probable value of the atomic before the increment
6933+ *
6934+ * This version of atomic_inc_not_zero() gives a hint of probable
6935+ * value of the atomic. This helps processor to not read the memory
6936+ * before doing the atomic read/modify/write cycle, lowering
6937+ * number of bus transactions on some arches.
6938+ *
6939+ * Returns: 0 if increment was not done, 1 otherwise.
6940+ */
6941+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6942+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6943+{
6944+ int val, c = hint, new;
6945+
6946+ /* sanity test, should be removed by compiler if hint is a constant */
6947+ if (!hint)
6948+ return atomic_inc_not_zero(v);
6949+
6950+ do {
6951+ asm volatile("incl %0\n"
6952+
6953+#ifdef CONFIG_PAX_REFCOUNT
6954+ "jno 0f\n"
6955+ "decl %0\n"
6956+ "int $4\n0:\n"
6957+ _ASM_EXTABLE(0b, 0b)
6958+#endif
6959+
6960+ : "=r" (new)
6961+ : "0" (c));
6962+
6963+ val = atomic_cmpxchg(v, c, new);
6964+ if (val == c)
6965+ return 1;
6966+ c = val;
6967+ } while (c);
6968+
6969+ return 0;
6970+}
6971+
6972 /*
6973 * atomic_dec_if_positive - decrement by 1 if old value positive
6974 * @v: pointer of type atomic_t
6975diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_32.h linux-3.0.7/arch/x86/include/asm/atomic64_32.h
6976--- linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6977+++ linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6978@@ -12,6 +12,14 @@ typedef struct {
6979 u64 __aligned(8) counter;
6980 } atomic64_t;
6981
6982+#ifdef CONFIG_PAX_REFCOUNT
6983+typedef struct {
6984+ u64 __aligned(8) counter;
6985+} atomic64_unchecked_t;
6986+#else
6987+typedef atomic64_t atomic64_unchecked_t;
6988+#endif
6989+
6990 #define ATOMIC64_INIT(val) { (val) }
6991
6992 #ifdef CONFIG_X86_CMPXCHG64
6993@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6994 }
6995
6996 /**
6997+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6998+ * @p: pointer to type atomic64_unchecked_t
6999+ * @o: expected value
7000+ * @n: new value
7001+ *
7002+ * Atomically sets @v to @n if it was equal to @o and returns
7003+ * the old value.
7004+ */
7005+
7006+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7007+{
7008+ return cmpxchg64(&v->counter, o, n);
7009+}
7010+
7011+/**
7012 * atomic64_xchg - xchg atomic64 variable
7013 * @v: pointer to type atomic64_t
7014 * @n: value to assign
7015@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
7016 }
7017
7018 /**
7019+ * atomic64_set_unchecked - set atomic64 variable
7020+ * @v: pointer to type atomic64_unchecked_t
7021+ * @n: value to assign
7022+ *
7023+ * Atomically sets the value of @v to @n.
7024+ */
7025+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7026+{
7027+ unsigned high = (unsigned)(i >> 32);
7028+ unsigned low = (unsigned)i;
7029+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7030+ : "+b" (low), "+c" (high)
7031+ : "S" (v)
7032+ : "eax", "edx", "memory"
7033+ );
7034+}
7035+
7036+/**
7037 * atomic64_read - read atomic64 variable
7038 * @v: pointer to type atomic64_t
7039 *
7040@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
7041 }
7042
7043 /**
7044+ * atomic64_read_unchecked - read atomic64 variable
7045+ * @v: pointer to type atomic64_unchecked_t
7046+ *
7047+ * Atomically reads the value of @v and returns it.
7048+ */
7049+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7050+{
7051+ long long r;
7052+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7053+ : "=A" (r), "+c" (v)
7054+ : : "memory"
7055+ );
7056+ return r;
7057+ }
7058+
7059+/**
7060 * atomic64_add_return - add and return
7061 * @i: integer value to add
7062 * @v: pointer to type atomic64_t
7063@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
7064 return i;
7065 }
7066
7067+/**
7068+ * atomic64_add_return_unchecked - add and return
7069+ * @i: integer value to add
7070+ * @v: pointer to type atomic64_unchecked_t
7071+ *
7072+ * Atomically adds @i to @v and returns @i + *@v
7073+ */
7074+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7075+{
7076+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7077+ : "+A" (i), "+c" (v)
7078+ : : "memory"
7079+ );
7080+ return i;
7081+}
7082+
7083 /*
7084 * Other variants with different arithmetic operators:
7085 */
7086@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
7087 return a;
7088 }
7089
7090+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7091+{
7092+ long long a;
7093+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7094+ : "=A" (a)
7095+ : "S" (v)
7096+ : "memory", "ecx"
7097+ );
7098+ return a;
7099+}
7100+
7101 static inline long long atomic64_dec_return(atomic64_t *v)
7102 {
7103 long long a;
7104@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
7105 }
7106
7107 /**
7108+ * atomic64_add_unchecked - add integer to atomic64 variable
7109+ * @i: integer value to add
7110+ * @v: pointer to type atomic64_unchecked_t
7111+ *
7112+ * Atomically adds @i to @v.
7113+ */
7114+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7115+{
7116+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7117+ : "+A" (i), "+c" (v)
7118+ : : "memory"
7119+ );
7120+ return i;
7121+}
7122+
7123+/**
7124 * atomic64_sub - subtract the atomic64 variable
7125 * @i: integer value to subtract
7126 * @v: pointer to type atomic64_t
7127diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_64.h linux-3.0.7/arch/x86/include/asm/atomic64_64.h
7128--- linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
7129+++ linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
7130@@ -18,7 +18,19 @@
7131 */
7132 static inline long atomic64_read(const atomic64_t *v)
7133 {
7134- return (*(volatile long *)&(v)->counter);
7135+ return (*(volatile const long *)&(v)->counter);
7136+}
7137+
7138+/**
7139+ * atomic64_read_unchecked - read atomic64 variable
7140+ * @v: pointer of type atomic64_unchecked_t
7141+ *
7142+ * Atomically reads the value of @v.
7143+ * Doesn't imply a read memory barrier.
7144+ */
7145+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7146+{
7147+ return (*(volatile const long *)&(v)->counter);
7148 }
7149
7150 /**
7151@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
7152 }
7153
7154 /**
7155+ * atomic64_set_unchecked - set atomic64 variable
7156+ * @v: pointer to type atomic64_unchecked_t
7157+ * @i: required value
7158+ *
7159+ * Atomically sets the value of @v to @i.
7160+ */
7161+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7162+{
7163+ v->counter = i;
7164+}
7165+
7166+/**
7167 * atomic64_add - add integer to atomic64 variable
7168 * @i: integer value to add
7169 * @v: pointer to type atomic64_t
7170@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
7171 */
7172 static inline void atomic64_add(long i, atomic64_t *v)
7173 {
7174+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7175+
7176+#ifdef CONFIG_PAX_REFCOUNT
7177+ "jno 0f\n"
7178+ LOCK_PREFIX "subq %1,%0\n"
7179+ "int $4\n0:\n"
7180+ _ASM_EXTABLE(0b, 0b)
7181+#endif
7182+
7183+ : "=m" (v->counter)
7184+ : "er" (i), "m" (v->counter));
7185+}
7186+
7187+/**
7188+ * atomic64_add_unchecked - add integer to atomic64 variable
7189+ * @i: integer value to add
7190+ * @v: pointer to type atomic64_unchecked_t
7191+ *
7192+ * Atomically adds @i to @v.
7193+ */
7194+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7195+{
7196 asm volatile(LOCK_PREFIX "addq %1,%0"
7197 : "=m" (v->counter)
7198 : "er" (i), "m" (v->counter));
7199@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
7200 */
7201 static inline void atomic64_sub(long i, atomic64_t *v)
7202 {
7203- asm volatile(LOCK_PREFIX "subq %1,%0"
7204+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7205+
7206+#ifdef CONFIG_PAX_REFCOUNT
7207+ "jno 0f\n"
7208+ LOCK_PREFIX "addq %1,%0\n"
7209+ "int $4\n0:\n"
7210+ _ASM_EXTABLE(0b, 0b)
7211+#endif
7212+
7213+ : "=m" (v->counter)
7214+ : "er" (i), "m" (v->counter));
7215+}
7216+
7217+/**
7218+ * atomic64_sub_unchecked - subtract the atomic64 variable
7219+ * @i: integer value to subtract
7220+ * @v: pointer to type atomic64_unchecked_t
7221+ *
7222+ * Atomically subtracts @i from @v.
7223+ */
7224+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7225+{
7226+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7227 : "=m" (v->counter)
7228 : "er" (i), "m" (v->counter));
7229 }
7230@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
7231 {
7232 unsigned char c;
7233
7234- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7235+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7236+
7237+#ifdef CONFIG_PAX_REFCOUNT
7238+ "jno 0f\n"
7239+ LOCK_PREFIX "addq %2,%0\n"
7240+ "int $4\n0:\n"
7241+ _ASM_EXTABLE(0b, 0b)
7242+#endif
7243+
7244+ "sete %1\n"
7245 : "=m" (v->counter), "=qm" (c)
7246 : "er" (i), "m" (v->counter) : "memory");
7247 return c;
7248@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
7249 */
7250 static inline void atomic64_inc(atomic64_t *v)
7251 {
7252+ asm volatile(LOCK_PREFIX "incq %0\n"
7253+
7254+#ifdef CONFIG_PAX_REFCOUNT
7255+ "jno 0f\n"
7256+ LOCK_PREFIX "decq %0\n"
7257+ "int $4\n0:\n"
7258+ _ASM_EXTABLE(0b, 0b)
7259+#endif
7260+
7261+ : "=m" (v->counter)
7262+ : "m" (v->counter));
7263+}
7264+
7265+/**
7266+ * atomic64_inc_unchecked - increment atomic64 variable
7267+ * @v: pointer to type atomic64_unchecked_t
7268+ *
7269+ * Atomically increments @v by 1.
7270+ */
7271+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7272+{
7273 asm volatile(LOCK_PREFIX "incq %0"
7274 : "=m" (v->counter)
7275 : "m" (v->counter));
7276@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
7277 */
7278 static inline void atomic64_dec(atomic64_t *v)
7279 {
7280- asm volatile(LOCK_PREFIX "decq %0"
7281+ asm volatile(LOCK_PREFIX "decq %0\n"
7282+
7283+#ifdef CONFIG_PAX_REFCOUNT
7284+ "jno 0f\n"
7285+ LOCK_PREFIX "incq %0\n"
7286+ "int $4\n0:\n"
7287+ _ASM_EXTABLE(0b, 0b)
7288+#endif
7289+
7290+ : "=m" (v->counter)
7291+ : "m" (v->counter));
7292+}
7293+
7294+/**
7295+ * atomic64_dec_unchecked - decrement atomic64 variable
7296+ * @v: pointer to type atomic64_t
7297+ *
7298+ * Atomically decrements @v by 1.
7299+ */
7300+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7301+{
7302+ asm volatile(LOCK_PREFIX "decq %0\n"
7303 : "=m" (v->counter)
7304 : "m" (v->counter));
7305 }
7306@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
7307 {
7308 unsigned char c;
7309
7310- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7311+ asm volatile(LOCK_PREFIX "decq %0\n"
7312+
7313+#ifdef CONFIG_PAX_REFCOUNT
7314+ "jno 0f\n"
7315+ LOCK_PREFIX "incq %0\n"
7316+ "int $4\n0:\n"
7317+ _ASM_EXTABLE(0b, 0b)
7318+#endif
7319+
7320+ "sete %1\n"
7321 : "=m" (v->counter), "=qm" (c)
7322 : "m" (v->counter) : "memory");
7323 return c != 0;
7324@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
7325 {
7326 unsigned char c;
7327
7328- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7329+ asm volatile(LOCK_PREFIX "incq %0\n"
7330+
7331+#ifdef CONFIG_PAX_REFCOUNT
7332+ "jno 0f\n"
7333+ LOCK_PREFIX "decq %0\n"
7334+ "int $4\n0:\n"
7335+ _ASM_EXTABLE(0b, 0b)
7336+#endif
7337+
7338+ "sete %1\n"
7339 : "=m" (v->counter), "=qm" (c)
7340 : "m" (v->counter) : "memory");
7341 return c != 0;
7342@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
7343 {
7344 unsigned char c;
7345
7346- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7347+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7348+
7349+#ifdef CONFIG_PAX_REFCOUNT
7350+ "jno 0f\n"
7351+ LOCK_PREFIX "subq %2,%0\n"
7352+ "int $4\n0:\n"
7353+ _ASM_EXTABLE(0b, 0b)
7354+#endif
7355+
7356+ "sets %1\n"
7357 : "=m" (v->counter), "=qm" (c)
7358 : "er" (i), "m" (v->counter) : "memory");
7359 return c;
7360@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
7361 static inline long atomic64_add_return(long i, atomic64_t *v)
7362 {
7363 long __i = i;
7364- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7365+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7366+
7367+#ifdef CONFIG_PAX_REFCOUNT
7368+ "jno 0f\n"
7369+ "movq %0, %1\n"
7370+ "int $4\n0:\n"
7371+ _ASM_EXTABLE(0b, 0b)
7372+#endif
7373+
7374+ : "+r" (i), "+m" (v->counter)
7375+ : : "memory");
7376+ return i + __i;
7377+}
7378+
7379+/**
7380+ * atomic64_add_return_unchecked - add and return
7381+ * @i: integer value to add
7382+ * @v: pointer to type atomic64_unchecked_t
7383+ *
7384+ * Atomically adds @i to @v and returns @i + @v
7385+ */
7386+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7387+{
7388+ long __i = i;
7389+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7390 : "+r" (i), "+m" (v->counter)
7391 : : "memory");
7392 return i + __i;
7393@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
7394 }
7395
7396 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7397+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7398+{
7399+ return atomic64_add_return_unchecked(1, v);
7400+}
7401 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7402
7403 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7404@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
7405 return cmpxchg(&v->counter, old, new);
7406 }
7407
7408+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7409+{
7410+ return cmpxchg(&v->counter, old, new);
7411+}
7412+
7413 static inline long atomic64_xchg(atomic64_t *v, long new)
7414 {
7415 return xchg(&v->counter, new);
7416@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
7417 */
7418 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7419 {
7420- long c, old;
7421+ long c, old, new;
7422 c = atomic64_read(v);
7423 for (;;) {
7424- if (unlikely(c == (u)))
7425+ if (unlikely(c == u))
7426 break;
7427- old = atomic64_cmpxchg((v), c, c + (a));
7428+
7429+ asm volatile("add %2,%0\n"
7430+
7431+#ifdef CONFIG_PAX_REFCOUNT
7432+ "jno 0f\n"
7433+ "sub %2,%0\n"
7434+ "int $4\n0:\n"
7435+ _ASM_EXTABLE(0b, 0b)
7436+#endif
7437+
7438+ : "=r" (new)
7439+ : "0" (c), "ir" (a));
7440+
7441+ old = atomic64_cmpxchg(v, c, new);
7442 if (likely(old == c))
7443 break;
7444 c = old;
7445 }
7446- return c != (u);
7447+ return c != u;
7448 }
7449
7450 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7451diff -urNp linux-3.0.7/arch/x86/include/asm/bitops.h linux-3.0.7/arch/x86/include/asm/bitops.h
7452--- linux-3.0.7/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
7453+++ linux-3.0.7/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
7454@@ -38,7 +38,7 @@
7455 * a mask operation on a byte.
7456 */
7457 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7458-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7459+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7460 #define CONST_MASK(nr) (1 << ((nr) & 7))
7461
7462 /**
7463diff -urNp linux-3.0.7/arch/x86/include/asm/boot.h linux-3.0.7/arch/x86/include/asm/boot.h
7464--- linux-3.0.7/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
7465+++ linux-3.0.7/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
7466@@ -11,10 +11,15 @@
7467 #include <asm/pgtable_types.h>
7468
7469 /* Physical address where kernel should be loaded. */
7470-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7471+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7472 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7473 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7474
7475+#ifndef __ASSEMBLY__
7476+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7477+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7478+#endif
7479+
7480 /* Minimum kernel alignment, as a power of two */
7481 #ifdef CONFIG_X86_64
7482 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7483diff -urNp linux-3.0.7/arch/x86/include/asm/cache.h linux-3.0.7/arch/x86/include/asm/cache.h
7484--- linux-3.0.7/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7485+++ linux-3.0.7/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7486@@ -5,12 +5,13 @@
7487
7488 /* L1 cache line size */
7489 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7490-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7491+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7492
7493 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7494+#define __read_only __attribute__((__section__(".data..read_only")))
7495
7496 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7497-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7498+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7499
7500 #ifdef CONFIG_X86_VSMP
7501 #ifdef CONFIG_SMP
7502diff -urNp linux-3.0.7/arch/x86/include/asm/cacheflush.h linux-3.0.7/arch/x86/include/asm/cacheflush.h
7503--- linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7504+++ linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7505@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7506 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7507
7508 if (pg_flags == _PGMT_DEFAULT)
7509- return -1;
7510+ return ~0UL;
7511 else if (pg_flags == _PGMT_WC)
7512 return _PAGE_CACHE_WC;
7513 else if (pg_flags == _PGMT_UC_MINUS)
7514diff -urNp linux-3.0.7/arch/x86/include/asm/checksum_32.h linux-3.0.7/arch/x86/include/asm/checksum_32.h
7515--- linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7516+++ linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7517@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7518 int len, __wsum sum,
7519 int *src_err_ptr, int *dst_err_ptr);
7520
7521+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7522+ int len, __wsum sum,
7523+ int *src_err_ptr, int *dst_err_ptr);
7524+
7525+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7526+ int len, __wsum sum,
7527+ int *src_err_ptr, int *dst_err_ptr);
7528+
7529 /*
7530 * Note: when you get a NULL pointer exception here this means someone
7531 * passed in an incorrect kernel address to one of these functions.
7532@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7533 int *err_ptr)
7534 {
7535 might_sleep();
7536- return csum_partial_copy_generic((__force void *)src, dst,
7537+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7538 len, sum, err_ptr, NULL);
7539 }
7540
7541@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7542 {
7543 might_sleep();
7544 if (access_ok(VERIFY_WRITE, dst, len))
7545- return csum_partial_copy_generic(src, (__force void *)dst,
7546+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7547 len, sum, NULL, err_ptr);
7548
7549 if (len)
7550diff -urNp linux-3.0.7/arch/x86/include/asm/cpufeature.h linux-3.0.7/arch/x86/include/asm/cpufeature.h
7551--- linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7552+++ linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7553@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7554 ".section .discard,\"aw\",@progbits\n"
7555 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7556 ".previous\n"
7557- ".section .altinstr_replacement,\"ax\"\n"
7558+ ".section .altinstr_replacement,\"a\"\n"
7559 "3: movb $1,%0\n"
7560 "4:\n"
7561 ".previous\n"
7562diff -urNp linux-3.0.7/arch/x86/include/asm/desc.h linux-3.0.7/arch/x86/include/asm/desc.h
7563--- linux-3.0.7/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7564+++ linux-3.0.7/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7565@@ -4,6 +4,7 @@
7566 #include <asm/desc_defs.h>
7567 #include <asm/ldt.h>
7568 #include <asm/mmu.h>
7569+#include <asm/pgtable.h>
7570
7571 #include <linux/smp.h>
7572
7573@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7574
7575 desc->type = (info->read_exec_only ^ 1) << 1;
7576 desc->type |= info->contents << 2;
7577+ desc->type |= info->seg_not_present ^ 1;
7578
7579 desc->s = 1;
7580 desc->dpl = 0x3;
7581@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7582 }
7583
7584 extern struct desc_ptr idt_descr;
7585-extern gate_desc idt_table[];
7586-
7587-struct gdt_page {
7588- struct desc_struct gdt[GDT_ENTRIES];
7589-} __attribute__((aligned(PAGE_SIZE)));
7590-
7591-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7592+extern gate_desc idt_table[256];
7593
7594+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7595 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7596 {
7597- return per_cpu(gdt_page, cpu).gdt;
7598+ return cpu_gdt_table[cpu];
7599 }
7600
7601 #ifdef CONFIG_X86_64
7602@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7603 unsigned long base, unsigned dpl, unsigned flags,
7604 unsigned short seg)
7605 {
7606- gate->a = (seg << 16) | (base & 0xffff);
7607- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7608+ gate->gate.offset_low = base;
7609+ gate->gate.seg = seg;
7610+ gate->gate.reserved = 0;
7611+ gate->gate.type = type;
7612+ gate->gate.s = 0;
7613+ gate->gate.dpl = dpl;
7614+ gate->gate.p = 1;
7615+ gate->gate.offset_high = base >> 16;
7616 }
7617
7618 #endif
7619@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7620
7621 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7622 {
7623+ pax_open_kernel();
7624 memcpy(&idt[entry], gate, sizeof(*gate));
7625+ pax_close_kernel();
7626 }
7627
7628 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7629 {
7630+ pax_open_kernel();
7631 memcpy(&ldt[entry], desc, 8);
7632+ pax_close_kernel();
7633 }
7634
7635 static inline void
7636@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7637 default: size = sizeof(*gdt); break;
7638 }
7639
7640+ pax_open_kernel();
7641 memcpy(&gdt[entry], desc, size);
7642+ pax_close_kernel();
7643 }
7644
7645 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7646@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7647
7648 static inline void native_load_tr_desc(void)
7649 {
7650+ pax_open_kernel();
7651 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7652+ pax_close_kernel();
7653 }
7654
7655 static inline void native_load_gdt(const struct desc_ptr *dtr)
7656@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7657 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7658 unsigned int i;
7659
7660+ pax_open_kernel();
7661 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7662 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7663+ pax_close_kernel();
7664 }
7665
7666 #define _LDT_empty(info) \
7667@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7668 desc->limit = (limit >> 16) & 0xf;
7669 }
7670
7671-static inline void _set_gate(int gate, unsigned type, void *addr,
7672+static inline void _set_gate(int gate, unsigned type, const void *addr,
7673 unsigned dpl, unsigned ist, unsigned seg)
7674 {
7675 gate_desc s;
7676@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7677 * Pentium F0 0F bugfix can have resulted in the mapped
7678 * IDT being write-protected.
7679 */
7680-static inline void set_intr_gate(unsigned int n, void *addr)
7681+static inline void set_intr_gate(unsigned int n, const void *addr)
7682 {
7683 BUG_ON((unsigned)n > 0xFF);
7684 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7685@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7686 /*
7687 * This routine sets up an interrupt gate at directory privilege level 3.
7688 */
7689-static inline void set_system_intr_gate(unsigned int n, void *addr)
7690+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7691 {
7692 BUG_ON((unsigned)n > 0xFF);
7693 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7694 }
7695
7696-static inline void set_system_trap_gate(unsigned int n, void *addr)
7697+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7698 {
7699 BUG_ON((unsigned)n > 0xFF);
7700 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7701 }
7702
7703-static inline void set_trap_gate(unsigned int n, void *addr)
7704+static inline void set_trap_gate(unsigned int n, const void *addr)
7705 {
7706 BUG_ON((unsigned)n > 0xFF);
7707 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7708@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7709 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7710 {
7711 BUG_ON((unsigned)n > 0xFF);
7712- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7713+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7714 }
7715
7716-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7717+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7718 {
7719 BUG_ON((unsigned)n > 0xFF);
7720 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7721 }
7722
7723-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7724+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7725 {
7726 BUG_ON((unsigned)n > 0xFF);
7727 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7728 }
7729
7730+#ifdef CONFIG_X86_32
7731+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7732+{
7733+ struct desc_struct d;
7734+
7735+ if (likely(limit))
7736+ limit = (limit - 1UL) >> PAGE_SHIFT;
7737+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7738+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7739+}
7740+#endif
7741+
7742 #endif /* _ASM_X86_DESC_H */
7743diff -urNp linux-3.0.7/arch/x86/include/asm/desc_defs.h linux-3.0.7/arch/x86/include/asm/desc_defs.h
7744--- linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7745+++ linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7746@@ -31,6 +31,12 @@ struct desc_struct {
7747 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7748 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7749 };
7750+ struct {
7751+ u16 offset_low;
7752+ u16 seg;
7753+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7754+ unsigned offset_high: 16;
7755+ } gate;
7756 };
7757 } __attribute__((packed));
7758
7759diff -urNp linux-3.0.7/arch/x86/include/asm/e820.h linux-3.0.7/arch/x86/include/asm/e820.h
7760--- linux-3.0.7/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7761+++ linux-3.0.7/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7762@@ -69,7 +69,7 @@ struct e820map {
7763 #define ISA_START_ADDRESS 0xa0000
7764 #define ISA_END_ADDRESS 0x100000
7765
7766-#define BIOS_BEGIN 0x000a0000
7767+#define BIOS_BEGIN 0x000c0000
7768 #define BIOS_END 0x00100000
7769
7770 #define BIOS_ROM_BASE 0xffe00000
7771diff -urNp linux-3.0.7/arch/x86/include/asm/elf.h linux-3.0.7/arch/x86/include/asm/elf.h
7772--- linux-3.0.7/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7773+++ linux-3.0.7/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7774@@ -237,7 +237,25 @@ extern int force_personality32;
7775 the loader. We need to make sure that it is out of the way of the program
7776 that it will "exec", and that there is sufficient room for the brk. */
7777
7778+#ifdef CONFIG_PAX_SEGMEXEC
7779+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7780+#else
7781 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7782+#endif
7783+
7784+#ifdef CONFIG_PAX_ASLR
7785+#ifdef CONFIG_X86_32
7786+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7787+
7788+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7789+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7790+#else
7791+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7792+
7793+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7794+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7795+#endif
7796+#endif
7797
7798 /* This yields a mask that user programs can use to figure out what
7799 instruction set this CPU supports. This could be done in user space,
7800@@ -290,9 +308,7 @@ do { \
7801
7802 #define ARCH_DLINFO \
7803 do { \
7804- if (vdso_enabled) \
7805- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7806- (unsigned long)current->mm->context.vdso); \
7807+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7808 } while (0)
7809
7810 #define AT_SYSINFO 32
7811@@ -303,7 +319,7 @@ do { \
7812
7813 #endif /* !CONFIG_X86_32 */
7814
7815-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7816+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7817
7818 #define VDSO_ENTRY \
7819 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7820@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7821 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7822 #define compat_arch_setup_additional_pages syscall32_setup_pages
7823
7824-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7825-#define arch_randomize_brk arch_randomize_brk
7826-
7827 #endif /* _ASM_X86_ELF_H */
7828diff -urNp linux-3.0.7/arch/x86/include/asm/emergency-restart.h linux-3.0.7/arch/x86/include/asm/emergency-restart.h
7829--- linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7830+++ linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7831@@ -15,6 +15,6 @@ enum reboot_type {
7832
7833 extern enum reboot_type reboot_type;
7834
7835-extern void machine_emergency_restart(void);
7836+extern void machine_emergency_restart(void) __noreturn;
7837
7838 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7839diff -urNp linux-3.0.7/arch/x86/include/asm/futex.h linux-3.0.7/arch/x86/include/asm/futex.h
7840--- linux-3.0.7/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7841+++ linux-3.0.7/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7842@@ -12,16 +12,18 @@
7843 #include <asm/system.h>
7844
7845 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7846+ typecheck(u32 __user *, uaddr); \
7847 asm volatile("1:\t" insn "\n" \
7848 "2:\t.section .fixup,\"ax\"\n" \
7849 "3:\tmov\t%3, %1\n" \
7850 "\tjmp\t2b\n" \
7851 "\t.previous\n" \
7852 _ASM_EXTABLE(1b, 3b) \
7853- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7854+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7855 : "i" (-EFAULT), "0" (oparg), "1" (0))
7856
7857 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7858+ typecheck(u32 __user *, uaddr); \
7859 asm volatile("1:\tmovl %2, %0\n" \
7860 "\tmovl\t%0, %3\n" \
7861 "\t" insn "\n" \
7862@@ -34,7 +36,7 @@
7863 _ASM_EXTABLE(1b, 4b) \
7864 _ASM_EXTABLE(2b, 4b) \
7865 : "=&a" (oldval), "=&r" (ret), \
7866- "+m" (*uaddr), "=&r" (tem) \
7867+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7868 : "r" (oparg), "i" (-EFAULT), "1" (0))
7869
7870 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7871@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7872
7873 switch (op) {
7874 case FUTEX_OP_SET:
7875- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7876+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7877 break;
7878 case FUTEX_OP_ADD:
7879- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7880+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7881 uaddr, oparg);
7882 break;
7883 case FUTEX_OP_OR:
7884@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7885 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7886 return -EFAULT;
7887
7888- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7889+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7890 "2:\t.section .fixup, \"ax\"\n"
7891 "3:\tmov %3, %0\n"
7892 "\tjmp 2b\n"
7893 "\t.previous\n"
7894 _ASM_EXTABLE(1b, 3b)
7895- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7896+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7897 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7898 : "memory"
7899 );
7900diff -urNp linux-3.0.7/arch/x86/include/asm/hw_irq.h linux-3.0.7/arch/x86/include/asm/hw_irq.h
7901--- linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7902+++ linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7903@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7904 extern void enable_IO_APIC(void);
7905
7906 /* Statistics */
7907-extern atomic_t irq_err_count;
7908-extern atomic_t irq_mis_count;
7909+extern atomic_unchecked_t irq_err_count;
7910+extern atomic_unchecked_t irq_mis_count;
7911
7912 /* EISA */
7913 extern void eisa_set_level_irq(unsigned int irq);
7914diff -urNp linux-3.0.7/arch/x86/include/asm/i387.h linux-3.0.7/arch/x86/include/asm/i387.h
7915--- linux-3.0.7/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7916+++ linux-3.0.7/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7917@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7918 {
7919 int err;
7920
7921+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7922+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7923+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7924+#endif
7925+
7926 /* See comment in fxsave() below. */
7927 #ifdef CONFIG_AS_FXSAVEQ
7928 asm volatile("1: fxrstorq %[fx]\n\t"
7929@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7930 {
7931 int err;
7932
7933+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7934+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7935+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7936+#endif
7937+
7938 /*
7939 * Clear the bytes not touched by the fxsave and reserved
7940 * for the SW usage.
7941@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7942 #endif /* CONFIG_X86_64 */
7943
7944 /* We need a safe address that is cheap to find and that is already
7945- in L1 during context switch. The best choices are unfortunately
7946- different for UP and SMP */
7947-#ifdef CONFIG_SMP
7948-#define safe_address (__per_cpu_offset[0])
7949-#else
7950-#define safe_address (kstat_cpu(0).cpustat.user)
7951-#endif
7952+ in L1 during context switch. */
7953+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7954
7955 /*
7956 * These must be called with preempt disabled
7957@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7958 struct thread_info *me = current_thread_info();
7959 preempt_disable();
7960 if (me->status & TS_USEDFPU)
7961- __save_init_fpu(me->task);
7962+ __save_init_fpu(current);
7963 else
7964 clts();
7965 }
7966diff -urNp linux-3.0.7/arch/x86/include/asm/io.h linux-3.0.7/arch/x86/include/asm/io.h
7967--- linux-3.0.7/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7968+++ linux-3.0.7/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7969@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7970
7971 #include <linux/vmalloc.h>
7972
7973+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7974+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7975+{
7976+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7977+}
7978+
7979+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7980+{
7981+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7982+}
7983+
7984 /*
7985 * Convert a virtual cached pointer to an uncached pointer
7986 */
7987diff -urNp linux-3.0.7/arch/x86/include/asm/irqflags.h linux-3.0.7/arch/x86/include/asm/irqflags.h
7988--- linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7989+++ linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7990@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7991 sti; \
7992 sysexit
7993
7994+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7995+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7996+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7997+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7998+
7999 #else
8000 #define INTERRUPT_RETURN iret
8001 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8002diff -urNp linux-3.0.7/arch/x86/include/asm/kprobes.h linux-3.0.7/arch/x86/include/asm/kprobes.h
8003--- linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
8004+++ linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
8005@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8006 #define RELATIVEJUMP_SIZE 5
8007 #define RELATIVECALL_OPCODE 0xe8
8008 #define RELATIVE_ADDR_SIZE 4
8009-#define MAX_STACK_SIZE 64
8010-#define MIN_STACK_SIZE(ADDR) \
8011- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8012- THREAD_SIZE - (unsigned long)(ADDR))) \
8013- ? (MAX_STACK_SIZE) \
8014- : (((unsigned long)current_thread_info()) + \
8015- THREAD_SIZE - (unsigned long)(ADDR)))
8016+#define MAX_STACK_SIZE 64UL
8017+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8018
8019 #define flush_insn_slot(p) do { } while (0)
8020
8021diff -urNp linux-3.0.7/arch/x86/include/asm/kvm_host.h linux-3.0.7/arch/x86/include/asm/kvm_host.h
8022--- linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
8023+++ linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
8024@@ -441,7 +441,7 @@ struct kvm_arch {
8025 unsigned int n_used_mmu_pages;
8026 unsigned int n_requested_mmu_pages;
8027 unsigned int n_max_mmu_pages;
8028- atomic_t invlpg_counter;
8029+ atomic_unchecked_t invlpg_counter;
8030 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8031 /*
8032 * Hash table of struct kvm_mmu_page.
8033@@ -619,7 +619,7 @@ struct kvm_x86_ops {
8034 enum x86_intercept_stage stage);
8035
8036 const struct trace_print_flags *exit_reasons_str;
8037-};
8038+} __do_const;
8039
8040 struct kvm_arch_async_pf {
8041 u32 token;
8042diff -urNp linux-3.0.7/arch/x86/include/asm/local.h linux-3.0.7/arch/x86/include/asm/local.h
8043--- linux-3.0.7/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
8044+++ linux-3.0.7/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
8045@@ -18,26 +18,58 @@ typedef struct {
8046
8047 static inline void local_inc(local_t *l)
8048 {
8049- asm volatile(_ASM_INC "%0"
8050+ asm volatile(_ASM_INC "%0\n"
8051+
8052+#ifdef CONFIG_PAX_REFCOUNT
8053+ "jno 0f\n"
8054+ _ASM_DEC "%0\n"
8055+ "int $4\n0:\n"
8056+ _ASM_EXTABLE(0b, 0b)
8057+#endif
8058+
8059 : "+m" (l->a.counter));
8060 }
8061
8062 static inline void local_dec(local_t *l)
8063 {
8064- asm volatile(_ASM_DEC "%0"
8065+ asm volatile(_ASM_DEC "%0\n"
8066+
8067+#ifdef CONFIG_PAX_REFCOUNT
8068+ "jno 0f\n"
8069+ _ASM_INC "%0\n"
8070+ "int $4\n0:\n"
8071+ _ASM_EXTABLE(0b, 0b)
8072+#endif
8073+
8074 : "+m" (l->a.counter));
8075 }
8076
8077 static inline void local_add(long i, local_t *l)
8078 {
8079- asm volatile(_ASM_ADD "%1,%0"
8080+ asm volatile(_ASM_ADD "%1,%0\n"
8081+
8082+#ifdef CONFIG_PAX_REFCOUNT
8083+ "jno 0f\n"
8084+ _ASM_SUB "%1,%0\n"
8085+ "int $4\n0:\n"
8086+ _ASM_EXTABLE(0b, 0b)
8087+#endif
8088+
8089 : "+m" (l->a.counter)
8090 : "ir" (i));
8091 }
8092
8093 static inline void local_sub(long i, local_t *l)
8094 {
8095- asm volatile(_ASM_SUB "%1,%0"
8096+ asm volatile(_ASM_SUB "%1,%0\n"
8097+
8098+#ifdef CONFIG_PAX_REFCOUNT
8099+ "jno 0f\n"
8100+ _ASM_ADD "%1,%0\n"
8101+ "int $4\n0:\n"
8102+ _ASM_EXTABLE(0b, 0b)
8103+#endif
8104+
8105 : "+m" (l->a.counter)
8106 : "ir" (i));
8107 }
8108@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8109 {
8110 unsigned char c;
8111
8112- asm volatile(_ASM_SUB "%2,%0; sete %1"
8113+ asm volatile(_ASM_SUB "%2,%0\n"
8114+
8115+#ifdef CONFIG_PAX_REFCOUNT
8116+ "jno 0f\n"
8117+ _ASM_ADD "%2,%0\n"
8118+ "int $4\n0:\n"
8119+ _ASM_EXTABLE(0b, 0b)
8120+#endif
8121+
8122+ "sete %1\n"
8123 : "+m" (l->a.counter), "=qm" (c)
8124 : "ir" (i) : "memory");
8125 return c;
8126@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8127 {
8128 unsigned char c;
8129
8130- asm volatile(_ASM_DEC "%0; sete %1"
8131+ asm volatile(_ASM_DEC "%0\n"
8132+
8133+#ifdef CONFIG_PAX_REFCOUNT
8134+ "jno 0f\n"
8135+ _ASM_INC "%0\n"
8136+ "int $4\n0:\n"
8137+ _ASM_EXTABLE(0b, 0b)
8138+#endif
8139+
8140+ "sete %1\n"
8141 : "+m" (l->a.counter), "=qm" (c)
8142 : : "memory");
8143 return c != 0;
8144@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8145 {
8146 unsigned char c;
8147
8148- asm volatile(_ASM_INC "%0; sete %1"
8149+ asm volatile(_ASM_INC "%0\n"
8150+
8151+#ifdef CONFIG_PAX_REFCOUNT
8152+ "jno 0f\n"
8153+ _ASM_DEC "%0\n"
8154+ "int $4\n0:\n"
8155+ _ASM_EXTABLE(0b, 0b)
8156+#endif
8157+
8158+ "sete %1\n"
8159 : "+m" (l->a.counter), "=qm" (c)
8160 : : "memory");
8161 return c != 0;
8162@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8163 {
8164 unsigned char c;
8165
8166- asm volatile(_ASM_ADD "%2,%0; sets %1"
8167+ asm volatile(_ASM_ADD "%2,%0\n"
8168+
8169+#ifdef CONFIG_PAX_REFCOUNT
8170+ "jno 0f\n"
8171+ _ASM_SUB "%2,%0\n"
8172+ "int $4\n0:\n"
8173+ _ASM_EXTABLE(0b, 0b)
8174+#endif
8175+
8176+ "sets %1\n"
8177 : "+m" (l->a.counter), "=qm" (c)
8178 : "ir" (i) : "memory");
8179 return c;
8180@@ -133,7 +201,15 @@ static inline long local_add_return(long
8181 #endif
8182 /* Modern 486+ processor */
8183 __i = i;
8184- asm volatile(_ASM_XADD "%0, %1;"
8185+ asm volatile(_ASM_XADD "%0, %1\n"
8186+
8187+#ifdef CONFIG_PAX_REFCOUNT
8188+ "jno 0f\n"
8189+ _ASM_MOV "%0,%1\n"
8190+ "int $4\n0:\n"
8191+ _ASM_EXTABLE(0b, 0b)
8192+#endif
8193+
8194 : "+r" (i), "+m" (l->a.counter)
8195 : : "memory");
8196 return i + __i;
8197diff -urNp linux-3.0.7/arch/x86/include/asm/mman.h linux-3.0.7/arch/x86/include/asm/mman.h
8198--- linux-3.0.7/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
8199+++ linux-3.0.7/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
8200@@ -5,4 +5,14 @@
8201
8202 #include <asm-generic/mman.h>
8203
8204+#ifdef __KERNEL__
8205+#ifndef __ASSEMBLY__
8206+#ifdef CONFIG_X86_32
8207+#define arch_mmap_check i386_mmap_check
8208+int i386_mmap_check(unsigned long addr, unsigned long len,
8209+ unsigned long flags);
8210+#endif
8211+#endif
8212+#endif
8213+
8214 #endif /* _ASM_X86_MMAN_H */
8215diff -urNp linux-3.0.7/arch/x86/include/asm/mmu.h linux-3.0.7/arch/x86/include/asm/mmu.h
8216--- linux-3.0.7/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
8217+++ linux-3.0.7/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
8218@@ -9,7 +9,7 @@
8219 * we put the segment information here.
8220 */
8221 typedef struct {
8222- void *ldt;
8223+ struct desc_struct *ldt;
8224 int size;
8225
8226 #ifdef CONFIG_X86_64
8227@@ -18,7 +18,19 @@ typedef struct {
8228 #endif
8229
8230 struct mutex lock;
8231- void *vdso;
8232+ unsigned long vdso;
8233+
8234+#ifdef CONFIG_X86_32
8235+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8236+ unsigned long user_cs_base;
8237+ unsigned long user_cs_limit;
8238+
8239+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8240+ cpumask_t cpu_user_cs_mask;
8241+#endif
8242+
8243+#endif
8244+#endif
8245 } mm_context_t;
8246
8247 #ifdef CONFIG_SMP
8248diff -urNp linux-3.0.7/arch/x86/include/asm/mmu_context.h linux-3.0.7/arch/x86/include/asm/mmu_context.h
8249--- linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
8250+++ linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
8251@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
8252
8253 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8254 {
8255+
8256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8257+ unsigned int i;
8258+ pgd_t *pgd;
8259+
8260+ pax_open_kernel();
8261+ pgd = get_cpu_pgd(smp_processor_id());
8262+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8263+ set_pgd_batched(pgd+i, native_make_pgd(0));
8264+ pax_close_kernel();
8265+#endif
8266+
8267 #ifdef CONFIG_SMP
8268 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8269 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8270@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
8271 struct task_struct *tsk)
8272 {
8273 unsigned cpu = smp_processor_id();
8274+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8275+ int tlbstate = TLBSTATE_OK;
8276+#endif
8277
8278 if (likely(prev != next)) {
8279 #ifdef CONFIG_SMP
8280+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8281+ tlbstate = percpu_read(cpu_tlbstate.state);
8282+#endif
8283 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8284 percpu_write(cpu_tlbstate.active_mm, next);
8285 #endif
8286 cpumask_set_cpu(cpu, mm_cpumask(next));
8287
8288 /* Re-load page tables */
8289+#ifdef CONFIG_PAX_PER_CPU_PGD
8290+ pax_open_kernel();
8291+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8292+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8293+ pax_close_kernel();
8294+ load_cr3(get_cpu_pgd(cpu));
8295+#else
8296 load_cr3(next->pgd);
8297+#endif
8298
8299 /* stop flush ipis for the previous mm */
8300 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8301@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
8302 */
8303 if (unlikely(prev->context.ldt != next->context.ldt))
8304 load_LDT_nolock(&next->context);
8305- }
8306+
8307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8308+ if (!(__supported_pte_mask & _PAGE_NX)) {
8309+ smp_mb__before_clear_bit();
8310+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8311+ smp_mb__after_clear_bit();
8312+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8313+ }
8314+#endif
8315+
8316+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8317+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8318+ prev->context.user_cs_limit != next->context.user_cs_limit))
8319+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8320 #ifdef CONFIG_SMP
8321+ else if (unlikely(tlbstate != TLBSTATE_OK))
8322+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8323+#endif
8324+#endif
8325+
8326+ }
8327 else {
8328+
8329+#ifdef CONFIG_PAX_PER_CPU_PGD
8330+ pax_open_kernel();
8331+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8332+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8333+ pax_close_kernel();
8334+ load_cr3(get_cpu_pgd(cpu));
8335+#endif
8336+
8337+#ifdef CONFIG_SMP
8338 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8339 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8340
8341@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
8342 * tlb flush IPI delivery. We must reload CR3
8343 * to make sure to use no freed page tables.
8344 */
8345+
8346+#ifndef CONFIG_PAX_PER_CPU_PGD
8347 load_cr3(next->pgd);
8348+#endif
8349+
8350 load_LDT_nolock(&next->context);
8351+
8352+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8353+ if (!(__supported_pte_mask & _PAGE_NX))
8354+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8355+#endif
8356+
8357+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8358+#ifdef CONFIG_PAX_PAGEEXEC
8359+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8360+#endif
8361+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8362+#endif
8363+
8364 }
8365- }
8366 #endif
8367+ }
8368 }
8369
8370 #define activate_mm(prev, next) \
8371diff -urNp linux-3.0.7/arch/x86/include/asm/module.h linux-3.0.7/arch/x86/include/asm/module.h
8372--- linux-3.0.7/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
8373+++ linux-3.0.7/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
8374@@ -5,6 +5,7 @@
8375
8376 #ifdef CONFIG_X86_64
8377 /* X86_64 does not define MODULE_PROC_FAMILY */
8378+#define MODULE_PROC_FAMILY ""
8379 #elif defined CONFIG_M386
8380 #define MODULE_PROC_FAMILY "386 "
8381 #elif defined CONFIG_M486
8382@@ -59,8 +60,18 @@
8383 #error unknown processor family
8384 #endif
8385
8386-#ifdef CONFIG_X86_32
8387-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8388+#ifdef CONFIG_PAX_KERNEXEC
8389+#define MODULE_PAX_KERNEXEC "KERNEXEC "
8390+#else
8391+#define MODULE_PAX_KERNEXEC ""
8392 #endif
8393
8394+#ifdef CONFIG_PAX_MEMORY_UDEREF
8395+#define MODULE_PAX_UDEREF "UDEREF "
8396+#else
8397+#define MODULE_PAX_UDEREF ""
8398+#endif
8399+
8400+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8401+
8402 #endif /* _ASM_X86_MODULE_H */
8403diff -urNp linux-3.0.7/arch/x86/include/asm/page_64_types.h linux-3.0.7/arch/x86/include/asm/page_64_types.h
8404--- linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
8405+++ linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
8406@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8407
8408 /* duplicated to the one in bootmem.h */
8409 extern unsigned long max_pfn;
8410-extern unsigned long phys_base;
8411+extern const unsigned long phys_base;
8412
8413 extern unsigned long __phys_addr(unsigned long);
8414 #define __phys_reloc_hide(x) (x)
8415diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt.h linux-3.0.7/arch/x86/include/asm/paravirt.h
8416--- linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
8417+++ linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
8418@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
8419 val);
8420 }
8421
8422+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8423+{
8424+ pgdval_t val = native_pgd_val(pgd);
8425+
8426+ if (sizeof(pgdval_t) > sizeof(long))
8427+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
8428+ val, (u64)val >> 32);
8429+ else
8430+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
8431+ val);
8432+}
8433+
8434 static inline void pgd_clear(pgd_t *pgdp)
8435 {
8436 set_pgd(pgdp, __pgd(0));
8437@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
8438 pv_mmu_ops.set_fixmap(idx, phys, flags);
8439 }
8440
8441+#ifdef CONFIG_PAX_KERNEXEC
8442+static inline unsigned long pax_open_kernel(void)
8443+{
8444+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8445+}
8446+
8447+static inline unsigned long pax_close_kernel(void)
8448+{
8449+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8450+}
8451+#else
8452+static inline unsigned long pax_open_kernel(void) { return 0; }
8453+static inline unsigned long pax_close_kernel(void) { return 0; }
8454+#endif
8455+
8456 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8457
8458 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
8459@@ -955,7 +982,7 @@ extern void default_banner(void);
8460
8461 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8462 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8463-#define PARA_INDIRECT(addr) *%cs:addr
8464+#define PARA_INDIRECT(addr) *%ss:addr
8465 #endif
8466
8467 #define INTERRUPT_RETURN \
8468@@ -1032,6 +1059,21 @@ extern void default_banner(void);
8469 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8470 CLBR_NONE, \
8471 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8472+
8473+#define GET_CR0_INTO_RDI \
8474+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8475+ mov %rax,%rdi
8476+
8477+#define SET_RDI_INTO_CR0 \
8478+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8479+
8480+#define GET_CR3_INTO_RDI \
8481+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8482+ mov %rax,%rdi
8483+
8484+#define SET_RDI_INTO_CR3 \
8485+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8486+
8487 #endif /* CONFIG_X86_32 */
8488
8489 #endif /* __ASSEMBLY__ */
8490diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt_types.h linux-3.0.7/arch/x86/include/asm/paravirt_types.h
8491--- linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8492+++ linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8493@@ -78,19 +78,19 @@ struct pv_init_ops {
8494 */
8495 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8496 unsigned long addr, unsigned len);
8497-};
8498+} __no_const;
8499
8500
8501 struct pv_lazy_ops {
8502 /* Set deferred update mode, used for batching operations. */
8503 void (*enter)(void);
8504 void (*leave)(void);
8505-};
8506+} __no_const;
8507
8508 struct pv_time_ops {
8509 unsigned long long (*sched_clock)(void);
8510 unsigned long (*get_tsc_khz)(void);
8511-};
8512+} __no_const;
8513
8514 struct pv_cpu_ops {
8515 /* hooks for various privileged instructions */
8516@@ -186,7 +186,7 @@ struct pv_cpu_ops {
8517
8518 void (*start_context_switch)(struct task_struct *prev);
8519 void (*end_context_switch)(struct task_struct *next);
8520-};
8521+} __no_const;
8522
8523 struct pv_irq_ops {
8524 /*
8525@@ -217,7 +217,7 @@ struct pv_apic_ops {
8526 unsigned long start_eip,
8527 unsigned long start_esp);
8528 #endif
8529-};
8530+} __no_const;
8531
8532 struct pv_mmu_ops {
8533 unsigned long (*read_cr2)(void);
8534@@ -306,6 +306,7 @@ struct pv_mmu_ops {
8535 struct paravirt_callee_save make_pud;
8536
8537 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8538+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8539 #endif /* PAGETABLE_LEVELS == 4 */
8540 #endif /* PAGETABLE_LEVELS >= 3 */
8541
8542@@ -317,6 +318,12 @@ struct pv_mmu_ops {
8543 an mfn. We can tell which is which from the index. */
8544 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8545 phys_addr_t phys, pgprot_t flags);
8546+
8547+#ifdef CONFIG_PAX_KERNEXEC
8548+ unsigned long (*pax_open_kernel)(void);
8549+ unsigned long (*pax_close_kernel)(void);
8550+#endif
8551+
8552 };
8553
8554 struct arch_spinlock;
8555@@ -327,7 +334,7 @@ struct pv_lock_ops {
8556 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8557 int (*spin_trylock)(struct arch_spinlock *lock);
8558 void (*spin_unlock)(struct arch_spinlock *lock);
8559-};
8560+} __no_const;
8561
8562 /* This contains all the paravirt structures: we get a convenient
8563 * number for each function using the offset which we use to indicate
8564diff -urNp linux-3.0.7/arch/x86/include/asm/pgalloc.h linux-3.0.7/arch/x86/include/asm/pgalloc.h
8565--- linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8566+++ linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8567@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8568 pmd_t *pmd, pte_t *pte)
8569 {
8570 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8571+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8572+}
8573+
8574+static inline void pmd_populate_user(struct mm_struct *mm,
8575+ pmd_t *pmd, pte_t *pte)
8576+{
8577+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8578 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8579 }
8580
8581diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-2level.h linux-3.0.7/arch/x86/include/asm/pgtable-2level.h
8582--- linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8583+++ linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8584@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8585
8586 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8587 {
8588+ pax_open_kernel();
8589 *pmdp = pmd;
8590+ pax_close_kernel();
8591 }
8592
8593 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8594diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-3level.h linux-3.0.7/arch/x86/include/asm/pgtable-3level.h
8595--- linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8596+++ linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8597@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8598
8599 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8600 {
8601+ pax_open_kernel();
8602 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8603+ pax_close_kernel();
8604 }
8605
8606 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8607 {
8608+ pax_open_kernel();
8609 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8610+ pax_close_kernel();
8611 }
8612
8613 /*
8614diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable.h linux-3.0.7/arch/x86/include/asm/pgtable.h
8615--- linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8616+++ linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8617@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8618
8619 #ifndef __PAGETABLE_PUD_FOLDED
8620 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8621+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8622 #define pgd_clear(pgd) native_pgd_clear(pgd)
8623 #endif
8624
8625@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8626
8627 #define arch_end_context_switch(prev) do {} while(0)
8628
8629+#define pax_open_kernel() native_pax_open_kernel()
8630+#define pax_close_kernel() native_pax_close_kernel()
8631 #endif /* CONFIG_PARAVIRT */
8632
8633+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8634+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8635+
8636+#ifdef CONFIG_PAX_KERNEXEC
8637+static inline unsigned long native_pax_open_kernel(void)
8638+{
8639+ unsigned long cr0;
8640+
8641+ preempt_disable();
8642+ barrier();
8643+ cr0 = read_cr0() ^ X86_CR0_WP;
8644+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8645+ write_cr0(cr0);
8646+ return cr0 ^ X86_CR0_WP;
8647+}
8648+
8649+static inline unsigned long native_pax_close_kernel(void)
8650+{
8651+ unsigned long cr0;
8652+
8653+ cr0 = read_cr0() ^ X86_CR0_WP;
8654+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8655+ write_cr0(cr0);
8656+ barrier();
8657+ preempt_enable_no_resched();
8658+ return cr0 ^ X86_CR0_WP;
8659+}
8660+#else
8661+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8662+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8663+#endif
8664+
8665 /*
8666 * The following only work if pte_present() is true.
8667 * Undefined behaviour if not..
8668 */
8669+static inline int pte_user(pte_t pte)
8670+{
8671+ return pte_val(pte) & _PAGE_USER;
8672+}
8673+
8674 static inline int pte_dirty(pte_t pte)
8675 {
8676 return pte_flags(pte) & _PAGE_DIRTY;
8677@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8678 return pte_clear_flags(pte, _PAGE_RW);
8679 }
8680
8681+static inline pte_t pte_mkread(pte_t pte)
8682+{
8683+ return __pte(pte_val(pte) | _PAGE_USER);
8684+}
8685+
8686 static inline pte_t pte_mkexec(pte_t pte)
8687 {
8688- return pte_clear_flags(pte, _PAGE_NX);
8689+#ifdef CONFIG_X86_PAE
8690+ if (__supported_pte_mask & _PAGE_NX)
8691+ return pte_clear_flags(pte, _PAGE_NX);
8692+ else
8693+#endif
8694+ return pte_set_flags(pte, _PAGE_USER);
8695+}
8696+
8697+static inline pte_t pte_exprotect(pte_t pte)
8698+{
8699+#ifdef CONFIG_X86_PAE
8700+ if (__supported_pte_mask & _PAGE_NX)
8701+ return pte_set_flags(pte, _PAGE_NX);
8702+ else
8703+#endif
8704+ return pte_clear_flags(pte, _PAGE_USER);
8705 }
8706
8707 static inline pte_t pte_mkdirty(pte_t pte)
8708@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8709 #endif
8710
8711 #ifndef __ASSEMBLY__
8712+
8713+#ifdef CONFIG_PAX_PER_CPU_PGD
8714+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8715+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8716+{
8717+ return cpu_pgd[cpu];
8718+}
8719+#endif
8720+
8721 #include <linux/mm_types.h>
8722
8723 static inline int pte_none(pte_t pte)
8724@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8725
8726 static inline int pgd_bad(pgd_t pgd)
8727 {
8728- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8729+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8730 }
8731
8732 static inline int pgd_none(pgd_t pgd)
8733@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8734 * pgd_offset() returns a (pgd_t *)
8735 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8736 */
8737-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8738+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8739+
8740+#ifdef CONFIG_PAX_PER_CPU_PGD
8741+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8742+#endif
8743+
8744 /*
8745 * a shortcut which implies the use of the kernel's pgd, instead
8746 * of a process's
8747@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8748 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8749 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8750
8751+#ifdef CONFIG_X86_32
8752+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8753+#else
8754+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8755+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8756+
8757+#ifdef CONFIG_PAX_MEMORY_UDEREF
8758+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8759+#else
8760+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8761+#endif
8762+
8763+#endif
8764+
8765 #ifndef __ASSEMBLY__
8766
8767 extern int direct_gbpages;
8768@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8769 * dst and src can be on the same page, but the range must not overlap,
8770 * and must not cross a page boundary.
8771 */
8772-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8773+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8774 {
8775- memcpy(dst, src, count * sizeof(pgd_t));
8776+ pax_open_kernel();
8777+ while (count--)
8778+ *dst++ = *src++;
8779+ pax_close_kernel();
8780 }
8781
8782+#ifdef CONFIG_PAX_PER_CPU_PGD
8783+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8784+#endif
8785+
8786+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8787+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8788+#else
8789+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8790+#endif
8791
8792 #include <asm-generic/pgtable.h>
8793 #endif /* __ASSEMBLY__ */
8794diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32.h linux-3.0.7/arch/x86/include/asm/pgtable_32.h
8795--- linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8796+++ linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8797@@ -25,9 +25,6 @@
8798 struct mm_struct;
8799 struct vm_area_struct;
8800
8801-extern pgd_t swapper_pg_dir[1024];
8802-extern pgd_t initial_page_table[1024];
8803-
8804 static inline void pgtable_cache_init(void) { }
8805 static inline void check_pgt_cache(void) { }
8806 void paging_init(void);
8807@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8808 # include <asm/pgtable-2level.h>
8809 #endif
8810
8811+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8812+extern pgd_t initial_page_table[PTRS_PER_PGD];
8813+#ifdef CONFIG_X86_PAE
8814+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8815+#endif
8816+
8817 #if defined(CONFIG_HIGHPTE)
8818 #define pte_offset_map(dir, address) \
8819 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8820@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8821 /* Clear a kernel PTE and flush it from the TLB */
8822 #define kpte_clear_flush(ptep, vaddr) \
8823 do { \
8824+ pax_open_kernel(); \
8825 pte_clear(&init_mm, (vaddr), (ptep)); \
8826+ pax_close_kernel(); \
8827 __flush_tlb_one((vaddr)); \
8828 } while (0)
8829
8830@@ -74,6 +79,9 @@ do { \
8831
8832 #endif /* !__ASSEMBLY__ */
8833
8834+#define HAVE_ARCH_UNMAPPED_AREA
8835+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8836+
8837 /*
8838 * kern_addr_valid() is (1) for FLATMEM and (0) for
8839 * SPARSEMEM and DISCONTIGMEM
8840diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h
8841--- linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8842+++ linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8843@@ -8,7 +8,7 @@
8844 */
8845 #ifdef CONFIG_X86_PAE
8846 # include <asm/pgtable-3level_types.h>
8847-# define PMD_SIZE (1UL << PMD_SHIFT)
8848+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8849 # define PMD_MASK (~(PMD_SIZE - 1))
8850 #else
8851 # include <asm/pgtable-2level_types.h>
8852@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8853 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8854 #endif
8855
8856+#ifdef CONFIG_PAX_KERNEXEC
8857+#ifndef __ASSEMBLY__
8858+extern unsigned char MODULES_EXEC_VADDR[];
8859+extern unsigned char MODULES_EXEC_END[];
8860+#endif
8861+#include <asm/boot.h>
8862+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8863+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8864+#else
8865+#define ktla_ktva(addr) (addr)
8866+#define ktva_ktla(addr) (addr)
8867+#endif
8868+
8869 #define MODULES_VADDR VMALLOC_START
8870 #define MODULES_END VMALLOC_END
8871 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8872diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64.h linux-3.0.7/arch/x86/include/asm/pgtable_64.h
8873--- linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8874+++ linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8875@@ -16,10 +16,13 @@
8876
8877 extern pud_t level3_kernel_pgt[512];
8878 extern pud_t level3_ident_pgt[512];
8879+extern pud_t level3_vmalloc_pgt[512];
8880+extern pud_t level3_vmemmap_pgt[512];
8881+extern pud_t level2_vmemmap_pgt[512];
8882 extern pmd_t level2_kernel_pgt[512];
8883 extern pmd_t level2_fixmap_pgt[512];
8884-extern pmd_t level2_ident_pgt[512];
8885-extern pgd_t init_level4_pgt[];
8886+extern pmd_t level2_ident_pgt[512*2];
8887+extern pgd_t init_level4_pgt[512];
8888
8889 #define swapper_pg_dir init_level4_pgt
8890
8891@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8892
8893 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8894 {
8895+ pax_open_kernel();
8896 *pmdp = pmd;
8897+ pax_close_kernel();
8898 }
8899
8900 static inline void native_pmd_clear(pmd_t *pmd)
8901@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8902
8903 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8904 {
8905+ pax_open_kernel();
8906+ *pgdp = pgd;
8907+ pax_close_kernel();
8908+}
8909+
8910+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8911+{
8912 *pgdp = pgd;
8913 }
8914
8915diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h
8916--- linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8917+++ linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8918@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8919 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8920 #define MODULES_END _AC(0xffffffffff000000, UL)
8921 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8922+#define MODULES_EXEC_VADDR MODULES_VADDR
8923+#define MODULES_EXEC_END MODULES_END
8924+
8925+#define ktla_ktva(addr) (addr)
8926+#define ktva_ktla(addr) (addr)
8927
8928 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8929diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_types.h linux-3.0.7/arch/x86/include/asm/pgtable_types.h
8930--- linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8931+++ linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8932@@ -16,13 +16,12 @@
8933 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8934 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8935 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8936-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8937+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8938 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8939 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8940 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8941-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8942-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8943-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8944+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8945+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8946 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8947
8948 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8949@@ -40,7 +39,6 @@
8950 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8951 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8952 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8953-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8954 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8955 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8956 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8957@@ -57,8 +55,10 @@
8958
8959 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8960 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8961-#else
8962+#elif defined(CONFIG_KMEMCHECK)
8963 #define _PAGE_NX (_AT(pteval_t, 0))
8964+#else
8965+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8966 #endif
8967
8968 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8969@@ -96,6 +96,9 @@
8970 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8971 _PAGE_ACCESSED)
8972
8973+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8974+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8975+
8976 #define __PAGE_KERNEL_EXEC \
8977 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8978 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8979@@ -106,8 +109,8 @@
8980 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8981 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8982 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8983-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8984-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8985+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8986+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8987 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8988 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8989 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8990@@ -166,8 +169,8 @@
8991 * bits are combined, this will alow user to access the high address mapped
8992 * VDSO in the presence of CONFIG_COMPAT_VDSO
8993 */
8994-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8995-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8996+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8997+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8998 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8999 #endif
9000
9001@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9002 {
9003 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9004 }
9005+#endif
9006
9007+#if PAGETABLE_LEVELS == 3
9008+#include <asm-generic/pgtable-nopud.h>
9009+#endif
9010+
9011+#if PAGETABLE_LEVELS == 2
9012+#include <asm-generic/pgtable-nopmd.h>
9013+#endif
9014+
9015+#ifndef __ASSEMBLY__
9016 #if PAGETABLE_LEVELS > 3
9017 typedef struct { pudval_t pud; } pud_t;
9018
9019@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
9020 return pud.pud;
9021 }
9022 #else
9023-#include <asm-generic/pgtable-nopud.h>
9024-
9025 static inline pudval_t native_pud_val(pud_t pud)
9026 {
9027 return native_pgd_val(pud.pgd);
9028@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
9029 return pmd.pmd;
9030 }
9031 #else
9032-#include <asm-generic/pgtable-nopmd.h>
9033-
9034 static inline pmdval_t native_pmd_val(pmd_t pmd)
9035 {
9036 return native_pgd_val(pmd.pud.pgd);
9037@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
9038
9039 extern pteval_t __supported_pte_mask;
9040 extern void set_nx(void);
9041-extern int nx_enabled;
9042
9043 #define pgprot_writecombine pgprot_writecombine
9044 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9045diff -urNp linux-3.0.7/arch/x86/include/asm/processor.h linux-3.0.7/arch/x86/include/asm/processor.h
9046--- linux-3.0.7/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
9047+++ linux-3.0.7/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
9048@@ -266,7 +266,7 @@ struct tss_struct {
9049
9050 } ____cacheline_aligned;
9051
9052-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9053+extern struct tss_struct init_tss[NR_CPUS];
9054
9055 /*
9056 * Save the original ist values for checking stack pointers during debugging
9057@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
9058 */
9059 #define TASK_SIZE PAGE_OFFSET
9060 #define TASK_SIZE_MAX TASK_SIZE
9061+
9062+#ifdef CONFIG_PAX_SEGMEXEC
9063+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9064+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9065+#else
9066 #define STACK_TOP TASK_SIZE
9067-#define STACK_TOP_MAX STACK_TOP
9068+#endif
9069+
9070+#define STACK_TOP_MAX TASK_SIZE
9071
9072 #define INIT_THREAD { \
9073- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9074+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9075 .vm86_info = NULL, \
9076 .sysenter_cs = __KERNEL_CS, \
9077 .io_bitmap_ptr = NULL, \
9078@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
9079 */
9080 #define INIT_TSS { \
9081 .x86_tss = { \
9082- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9083+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9084 .ss0 = __KERNEL_DS, \
9085 .ss1 = __KERNEL_CS, \
9086 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9087@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
9088 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9089
9090 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9091-#define KSTK_TOP(info) \
9092-({ \
9093- unsigned long *__ptr = (unsigned long *)(info); \
9094- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9095-})
9096+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9097
9098 /*
9099 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9100@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
9101 #define task_pt_regs(task) \
9102 ({ \
9103 struct pt_regs *__regs__; \
9104- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9105+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9106 __regs__ - 1; \
9107 })
9108
9109@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
9110 /*
9111 * User space process size. 47bits minus one guard page.
9112 */
9113-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9114+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9115
9116 /* This decides where the kernel will search for a free chunk of vm
9117 * space during mmap's.
9118 */
9119 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9120- 0xc0000000 : 0xFFFFe000)
9121+ 0xc0000000 : 0xFFFFf000)
9122
9123 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9124 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9125@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
9126 #define STACK_TOP_MAX TASK_SIZE_MAX
9127
9128 #define INIT_THREAD { \
9129- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9130+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9131 }
9132
9133 #define INIT_TSS { \
9134- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9135+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9136 }
9137
9138 /*
9139@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
9140 */
9141 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9142
9143+#ifdef CONFIG_PAX_SEGMEXEC
9144+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9145+#endif
9146+
9147 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9148
9149 /* Get/set a process' ability to use the timestamp counter instruction */
9150diff -urNp linux-3.0.7/arch/x86/include/asm/ptrace.h linux-3.0.7/arch/x86/include/asm/ptrace.h
9151--- linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
9152+++ linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
9153@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
9154 }
9155
9156 /*
9157- * user_mode_vm(regs) determines whether a register set came from user mode.
9158+ * user_mode(regs) determines whether a register set came from user mode.
9159 * This is true if V8086 mode was enabled OR if the register set was from
9160 * protected mode with RPL-3 CS value. This tricky test checks that with
9161 * one comparison. Many places in the kernel can bypass this full check
9162- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9163+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9164+ * be used.
9165 */
9166-static inline int user_mode(struct pt_regs *regs)
9167+static inline int user_mode_novm(struct pt_regs *regs)
9168 {
9169 #ifdef CONFIG_X86_32
9170 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9171 #else
9172- return !!(regs->cs & 3);
9173+ return !!(regs->cs & SEGMENT_RPL_MASK);
9174 #endif
9175 }
9176
9177-static inline int user_mode_vm(struct pt_regs *regs)
9178+static inline int user_mode(struct pt_regs *regs)
9179 {
9180 #ifdef CONFIG_X86_32
9181 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9182 USER_RPL;
9183 #else
9184- return user_mode(regs);
9185+ return user_mode_novm(regs);
9186 #endif
9187 }
9188
9189diff -urNp linux-3.0.7/arch/x86/include/asm/reboot.h linux-3.0.7/arch/x86/include/asm/reboot.h
9190--- linux-3.0.7/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
9191+++ linux-3.0.7/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
9192@@ -6,19 +6,19 @@
9193 struct pt_regs;
9194
9195 struct machine_ops {
9196- void (*restart)(char *cmd);
9197- void (*halt)(void);
9198- void (*power_off)(void);
9199+ void (* __noreturn restart)(char *cmd);
9200+ void (* __noreturn halt)(void);
9201+ void (* __noreturn power_off)(void);
9202 void (*shutdown)(void);
9203 void (*crash_shutdown)(struct pt_regs *);
9204- void (*emergency_restart)(void);
9205-};
9206+ void (* __noreturn emergency_restart)(void);
9207+} __no_const;
9208
9209 extern struct machine_ops machine_ops;
9210
9211 void native_machine_crash_shutdown(struct pt_regs *regs);
9212 void native_machine_shutdown(void);
9213-void machine_real_restart(unsigned int type);
9214+void machine_real_restart(unsigned int type) __noreturn;
9215 /* These must match dispatch_table in reboot_32.S */
9216 #define MRR_BIOS 0
9217 #define MRR_APM 1
9218diff -urNp linux-3.0.7/arch/x86/include/asm/rwsem.h linux-3.0.7/arch/x86/include/asm/rwsem.h
9219--- linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
9220+++ linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
9221@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
9222 {
9223 asm volatile("# beginning down_read\n\t"
9224 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9225+
9226+#ifdef CONFIG_PAX_REFCOUNT
9227+ "jno 0f\n"
9228+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9229+ "int $4\n0:\n"
9230+ _ASM_EXTABLE(0b, 0b)
9231+#endif
9232+
9233 /* adds 0x00000001 */
9234 " jns 1f\n"
9235 " call call_rwsem_down_read_failed\n"
9236@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
9237 "1:\n\t"
9238 " mov %1,%2\n\t"
9239 " add %3,%2\n\t"
9240+
9241+#ifdef CONFIG_PAX_REFCOUNT
9242+ "jno 0f\n"
9243+ "sub %3,%2\n"
9244+ "int $4\n0:\n"
9245+ _ASM_EXTABLE(0b, 0b)
9246+#endif
9247+
9248 " jle 2f\n\t"
9249 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9250 " jnz 1b\n\t"
9251@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
9252 long tmp;
9253 asm volatile("# beginning down_write\n\t"
9254 LOCK_PREFIX " xadd %1,(%2)\n\t"
9255+
9256+#ifdef CONFIG_PAX_REFCOUNT
9257+ "jno 0f\n"
9258+ "mov %1,(%2)\n"
9259+ "int $4\n0:\n"
9260+ _ASM_EXTABLE(0b, 0b)
9261+#endif
9262+
9263 /* adds 0xffff0001, returns the old value */
9264 " test %1,%1\n\t"
9265 /* was the count 0 before? */
9266@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
9267 long tmp;
9268 asm volatile("# beginning __up_read\n\t"
9269 LOCK_PREFIX " xadd %1,(%2)\n\t"
9270+
9271+#ifdef CONFIG_PAX_REFCOUNT
9272+ "jno 0f\n"
9273+ "mov %1,(%2)\n"
9274+ "int $4\n0:\n"
9275+ _ASM_EXTABLE(0b, 0b)
9276+#endif
9277+
9278 /* subtracts 1, returns the old value */
9279 " jns 1f\n\t"
9280 " call call_rwsem_wake\n" /* expects old value in %edx */
9281@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
9282 long tmp;
9283 asm volatile("# beginning __up_write\n\t"
9284 LOCK_PREFIX " xadd %1,(%2)\n\t"
9285+
9286+#ifdef CONFIG_PAX_REFCOUNT
9287+ "jno 0f\n"
9288+ "mov %1,(%2)\n"
9289+ "int $4\n0:\n"
9290+ _ASM_EXTABLE(0b, 0b)
9291+#endif
9292+
9293 /* subtracts 0xffff0001, returns the old value */
9294 " jns 1f\n\t"
9295 " call call_rwsem_wake\n" /* expects old value in %edx */
9296@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
9297 {
9298 asm volatile("# beginning __downgrade_write\n\t"
9299 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9300+
9301+#ifdef CONFIG_PAX_REFCOUNT
9302+ "jno 0f\n"
9303+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9304+ "int $4\n0:\n"
9305+ _ASM_EXTABLE(0b, 0b)
9306+#endif
9307+
9308 /*
9309 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9310 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9311@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
9312 */
9313 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9314 {
9315- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9316+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+ "jno 0f\n"
9320+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
9321+ "int $4\n0:\n"
9322+ _ASM_EXTABLE(0b, 0b)
9323+#endif
9324+
9325 : "+m" (sem->count)
9326 : "er" (delta));
9327 }
9328@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
9329 {
9330 long tmp = delta;
9331
9332- asm volatile(LOCK_PREFIX "xadd %0,%1"
9333+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9334+
9335+#ifdef CONFIG_PAX_REFCOUNT
9336+ "jno 0f\n"
9337+ "mov %0,%1\n"
9338+ "int $4\n0:\n"
9339+ _ASM_EXTABLE(0b, 0b)
9340+#endif
9341+
9342 : "+r" (tmp), "+m" (sem->count)
9343 : : "memory");
9344
9345diff -urNp linux-3.0.7/arch/x86/include/asm/segment.h linux-3.0.7/arch/x86/include/asm/segment.h
9346--- linux-3.0.7/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
9347+++ linux-3.0.7/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
9348@@ -64,10 +64,15 @@
9349 * 26 - ESPFIX small SS
9350 * 27 - per-cpu [ offset to per-cpu data area ]
9351 * 28 - stack_canary-20 [ for stack protector ]
9352- * 29 - unused
9353- * 30 - unused
9354+ * 29 - PCI BIOS CS
9355+ * 30 - PCI BIOS DS
9356 * 31 - TSS for double fault handler
9357 */
9358+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9359+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9360+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9361+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9362+
9363 #define GDT_ENTRY_TLS_MIN 6
9364 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9365
9366@@ -79,6 +84,8 @@
9367
9368 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9369
9370+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9371+
9372 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9373
9374 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
9375@@ -104,6 +111,12 @@
9376 #define __KERNEL_STACK_CANARY 0
9377 #endif
9378
9379+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9380+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9381+
9382+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9383+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9384+
9385 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9386
9387 /*
9388@@ -141,7 +154,7 @@
9389 */
9390
9391 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9392-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9393+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9394
9395
9396 #else
9397@@ -165,6 +178,8 @@
9398 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9399 #define __USER32_DS __USER_DS
9400
9401+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9402+
9403 #define GDT_ENTRY_TSS 8 /* needs two entries */
9404 #define GDT_ENTRY_LDT 10 /* needs two entries */
9405 #define GDT_ENTRY_TLS_MIN 12
9406@@ -185,6 +200,7 @@
9407 #endif
9408
9409 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
9410+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
9411 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
9412 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
9413 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
9414diff -urNp linux-3.0.7/arch/x86/include/asm/smp.h linux-3.0.7/arch/x86/include/asm/smp.h
9415--- linux-3.0.7/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
9416+++ linux-3.0.7/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
9417@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
9418 /* cpus sharing the last level cache: */
9419 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
9420 DECLARE_PER_CPU(u16, cpu_llc_id);
9421-DECLARE_PER_CPU(int, cpu_number);
9422+DECLARE_PER_CPU(unsigned int, cpu_number);
9423
9424 static inline struct cpumask *cpu_sibling_mask(int cpu)
9425 {
9426@@ -77,7 +77,7 @@ struct smp_ops {
9427
9428 void (*send_call_func_ipi)(const struct cpumask *mask);
9429 void (*send_call_func_single_ipi)(int cpu);
9430-};
9431+} __no_const;
9432
9433 /* Globals due to paravirt */
9434 extern void set_cpu_sibling_map(int cpu);
9435@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
9436 extern int safe_smp_processor_id(void);
9437
9438 #elif defined(CONFIG_X86_64_SMP)
9439-#define raw_smp_processor_id() (percpu_read(cpu_number))
9440-
9441-#define stack_smp_processor_id() \
9442-({ \
9443- struct thread_info *ti; \
9444- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9445- ti->cpu; \
9446-})
9447+#define raw_smp_processor_id() (percpu_read(cpu_number))
9448+#define stack_smp_processor_id() raw_smp_processor_id()
9449 #define safe_smp_processor_id() smp_processor_id()
9450
9451 #endif
9452diff -urNp linux-3.0.7/arch/x86/include/asm/spinlock.h linux-3.0.7/arch/x86/include/asm/spinlock.h
9453--- linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
9454+++ linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
9455@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
9456 static inline void arch_read_lock(arch_rwlock_t *rw)
9457 {
9458 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9459+
9460+#ifdef CONFIG_PAX_REFCOUNT
9461+ "jno 0f\n"
9462+ LOCK_PREFIX " addl $1,(%0)\n"
9463+ "int $4\n0:\n"
9464+ _ASM_EXTABLE(0b, 0b)
9465+#endif
9466+
9467 "jns 1f\n"
9468 "call __read_lock_failed\n\t"
9469 "1:\n"
9470@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
9471 static inline void arch_write_lock(arch_rwlock_t *rw)
9472 {
9473 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9474+
9475+#ifdef CONFIG_PAX_REFCOUNT
9476+ "jno 0f\n"
9477+ LOCK_PREFIX " addl %1,(%0)\n"
9478+ "int $4\n0:\n"
9479+ _ASM_EXTABLE(0b, 0b)
9480+#endif
9481+
9482 "jz 1f\n"
9483 "call __write_lock_failed\n\t"
9484 "1:\n"
9485@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9486
9487 static inline void arch_read_unlock(arch_rwlock_t *rw)
9488 {
9489- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9490+ asm volatile(LOCK_PREFIX "incl %0\n"
9491+
9492+#ifdef CONFIG_PAX_REFCOUNT
9493+ "jno 0f\n"
9494+ LOCK_PREFIX "decl %0\n"
9495+ "int $4\n0:\n"
9496+ _ASM_EXTABLE(0b, 0b)
9497+#endif
9498+
9499+ :"+m" (rw->lock) : : "memory");
9500 }
9501
9502 static inline void arch_write_unlock(arch_rwlock_t *rw)
9503 {
9504- asm volatile(LOCK_PREFIX "addl %1, %0"
9505+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9506+
9507+#ifdef CONFIG_PAX_REFCOUNT
9508+ "jno 0f\n"
9509+ LOCK_PREFIX "subl %1, %0\n"
9510+ "int $4\n0:\n"
9511+ _ASM_EXTABLE(0b, 0b)
9512+#endif
9513+
9514 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9515 }
9516
9517diff -urNp linux-3.0.7/arch/x86/include/asm/stackprotector.h linux-3.0.7/arch/x86/include/asm/stackprotector.h
9518--- linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9519+++ linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9520@@ -48,7 +48,7 @@
9521 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9522 */
9523 #define GDT_STACK_CANARY_INIT \
9524- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9525+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9526
9527 /*
9528 * Initialize the stackprotector canary value.
9529@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9530
9531 static inline void load_stack_canary_segment(void)
9532 {
9533-#ifdef CONFIG_X86_32
9534+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9535 asm volatile ("mov %0, %%gs" : : "r" (0));
9536 #endif
9537 }
9538diff -urNp linux-3.0.7/arch/x86/include/asm/stacktrace.h linux-3.0.7/arch/x86/include/asm/stacktrace.h
9539--- linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9540+++ linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9541@@ -11,28 +11,20 @@
9542
9543 extern int kstack_depth_to_print;
9544
9545-struct thread_info;
9546+struct task_struct;
9547 struct stacktrace_ops;
9548
9549-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9550- unsigned long *stack,
9551- unsigned long bp,
9552- const struct stacktrace_ops *ops,
9553- void *data,
9554- unsigned long *end,
9555- int *graph);
9556-
9557-extern unsigned long
9558-print_context_stack(struct thread_info *tinfo,
9559- unsigned long *stack, unsigned long bp,
9560- const struct stacktrace_ops *ops, void *data,
9561- unsigned long *end, int *graph);
9562-
9563-extern unsigned long
9564-print_context_stack_bp(struct thread_info *tinfo,
9565- unsigned long *stack, unsigned long bp,
9566- const struct stacktrace_ops *ops, void *data,
9567- unsigned long *end, int *graph);
9568+typedef unsigned long walk_stack_t(struct task_struct *task,
9569+ void *stack_start,
9570+ unsigned long *stack,
9571+ unsigned long bp,
9572+ const struct stacktrace_ops *ops,
9573+ void *data,
9574+ unsigned long *end,
9575+ int *graph);
9576+
9577+extern walk_stack_t print_context_stack;
9578+extern walk_stack_t print_context_stack_bp;
9579
9580 /* Generic stack tracer with callbacks */
9581
9582@@ -40,7 +32,7 @@ struct stacktrace_ops {
9583 void (*address)(void *data, unsigned long address, int reliable);
9584 /* On negative return stop dumping */
9585 int (*stack)(void *data, char *name);
9586- walk_stack_t walk_stack;
9587+ walk_stack_t *walk_stack;
9588 };
9589
9590 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9591diff -urNp linux-3.0.7/arch/x86/include/asm/sys_ia32.h linux-3.0.7/arch/x86/include/asm/sys_ia32.h
9592--- linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9593+++ linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9594@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9595 compat_sigset_t __user *, unsigned int);
9596 asmlinkage long sys32_alarm(unsigned int);
9597
9598-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9599+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9600 asmlinkage long sys32_sysfs(int, u32, u32);
9601
9602 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9603diff -urNp linux-3.0.7/arch/x86/include/asm/system.h linux-3.0.7/arch/x86/include/asm/system.h
9604--- linux-3.0.7/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9605+++ linux-3.0.7/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9606@@ -129,7 +129,7 @@ do { \
9607 "call __switch_to\n\t" \
9608 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9609 __switch_canary \
9610- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9611+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9612 "movq %%rax,%%rdi\n\t" \
9613 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9614 "jnz ret_from_fork\n\t" \
9615@@ -140,7 +140,7 @@ do { \
9616 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9617 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9618 [_tif_fork] "i" (_TIF_FORK), \
9619- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9620+ [thread_info] "m" (current_tinfo), \
9621 [current_task] "m" (current_task) \
9622 __switch_canary_iparam \
9623 : "memory", "cc" __EXTRA_CLOBBER)
9624@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9625 {
9626 unsigned long __limit;
9627 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9628- return __limit + 1;
9629+ return __limit;
9630 }
9631
9632 static inline void native_clts(void)
9633@@ -397,12 +397,12 @@ void enable_hlt(void);
9634
9635 void cpu_idle_wait(void);
9636
9637-extern unsigned long arch_align_stack(unsigned long sp);
9638+#define arch_align_stack(x) ((x) & ~0xfUL)
9639 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9640
9641 void default_idle(void);
9642
9643-void stop_this_cpu(void *dummy);
9644+void stop_this_cpu(void *dummy) __noreturn;
9645
9646 /*
9647 * Force strict CPU ordering.
9648diff -urNp linux-3.0.7/arch/x86/include/asm/thread_info.h linux-3.0.7/arch/x86/include/asm/thread_info.h
9649--- linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9650+++ linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9651@@ -10,6 +10,7 @@
9652 #include <linux/compiler.h>
9653 #include <asm/page.h>
9654 #include <asm/types.h>
9655+#include <asm/percpu.h>
9656
9657 /*
9658 * low level task data that entry.S needs immediate access to
9659@@ -24,7 +25,6 @@ struct exec_domain;
9660 #include <asm/atomic.h>
9661
9662 struct thread_info {
9663- struct task_struct *task; /* main task structure */
9664 struct exec_domain *exec_domain; /* execution domain */
9665 __u32 flags; /* low level flags */
9666 __u32 status; /* thread synchronous flags */
9667@@ -34,18 +34,12 @@ struct thread_info {
9668 mm_segment_t addr_limit;
9669 struct restart_block restart_block;
9670 void __user *sysenter_return;
9671-#ifdef CONFIG_X86_32
9672- unsigned long previous_esp; /* ESP of the previous stack in
9673- case of nested (IRQ) stacks
9674- */
9675- __u8 supervisor_stack[0];
9676-#endif
9677+ unsigned long lowest_stack;
9678 int uaccess_err;
9679 };
9680
9681-#define INIT_THREAD_INFO(tsk) \
9682+#define INIT_THREAD_INFO \
9683 { \
9684- .task = &tsk, \
9685 .exec_domain = &default_exec_domain, \
9686 .flags = 0, \
9687 .cpu = 0, \
9688@@ -56,7 +50,7 @@ struct thread_info {
9689 }, \
9690 }
9691
9692-#define init_thread_info (init_thread_union.thread_info)
9693+#define init_thread_info (init_thread_union.stack)
9694 #define init_stack (init_thread_union.stack)
9695
9696 #else /* !__ASSEMBLY__ */
9697@@ -170,6 +164,23 @@ struct thread_info {
9698 ret; \
9699 })
9700
9701+#ifdef __ASSEMBLY__
9702+/* how to get the thread information struct from ASM */
9703+#define GET_THREAD_INFO(reg) \
9704+ mov PER_CPU_VAR(current_tinfo), reg
9705+
9706+/* use this one if reg already contains %esp */
9707+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9708+#else
9709+/* how to get the thread information struct from C */
9710+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9711+
9712+static __always_inline struct thread_info *current_thread_info(void)
9713+{
9714+ return percpu_read_stable(current_tinfo);
9715+}
9716+#endif
9717+
9718 #ifdef CONFIG_X86_32
9719
9720 #define STACK_WARN (THREAD_SIZE/8)
9721@@ -180,35 +191,13 @@ struct thread_info {
9722 */
9723 #ifndef __ASSEMBLY__
9724
9725-
9726 /* how to get the current stack pointer from C */
9727 register unsigned long current_stack_pointer asm("esp") __used;
9728
9729-/* how to get the thread information struct from C */
9730-static inline struct thread_info *current_thread_info(void)
9731-{
9732- return (struct thread_info *)
9733- (current_stack_pointer & ~(THREAD_SIZE - 1));
9734-}
9735-
9736-#else /* !__ASSEMBLY__ */
9737-
9738-/* how to get the thread information struct from ASM */
9739-#define GET_THREAD_INFO(reg) \
9740- movl $-THREAD_SIZE, reg; \
9741- andl %esp, reg
9742-
9743-/* use this one if reg already contains %esp */
9744-#define GET_THREAD_INFO_WITH_ESP(reg) \
9745- andl $-THREAD_SIZE, reg
9746-
9747 #endif
9748
9749 #else /* X86_32 */
9750
9751-#include <asm/percpu.h>
9752-#define KERNEL_STACK_OFFSET (5*8)
9753-
9754 /*
9755 * macros/functions for gaining access to the thread information structure
9756 * preempt_count needs to be 1 initially, until the scheduler is functional.
9757@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9758 #ifndef __ASSEMBLY__
9759 DECLARE_PER_CPU(unsigned long, kernel_stack);
9760
9761-static inline struct thread_info *current_thread_info(void)
9762-{
9763- struct thread_info *ti;
9764- ti = (void *)(percpu_read_stable(kernel_stack) +
9765- KERNEL_STACK_OFFSET - THREAD_SIZE);
9766- return ti;
9767-}
9768-
9769-#else /* !__ASSEMBLY__ */
9770-
9771-/* how to get the thread information struct from ASM */
9772-#define GET_THREAD_INFO(reg) \
9773- movq PER_CPU_VAR(kernel_stack),reg ; \
9774- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9775-
9776+/* how to get the current stack pointer from C */
9777+register unsigned long current_stack_pointer asm("rsp") __used;
9778 #endif
9779
9780 #endif /* !X86_32 */
9781@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9782 extern void free_thread_info(struct thread_info *ti);
9783 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9784 #define arch_task_cache_init arch_task_cache_init
9785+
9786+#define __HAVE_THREAD_FUNCTIONS
9787+#define task_thread_info(task) (&(task)->tinfo)
9788+#define task_stack_page(task) ((task)->stack)
9789+#define setup_thread_stack(p, org) do {} while (0)
9790+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9791+
9792+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9793+extern struct task_struct *alloc_task_struct_node(int node);
9794+extern void free_task_struct(struct task_struct *);
9795+
9796 #endif
9797 #endif /* _ASM_X86_THREAD_INFO_H */
9798diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess.h linux-3.0.7/arch/x86/include/asm/uaccess.h
9799--- linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9800+++ linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9801@@ -7,12 +7,15 @@
9802 #include <linux/compiler.h>
9803 #include <linux/thread_info.h>
9804 #include <linux/string.h>
9805+#include <linux/sched.h>
9806 #include <asm/asm.h>
9807 #include <asm/page.h>
9808
9809 #define VERIFY_READ 0
9810 #define VERIFY_WRITE 1
9811
9812+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9813+
9814 /*
9815 * The fs value determines whether argument validity checking should be
9816 * performed or not. If get_fs() == USER_DS, checking is performed, with
9817@@ -28,7 +31,12 @@
9818
9819 #define get_ds() (KERNEL_DS)
9820 #define get_fs() (current_thread_info()->addr_limit)
9821+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9822+void __set_fs(mm_segment_t x);
9823+void set_fs(mm_segment_t x);
9824+#else
9825 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9826+#endif
9827
9828 #define segment_eq(a, b) ((a).seg == (b).seg)
9829
9830@@ -76,7 +84,33 @@
9831 * checks that the pointer is in the user space range - after calling
9832 * this function, memory access functions may still return -EFAULT.
9833 */
9834-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9835+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9836+#define access_ok(type, addr, size) \
9837+({ \
9838+ long __size = size; \
9839+ unsigned long __addr = (unsigned long)addr; \
9840+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9841+ unsigned long __end_ao = __addr + __size - 1; \
9842+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9843+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9844+ while(__addr_ao <= __end_ao) { \
9845+ char __c_ao; \
9846+ __addr_ao += PAGE_SIZE; \
9847+ if (__size > PAGE_SIZE) \
9848+ cond_resched(); \
9849+ if (__get_user(__c_ao, (char __user *)__addr)) \
9850+ break; \
9851+ if (type != VERIFY_WRITE) { \
9852+ __addr = __addr_ao; \
9853+ continue; \
9854+ } \
9855+ if (__put_user(__c_ao, (char __user *)__addr)) \
9856+ break; \
9857+ __addr = __addr_ao; \
9858+ } \
9859+ } \
9860+ __ret_ao; \
9861+})
9862
9863 /*
9864 * The exception table consists of pairs of addresses: the first is the
9865@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9866 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9867 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9868
9869-
9870+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871+#define __copyuser_seg "gs;"
9872+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9873+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9874+#else
9875+#define __copyuser_seg
9876+#define __COPYUSER_SET_ES
9877+#define __COPYUSER_RESTORE_ES
9878+#endif
9879
9880 #ifdef CONFIG_X86_32
9881 #define __put_user_asm_u64(x, addr, err, errret) \
9882- asm volatile("1: movl %%eax,0(%2)\n" \
9883- "2: movl %%edx,4(%2)\n" \
9884+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9885+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9886 "3:\n" \
9887 ".section .fixup,\"ax\"\n" \
9888 "4: movl %3,%0\n" \
9889@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9890 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9891
9892 #define __put_user_asm_ex_u64(x, addr) \
9893- asm volatile("1: movl %%eax,0(%1)\n" \
9894- "2: movl %%edx,4(%1)\n" \
9895+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9896+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9897 "3:\n" \
9898 _ASM_EXTABLE(1b, 2b - 1b) \
9899 _ASM_EXTABLE(2b, 3b - 2b) \
9900@@ -252,7 +294,7 @@ extern void __put_user_8(void);
9901 __typeof__(*(ptr)) __pu_val; \
9902 __chk_user_ptr(ptr); \
9903 might_fault(); \
9904- __pu_val = x; \
9905+ __pu_val = (x); \
9906 switch (sizeof(*(ptr))) { \
9907 case 1: \
9908 __put_user_x(1, __pu_val, ptr, __ret_pu); \
9909@@ -373,7 +415,7 @@ do { \
9910 } while (0)
9911
9912 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9913- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9914+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9915 "2:\n" \
9916 ".section .fixup,\"ax\"\n" \
9917 "3: mov %3,%0\n" \
9918@@ -381,7 +423,7 @@ do { \
9919 " jmp 2b\n" \
9920 ".previous\n" \
9921 _ASM_EXTABLE(1b, 3b) \
9922- : "=r" (err), ltype(x) \
9923+ : "=r" (err), ltype (x) \
9924 : "m" (__m(addr)), "i" (errret), "0" (err))
9925
9926 #define __get_user_size_ex(x, ptr, size) \
9927@@ -406,7 +448,7 @@ do { \
9928 } while (0)
9929
9930 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9931- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9932+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9933 "2:\n" \
9934 _ASM_EXTABLE(1b, 2b - 1b) \
9935 : ltype(x) : "m" (__m(addr)))
9936@@ -423,13 +465,24 @@ do { \
9937 int __gu_err; \
9938 unsigned long __gu_val; \
9939 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9940- (x) = (__force __typeof__(*(ptr)))__gu_val; \
9941+ (x) = (__typeof__(*(ptr)))__gu_val; \
9942 __gu_err; \
9943 })
9944
9945 /* FIXME: this hack is definitely wrong -AK */
9946 struct __large_struct { unsigned long buf[100]; };
9947-#define __m(x) (*(struct __large_struct __user *)(x))
9948+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9949+#define ____m(x) \
9950+({ \
9951+ unsigned long ____x = (unsigned long)(x); \
9952+ if (____x < PAX_USER_SHADOW_BASE) \
9953+ ____x += PAX_USER_SHADOW_BASE; \
9954+ (void __user *)____x; \
9955+})
9956+#else
9957+#define ____m(x) (x)
9958+#endif
9959+#define __m(x) (*(struct __large_struct __user *)____m(x))
9960
9961 /*
9962 * Tell gcc we read from memory instead of writing: this is because
9963@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9964 * aliasing issues.
9965 */
9966 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9967- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9968+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9969 "2:\n" \
9970 ".section .fixup,\"ax\"\n" \
9971 "3: mov %3,%0\n" \
9972@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9973 ".previous\n" \
9974 _ASM_EXTABLE(1b, 3b) \
9975 : "=r"(err) \
9976- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9977+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9978
9979 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9980- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9981+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9982 "2:\n" \
9983 _ASM_EXTABLE(1b, 2b - 1b) \
9984 : : ltype(x), "m" (__m(addr)))
9985@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9986 * On error, the variable @x is set to zero.
9987 */
9988
9989+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9990+#define __get_user(x, ptr) get_user((x), (ptr))
9991+#else
9992 #define __get_user(x, ptr) \
9993 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9994+#endif
9995
9996 /**
9997 * __put_user: - Write a simple value into user space, with less checking.
9998@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9999 * Returns zero on success, or -EFAULT on error.
10000 */
10001
10002+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10003+#define __put_user(x, ptr) put_user((x), (ptr))
10004+#else
10005 #define __put_user(x, ptr) \
10006 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10007+#endif
10008
10009 #define __get_user_unaligned __get_user
10010 #define __put_user_unaligned __put_user
10011@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10012 #define get_user_ex(x, ptr) do { \
10013 unsigned long __gue_val; \
10014 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10015- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10016+ (x) = (__typeof__(*(ptr)))__gue_val; \
10017 } while (0)
10018
10019 #ifdef CONFIG_X86_WP_WORKS_OK
10020diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_32.h linux-3.0.7/arch/x86/include/asm/uaccess_32.h
10021--- linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
10022+++ linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
10023@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
10024 static __always_inline unsigned long __must_check
10025 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10026 {
10027+ pax_track_stack();
10028+
10029+ if ((long)n < 0)
10030+ return n;
10031+
10032 if (__builtin_constant_p(n)) {
10033 unsigned long ret;
10034
10035@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
10036 return ret;
10037 }
10038 }
10039+ if (!__builtin_constant_p(n))
10040+ check_object_size(from, n, true);
10041 return __copy_to_user_ll(to, from, n);
10042 }
10043
10044@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
10045 __copy_to_user(void __user *to, const void *from, unsigned long n)
10046 {
10047 might_fault();
10048+
10049 return __copy_to_user_inatomic(to, from, n);
10050 }
10051
10052 static __always_inline unsigned long
10053 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10054 {
10055+ if ((long)n < 0)
10056+ return n;
10057+
10058 /* Avoid zeroing the tail if the copy fails..
10059 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10060 * but as the zeroing behaviour is only significant when n is not
10061@@ -137,6 +148,12 @@ static __always_inline unsigned long
10062 __copy_from_user(void *to, const void __user *from, unsigned long n)
10063 {
10064 might_fault();
10065+
10066+ pax_track_stack();
10067+
10068+ if ((long)n < 0)
10069+ return n;
10070+
10071 if (__builtin_constant_p(n)) {
10072 unsigned long ret;
10073
10074@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
10075 return ret;
10076 }
10077 }
10078+ if (!__builtin_constant_p(n))
10079+ check_object_size(to, n, false);
10080 return __copy_from_user_ll(to, from, n);
10081 }
10082
10083@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
10084 const void __user *from, unsigned long n)
10085 {
10086 might_fault();
10087+
10088+ if ((long)n < 0)
10089+ return n;
10090+
10091 if (__builtin_constant_p(n)) {
10092 unsigned long ret;
10093
10094@@ -181,15 +204,19 @@ static __always_inline unsigned long
10095 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10096 unsigned long n)
10097 {
10098- return __copy_from_user_ll_nocache_nozero(to, from, n);
10099-}
10100+ if ((long)n < 0)
10101+ return n;
10102
10103-unsigned long __must_check copy_to_user(void __user *to,
10104- const void *from, unsigned long n);
10105-unsigned long __must_check _copy_from_user(void *to,
10106- const void __user *from,
10107- unsigned long n);
10108+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10109+}
10110
10111+extern void copy_to_user_overflow(void)
10112+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10113+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10114+#else
10115+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10116+#endif
10117+;
10118
10119 extern void copy_from_user_overflow(void)
10120 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10121@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
10122 #endif
10123 ;
10124
10125-static inline unsigned long __must_check copy_from_user(void *to,
10126- const void __user *from,
10127- unsigned long n)
10128+/**
10129+ * copy_to_user: - Copy a block of data into user space.
10130+ * @to: Destination address, in user space.
10131+ * @from: Source address, in kernel space.
10132+ * @n: Number of bytes to copy.
10133+ *
10134+ * Context: User context only. This function may sleep.
10135+ *
10136+ * Copy data from kernel space to user space.
10137+ *
10138+ * Returns number of bytes that could not be copied.
10139+ * On success, this will be zero.
10140+ */
10141+static inline unsigned long __must_check
10142+copy_to_user(void __user *to, const void *from, unsigned long n)
10143+{
10144+ int sz = __compiletime_object_size(from);
10145+
10146+ if (unlikely(sz != -1 && sz < n))
10147+ copy_to_user_overflow();
10148+ else if (access_ok(VERIFY_WRITE, to, n))
10149+ n = __copy_to_user(to, from, n);
10150+ return n;
10151+}
10152+
10153+/**
10154+ * copy_from_user: - Copy a block of data from user space.
10155+ * @to: Destination address, in kernel space.
10156+ * @from: Source address, in user space.
10157+ * @n: Number of bytes to copy.
10158+ *
10159+ * Context: User context only. This function may sleep.
10160+ *
10161+ * Copy data from user space to kernel space.
10162+ *
10163+ * Returns number of bytes that could not be copied.
10164+ * On success, this will be zero.
10165+ *
10166+ * If some data could not be copied, this function will pad the copied
10167+ * data to the requested size using zero bytes.
10168+ */
10169+static inline unsigned long __must_check
10170+copy_from_user(void *to, const void __user *from, unsigned long n)
10171 {
10172 int sz = __compiletime_object_size(to);
10173
10174- if (likely(sz == -1 || sz >= n))
10175- n = _copy_from_user(to, from, n);
10176- else
10177+ if (unlikely(sz != -1 && sz < n))
10178 copy_from_user_overflow();
10179-
10180+ else if (access_ok(VERIFY_READ, from, n))
10181+ n = __copy_from_user(to, from, n);
10182+ else if ((long)n > 0) {
10183+ if (!__builtin_constant_p(n))
10184+ check_object_size(to, n, false);
10185+ memset(to, 0, n);
10186+ }
10187 return n;
10188 }
10189
10190diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_64.h linux-3.0.7/arch/x86/include/asm/uaccess_64.h
10191--- linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
10192+++ linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
10193@@ -10,6 +10,9 @@
10194 #include <asm/alternative.h>
10195 #include <asm/cpufeature.h>
10196 #include <asm/page.h>
10197+#include <asm/pgtable.h>
10198+
10199+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10200
10201 /*
10202 * Copy To/From Userspace
10203@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
10204 return ret;
10205 }
10206
10207-__must_check unsigned long
10208-_copy_to_user(void __user *to, const void *from, unsigned len);
10209-__must_check unsigned long
10210-_copy_from_user(void *to, const void __user *from, unsigned len);
10211+static __always_inline __must_check unsigned long
10212+__copy_to_user(void __user *to, const void *from, unsigned len);
10213+static __always_inline __must_check unsigned long
10214+__copy_from_user(void *to, const void __user *from, unsigned len);
10215 __must_check unsigned long
10216 copy_in_user(void __user *to, const void __user *from, unsigned len);
10217
10218 static inline unsigned long __must_check copy_from_user(void *to,
10219 const void __user *from,
10220- unsigned long n)
10221+ unsigned n)
10222 {
10223- int sz = __compiletime_object_size(to);
10224-
10225 might_fault();
10226- if (likely(sz == -1 || sz >= n))
10227- n = _copy_from_user(to, from, n);
10228-#ifdef CONFIG_DEBUG_VM
10229- else
10230- WARN(1, "Buffer overflow detected!\n");
10231-#endif
10232+
10233+ if (access_ok(VERIFY_READ, from, n))
10234+ n = __copy_from_user(to, from, n);
10235+ else if ((int)n > 0) {
10236+ if (!__builtin_constant_p(n))
10237+ check_object_size(to, n, false);
10238+ memset(to, 0, n);
10239+ }
10240 return n;
10241 }
10242
10243@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
10244 {
10245 might_fault();
10246
10247- return _copy_to_user(dst, src, size);
10248+ if (access_ok(VERIFY_WRITE, dst, size))
10249+ size = __copy_to_user(dst, src, size);
10250+ return size;
10251 }
10252
10253 static __always_inline __must_check
10254-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10255+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10256 {
10257- int ret = 0;
10258+ int sz = __compiletime_object_size(dst);
10259+ unsigned ret = 0;
10260
10261 might_fault();
10262- if (!__builtin_constant_p(size))
10263- return copy_user_generic(dst, (__force void *)src, size);
10264+
10265+ pax_track_stack();
10266+
10267+ if ((int)size < 0)
10268+ return size;
10269+
10270+#ifdef CONFIG_PAX_MEMORY_UDEREF
10271+ if (!__access_ok(VERIFY_READ, src, size))
10272+ return size;
10273+#endif
10274+
10275+ if (unlikely(sz != -1 && sz < size)) {
10276+#ifdef CONFIG_DEBUG_VM
10277+ WARN(1, "Buffer overflow detected!\n");
10278+#endif
10279+ return size;
10280+ }
10281+
10282+ if (!__builtin_constant_p(size)) {
10283+ check_object_size(dst, size, false);
10284+
10285+#ifdef CONFIG_PAX_MEMORY_UDEREF
10286+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10287+ src += PAX_USER_SHADOW_BASE;
10288+#endif
10289+
10290+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10291+ }
10292 switch (size) {
10293- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10294+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10295 ret, "b", "b", "=q", 1);
10296 return ret;
10297- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10298+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10299 ret, "w", "w", "=r", 2);
10300 return ret;
10301- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10302+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10303 ret, "l", "k", "=r", 4);
10304 return ret;
10305- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10306+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10307 ret, "q", "", "=r", 8);
10308 return ret;
10309 case 10:
10310- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10311+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10312 ret, "q", "", "=r", 10);
10313 if (unlikely(ret))
10314 return ret;
10315 __get_user_asm(*(u16 *)(8 + (char *)dst),
10316- (u16 __user *)(8 + (char __user *)src),
10317+ (const u16 __user *)(8 + (const char __user *)src),
10318 ret, "w", "w", "=r", 2);
10319 return ret;
10320 case 16:
10321- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10322+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10323 ret, "q", "", "=r", 16);
10324 if (unlikely(ret))
10325 return ret;
10326 __get_user_asm(*(u64 *)(8 + (char *)dst),
10327- (u64 __user *)(8 + (char __user *)src),
10328+ (const u64 __user *)(8 + (const char __user *)src),
10329 ret, "q", "", "=r", 8);
10330 return ret;
10331 default:
10332- return copy_user_generic(dst, (__force void *)src, size);
10333+
10334+#ifdef CONFIG_PAX_MEMORY_UDEREF
10335+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10336+ src += PAX_USER_SHADOW_BASE;
10337+#endif
10338+
10339+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10340 }
10341 }
10342
10343 static __always_inline __must_check
10344-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10345+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10346 {
10347- int ret = 0;
10348+ int sz = __compiletime_object_size(src);
10349+ unsigned ret = 0;
10350
10351 might_fault();
10352- if (!__builtin_constant_p(size))
10353- return copy_user_generic((__force void *)dst, src, size);
10354+
10355+ pax_track_stack();
10356+
10357+ if ((int)size < 0)
10358+ return size;
10359+
10360+#ifdef CONFIG_PAX_MEMORY_UDEREF
10361+ if (!__access_ok(VERIFY_WRITE, dst, size))
10362+ return size;
10363+#endif
10364+
10365+ if (unlikely(sz != -1 && sz < size)) {
10366+#ifdef CONFIG_DEBUG_VM
10367+ WARN(1, "Buffer overflow detected!\n");
10368+#endif
10369+ return size;
10370+ }
10371+
10372+ if (!__builtin_constant_p(size)) {
10373+ check_object_size(src, size, true);
10374+
10375+#ifdef CONFIG_PAX_MEMORY_UDEREF
10376+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10377+ dst += PAX_USER_SHADOW_BASE;
10378+#endif
10379+
10380+ return copy_user_generic((__force_kernel void *)dst, src, size);
10381+ }
10382 switch (size) {
10383- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10384+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10385 ret, "b", "b", "iq", 1);
10386 return ret;
10387- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10388+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10389 ret, "w", "w", "ir", 2);
10390 return ret;
10391- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10392+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10393 ret, "l", "k", "ir", 4);
10394 return ret;
10395- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10396+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10397 ret, "q", "", "er", 8);
10398 return ret;
10399 case 10:
10400- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10401+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10402 ret, "q", "", "er", 10);
10403 if (unlikely(ret))
10404 return ret;
10405 asm("":::"memory");
10406- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10407+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10408 ret, "w", "w", "ir", 2);
10409 return ret;
10410 case 16:
10411- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10412+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10413 ret, "q", "", "er", 16);
10414 if (unlikely(ret))
10415 return ret;
10416 asm("":::"memory");
10417- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10418+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10419 ret, "q", "", "er", 8);
10420 return ret;
10421 default:
10422- return copy_user_generic((__force void *)dst, src, size);
10423+
10424+#ifdef CONFIG_PAX_MEMORY_UDEREF
10425+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10426+ dst += PAX_USER_SHADOW_BASE;
10427+#endif
10428+
10429+ return copy_user_generic((__force_kernel void *)dst, src, size);
10430 }
10431 }
10432
10433 static __always_inline __must_check
10434-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10435+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10436 {
10437- int ret = 0;
10438+ unsigned ret = 0;
10439
10440 might_fault();
10441- if (!__builtin_constant_p(size))
10442- return copy_user_generic((__force void *)dst,
10443- (__force void *)src, size);
10444+
10445+ if ((int)size < 0)
10446+ return size;
10447+
10448+#ifdef CONFIG_PAX_MEMORY_UDEREF
10449+ if (!__access_ok(VERIFY_READ, src, size))
10450+ return size;
10451+ if (!__access_ok(VERIFY_WRITE, dst, size))
10452+ return size;
10453+#endif
10454+
10455+ if (!__builtin_constant_p(size)) {
10456+
10457+#ifdef CONFIG_PAX_MEMORY_UDEREF
10458+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10459+ src += PAX_USER_SHADOW_BASE;
10460+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10461+ dst += PAX_USER_SHADOW_BASE;
10462+#endif
10463+
10464+ return copy_user_generic((__force_kernel void *)dst,
10465+ (__force_kernel const void *)src, size);
10466+ }
10467 switch (size) {
10468 case 1: {
10469 u8 tmp;
10470- __get_user_asm(tmp, (u8 __user *)src,
10471+ __get_user_asm(tmp, (const u8 __user *)src,
10472 ret, "b", "b", "=q", 1);
10473 if (likely(!ret))
10474 __put_user_asm(tmp, (u8 __user *)dst,
10475@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
10476 }
10477 case 2: {
10478 u16 tmp;
10479- __get_user_asm(tmp, (u16 __user *)src,
10480+ __get_user_asm(tmp, (const u16 __user *)src,
10481 ret, "w", "w", "=r", 2);
10482 if (likely(!ret))
10483 __put_user_asm(tmp, (u16 __user *)dst,
10484@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
10485
10486 case 4: {
10487 u32 tmp;
10488- __get_user_asm(tmp, (u32 __user *)src,
10489+ __get_user_asm(tmp, (const u32 __user *)src,
10490 ret, "l", "k", "=r", 4);
10491 if (likely(!ret))
10492 __put_user_asm(tmp, (u32 __user *)dst,
10493@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
10494 }
10495 case 8: {
10496 u64 tmp;
10497- __get_user_asm(tmp, (u64 __user *)src,
10498+ __get_user_asm(tmp, (const u64 __user *)src,
10499 ret, "q", "", "=r", 8);
10500 if (likely(!ret))
10501 __put_user_asm(tmp, (u64 __user *)dst,
10502@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
10503 return ret;
10504 }
10505 default:
10506- return copy_user_generic((__force void *)dst,
10507- (__force void *)src, size);
10508+
10509+#ifdef CONFIG_PAX_MEMORY_UDEREF
10510+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10511+ src += PAX_USER_SHADOW_BASE;
10512+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10513+ dst += PAX_USER_SHADOW_BASE;
10514+#endif
10515+
10516+ return copy_user_generic((__force_kernel void *)dst,
10517+ (__force_kernel const void *)src, size);
10518 }
10519 }
10520
10521@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
10522 static __must_check __always_inline int
10523 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10524 {
10525- return copy_user_generic(dst, (__force const void *)src, size);
10526+ pax_track_stack();
10527+
10528+ if ((int)size < 0)
10529+ return size;
10530+
10531+#ifdef CONFIG_PAX_MEMORY_UDEREF
10532+ if (!__access_ok(VERIFY_READ, src, size))
10533+ return size;
10534+
10535+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10536+ src += PAX_USER_SHADOW_BASE;
10537+#endif
10538+
10539+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10540 }
10541
10542-static __must_check __always_inline int
10543+static __must_check __always_inline unsigned long
10544 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10545 {
10546- return copy_user_generic((__force void *)dst, src, size);
10547+ if ((int)size < 0)
10548+ return size;
10549+
10550+#ifdef CONFIG_PAX_MEMORY_UDEREF
10551+ if (!__access_ok(VERIFY_WRITE, dst, size))
10552+ return size;
10553+
10554+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10555+ dst += PAX_USER_SHADOW_BASE;
10556+#endif
10557+
10558+ return copy_user_generic((__force_kernel void *)dst, src, size);
10559 }
10560
10561-extern long __copy_user_nocache(void *dst, const void __user *src,
10562+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10563 unsigned size, int zerorest);
10564
10565-static inline int
10566-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10567+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10568 {
10569 might_sleep();
10570+
10571+ if ((int)size < 0)
10572+ return size;
10573+
10574+#ifdef CONFIG_PAX_MEMORY_UDEREF
10575+ if (!__access_ok(VERIFY_READ, src, size))
10576+ return size;
10577+#endif
10578+
10579 return __copy_user_nocache(dst, src, size, 1);
10580 }
10581
10582-static inline int
10583-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10584+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10585 unsigned size)
10586 {
10587+ if ((int)size < 0)
10588+ return size;
10589+
10590+#ifdef CONFIG_PAX_MEMORY_UDEREF
10591+ if (!__access_ok(VERIFY_READ, src, size))
10592+ return size;
10593+#endif
10594+
10595 return __copy_user_nocache(dst, src, size, 0);
10596 }
10597
10598-unsigned long
10599-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10600+extern unsigned long
10601+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
10602
10603 #endif /* _ASM_X86_UACCESS_64_H */
10604diff -urNp linux-3.0.7/arch/x86/include/asm/vdso.h linux-3.0.7/arch/x86/include/asm/vdso.h
10605--- linux-3.0.7/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10606+++ linux-3.0.7/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10607@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10608 #define VDSO32_SYMBOL(base, name) \
10609 ({ \
10610 extern const char VDSO32_##name[]; \
10611- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10612+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10613 })
10614 #endif
10615
10616diff -urNp linux-3.0.7/arch/x86/include/asm/x86_init.h linux-3.0.7/arch/x86/include/asm/x86_init.h
10617--- linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10618+++ linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10619@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10620 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10621 void (*find_smp_config)(void);
10622 void (*get_smp_config)(unsigned int early);
10623-};
10624+} __no_const;
10625
10626 /**
10627 * struct x86_init_resources - platform specific resource related ops
10628@@ -42,7 +42,7 @@ struct x86_init_resources {
10629 void (*probe_roms)(void);
10630 void (*reserve_resources)(void);
10631 char *(*memory_setup)(void);
10632-};
10633+} __no_const;
10634
10635 /**
10636 * struct x86_init_irqs - platform specific interrupt setup
10637@@ -55,7 +55,7 @@ struct x86_init_irqs {
10638 void (*pre_vector_init)(void);
10639 void (*intr_init)(void);
10640 void (*trap_init)(void);
10641-};
10642+} __no_const;
10643
10644 /**
10645 * struct x86_init_oem - oem platform specific customizing functions
10646@@ -65,7 +65,7 @@ struct x86_init_irqs {
10647 struct x86_init_oem {
10648 void (*arch_setup)(void);
10649 void (*banner)(void);
10650-};
10651+} __no_const;
10652
10653 /**
10654 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10655@@ -76,7 +76,7 @@ struct x86_init_oem {
10656 */
10657 struct x86_init_mapping {
10658 void (*pagetable_reserve)(u64 start, u64 end);
10659-};
10660+} __no_const;
10661
10662 /**
10663 * struct x86_init_paging - platform specific paging functions
10664@@ -86,7 +86,7 @@ struct x86_init_mapping {
10665 struct x86_init_paging {
10666 void (*pagetable_setup_start)(pgd_t *base);
10667 void (*pagetable_setup_done)(pgd_t *base);
10668-};
10669+} __no_const;
10670
10671 /**
10672 * struct x86_init_timers - platform specific timer setup
10673@@ -101,7 +101,7 @@ struct x86_init_timers {
10674 void (*tsc_pre_init)(void);
10675 void (*timer_init)(void);
10676 void (*wallclock_init)(void);
10677-};
10678+} __no_const;
10679
10680 /**
10681 * struct x86_init_iommu - platform specific iommu setup
10682@@ -109,7 +109,7 @@ struct x86_init_timers {
10683 */
10684 struct x86_init_iommu {
10685 int (*iommu_init)(void);
10686-};
10687+} __no_const;
10688
10689 /**
10690 * struct x86_init_pci - platform specific pci init functions
10691@@ -123,7 +123,7 @@ struct x86_init_pci {
10692 int (*init)(void);
10693 void (*init_irq)(void);
10694 void (*fixup_irqs)(void);
10695-};
10696+} __no_const;
10697
10698 /**
10699 * struct x86_init_ops - functions for platform specific setup
10700@@ -139,7 +139,7 @@ struct x86_init_ops {
10701 struct x86_init_timers timers;
10702 struct x86_init_iommu iommu;
10703 struct x86_init_pci pci;
10704-};
10705+} __no_const;
10706
10707 /**
10708 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10709@@ -147,7 +147,7 @@ struct x86_init_ops {
10710 */
10711 struct x86_cpuinit_ops {
10712 void (*setup_percpu_clockev)(void);
10713-};
10714+} __no_const;
10715
10716 /**
10717 * struct x86_platform_ops - platform specific runtime functions
10718@@ -166,7 +166,7 @@ struct x86_platform_ops {
10719 bool (*is_untracked_pat_range)(u64 start, u64 end);
10720 void (*nmi_init)(void);
10721 int (*i8042_detect)(void);
10722-};
10723+} __no_const;
10724
10725 struct pci_dev;
10726
10727@@ -174,7 +174,7 @@ struct x86_msi_ops {
10728 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10729 void (*teardown_msi_irq)(unsigned int irq);
10730 void (*teardown_msi_irqs)(struct pci_dev *dev);
10731-};
10732+} __no_const;
10733
10734 extern struct x86_init_ops x86_init;
10735 extern struct x86_cpuinit_ops x86_cpuinit;
10736diff -urNp linux-3.0.7/arch/x86/include/asm/xsave.h linux-3.0.7/arch/x86/include/asm/xsave.h
10737--- linux-3.0.7/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10738+++ linux-3.0.7/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10739@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10740 {
10741 int err;
10742
10743+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10744+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10745+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10746+#endif
10747+
10748 /*
10749 * Clear the xsave header first, so that reserved fields are
10750 * initialized to zero.
10751@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10752 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10753 {
10754 int err;
10755- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10756+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10757 u32 lmask = mask;
10758 u32 hmask = mask >> 32;
10759
10760+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10761+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10762+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10763+#endif
10764+
10765 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10766 "2:\n"
10767 ".section .fixup,\"ax\"\n"
10768diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile
10769--- linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10770+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10771@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10772 $(call cc-option, -fno-stack-protector) \
10773 $(call cc-option, -mpreferred-stack-boundary=2)
10774 KBUILD_CFLAGS += $(call cc-option, -m32)
10775+ifdef CONSTIFY_PLUGIN
10776+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10777+endif
10778 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10779 GCOV_PROFILE := n
10780
10781diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S
10782--- linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10783+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10784@@ -108,6 +108,9 @@ wakeup_code:
10785 /* Do any other stuff... */
10786
10787 #ifndef CONFIG_64BIT
10788+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10789+ call verify_cpu
10790+
10791 /* This could also be done in C code... */
10792 movl pmode_cr3, %eax
10793 movl %eax, %cr3
10794@@ -131,6 +134,7 @@ wakeup_code:
10795 movl pmode_cr0, %eax
10796 movl %eax, %cr0
10797 jmp pmode_return
10798+# include "../../verify_cpu.S"
10799 #else
10800 pushw $0
10801 pushw trampoline_segment
10802diff -urNp linux-3.0.7/arch/x86/kernel/acpi/sleep.c linux-3.0.7/arch/x86/kernel/acpi/sleep.c
10803--- linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10804+++ linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10805@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10806 header->trampoline_segment = trampoline_address() >> 4;
10807 #ifdef CONFIG_SMP
10808 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10809+
10810+ pax_open_kernel();
10811 early_gdt_descr.address =
10812 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10813+ pax_close_kernel();
10814+
10815 initial_gs = per_cpu_offset(smp_processor_id());
10816 #endif
10817 initial_code = (unsigned long)wakeup_long64;
10818diff -urNp linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S
10819--- linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10820+++ linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10821@@ -30,13 +30,11 @@ wakeup_pmode_return:
10822 # and restore the stack ... but you need gdt for this to work
10823 movl saved_context_esp, %esp
10824
10825- movl %cs:saved_magic, %eax
10826- cmpl $0x12345678, %eax
10827+ cmpl $0x12345678, saved_magic
10828 jne bogus_magic
10829
10830 # jump to place where we left off
10831- movl saved_eip, %eax
10832- jmp *%eax
10833+ jmp *(saved_eip)
10834
10835 bogus_magic:
10836 jmp bogus_magic
10837diff -urNp linux-3.0.7/arch/x86/kernel/alternative.c linux-3.0.7/arch/x86/kernel/alternative.c
10838--- linux-3.0.7/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10839+++ linux-3.0.7/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10840@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10841 if (!*poff || ptr < text || ptr >= text_end)
10842 continue;
10843 /* turn DS segment override prefix into lock prefix */
10844- if (*ptr == 0x3e)
10845+ if (*ktla_ktva(ptr) == 0x3e)
10846 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10847 };
10848 mutex_unlock(&text_mutex);
10849@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10850 if (!*poff || ptr < text || ptr >= text_end)
10851 continue;
10852 /* turn lock prefix into DS segment override prefix */
10853- if (*ptr == 0xf0)
10854+ if (*ktla_ktva(ptr) == 0xf0)
10855 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10856 };
10857 mutex_unlock(&text_mutex);
10858@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10859
10860 BUG_ON(p->len > MAX_PATCH_LEN);
10861 /* prep the buffer with the original instructions */
10862- memcpy(insnbuf, p->instr, p->len);
10863+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10864 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10865 (unsigned long)p->instr, p->len);
10866
10867@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10868 if (smp_alt_once)
10869 free_init_pages("SMP alternatives",
10870 (unsigned long)__smp_locks,
10871- (unsigned long)__smp_locks_end);
10872+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10873
10874 restart_nmi();
10875 }
10876@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10877 * instructions. And on the local CPU you need to be protected again NMI or MCE
10878 * handlers seeing an inconsistent instruction while you patch.
10879 */
10880-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10881+void *__kprobes text_poke_early(void *addr, const void *opcode,
10882 size_t len)
10883 {
10884 unsigned long flags;
10885 local_irq_save(flags);
10886- memcpy(addr, opcode, len);
10887+
10888+ pax_open_kernel();
10889+ memcpy(ktla_ktva(addr), opcode, len);
10890 sync_core();
10891+ pax_close_kernel();
10892+
10893 local_irq_restore(flags);
10894 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10895 that causes hangs on some VIA CPUs. */
10896@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10897 */
10898 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10899 {
10900- unsigned long flags;
10901- char *vaddr;
10902+ unsigned char *vaddr = ktla_ktva(addr);
10903 struct page *pages[2];
10904- int i;
10905+ size_t i;
10906
10907 if (!core_kernel_text((unsigned long)addr)) {
10908- pages[0] = vmalloc_to_page(addr);
10909- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10910+ pages[0] = vmalloc_to_page(vaddr);
10911+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10912 } else {
10913- pages[0] = virt_to_page(addr);
10914+ pages[0] = virt_to_page(vaddr);
10915 WARN_ON(!PageReserved(pages[0]));
10916- pages[1] = virt_to_page(addr + PAGE_SIZE);
10917+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10918 }
10919 BUG_ON(!pages[0]);
10920- local_irq_save(flags);
10921- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10922- if (pages[1])
10923- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10924- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10925- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10926- clear_fixmap(FIX_TEXT_POKE0);
10927- if (pages[1])
10928- clear_fixmap(FIX_TEXT_POKE1);
10929- local_flush_tlb();
10930- sync_core();
10931- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10932- that causes hangs on some VIA CPUs. */
10933+ text_poke_early(addr, opcode, len);
10934 for (i = 0; i < len; i++)
10935- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10936- local_irq_restore(flags);
10937+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10938 return addr;
10939 }
10940
10941diff -urNp linux-3.0.7/arch/x86/kernel/apic/apic.c linux-3.0.7/arch/x86/kernel/apic/apic.c
10942--- linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10943+++ linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10944@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10945 /*
10946 * Debug level, exported for io_apic.c
10947 */
10948-unsigned int apic_verbosity;
10949+int apic_verbosity;
10950
10951 int pic_mode;
10952
10953@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10954 apic_write(APIC_ESR, 0);
10955 v1 = apic_read(APIC_ESR);
10956 ack_APIC_irq();
10957- atomic_inc(&irq_err_count);
10958+ atomic_inc_unchecked(&irq_err_count);
10959
10960 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10961 smp_processor_id(), v0 , v1);
10962@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10963 u16 *bios_cpu_apicid;
10964 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10965
10966+ pax_track_stack();
10967+
10968 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10969 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10970
10971diff -urNp linux-3.0.7/arch/x86/kernel/apic/io_apic.c linux-3.0.7/arch/x86/kernel/apic/io_apic.c
10972--- linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10973+++ linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10974@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10975 }
10976 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10977
10978-void lock_vector_lock(void)
10979+void lock_vector_lock(void) __acquires(vector_lock)
10980 {
10981 /* Used to the online set of cpus does not change
10982 * during assign_irq_vector.
10983@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10984 raw_spin_lock(&vector_lock);
10985 }
10986
10987-void unlock_vector_lock(void)
10988+void unlock_vector_lock(void) __releases(vector_lock)
10989 {
10990 raw_spin_unlock(&vector_lock);
10991 }
10992@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10993 ack_APIC_irq();
10994 }
10995
10996-atomic_t irq_mis_count;
10997+atomic_unchecked_t irq_mis_count;
10998
10999 /*
11000 * IO-APIC versions below 0x20 don't support EOI register.
11001@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
11002 * at the cpu.
11003 */
11004 if (!(v & (1 << (i & 0x1f)))) {
11005- atomic_inc(&irq_mis_count);
11006+ atomic_inc_unchecked(&irq_mis_count);
11007
11008 eoi_ioapic_irq(irq, cfg);
11009 }
11010diff -urNp linux-3.0.7/arch/x86/kernel/apm_32.c linux-3.0.7/arch/x86/kernel/apm_32.c
11011--- linux-3.0.7/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
11012+++ linux-3.0.7/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
11013@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11014 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11015 * even though they are called in protected mode.
11016 */
11017-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11018+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11019 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11020
11021 static const char driver_version[] = "1.16ac"; /* no spaces */
11022@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11023 BUG_ON(cpu != 0);
11024 gdt = get_cpu_gdt_table(cpu);
11025 save_desc_40 = gdt[0x40 / 8];
11026+
11027+ pax_open_kernel();
11028 gdt[0x40 / 8] = bad_bios_desc;
11029+ pax_close_kernel();
11030
11031 apm_irq_save(flags);
11032 APM_DO_SAVE_SEGS;
11033@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11034 &call->esi);
11035 APM_DO_RESTORE_SEGS;
11036 apm_irq_restore(flags);
11037+
11038+ pax_open_kernel();
11039 gdt[0x40 / 8] = save_desc_40;
11040+ pax_close_kernel();
11041+
11042 put_cpu();
11043
11044 return call->eax & 0xff;
11045@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
11046 BUG_ON(cpu != 0);
11047 gdt = get_cpu_gdt_table(cpu);
11048 save_desc_40 = gdt[0x40 / 8];
11049+
11050+ pax_open_kernel();
11051 gdt[0x40 / 8] = bad_bios_desc;
11052+ pax_close_kernel();
11053
11054 apm_irq_save(flags);
11055 APM_DO_SAVE_SEGS;
11056@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
11057 &call->eax);
11058 APM_DO_RESTORE_SEGS;
11059 apm_irq_restore(flags);
11060+
11061+ pax_open_kernel();
11062 gdt[0x40 / 8] = save_desc_40;
11063+ pax_close_kernel();
11064+
11065 put_cpu();
11066 return error;
11067 }
11068@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11069 * code to that CPU.
11070 */
11071 gdt = get_cpu_gdt_table(0);
11072+
11073+ pax_open_kernel();
11074 set_desc_base(&gdt[APM_CS >> 3],
11075 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11076 set_desc_base(&gdt[APM_CS_16 >> 3],
11077 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11078 set_desc_base(&gdt[APM_DS >> 3],
11079 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11080+ pax_close_kernel();
11081
11082 proc_create("apm", 0, NULL, &apm_file_ops);
11083
11084diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets.c linux-3.0.7/arch/x86/kernel/asm-offsets.c
11085--- linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
11086+++ linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
11087@@ -33,6 +33,8 @@ void common(void) {
11088 OFFSET(TI_status, thread_info, status);
11089 OFFSET(TI_addr_limit, thread_info, addr_limit);
11090 OFFSET(TI_preempt_count, thread_info, preempt_count);
11091+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11092+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11093
11094 BLANK();
11095 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11096@@ -53,8 +55,26 @@ void common(void) {
11097 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11098 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11099 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11100+
11101+#ifdef CONFIG_PAX_KERNEXEC
11102+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11103+#endif
11104+
11105+#ifdef CONFIG_PAX_MEMORY_UDEREF
11106+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11107+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11108+#ifdef CONFIG_X86_64
11109+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11110+#endif
11111 #endif
11112
11113+#endif
11114+
11115+ BLANK();
11116+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11117+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11118+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11119+
11120 #ifdef CONFIG_XEN
11121 BLANK();
11122 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11123diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets_64.c linux-3.0.7/arch/x86/kernel/asm-offsets_64.c
11124--- linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
11125+++ linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
11126@@ -69,6 +69,7 @@ int main(void)
11127 BLANK();
11128 #undef ENTRY
11129
11130+ DEFINE(TSS_size, sizeof(struct tss_struct));
11131 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11132 BLANK();
11133
11134diff -urNp linux-3.0.7/arch/x86/kernel/cpu/Makefile linux-3.0.7/arch/x86/kernel/cpu/Makefile
11135--- linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
11136+++ linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
11137@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11138 CFLAGS_REMOVE_perf_event.o = -pg
11139 endif
11140
11141-# Make sure load_percpu_segment has no stackprotector
11142-nostackp := $(call cc-option, -fno-stack-protector)
11143-CFLAGS_common.o := $(nostackp)
11144-
11145 obj-y := intel_cacheinfo.o scattered.o topology.o
11146 obj-y += proc.o capflags.o powerflags.o common.o
11147 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11148diff -urNp linux-3.0.7/arch/x86/kernel/cpu/amd.c linux-3.0.7/arch/x86/kernel/cpu/amd.c
11149--- linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
11150+++ linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
11151@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
11152 unsigned int size)
11153 {
11154 /* AMD errata T13 (order #21922) */
11155- if ((c->x86 == 6)) {
11156+ if (c->x86 == 6) {
11157 /* Duron Rev A0 */
11158 if (c->x86_model == 3 && c->x86_mask == 0)
11159 size = 64;
11160diff -urNp linux-3.0.7/arch/x86/kernel/cpu/common.c linux-3.0.7/arch/x86/kernel/cpu/common.c
11161--- linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
11162+++ linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
11163@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11164
11165 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11166
11167-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11168-#ifdef CONFIG_X86_64
11169- /*
11170- * We need valid kernel segments for data and code in long mode too
11171- * IRET will check the segment types kkeil 2000/10/28
11172- * Also sysret mandates a special GDT layout
11173- *
11174- * TLS descriptors are currently at a different place compared to i386.
11175- * Hopefully nobody expects them at a fixed place (Wine?)
11176- */
11177- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11178- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11179- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11180- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11181- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11182- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11183-#else
11184- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11185- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11186- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11187- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11188- /*
11189- * Segments used for calling PnP BIOS have byte granularity.
11190- * They code segments and data segments have fixed 64k limits,
11191- * the transfer segment sizes are set at run time.
11192- */
11193- /* 32-bit code */
11194- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11195- /* 16-bit code */
11196- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11197- /* 16-bit data */
11198- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11199- /* 16-bit data */
11200- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11201- /* 16-bit data */
11202- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11203- /*
11204- * The APM segments have byte granularity and their bases
11205- * are set at run time. All have 64k limits.
11206- */
11207- /* 32-bit code */
11208- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11209- /* 16-bit code */
11210- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11211- /* data */
11212- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11213-
11214- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11215- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11216- GDT_STACK_CANARY_INIT
11217-#endif
11218-} };
11219-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11220-
11221 static int __init x86_xsave_setup(char *s)
11222 {
11223 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11224@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11225 {
11226 struct desc_ptr gdt_descr;
11227
11228- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11229+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11230 gdt_descr.size = GDT_SIZE - 1;
11231 load_gdt(&gdt_descr);
11232 /* Reload the per-cpu base */
11233@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
11234 /* Filter out anything that depends on CPUID levels we don't have */
11235 filter_cpuid_features(c, true);
11236
11237+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11238+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11239+#endif
11240+
11241 /* If the model name is still unset, do table lookup. */
11242 if (!c->x86_model_id[0]) {
11243 const char *p;
11244@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
11245 }
11246 __setup("clearcpuid=", setup_disablecpuid);
11247
11248+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11249+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11250+
11251 #ifdef CONFIG_X86_64
11252 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11253
11254@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11255 EXPORT_PER_CPU_SYMBOL(current_task);
11256
11257 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11258- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11259+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11260 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11261
11262 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11263@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
11264 {
11265 memset(regs, 0, sizeof(struct pt_regs));
11266 regs->fs = __KERNEL_PERCPU;
11267- regs->gs = __KERNEL_STACK_CANARY;
11268+ savesegment(gs, regs->gs);
11269
11270 return regs;
11271 }
11272@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11273 int i;
11274
11275 cpu = stack_smp_processor_id();
11276- t = &per_cpu(init_tss, cpu);
11277+ t = init_tss + cpu;
11278 oist = &per_cpu(orig_ist, cpu);
11279
11280 #ifdef CONFIG_NUMA
11281@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11282 switch_to_new_gdt(cpu);
11283 loadsegment(fs, 0);
11284
11285- load_idt((const struct desc_ptr *)&idt_descr);
11286+ load_idt(&idt_descr);
11287
11288 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11289 syscall_init();
11290@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11291 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11292 barrier();
11293
11294- x86_configure_nx();
11295 if (cpu != 0)
11296 enable_x2apic();
11297
11298@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
11299 {
11300 int cpu = smp_processor_id();
11301 struct task_struct *curr = current;
11302- struct tss_struct *t = &per_cpu(init_tss, cpu);
11303+ struct tss_struct *t = init_tss + cpu;
11304 struct thread_struct *thread = &curr->thread;
11305
11306 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11307diff -urNp linux-3.0.7/arch/x86/kernel/cpu/intel.c linux-3.0.7/arch/x86/kernel/cpu/intel.c
11308--- linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
11309+++ linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
11310@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11311 * Update the IDT descriptor and reload the IDT so that
11312 * it uses the read-only mapped virtual address.
11313 */
11314- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11315+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11316 load_idt(&idt_descr);
11317 }
11318 #endif
11319diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c
11320--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11321+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11322@@ -215,7 +215,9 @@ static int inject_init(void)
11323 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11324 return -ENOMEM;
11325 printk(KERN_INFO "Machine check injector initialized\n");
11326- mce_chrdev_ops.write = mce_write;
11327+ pax_open_kernel();
11328+ *(void **)&mce_chrdev_ops.write = mce_write;
11329+ pax_close_kernel();
11330 register_die_notifier(&mce_raise_nb);
11331 return 0;
11332 }
11333diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c
11334--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
11335+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
11336@@ -46,6 +46,7 @@
11337 #include <asm/ipi.h>
11338 #include <asm/mce.h>
11339 #include <asm/msr.h>
11340+#include <asm/local.h>
11341
11342 #include "mce-internal.h"
11343
11344@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
11345 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11346 m->cs, m->ip);
11347
11348- if (m->cs == __KERNEL_CS)
11349+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11350 print_symbol("{%s}", m->ip);
11351 pr_cont("\n");
11352 }
11353@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
11354
11355 #define PANIC_TIMEOUT 5 /* 5 seconds */
11356
11357-static atomic_t mce_paniced;
11358+static atomic_unchecked_t mce_paniced;
11359
11360 static int fake_panic;
11361-static atomic_t mce_fake_paniced;
11362+static atomic_unchecked_t mce_fake_paniced;
11363
11364 /* Panic in progress. Enable interrupts and wait for final IPI */
11365 static void wait_for_panic(void)
11366@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11367 /*
11368 * Make sure only one CPU runs in machine check panic
11369 */
11370- if (atomic_inc_return(&mce_paniced) > 1)
11371+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11372 wait_for_panic();
11373 barrier();
11374
11375@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11376 console_verbose();
11377 } else {
11378 /* Don't log too much for fake panic */
11379- if (atomic_inc_return(&mce_fake_paniced) > 1)
11380+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11381 return;
11382 }
11383 /* First print corrected ones that are still unlogged */
11384@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11385 * might have been modified by someone else.
11386 */
11387 rmb();
11388- if (atomic_read(&mce_paniced))
11389+ if (atomic_read_unchecked(&mce_paniced))
11390 wait_for_panic();
11391 if (!monarch_timeout)
11392 goto out;
11393@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11394 */
11395
11396 static DEFINE_SPINLOCK(mce_state_lock);
11397-static int open_count; /* #times opened */
11398+static local_t open_count; /* #times opened */
11399 static int open_exclu; /* already open exclusive? */
11400
11401 static int mce_open(struct inode *inode, struct file *file)
11402 {
11403 spin_lock(&mce_state_lock);
11404
11405- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11406+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11407 spin_unlock(&mce_state_lock);
11408
11409 return -EBUSY;
11410@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11411
11412 if (file->f_flags & O_EXCL)
11413 open_exclu = 1;
11414- open_count++;
11415+ local_inc(&open_count);
11416
11417 spin_unlock(&mce_state_lock);
11418
11419@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11420 {
11421 spin_lock(&mce_state_lock);
11422
11423- open_count--;
11424+ local_dec(&open_count);
11425 open_exclu = 0;
11426
11427 spin_unlock(&mce_state_lock);
11428@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11429 static void mce_reset(void)
11430 {
11431 cpu_missing = 0;
11432- atomic_set(&mce_fake_paniced, 0);
11433+ atomic_set_unchecked(&mce_fake_paniced, 0);
11434 atomic_set(&mce_executing, 0);
11435 atomic_set(&mce_callin, 0);
11436 atomic_set(&global_nwo, 0);
11437diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c
11438--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11439+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11440@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11441 u64 size_or_mask, size_and_mask;
11442 static bool mtrr_aps_delayed_init;
11443
11444-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11445+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11446
11447 const struct mtrr_ops *mtrr_if;
11448
11449diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h
11450--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11451+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11452@@ -25,7 +25,7 @@ struct mtrr_ops {
11453 int (*validate_add_page)(unsigned long base, unsigned long size,
11454 unsigned int type);
11455 int (*have_wrcomb)(void);
11456-};
11457+} __do_const;
11458
11459 extern int generic_get_free_region(unsigned long base, unsigned long size,
11460 int replace_reg);
11461diff -urNp linux-3.0.7/arch/x86/kernel/cpu/perf_event.c linux-3.0.7/arch/x86/kernel/cpu/perf_event.c
11462--- linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:54:53.000000000 -0400
11463+++ linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:55:27.000000000 -0400
11464@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11465 int i, j, w, wmax, num = 0;
11466 struct hw_perf_event *hwc;
11467
11468+ pax_track_stack();
11469+
11470 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11471
11472 for (i = 0; i < n; i++) {
11473@@ -1875,7 +1877,7 @@ perf_callchain_user(struct perf_callchai
11474 break;
11475
11476 perf_callchain_store(entry, frame.return_address);
11477- fp = frame.next_frame;
11478+ fp = (const void __force_user *)frame.next_frame;
11479 }
11480 }
11481
11482diff -urNp linux-3.0.7/arch/x86/kernel/crash.c linux-3.0.7/arch/x86/kernel/crash.c
11483--- linux-3.0.7/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11484+++ linux-3.0.7/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11485@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11486 regs = args->regs;
11487
11488 #ifdef CONFIG_X86_32
11489- if (!user_mode_vm(regs)) {
11490+ if (!user_mode(regs)) {
11491 crash_fixup_ss_esp(&fixed_regs, regs);
11492 regs = &fixed_regs;
11493 }
11494diff -urNp linux-3.0.7/arch/x86/kernel/doublefault_32.c linux-3.0.7/arch/x86/kernel/doublefault_32.c
11495--- linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11496+++ linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11497@@ -11,7 +11,7 @@
11498
11499 #define DOUBLEFAULT_STACKSIZE (1024)
11500 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11501-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11502+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11503
11504 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11505
11506@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11507 unsigned long gdt, tss;
11508
11509 store_gdt(&gdt_desc);
11510- gdt = gdt_desc.address;
11511+ gdt = (unsigned long)gdt_desc.address;
11512
11513 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11514
11515@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11516 /* 0x2 bit is always set */
11517 .flags = X86_EFLAGS_SF | 0x2,
11518 .sp = STACK_START,
11519- .es = __USER_DS,
11520+ .es = __KERNEL_DS,
11521 .cs = __KERNEL_CS,
11522 .ss = __KERNEL_DS,
11523- .ds = __USER_DS,
11524+ .ds = __KERNEL_DS,
11525 .fs = __KERNEL_PERCPU,
11526
11527 .__cr3 = __pa_nodebug(swapper_pg_dir),
11528diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack.c linux-3.0.7/arch/x86/kernel/dumpstack.c
11529--- linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11530+++ linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11531@@ -2,6 +2,9 @@
11532 * Copyright (C) 1991, 1992 Linus Torvalds
11533 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11534 */
11535+#ifdef CONFIG_GRKERNSEC_HIDESYM
11536+#define __INCLUDED_BY_HIDESYM 1
11537+#endif
11538 #include <linux/kallsyms.h>
11539 #include <linux/kprobes.h>
11540 #include <linux/uaccess.h>
11541@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11542 static void
11543 print_ftrace_graph_addr(unsigned long addr, void *data,
11544 const struct stacktrace_ops *ops,
11545- struct thread_info *tinfo, int *graph)
11546+ struct task_struct *task, int *graph)
11547 {
11548- struct task_struct *task = tinfo->task;
11549 unsigned long ret_addr;
11550 int index = task->curr_ret_stack;
11551
11552@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11553 static inline void
11554 print_ftrace_graph_addr(unsigned long addr, void *data,
11555 const struct stacktrace_ops *ops,
11556- struct thread_info *tinfo, int *graph)
11557+ struct task_struct *task, int *graph)
11558 { }
11559 #endif
11560
11561@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11562 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11563 */
11564
11565-static inline int valid_stack_ptr(struct thread_info *tinfo,
11566- void *p, unsigned int size, void *end)
11567+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11568 {
11569- void *t = tinfo;
11570 if (end) {
11571 if (p < end && p >= (end-THREAD_SIZE))
11572 return 1;
11573@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11574 }
11575
11576 unsigned long
11577-print_context_stack(struct thread_info *tinfo,
11578+print_context_stack(struct task_struct *task, void *stack_start,
11579 unsigned long *stack, unsigned long bp,
11580 const struct stacktrace_ops *ops, void *data,
11581 unsigned long *end, int *graph)
11582 {
11583 struct stack_frame *frame = (struct stack_frame *)bp;
11584
11585- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11586+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11587 unsigned long addr;
11588
11589 addr = *stack;
11590@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11591 } else {
11592 ops->address(data, addr, 0);
11593 }
11594- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11595+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11596 }
11597 stack++;
11598 }
11599@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11600 EXPORT_SYMBOL_GPL(print_context_stack);
11601
11602 unsigned long
11603-print_context_stack_bp(struct thread_info *tinfo,
11604+print_context_stack_bp(struct task_struct *task, void *stack_start,
11605 unsigned long *stack, unsigned long bp,
11606 const struct stacktrace_ops *ops, void *data,
11607 unsigned long *end, int *graph)
11608@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11609 struct stack_frame *frame = (struct stack_frame *)bp;
11610 unsigned long *ret_addr = &frame->return_address;
11611
11612- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11613+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11614 unsigned long addr = *ret_addr;
11615
11616 if (!__kernel_text_address(addr))
11617@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11618 ops->address(data, addr, 1);
11619 frame = frame->next_frame;
11620 ret_addr = &frame->return_address;
11621- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11622+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11623 }
11624
11625 return (unsigned long)frame;
11626@@ -186,7 +186,7 @@ void dump_stack(void)
11627
11628 bp = stack_frame(current, NULL);
11629 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11630- current->pid, current->comm, print_tainted(),
11631+ task_pid_nr(current), current->comm, print_tainted(),
11632 init_utsname()->release,
11633 (int)strcspn(init_utsname()->version, " "),
11634 init_utsname()->version);
11635@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11636 }
11637 EXPORT_SYMBOL_GPL(oops_begin);
11638
11639+extern void gr_handle_kernel_exploit(void);
11640+
11641 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11642 {
11643 if (regs && kexec_should_crash(current))
11644@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11645 panic("Fatal exception in interrupt");
11646 if (panic_on_oops)
11647 panic("Fatal exception");
11648- do_exit(signr);
11649+
11650+ gr_handle_kernel_exploit();
11651+
11652+ do_group_exit(signr);
11653 }
11654
11655 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11656@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11657
11658 show_registers(regs);
11659 #ifdef CONFIG_X86_32
11660- if (user_mode_vm(regs)) {
11661+ if (user_mode(regs)) {
11662 sp = regs->sp;
11663 ss = regs->ss & 0xffff;
11664 } else {
11665@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11666 unsigned long flags = oops_begin();
11667 int sig = SIGSEGV;
11668
11669- if (!user_mode_vm(regs))
11670+ if (!user_mode(regs))
11671 report_bug(regs->ip, regs);
11672
11673 if (__die(str, regs, err))
11674diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_32.c linux-3.0.7/arch/x86/kernel/dumpstack_32.c
11675--- linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11676+++ linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11677@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11678 bp = stack_frame(task, regs);
11679
11680 for (;;) {
11681- struct thread_info *context;
11682+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11683
11684- context = (struct thread_info *)
11685- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11686- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11687+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11688
11689- stack = (unsigned long *)context->previous_esp;
11690- if (!stack)
11691+ if (stack_start == task_stack_page(task))
11692 break;
11693+ stack = *(unsigned long **)stack_start;
11694 if (ops->stack(data, "IRQ") < 0)
11695 break;
11696 touch_nmi_watchdog();
11697@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11698 * When in-kernel, we also print out the stack and code at the
11699 * time of the fault..
11700 */
11701- if (!user_mode_vm(regs)) {
11702+ if (!user_mode(regs)) {
11703 unsigned int code_prologue = code_bytes * 43 / 64;
11704 unsigned int code_len = code_bytes;
11705 unsigned char c;
11706 u8 *ip;
11707+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11708
11709 printk(KERN_EMERG "Stack:\n");
11710 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11711
11712 printk(KERN_EMERG "Code: ");
11713
11714- ip = (u8 *)regs->ip - code_prologue;
11715+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11716 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11717 /* try starting at IP */
11718- ip = (u8 *)regs->ip;
11719+ ip = (u8 *)regs->ip + cs_base;
11720 code_len = code_len - code_prologue + 1;
11721 }
11722 for (i = 0; i < code_len; i++, ip++) {
11723@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11724 printk(" Bad EIP value.");
11725 break;
11726 }
11727- if (ip == (u8 *)regs->ip)
11728+ if (ip == (u8 *)regs->ip + cs_base)
11729 printk("<%02x> ", c);
11730 else
11731 printk("%02x ", c);
11732@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11733 {
11734 unsigned short ud2;
11735
11736+ ip = ktla_ktva(ip);
11737 if (ip < PAGE_OFFSET)
11738 return 0;
11739 if (probe_kernel_address((unsigned short *)ip, ud2))
11740diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_64.c linux-3.0.7/arch/x86/kernel/dumpstack_64.c
11741--- linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11742+++ linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11743@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11744 unsigned long *irq_stack_end =
11745 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11746 unsigned used = 0;
11747- struct thread_info *tinfo;
11748 int graph = 0;
11749 unsigned long dummy;
11750+ void *stack_start;
11751
11752 if (!task)
11753 task = current;
11754@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11755 * current stack address. If the stacks consist of nested
11756 * exceptions
11757 */
11758- tinfo = task_thread_info(task);
11759 for (;;) {
11760 char *id;
11761 unsigned long *estack_end;
11762+
11763 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11764 &used, &id);
11765
11766@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11767 if (ops->stack(data, id) < 0)
11768 break;
11769
11770- bp = ops->walk_stack(tinfo, stack, bp, ops,
11771+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11772 data, estack_end, &graph);
11773 ops->stack(data, "<EOE>");
11774 /*
11775@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11776 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11777 if (ops->stack(data, "IRQ") < 0)
11778 break;
11779- bp = ops->walk_stack(tinfo, stack, bp,
11780+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11781 ops, data, irq_stack_end, &graph);
11782 /*
11783 * We link to the next stack (which would be
11784@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11785 /*
11786 * This handles the process stack:
11787 */
11788- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11789+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11790+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11791 put_cpu();
11792 }
11793 EXPORT_SYMBOL(dump_trace);
11794diff -urNp linux-3.0.7/arch/x86/kernel/early_printk.c linux-3.0.7/arch/x86/kernel/early_printk.c
11795--- linux-3.0.7/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11796+++ linux-3.0.7/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11797@@ -7,6 +7,7 @@
11798 #include <linux/pci_regs.h>
11799 #include <linux/pci_ids.h>
11800 #include <linux/errno.h>
11801+#include <linux/sched.h>
11802 #include <asm/io.h>
11803 #include <asm/processor.h>
11804 #include <asm/fcntl.h>
11805@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11806 int n;
11807 va_list ap;
11808
11809+ pax_track_stack();
11810+
11811 va_start(ap, fmt);
11812 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11813 early_console->write(early_console, buf, n);
11814diff -urNp linux-3.0.7/arch/x86/kernel/entry_32.S linux-3.0.7/arch/x86/kernel/entry_32.S
11815--- linux-3.0.7/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11816+++ linux-3.0.7/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11817@@ -185,13 +185,146 @@
11818 /*CFI_REL_OFFSET gs, PT_GS*/
11819 .endm
11820 .macro SET_KERNEL_GS reg
11821+
11822+#ifdef CONFIG_CC_STACKPROTECTOR
11823 movl $(__KERNEL_STACK_CANARY), \reg
11824+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11825+ movl $(__USER_DS), \reg
11826+#else
11827+ xorl \reg, \reg
11828+#endif
11829+
11830 movl \reg, %gs
11831 .endm
11832
11833 #endif /* CONFIG_X86_32_LAZY_GS */
11834
11835-.macro SAVE_ALL
11836+.macro pax_enter_kernel
11837+#ifdef CONFIG_PAX_KERNEXEC
11838+ call pax_enter_kernel
11839+#endif
11840+.endm
11841+
11842+.macro pax_exit_kernel
11843+#ifdef CONFIG_PAX_KERNEXEC
11844+ call pax_exit_kernel
11845+#endif
11846+.endm
11847+
11848+#ifdef CONFIG_PAX_KERNEXEC
11849+ENTRY(pax_enter_kernel)
11850+#ifdef CONFIG_PARAVIRT
11851+ pushl %eax
11852+ pushl %ecx
11853+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11854+ mov %eax, %esi
11855+#else
11856+ mov %cr0, %esi
11857+#endif
11858+ bts $16, %esi
11859+ jnc 1f
11860+ mov %cs, %esi
11861+ cmp $__KERNEL_CS, %esi
11862+ jz 3f
11863+ ljmp $__KERNEL_CS, $3f
11864+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11865+2:
11866+#ifdef CONFIG_PARAVIRT
11867+ mov %esi, %eax
11868+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11869+#else
11870+ mov %esi, %cr0
11871+#endif
11872+3:
11873+#ifdef CONFIG_PARAVIRT
11874+ popl %ecx
11875+ popl %eax
11876+#endif
11877+ ret
11878+ENDPROC(pax_enter_kernel)
11879+
11880+ENTRY(pax_exit_kernel)
11881+#ifdef CONFIG_PARAVIRT
11882+ pushl %eax
11883+ pushl %ecx
11884+#endif
11885+ mov %cs, %esi
11886+ cmp $__KERNEXEC_KERNEL_CS, %esi
11887+ jnz 2f
11888+#ifdef CONFIG_PARAVIRT
11889+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11890+ mov %eax, %esi
11891+#else
11892+ mov %cr0, %esi
11893+#endif
11894+ btr $16, %esi
11895+ ljmp $__KERNEL_CS, $1f
11896+1:
11897+#ifdef CONFIG_PARAVIRT
11898+ mov %esi, %eax
11899+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11900+#else
11901+ mov %esi, %cr0
11902+#endif
11903+2:
11904+#ifdef CONFIG_PARAVIRT
11905+ popl %ecx
11906+ popl %eax
11907+#endif
11908+ ret
11909+ENDPROC(pax_exit_kernel)
11910+#endif
11911+
11912+.macro pax_erase_kstack
11913+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11914+ call pax_erase_kstack
11915+#endif
11916+.endm
11917+
11918+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11919+/*
11920+ * ebp: thread_info
11921+ * ecx, edx: can be clobbered
11922+ */
11923+ENTRY(pax_erase_kstack)
11924+ pushl %edi
11925+ pushl %eax
11926+
11927+ mov TI_lowest_stack(%ebp), %edi
11928+ mov $-0xBEEF, %eax
11929+ std
11930+
11931+1: mov %edi, %ecx
11932+ and $THREAD_SIZE_asm - 1, %ecx
11933+ shr $2, %ecx
11934+ repne scasl
11935+ jecxz 2f
11936+
11937+ cmp $2*16, %ecx
11938+ jc 2f
11939+
11940+ mov $2*16, %ecx
11941+ repe scasl
11942+ jecxz 2f
11943+ jne 1b
11944+
11945+2: cld
11946+ mov %esp, %ecx
11947+ sub %edi, %ecx
11948+ shr $2, %ecx
11949+ rep stosl
11950+
11951+ mov TI_task_thread_sp0(%ebp), %edi
11952+ sub $128, %edi
11953+ mov %edi, TI_lowest_stack(%ebp)
11954+
11955+ popl %eax
11956+ popl %edi
11957+ ret
11958+ENDPROC(pax_erase_kstack)
11959+#endif
11960+
11961+.macro __SAVE_ALL _DS
11962 cld
11963 PUSH_GS
11964 pushl_cfi %fs
11965@@ -214,7 +347,7 @@
11966 CFI_REL_OFFSET ecx, 0
11967 pushl_cfi %ebx
11968 CFI_REL_OFFSET ebx, 0
11969- movl $(__USER_DS), %edx
11970+ movl $\_DS, %edx
11971 movl %edx, %ds
11972 movl %edx, %es
11973 movl $(__KERNEL_PERCPU), %edx
11974@@ -222,6 +355,15 @@
11975 SET_KERNEL_GS %edx
11976 .endm
11977
11978+.macro SAVE_ALL
11979+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11980+ __SAVE_ALL __KERNEL_DS
11981+ pax_enter_kernel
11982+#else
11983+ __SAVE_ALL __USER_DS
11984+#endif
11985+.endm
11986+
11987 .macro RESTORE_INT_REGS
11988 popl_cfi %ebx
11989 CFI_RESTORE ebx
11990@@ -332,7 +474,15 @@ check_userspace:
11991 movb PT_CS(%esp), %al
11992 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11993 cmpl $USER_RPL, %eax
11994+
11995+#ifdef CONFIG_PAX_KERNEXEC
11996+ jae resume_userspace
11997+
11998+ PAX_EXIT_KERNEL
11999+ jmp resume_kernel
12000+#else
12001 jb resume_kernel # not returning to v8086 or userspace
12002+#endif
12003
12004 ENTRY(resume_userspace)
12005 LOCKDEP_SYS_EXIT
12006@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
12007 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12008 # int/exception return?
12009 jne work_pending
12010- jmp restore_all
12011+ jmp restore_all_pax
12012 END(ret_from_exception)
12013
12014 #ifdef CONFIG_PREEMPT
12015@@ -394,23 +544,34 @@ sysenter_past_esp:
12016 /*CFI_REL_OFFSET cs, 0*/
12017 /*
12018 * Push current_thread_info()->sysenter_return to the stack.
12019- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12020- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12021 */
12022- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12023+ pushl_cfi $0
12024 CFI_REL_OFFSET eip, 0
12025
12026 pushl_cfi %eax
12027 SAVE_ALL
12028+ GET_THREAD_INFO(%ebp)
12029+ movl TI_sysenter_return(%ebp),%ebp
12030+ movl %ebp,PT_EIP(%esp)
12031 ENABLE_INTERRUPTS(CLBR_NONE)
12032
12033 /*
12034 * Load the potential sixth argument from user stack.
12035 * Careful about security.
12036 */
12037+ movl PT_OLDESP(%esp),%ebp
12038+
12039+#ifdef CONFIG_PAX_MEMORY_UDEREF
12040+ mov PT_OLDSS(%esp),%ds
12041+1: movl %ds:(%ebp),%ebp
12042+ push %ss
12043+ pop %ds
12044+#else
12045 cmpl $__PAGE_OFFSET-3,%ebp
12046 jae syscall_fault
12047 1: movl (%ebp),%ebp
12048+#endif
12049+
12050 movl %ebp,PT_EBP(%esp)
12051 .section __ex_table,"a"
12052 .align 4
12053@@ -433,12 +594,24 @@ sysenter_do_call:
12054 testl $_TIF_ALLWORK_MASK, %ecx
12055 jne sysexit_audit
12056 sysenter_exit:
12057+
12058+#ifdef CONFIG_PAX_RANDKSTACK
12059+ pushl_cfi %eax
12060+ movl %esp, %eax
12061+ call pax_randomize_kstack
12062+ popl_cfi %eax
12063+#endif
12064+
12065+ pax_erase_kstack
12066+
12067 /* if something modifies registers it must also disable sysexit */
12068 movl PT_EIP(%esp), %edx
12069 movl PT_OLDESP(%esp), %ecx
12070 xorl %ebp,%ebp
12071 TRACE_IRQS_ON
12072 1: mov PT_FS(%esp), %fs
12073+2: mov PT_DS(%esp), %ds
12074+3: mov PT_ES(%esp), %es
12075 PTGS_TO_GS
12076 ENABLE_INTERRUPTS_SYSEXIT
12077
12078@@ -455,6 +628,9 @@ sysenter_audit:
12079 movl %eax,%edx /* 2nd arg: syscall number */
12080 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12081 call audit_syscall_entry
12082+
12083+ pax_erase_kstack
12084+
12085 pushl_cfi %ebx
12086 movl PT_EAX(%esp),%eax /* reload syscall number */
12087 jmp sysenter_do_call
12088@@ -481,11 +657,17 @@ sysexit_audit:
12089
12090 CFI_ENDPROC
12091 .pushsection .fixup,"ax"
12092-2: movl $0,PT_FS(%esp)
12093+4: movl $0,PT_FS(%esp)
12094+ jmp 1b
12095+5: movl $0,PT_DS(%esp)
12096+ jmp 1b
12097+6: movl $0,PT_ES(%esp)
12098 jmp 1b
12099 .section __ex_table,"a"
12100 .align 4
12101- .long 1b,2b
12102+ .long 1b,4b
12103+ .long 2b,5b
12104+ .long 3b,6b
12105 .popsection
12106 PTGS_TO_GS_EX
12107 ENDPROC(ia32_sysenter_target)
12108@@ -518,6 +700,15 @@ syscall_exit:
12109 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12110 jne syscall_exit_work
12111
12112+restore_all_pax:
12113+
12114+#ifdef CONFIG_PAX_RANDKSTACK
12115+ movl %esp, %eax
12116+ call pax_randomize_kstack
12117+#endif
12118+
12119+ pax_erase_kstack
12120+
12121 restore_all:
12122 TRACE_IRQS_IRET
12123 restore_all_notrace:
12124@@ -577,14 +768,34 @@ ldt_ss:
12125 * compensating for the offset by changing to the ESPFIX segment with
12126 * a base address that matches for the difference.
12127 */
12128-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12129+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12130 mov %esp, %edx /* load kernel esp */
12131 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12132 mov %dx, %ax /* eax: new kernel esp */
12133 sub %eax, %edx /* offset (low word is 0) */
12134+#ifdef CONFIG_SMP
12135+ movl PER_CPU_VAR(cpu_number), %ebx
12136+ shll $PAGE_SHIFT_asm, %ebx
12137+ addl $cpu_gdt_table, %ebx
12138+#else
12139+ movl $cpu_gdt_table, %ebx
12140+#endif
12141 shr $16, %edx
12142- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12143- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12144+
12145+#ifdef CONFIG_PAX_KERNEXEC
12146+ mov %cr0, %esi
12147+ btr $16, %esi
12148+ mov %esi, %cr0
12149+#endif
12150+
12151+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
12152+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
12153+
12154+#ifdef CONFIG_PAX_KERNEXEC
12155+ bts $16, %esi
12156+ mov %esi, %cr0
12157+#endif
12158+
12159 pushl_cfi $__ESPFIX_SS
12160 pushl_cfi %eax /* new kernel esp */
12161 /* Disable interrupts, but do not irqtrace this section: we
12162@@ -613,29 +824,23 @@ work_resched:
12163 movl TI_flags(%ebp), %ecx
12164 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12165 # than syscall tracing?
12166- jz restore_all
12167+ jz restore_all_pax
12168 testb $_TIF_NEED_RESCHED, %cl
12169 jnz work_resched
12170
12171 work_notifysig: # deal with pending signals and
12172 # notify-resume requests
12173+ movl %esp, %eax
12174 #ifdef CONFIG_VM86
12175 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12176- movl %esp, %eax
12177- jne work_notifysig_v86 # returning to kernel-space or
12178+ jz 1f # returning to kernel-space or
12179 # vm86-space
12180- xorl %edx, %edx
12181- call do_notify_resume
12182- jmp resume_userspace_sig
12183
12184- ALIGN
12185-work_notifysig_v86:
12186 pushl_cfi %ecx # save ti_flags for do_notify_resume
12187 call save_v86_state # %eax contains pt_regs pointer
12188 popl_cfi %ecx
12189 movl %eax, %esp
12190-#else
12191- movl %esp, %eax
12192+1:
12193 #endif
12194 xorl %edx, %edx
12195 call do_notify_resume
12196@@ -648,6 +853,9 @@ syscall_trace_entry:
12197 movl $-ENOSYS,PT_EAX(%esp)
12198 movl %esp, %eax
12199 call syscall_trace_enter
12200+
12201+ pax_erase_kstack
12202+
12203 /* What it returned is what we'll actually use. */
12204 cmpl $(nr_syscalls), %eax
12205 jnae syscall_call
12206@@ -670,6 +878,10 @@ END(syscall_exit_work)
12207
12208 RING0_INT_FRAME # can't unwind into user space anyway
12209 syscall_fault:
12210+#ifdef CONFIG_PAX_MEMORY_UDEREF
12211+ push %ss
12212+ pop %ds
12213+#endif
12214 GET_THREAD_INFO(%ebp)
12215 movl $-EFAULT,PT_EAX(%esp)
12216 jmp resume_userspace
12217@@ -752,6 +964,36 @@ ptregs_clone:
12218 CFI_ENDPROC
12219 ENDPROC(ptregs_clone)
12220
12221+ ALIGN;
12222+ENTRY(kernel_execve)
12223+ CFI_STARTPROC
12224+ pushl_cfi %ebp
12225+ sub $PT_OLDSS+4,%esp
12226+ pushl_cfi %edi
12227+ pushl_cfi %ecx
12228+ pushl_cfi %eax
12229+ lea 3*4(%esp),%edi
12230+ mov $PT_OLDSS/4+1,%ecx
12231+ xorl %eax,%eax
12232+ rep stosl
12233+ popl_cfi %eax
12234+ popl_cfi %ecx
12235+ popl_cfi %edi
12236+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12237+ pushl_cfi %esp
12238+ call sys_execve
12239+ add $4,%esp
12240+ CFI_ADJUST_CFA_OFFSET -4
12241+ GET_THREAD_INFO(%ebp)
12242+ test %eax,%eax
12243+ jz syscall_exit
12244+ add $PT_OLDSS+4,%esp
12245+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12246+ popl_cfi %ebp
12247+ ret
12248+ CFI_ENDPROC
12249+ENDPROC(kernel_execve)
12250+
12251 .macro FIXUP_ESPFIX_STACK
12252 /*
12253 * Switch back for ESPFIX stack to the normal zerobased stack
12254@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
12255 * normal stack and adjusts ESP with the matching offset.
12256 */
12257 /* fixup the stack */
12258- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12259- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12260+#ifdef CONFIG_SMP
12261+ movl PER_CPU_VAR(cpu_number), %ebx
12262+ shll $PAGE_SHIFT_asm, %ebx
12263+ addl $cpu_gdt_table, %ebx
12264+#else
12265+ movl $cpu_gdt_table, %ebx
12266+#endif
12267+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12268+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12269 shl $16, %eax
12270 addl %esp, %eax /* the adjusted stack pointer */
12271 pushl_cfi $__KERNEL_DS
12272@@ -1213,7 +1462,6 @@ return_to_handler:
12273 jmp *%ecx
12274 #endif
12275
12276-.section .rodata,"a"
12277 #include "syscall_table_32.S"
12278
12279 syscall_table_size=(.-sys_call_table)
12280@@ -1259,9 +1507,12 @@ error_code:
12281 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12282 REG_TO_PTGS %ecx
12283 SET_KERNEL_GS %ecx
12284- movl $(__USER_DS), %ecx
12285+ movl $(__KERNEL_DS), %ecx
12286 movl %ecx, %ds
12287 movl %ecx, %es
12288+
12289+ pax_enter_kernel
12290+
12291 TRACE_IRQS_OFF
12292 movl %esp,%eax # pt_regs pointer
12293 call *%edi
12294@@ -1346,6 +1597,9 @@ nmi_stack_correct:
12295 xorl %edx,%edx # zero error code
12296 movl %esp,%eax # pt_regs pointer
12297 call do_nmi
12298+
12299+ pax_exit_kernel
12300+
12301 jmp restore_all_notrace
12302 CFI_ENDPROC
12303
12304@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
12305 FIXUP_ESPFIX_STACK # %eax == %esp
12306 xorl %edx,%edx # zero error code
12307 call do_nmi
12308+
12309+ pax_exit_kernel
12310+
12311 RESTORE_REGS
12312 lss 12+4(%esp), %esp # back to espfix stack
12313 CFI_ADJUST_CFA_OFFSET -24
12314diff -urNp linux-3.0.7/arch/x86/kernel/entry_64.S linux-3.0.7/arch/x86/kernel/entry_64.S
12315--- linux-3.0.7/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
12316+++ linux-3.0.7/arch/x86/kernel/entry_64.S 2011-10-11 10:44:33.000000000 -0400
12317@@ -53,6 +53,8 @@
12318 #include <asm/paravirt.h>
12319 #include <asm/ftrace.h>
12320 #include <asm/percpu.h>
12321+#include <asm/pgtable.h>
12322+#include <asm/alternative-asm.h>
12323
12324 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12325 #include <linux/elf-em.h>
12326@@ -66,6 +68,7 @@
12327 #ifdef CONFIG_FUNCTION_TRACER
12328 #ifdef CONFIG_DYNAMIC_FTRACE
12329 ENTRY(mcount)
12330+ pax_force_retaddr
12331 retq
12332 END(mcount)
12333
12334@@ -90,6 +93,7 @@ GLOBAL(ftrace_graph_call)
12335 #endif
12336
12337 GLOBAL(ftrace_stub)
12338+ pax_force_retaddr
12339 retq
12340 END(ftrace_caller)
12341
12342@@ -110,6 +114,7 @@ ENTRY(mcount)
12343 #endif
12344
12345 GLOBAL(ftrace_stub)
12346+ pax_force_retaddr
12347 retq
12348
12349 trace:
12350@@ -119,6 +124,7 @@ trace:
12351 movq 8(%rbp), %rsi
12352 subq $MCOUNT_INSN_SIZE, %rdi
12353
12354+ pax_force_fptr ftrace_trace_function
12355 call *ftrace_trace_function
12356
12357 MCOUNT_RESTORE_FRAME
12358@@ -144,6 +150,7 @@ ENTRY(ftrace_graph_caller)
12359
12360 MCOUNT_RESTORE_FRAME
12361
12362+ pax_force_retaddr
12363 retq
12364 END(ftrace_graph_caller)
12365
12366@@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12367 movq 8(%rsp), %rdx
12368 movq (%rsp), %rax
12369 addq $24, %rsp
12370+ pax_force_fptr %rdi
12371 jmp *%rdi
12372 #endif
12373
12374@@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12375 ENDPROC(native_usergs_sysret64)
12376 #endif /* CONFIG_PARAVIRT */
12377
12378+ .macro ljmpq sel, off
12379+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12380+ .byte 0x48; ljmp *1234f(%rip)
12381+ .pushsection .rodata
12382+ .align 16
12383+ 1234: .quad \off; .word \sel
12384+ .popsection
12385+#else
12386+ pushq $\sel
12387+ pushq $\off
12388+ lretq
12389+#endif
12390+ .endm
12391+
12392+ .macro pax_enter_kernel
12393+#ifdef CONFIG_PAX_KERNEXEC
12394+ call pax_enter_kernel
12395+#endif
12396+ .endm
12397+
12398+ .macro pax_exit_kernel
12399+#ifdef CONFIG_PAX_KERNEXEC
12400+ call pax_exit_kernel
12401+#endif
12402+ .endm
12403+
12404+#ifdef CONFIG_PAX_KERNEXEC
12405+ENTRY(pax_enter_kernel)
12406+ pushq %rdi
12407+
12408+#ifdef CONFIG_PARAVIRT
12409+ PV_SAVE_REGS(CLBR_RDI)
12410+#endif
12411+
12412+ GET_CR0_INTO_RDI
12413+ bts $16,%rdi
12414+ jnc 1f
12415+ mov %cs,%edi
12416+ cmp $__KERNEL_CS,%edi
12417+ jz 3f
12418+ ljmpq __KERNEL_CS,3f
12419+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12420+2: SET_RDI_INTO_CR0
12421+3:
12422+
12423+#ifdef CONFIG_PARAVIRT
12424+ PV_RESTORE_REGS(CLBR_RDI)
12425+#endif
12426+
12427+ popq %rdi
12428+ pax_force_retaddr
12429+ retq
12430+ENDPROC(pax_enter_kernel)
12431+
12432+ENTRY(pax_exit_kernel)
12433+ pushq %rdi
12434+
12435+#ifdef CONFIG_PARAVIRT
12436+ PV_SAVE_REGS(CLBR_RDI)
12437+#endif
12438+
12439+ mov %cs,%rdi
12440+ cmp $__KERNEXEC_KERNEL_CS,%edi
12441+ jnz 2f
12442+ GET_CR0_INTO_RDI
12443+ btr $16,%rdi
12444+ ljmpq __KERNEL_CS,1f
12445+1: SET_RDI_INTO_CR0
12446+2:
12447+
12448+#ifdef CONFIG_PARAVIRT
12449+ PV_RESTORE_REGS(CLBR_RDI);
12450+#endif
12451+
12452+ popq %rdi
12453+ pax_force_retaddr
12454+ retq
12455+ENDPROC(pax_exit_kernel)
12456+#endif
12457+
12458+ .macro pax_enter_kernel_user
12459+#ifdef CONFIG_PAX_MEMORY_UDEREF
12460+ call pax_enter_kernel_user
12461+#endif
12462+ .endm
12463+
12464+ .macro pax_exit_kernel_user
12465+#ifdef CONFIG_PAX_MEMORY_UDEREF
12466+ call pax_exit_kernel_user
12467+#endif
12468+#ifdef CONFIG_PAX_RANDKSTACK
12469+ push %rax
12470+ call pax_randomize_kstack
12471+ pop %rax
12472+#endif
12473+ .endm
12474+
12475+#ifdef CONFIG_PAX_MEMORY_UDEREF
12476+ENTRY(pax_enter_kernel_user)
12477+ pushq %rdi
12478+ pushq %rbx
12479+
12480+#ifdef CONFIG_PARAVIRT
12481+ PV_SAVE_REGS(CLBR_RDI)
12482+#endif
12483+
12484+ GET_CR3_INTO_RDI
12485+ mov %rdi,%rbx
12486+ add $__START_KERNEL_map,%rbx
12487+ sub phys_base(%rip),%rbx
12488+
12489+#ifdef CONFIG_PARAVIRT
12490+ pushq %rdi
12491+ cmpl $0, pv_info+PARAVIRT_enabled
12492+ jz 1f
12493+ i = 0
12494+ .rept USER_PGD_PTRS
12495+ mov i*8(%rbx),%rsi
12496+ mov $0,%sil
12497+ lea i*8(%rbx),%rdi
12498+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12499+ i = i + 1
12500+ .endr
12501+ jmp 2f
12502+1:
12503+#endif
12504+
12505+ i = 0
12506+ .rept USER_PGD_PTRS
12507+ movb $0,i*8(%rbx)
12508+ i = i + 1
12509+ .endr
12510+
12511+#ifdef CONFIG_PARAVIRT
12512+2: popq %rdi
12513+#endif
12514+ SET_RDI_INTO_CR3
12515+
12516+#ifdef CONFIG_PAX_KERNEXEC
12517+ GET_CR0_INTO_RDI
12518+ bts $16,%rdi
12519+ SET_RDI_INTO_CR0
12520+#endif
12521+
12522+#ifdef CONFIG_PARAVIRT
12523+ PV_RESTORE_REGS(CLBR_RDI)
12524+#endif
12525+
12526+ popq %rbx
12527+ popq %rdi
12528+ pax_force_retaddr
12529+ retq
12530+ENDPROC(pax_enter_kernel_user)
12531+
12532+ENTRY(pax_exit_kernel_user)
12533+ push %rdi
12534+
12535+#ifdef CONFIG_PARAVIRT
12536+ pushq %rbx
12537+ PV_SAVE_REGS(CLBR_RDI)
12538+#endif
12539+
12540+#ifdef CONFIG_PAX_KERNEXEC
12541+ GET_CR0_INTO_RDI
12542+ btr $16,%rdi
12543+ SET_RDI_INTO_CR0
12544+#endif
12545+
12546+ GET_CR3_INTO_RDI
12547+ add $__START_KERNEL_map,%rdi
12548+ sub phys_base(%rip),%rdi
12549+
12550+#ifdef CONFIG_PARAVIRT
12551+ cmpl $0, pv_info+PARAVIRT_enabled
12552+ jz 1f
12553+ mov %rdi,%rbx
12554+ i = 0
12555+ .rept USER_PGD_PTRS
12556+ mov i*8(%rbx),%rsi
12557+ mov $0x67,%sil
12558+ lea i*8(%rbx),%rdi
12559+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12560+ i = i + 1
12561+ .endr
12562+ jmp 2f
12563+1:
12564+#endif
12565+
12566+ i = 0
12567+ .rept USER_PGD_PTRS
12568+ movb $0x67,i*8(%rdi)
12569+ i = i + 1
12570+ .endr
12571+
12572+#ifdef CONFIG_PARAVIRT
12573+2: PV_RESTORE_REGS(CLBR_RDI)
12574+ popq %rbx
12575+#endif
12576+
12577+ popq %rdi
12578+ pax_force_retaddr
12579+ retq
12580+ENDPROC(pax_exit_kernel_user)
12581+#endif
12582+
12583+ .macro pax_erase_kstack
12584+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12585+ call pax_erase_kstack
12586+#endif
12587+ .endm
12588+
12589+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12590+/*
12591+ * r10: thread_info
12592+ * rcx, rdx: can be clobbered
12593+ */
12594+ENTRY(pax_erase_kstack)
12595+ pushq %rdi
12596+ pushq %rax
12597+ pushq %r10
12598+
12599+ GET_THREAD_INFO(%r10)
12600+ mov TI_lowest_stack(%r10), %rdi
12601+ mov $-0xBEEF, %rax
12602+ std
12603+
12604+1: mov %edi, %ecx
12605+ and $THREAD_SIZE_asm - 1, %ecx
12606+ shr $3, %ecx
12607+ repne scasq
12608+ jecxz 2f
12609+
12610+ cmp $2*8, %ecx
12611+ jc 2f
12612+
12613+ mov $2*8, %ecx
12614+ repe scasq
12615+ jecxz 2f
12616+ jne 1b
12617+
12618+2: cld
12619+ mov %esp, %ecx
12620+ sub %edi, %ecx
12621+
12622+ cmp $THREAD_SIZE_asm, %rcx
12623+ jb 3f
12624+ ud2
12625+3:
12626+
12627+ shr $3, %ecx
12628+ rep stosq
12629+
12630+ mov TI_task_thread_sp0(%r10), %rdi
12631+ sub $256, %rdi
12632+ mov %rdi, TI_lowest_stack(%r10)
12633+
12634+ popq %r10
12635+ popq %rax
12636+ popq %rdi
12637+ pax_force_retaddr
12638+ ret
12639+ENDPROC(pax_erase_kstack)
12640+#endif
12641
12642 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12643 #ifdef CONFIG_TRACE_IRQFLAGS
12644@@ -318,7 +589,7 @@ ENTRY(save_args)
12645 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12646 movq_cfi rbp, 8 /* push %rbp */
12647 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12648- testl $3, CS(%rdi)
12649+ testb $3, CS(%rdi)
12650 je 1f
12651 SWAPGS
12652 /*
12653@@ -338,6 +609,7 @@ ENTRY(save_args)
12654 * We entered an interrupt context - irqs are off:
12655 */
12656 2: TRACE_IRQS_OFF
12657+ pax_force_retaddr
12658 ret
12659 CFI_ENDPROC
12660 END(save_args)
12661@@ -354,6 +626,7 @@ ENTRY(save_rest)
12662 movq_cfi r15, R15+16
12663 movq %r11, 8(%rsp) /* return address */
12664 FIXUP_TOP_OF_STACK %r11, 16
12665+ pax_force_retaddr
12666 ret
12667 CFI_ENDPROC
12668 END(save_rest)
12669@@ -385,7 +658,8 @@ ENTRY(save_paranoid)
12670 js 1f /* negative -> in kernel */
12671 SWAPGS
12672 xorl %ebx,%ebx
12673-1: ret
12674+1: pax_force_retaddr
12675+ ret
12676 CFI_ENDPROC
12677 END(save_paranoid)
12678 .popsection
12679@@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12680
12681 RESTORE_REST
12682
12683- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12684+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12685 je int_ret_from_sys_call
12686
12687 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12688@@ -455,7 +729,7 @@ END(ret_from_fork)
12689 ENTRY(system_call)
12690 CFI_STARTPROC simple
12691 CFI_SIGNAL_FRAME
12692- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12693+ CFI_DEF_CFA rsp,0
12694 CFI_REGISTER rip,rcx
12695 /*CFI_REGISTER rflags,r11*/
12696 SWAPGS_UNSAFE_STACK
12697@@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12698
12699 movq %rsp,PER_CPU_VAR(old_rsp)
12700 movq PER_CPU_VAR(kernel_stack),%rsp
12701+ pax_enter_kernel_user
12702 /*
12703 * No need to follow this irqs off/on section - it's straight
12704 * and short:
12705 */
12706 ENABLE_INTERRUPTS(CLBR_NONE)
12707- SAVE_ARGS 8,1
12708+ SAVE_ARGS 8*6,1
12709 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12710 movq %rcx,RIP-ARGOFFSET(%rsp)
12711 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12712@@ -502,6 +777,8 @@ sysret_check:
12713 andl %edi,%edx
12714 jnz sysret_careful
12715 CFI_REMEMBER_STATE
12716+ pax_exit_kernel_user
12717+ pax_erase_kstack
12718 /*
12719 * sysretq will re-enable interrupts:
12720 */
12721@@ -560,6 +837,9 @@ auditsys:
12722 movq %rax,%rsi /* 2nd arg: syscall number */
12723 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12724 call audit_syscall_entry
12725+
12726+ pax_erase_kstack
12727+
12728 LOAD_ARGS 0 /* reload call-clobbered registers */
12729 jmp system_call_fastpath
12730
12731@@ -590,6 +870,9 @@ tracesys:
12732 FIXUP_TOP_OF_STACK %rdi
12733 movq %rsp,%rdi
12734 call syscall_trace_enter
12735+
12736+ pax_erase_kstack
12737+
12738 /*
12739 * Reload arg registers from stack in case ptrace changed them.
12740 * We don't reload %rax because syscall_trace_enter() returned
12741@@ -611,7 +894,7 @@ tracesys:
12742 GLOBAL(int_ret_from_sys_call)
12743 DISABLE_INTERRUPTS(CLBR_NONE)
12744 TRACE_IRQS_OFF
12745- testl $3,CS-ARGOFFSET(%rsp)
12746+ testb $3,CS-ARGOFFSET(%rsp)
12747 je retint_restore_args
12748 movl $_TIF_ALLWORK_MASK,%edi
12749 /* edi: mask to check */
12750@@ -702,6 +985,7 @@ ENTRY(ptregscall_common)
12751 movq_cfi_restore R12+8, r12
12752 movq_cfi_restore RBP+8, rbp
12753 movq_cfi_restore RBX+8, rbx
12754+ pax_force_retaddr
12755 ret $REST_SKIP /* pop extended registers */
12756 CFI_ENDPROC
12757 END(ptregscall_common)
12758@@ -793,6 +1077,16 @@ END(interrupt)
12759 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12760 call save_args
12761 PARTIAL_FRAME 0
12762+#ifdef CONFIG_PAX_MEMORY_UDEREF
12763+ testb $3, CS(%rdi)
12764+ jnz 1f
12765+ pax_enter_kernel
12766+ jmp 2f
12767+1: pax_enter_kernel_user
12768+2:
12769+#else
12770+ pax_enter_kernel
12771+#endif
12772 call \func
12773 .endm
12774
12775@@ -825,7 +1119,7 @@ ret_from_intr:
12776 CFI_ADJUST_CFA_OFFSET -8
12777 exit_intr:
12778 GET_THREAD_INFO(%rcx)
12779- testl $3,CS-ARGOFFSET(%rsp)
12780+ testb $3,CS-ARGOFFSET(%rsp)
12781 je retint_kernel
12782
12783 /* Interrupt came from user space */
12784@@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12785 * The iretq could re-enable interrupts:
12786 */
12787 DISABLE_INTERRUPTS(CLBR_ANY)
12788+ pax_exit_kernel_user
12789+ pax_erase_kstack
12790 TRACE_IRQS_IRETQ
12791 SWAPGS
12792 jmp restore_args
12793
12794 retint_restore_args: /* return to kernel space */
12795 DISABLE_INTERRUPTS(CLBR_ANY)
12796+ pax_exit_kernel
12797+ pax_force_retaddr RIP-ARGOFFSET
12798 /*
12799 * The iretq could re-enable interrupts:
12800 */
12801@@ -1027,6 +1325,16 @@ ENTRY(\sym)
12802 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12803 call error_entry
12804 DEFAULT_FRAME 0
12805+#ifdef CONFIG_PAX_MEMORY_UDEREF
12806+ testb $3, CS(%rsp)
12807+ jnz 1f
12808+ pax_enter_kernel
12809+ jmp 2f
12810+1: pax_enter_kernel_user
12811+2:
12812+#else
12813+ pax_enter_kernel
12814+#endif
12815 movq %rsp,%rdi /* pt_regs pointer */
12816 xorl %esi,%esi /* no error code */
12817 call \do_sym
12818@@ -1044,6 +1352,16 @@ ENTRY(\sym)
12819 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12820 call save_paranoid
12821 TRACE_IRQS_OFF
12822+#ifdef CONFIG_PAX_MEMORY_UDEREF
12823+ testb $3, CS(%rsp)
12824+ jnz 1f
12825+ pax_enter_kernel
12826+ jmp 2f
12827+1: pax_enter_kernel_user
12828+2:
12829+#else
12830+ pax_enter_kernel
12831+#endif
12832 movq %rsp,%rdi /* pt_regs pointer */
12833 xorl %esi,%esi /* no error code */
12834 call \do_sym
12835@@ -1052,7 +1370,7 @@ ENTRY(\sym)
12836 END(\sym)
12837 .endm
12838
12839-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12840+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12841 .macro paranoidzeroentry_ist sym do_sym ist
12842 ENTRY(\sym)
12843 INTR_FRAME
12844@@ -1062,8 +1380,24 @@ ENTRY(\sym)
12845 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12846 call save_paranoid
12847 TRACE_IRQS_OFF
12848+#ifdef CONFIG_PAX_MEMORY_UDEREF
12849+ testb $3, CS(%rsp)
12850+ jnz 1f
12851+ pax_enter_kernel
12852+ jmp 2f
12853+1: pax_enter_kernel_user
12854+2:
12855+#else
12856+ pax_enter_kernel
12857+#endif
12858 movq %rsp,%rdi /* pt_regs pointer */
12859 xorl %esi,%esi /* no error code */
12860+#ifdef CONFIG_SMP
12861+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12862+ lea init_tss(%r12), %r12
12863+#else
12864+ lea init_tss(%rip), %r12
12865+#endif
12866 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12867 call \do_sym
12868 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12869@@ -1080,6 +1414,16 @@ ENTRY(\sym)
12870 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12871 call error_entry
12872 DEFAULT_FRAME 0
12873+#ifdef CONFIG_PAX_MEMORY_UDEREF
12874+ testb $3, CS(%rsp)
12875+ jnz 1f
12876+ pax_enter_kernel
12877+ jmp 2f
12878+1: pax_enter_kernel_user
12879+2:
12880+#else
12881+ pax_enter_kernel
12882+#endif
12883 movq %rsp,%rdi /* pt_regs pointer */
12884 movq ORIG_RAX(%rsp),%rsi /* get error code */
12885 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12886@@ -1099,6 +1443,16 @@ ENTRY(\sym)
12887 call save_paranoid
12888 DEFAULT_FRAME 0
12889 TRACE_IRQS_OFF
12890+#ifdef CONFIG_PAX_MEMORY_UDEREF
12891+ testb $3, CS(%rsp)
12892+ jnz 1f
12893+ pax_enter_kernel
12894+ jmp 2f
12895+1: pax_enter_kernel_user
12896+2:
12897+#else
12898+ pax_enter_kernel
12899+#endif
12900 movq %rsp,%rdi /* pt_regs pointer */
12901 movq ORIG_RAX(%rsp),%rsi /* get error code */
12902 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12903@@ -1134,6 +1488,7 @@ gs_change:
12904 2: mfence /* workaround */
12905 SWAPGS
12906 popfq_cfi
12907+ pax_force_retaddr
12908 ret
12909 CFI_ENDPROC
12910 END(native_load_gs_index)
12911@@ -1158,6 +1513,7 @@ ENTRY(kernel_thread_helper)
12912 * Here we are in the child and the registers are set as they were
12913 * at kernel_thread() invocation in the parent.
12914 */
12915+ pax_force_fptr %rsi
12916 call *%rsi
12917 # exit
12918 mov %eax, %edi
12919@@ -1193,9 +1549,10 @@ ENTRY(kernel_execve)
12920 je int_ret_from_sys_call
12921 RESTORE_ARGS
12922 UNFAKE_STACK_FRAME
12923+ pax_force_retaddr
12924 ret
12925 CFI_ENDPROC
12926-END(kernel_execve)
12927+ENDPROC(kernel_execve)
12928
12929 /* Call softirq on interrupt stack. Interrupts are off. */
12930 ENTRY(call_softirq)
12931@@ -1213,9 +1570,10 @@ ENTRY(call_softirq)
12932 CFI_DEF_CFA_REGISTER rsp
12933 CFI_ADJUST_CFA_OFFSET -8
12934 decl PER_CPU_VAR(irq_count)
12935+ pax_force_retaddr
12936 ret
12937 CFI_ENDPROC
12938-END(call_softirq)
12939+ENDPROC(call_softirq)
12940
12941 #ifdef CONFIG_XEN
12942 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
12943@@ -1361,16 +1719,31 @@ ENTRY(paranoid_exit)
12944 TRACE_IRQS_OFF
12945 testl %ebx,%ebx /* swapgs needed? */
12946 jnz paranoid_restore
12947- testl $3,CS(%rsp)
12948+ testb $3,CS(%rsp)
12949 jnz paranoid_userspace
12950+#ifdef CONFIG_PAX_MEMORY_UDEREF
12951+ pax_exit_kernel
12952+ TRACE_IRQS_IRETQ 0
12953+ SWAPGS_UNSAFE_STACK
12954+ RESTORE_ALL 8
12955+ pax_force_retaddr
12956+ jmp irq_return
12957+#endif
12958 paranoid_swapgs:
12959+#ifdef CONFIG_PAX_MEMORY_UDEREF
12960+ pax_exit_kernel_user
12961+#else
12962+ pax_exit_kernel
12963+#endif
12964 TRACE_IRQS_IRETQ 0
12965 SWAPGS_UNSAFE_STACK
12966 RESTORE_ALL 8
12967 jmp irq_return
12968 paranoid_restore:
12969+ pax_exit_kernel
12970 TRACE_IRQS_IRETQ 0
12971 RESTORE_ALL 8
12972+ pax_force_retaddr
12973 jmp irq_return
12974 paranoid_userspace:
12975 GET_THREAD_INFO(%rcx)
12976@@ -1426,12 +1799,13 @@ ENTRY(error_entry)
12977 movq_cfi r14, R14+8
12978 movq_cfi r15, R15+8
12979 xorl %ebx,%ebx
12980- testl $3,CS+8(%rsp)
12981+ testb $3,CS+8(%rsp)
12982 je error_kernelspace
12983 error_swapgs:
12984 SWAPGS
12985 error_sti:
12986 TRACE_IRQS_OFF
12987+ pax_force_retaddr
12988 ret
12989
12990 /*
12991@@ -1490,6 +1864,16 @@ ENTRY(nmi)
12992 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12993 call save_paranoid
12994 DEFAULT_FRAME 0
12995+#ifdef CONFIG_PAX_MEMORY_UDEREF
12996+ testb $3, CS(%rsp)
12997+ jnz 1f
12998+ pax_enter_kernel
12999+ jmp 2f
13000+1: pax_enter_kernel_user
13001+2:
13002+#else
13003+ pax_enter_kernel
13004+#endif
13005 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13006 movq %rsp,%rdi
13007 movq $-1,%rsi
13008@@ -1500,12 +1884,28 @@ ENTRY(nmi)
13009 DISABLE_INTERRUPTS(CLBR_NONE)
13010 testl %ebx,%ebx /* swapgs needed? */
13011 jnz nmi_restore
13012- testl $3,CS(%rsp)
13013+ testb $3,CS(%rsp)
13014 jnz nmi_userspace
13015+#ifdef CONFIG_PAX_MEMORY_UDEREF
13016+ pax_exit_kernel
13017+ SWAPGS_UNSAFE_STACK
13018+ RESTORE_ALL 8
13019+ pax_force_retaddr
13020+ jmp irq_return
13021+#endif
13022 nmi_swapgs:
13023+#ifdef CONFIG_PAX_MEMORY_UDEREF
13024+ pax_exit_kernel_user
13025+#else
13026+ pax_exit_kernel
13027+#endif
13028 SWAPGS_UNSAFE_STACK
13029+ RESTORE_ALL 8
13030+ jmp irq_return
13031 nmi_restore:
13032+ pax_exit_kernel
13033 RESTORE_ALL 8
13034+ pax_force_retaddr
13035 jmp irq_return
13036 nmi_userspace:
13037 GET_THREAD_INFO(%rcx)
13038diff -urNp linux-3.0.7/arch/x86/kernel/ftrace.c linux-3.0.7/arch/x86/kernel/ftrace.c
13039--- linux-3.0.7/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
13040+++ linux-3.0.7/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
13041@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13042 static const void *mod_code_newcode; /* holds the text to write to the IP */
13043
13044 static unsigned nmi_wait_count;
13045-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13046+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13047
13048 int ftrace_arch_read_dyn_info(char *buf, int size)
13049 {
13050@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13051
13052 r = snprintf(buf, size, "%u %u",
13053 nmi_wait_count,
13054- atomic_read(&nmi_update_count));
13055+ atomic_read_unchecked(&nmi_update_count));
13056 return r;
13057 }
13058
13059@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13060
13061 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13062 smp_rmb();
13063+ pax_open_kernel();
13064 ftrace_mod_code();
13065- atomic_inc(&nmi_update_count);
13066+ pax_close_kernel();
13067+ atomic_inc_unchecked(&nmi_update_count);
13068 }
13069 /* Must have previous changes seen before executions */
13070 smp_mb();
13071@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13072 {
13073 unsigned char replaced[MCOUNT_INSN_SIZE];
13074
13075+ ip = ktla_ktva(ip);
13076+
13077 /*
13078 * Note: Due to modules and __init, code can
13079 * disappear and change, we need to protect against faulting
13080@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13081 unsigned char old[MCOUNT_INSN_SIZE], *new;
13082 int ret;
13083
13084- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13085+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13086 new = ftrace_call_replace(ip, (unsigned long)func);
13087 ret = ftrace_modify_code(ip, old, new);
13088
13089@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13090 {
13091 unsigned char code[MCOUNT_INSN_SIZE];
13092
13093+ ip = ktla_ktva(ip);
13094+
13095 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13096 return -EFAULT;
13097
13098diff -urNp linux-3.0.7/arch/x86/kernel/head32.c linux-3.0.7/arch/x86/kernel/head32.c
13099--- linux-3.0.7/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
13100+++ linux-3.0.7/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
13101@@ -19,6 +19,7 @@
13102 #include <asm/io_apic.h>
13103 #include <asm/bios_ebda.h>
13104 #include <asm/tlbflush.h>
13105+#include <asm/boot.h>
13106
13107 static void __init i386_default_early_setup(void)
13108 {
13109@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13110 {
13111 memblock_init();
13112
13113- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13114+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13115
13116 #ifdef CONFIG_BLK_DEV_INITRD
13117 /* Reserve INITRD */
13118diff -urNp linux-3.0.7/arch/x86/kernel/head_32.S linux-3.0.7/arch/x86/kernel/head_32.S
13119--- linux-3.0.7/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
13120+++ linux-3.0.7/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
13121@@ -25,6 +25,12 @@
13122 /* Physical address */
13123 #define pa(X) ((X) - __PAGE_OFFSET)
13124
13125+#ifdef CONFIG_PAX_KERNEXEC
13126+#define ta(X) (X)
13127+#else
13128+#define ta(X) ((X) - __PAGE_OFFSET)
13129+#endif
13130+
13131 /*
13132 * References to members of the new_cpu_data structure.
13133 */
13134@@ -54,11 +60,7 @@
13135 * and small than max_low_pfn, otherwise will waste some page table entries
13136 */
13137
13138-#if PTRS_PER_PMD > 1
13139-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13140-#else
13141-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13142-#endif
13143+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13144
13145 /* Number of possible pages in the lowmem region */
13146 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13147@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13148 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13149
13150 /*
13151+ * Real beginning of normal "text" segment
13152+ */
13153+ENTRY(stext)
13154+ENTRY(_stext)
13155+
13156+/*
13157 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13158 * %esi points to the real-mode code as a 32-bit pointer.
13159 * CS and DS must be 4 GB flat segments, but we don't depend on
13160@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13161 * can.
13162 */
13163 __HEAD
13164+
13165+#ifdef CONFIG_PAX_KERNEXEC
13166+ jmp startup_32
13167+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13168+.fill PAGE_SIZE-5,1,0xcc
13169+#endif
13170+
13171 ENTRY(startup_32)
13172 movl pa(stack_start),%ecx
13173
13174@@ -105,6 +120,57 @@ ENTRY(startup_32)
13175 2:
13176 leal -__PAGE_OFFSET(%ecx),%esp
13177
13178+#ifdef CONFIG_SMP
13179+ movl $pa(cpu_gdt_table),%edi
13180+ movl $__per_cpu_load,%eax
13181+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13182+ rorl $16,%eax
13183+ movb %al,__KERNEL_PERCPU + 4(%edi)
13184+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13185+ movl $__per_cpu_end - 1,%eax
13186+ subl $__per_cpu_start,%eax
13187+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13188+#endif
13189+
13190+#ifdef CONFIG_PAX_MEMORY_UDEREF
13191+ movl $NR_CPUS,%ecx
13192+ movl $pa(cpu_gdt_table),%edi
13193+1:
13194+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13195+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13196+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13197+ addl $PAGE_SIZE_asm,%edi
13198+ loop 1b
13199+#endif
13200+
13201+#ifdef CONFIG_PAX_KERNEXEC
13202+ movl $pa(boot_gdt),%edi
13203+ movl $__LOAD_PHYSICAL_ADDR,%eax
13204+ movw %ax,__BOOT_CS + 2(%edi)
13205+ rorl $16,%eax
13206+ movb %al,__BOOT_CS + 4(%edi)
13207+ movb %ah,__BOOT_CS + 7(%edi)
13208+ rorl $16,%eax
13209+
13210+ ljmp $(__BOOT_CS),$1f
13211+1:
13212+
13213+ movl $NR_CPUS,%ecx
13214+ movl $pa(cpu_gdt_table),%edi
13215+ addl $__PAGE_OFFSET,%eax
13216+1:
13217+ movw %ax,__KERNEL_CS + 2(%edi)
13218+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13219+ rorl $16,%eax
13220+ movb %al,__KERNEL_CS + 4(%edi)
13221+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13222+ movb %ah,__KERNEL_CS + 7(%edi)
13223+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13224+ rorl $16,%eax
13225+ addl $PAGE_SIZE_asm,%edi
13226+ loop 1b
13227+#endif
13228+
13229 /*
13230 * Clear BSS first so that there are no surprises...
13231 */
13232@@ -195,8 +261,11 @@ ENTRY(startup_32)
13233 movl %eax, pa(max_pfn_mapped)
13234
13235 /* Do early initialization of the fixmap area */
13236- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13237- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13238+#ifdef CONFIG_COMPAT_VDSO
13239+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13240+#else
13241+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13242+#endif
13243 #else /* Not PAE */
13244
13245 page_pde_offset = (__PAGE_OFFSET >> 20);
13246@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13247 movl %eax, pa(max_pfn_mapped)
13248
13249 /* Do early initialization of the fixmap area */
13250- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13251- movl %eax,pa(initial_page_table+0xffc)
13252+#ifdef CONFIG_COMPAT_VDSO
13253+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13254+#else
13255+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13256+#endif
13257 #endif
13258
13259 #ifdef CONFIG_PARAVIRT
13260@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13261 cmpl $num_subarch_entries, %eax
13262 jae bad_subarch
13263
13264- movl pa(subarch_entries)(,%eax,4), %eax
13265- subl $__PAGE_OFFSET, %eax
13266- jmp *%eax
13267+ jmp *pa(subarch_entries)(,%eax,4)
13268
13269 bad_subarch:
13270 WEAK(lguest_entry)
13271@@ -255,10 +325,10 @@ WEAK(xen_entry)
13272 __INITDATA
13273
13274 subarch_entries:
13275- .long default_entry /* normal x86/PC */
13276- .long lguest_entry /* lguest hypervisor */
13277- .long xen_entry /* Xen hypervisor */
13278- .long default_entry /* Moorestown MID */
13279+ .long ta(default_entry) /* normal x86/PC */
13280+ .long ta(lguest_entry) /* lguest hypervisor */
13281+ .long ta(xen_entry) /* Xen hypervisor */
13282+ .long ta(default_entry) /* Moorestown MID */
13283 num_subarch_entries = (. - subarch_entries) / 4
13284 .previous
13285 #else
13286@@ -312,6 +382,7 @@ default_entry:
13287 orl %edx,%eax
13288 movl %eax,%cr4
13289
13290+#ifdef CONFIG_X86_PAE
13291 testb $X86_CR4_PAE, %al # check if PAE is enabled
13292 jz 6f
13293
13294@@ -340,6 +411,9 @@ default_entry:
13295 /* Make changes effective */
13296 wrmsr
13297
13298+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13299+#endif
13300+
13301 6:
13302
13303 /*
13304@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13305 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13306 movl %eax,%ss # after changing gdt.
13307
13308- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13309+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13310 movl %eax,%ds
13311 movl %eax,%es
13312
13313@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13314 */
13315 cmpb $0,ready
13316 jne 1f
13317- movl $gdt_page,%eax
13318+ movl $cpu_gdt_table,%eax
13319 movl $stack_canary,%ecx
13320+#ifdef CONFIG_SMP
13321+ addl $__per_cpu_load,%ecx
13322+#endif
13323 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13324 shrl $16, %ecx
13325 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13326 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13327 1:
13328-#endif
13329 movl $(__KERNEL_STACK_CANARY),%eax
13330+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13331+ movl $(__USER_DS),%eax
13332+#else
13333+ xorl %eax,%eax
13334+#endif
13335 movl %eax,%gs
13336
13337 xorl %eax,%eax # Clear LDT
13338@@ -558,22 +639,22 @@ early_page_fault:
13339 jmp early_fault
13340
13341 early_fault:
13342- cld
13343 #ifdef CONFIG_PRINTK
13344+ cmpl $1,%ss:early_recursion_flag
13345+ je hlt_loop
13346+ incl %ss:early_recursion_flag
13347+ cld
13348 pusha
13349 movl $(__KERNEL_DS),%eax
13350 movl %eax,%ds
13351 movl %eax,%es
13352- cmpl $2,early_recursion_flag
13353- je hlt_loop
13354- incl early_recursion_flag
13355 movl %cr2,%eax
13356 pushl %eax
13357 pushl %edx /* trapno */
13358 pushl $fault_msg
13359 call printk
13360+; call dump_stack
13361 #endif
13362- call dump_stack
13363 hlt_loop:
13364 hlt
13365 jmp hlt_loop
13366@@ -581,8 +662,11 @@ hlt_loop:
13367 /* This is the default interrupt "handler" :-) */
13368 ALIGN
13369 ignore_int:
13370- cld
13371 #ifdef CONFIG_PRINTK
13372+ cmpl $2,%ss:early_recursion_flag
13373+ je hlt_loop
13374+ incl %ss:early_recursion_flag
13375+ cld
13376 pushl %eax
13377 pushl %ecx
13378 pushl %edx
13379@@ -591,9 +675,6 @@ ignore_int:
13380 movl $(__KERNEL_DS),%eax
13381 movl %eax,%ds
13382 movl %eax,%es
13383- cmpl $2,early_recursion_flag
13384- je hlt_loop
13385- incl early_recursion_flag
13386 pushl 16(%esp)
13387 pushl 24(%esp)
13388 pushl 32(%esp)
13389@@ -622,29 +703,43 @@ ENTRY(initial_code)
13390 /*
13391 * BSS section
13392 */
13393-__PAGE_ALIGNED_BSS
13394- .align PAGE_SIZE
13395 #ifdef CONFIG_X86_PAE
13396+.section .initial_pg_pmd,"a",@progbits
13397 initial_pg_pmd:
13398 .fill 1024*KPMDS,4,0
13399 #else
13400+.section .initial_page_table,"a",@progbits
13401 ENTRY(initial_page_table)
13402 .fill 1024,4,0
13403 #endif
13404+.section .initial_pg_fixmap,"a",@progbits
13405 initial_pg_fixmap:
13406 .fill 1024,4,0
13407+.section .empty_zero_page,"a",@progbits
13408 ENTRY(empty_zero_page)
13409 .fill 4096,1,0
13410+.section .swapper_pg_dir,"a",@progbits
13411 ENTRY(swapper_pg_dir)
13412+#ifdef CONFIG_X86_PAE
13413+ .fill 4,8,0
13414+#else
13415 .fill 1024,4,0
13416+#endif
13417+
13418+/*
13419+ * The IDT has to be page-aligned to simplify the Pentium
13420+ * F0 0F bug workaround.. We have a special link segment
13421+ * for this.
13422+ */
13423+.section .idt,"a",@progbits
13424+ENTRY(idt_table)
13425+ .fill 256,8,0
13426
13427 /*
13428 * This starts the data section.
13429 */
13430 #ifdef CONFIG_X86_PAE
13431-__PAGE_ALIGNED_DATA
13432- /* Page-aligned for the benefit of paravirt? */
13433- .align PAGE_SIZE
13434+.section .initial_page_table,"a",@progbits
13435 ENTRY(initial_page_table)
13436 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13437 # if KPMDS == 3
13438@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13439 # error "Kernel PMDs should be 1, 2 or 3"
13440 # endif
13441 .align PAGE_SIZE /* needs to be page-sized too */
13442+
13443+#ifdef CONFIG_PAX_PER_CPU_PGD
13444+ENTRY(cpu_pgd)
13445+ .rept NR_CPUS
13446+ .fill 4,8,0
13447+ .endr
13448+#endif
13449+
13450 #endif
13451
13452 .data
13453 .balign 4
13454 ENTRY(stack_start)
13455- .long init_thread_union+THREAD_SIZE
13456+ .long init_thread_union+THREAD_SIZE-8
13457+
13458+ready: .byte 0
13459
13460+.section .rodata,"a",@progbits
13461 early_recursion_flag:
13462 .long 0
13463
13464-ready: .byte 0
13465-
13466 int_msg:
13467 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13468
13469@@ -707,7 +811,7 @@ fault_msg:
13470 .word 0 # 32 bit align gdt_desc.address
13471 boot_gdt_descr:
13472 .word __BOOT_DS+7
13473- .long boot_gdt - __PAGE_OFFSET
13474+ .long pa(boot_gdt)
13475
13476 .word 0 # 32-bit align idt_desc.address
13477 idt_descr:
13478@@ -718,7 +822,7 @@ idt_descr:
13479 .word 0 # 32 bit align gdt_desc.address
13480 ENTRY(early_gdt_descr)
13481 .word GDT_ENTRIES*8-1
13482- .long gdt_page /* Overwritten for secondary CPUs */
13483+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13484
13485 /*
13486 * The boot_gdt must mirror the equivalent in setup.S and is
13487@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13488 .align L1_CACHE_BYTES
13489 ENTRY(boot_gdt)
13490 .fill GDT_ENTRY_BOOT_CS,8,0
13491- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13492- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13493+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13494+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13495+
13496+ .align PAGE_SIZE_asm
13497+ENTRY(cpu_gdt_table)
13498+ .rept NR_CPUS
13499+ .quad 0x0000000000000000 /* NULL descriptor */
13500+ .quad 0x0000000000000000 /* 0x0b reserved */
13501+ .quad 0x0000000000000000 /* 0x13 reserved */
13502+ .quad 0x0000000000000000 /* 0x1b reserved */
13503+
13504+#ifdef CONFIG_PAX_KERNEXEC
13505+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13506+#else
13507+ .quad 0x0000000000000000 /* 0x20 unused */
13508+#endif
13509+
13510+ .quad 0x0000000000000000 /* 0x28 unused */
13511+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13512+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13513+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13514+ .quad 0x0000000000000000 /* 0x4b reserved */
13515+ .quad 0x0000000000000000 /* 0x53 reserved */
13516+ .quad 0x0000000000000000 /* 0x5b reserved */
13517+
13518+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13519+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13520+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13521+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13522+
13523+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13524+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13525+
13526+ /*
13527+ * Segments used for calling PnP BIOS have byte granularity.
13528+ * The code segments and data segments have fixed 64k limits,
13529+ * the transfer segment sizes are set at run time.
13530+ */
13531+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13532+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13533+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13534+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13535+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13536+
13537+ /*
13538+ * The APM segments have byte granularity and their bases
13539+ * are set at run time. All have 64k limits.
13540+ */
13541+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13542+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13543+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13544+
13545+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13546+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13547+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13548+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13549+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13550+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13551+
13552+ /* Be sure this is zeroed to avoid false validations in Xen */
13553+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13554+ .endr
13555diff -urNp linux-3.0.7/arch/x86/kernel/head_64.S linux-3.0.7/arch/x86/kernel/head_64.S
13556--- linux-3.0.7/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13557+++ linux-3.0.7/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13558@@ -19,6 +19,7 @@
13559 #include <asm/cache.h>
13560 #include <asm/processor-flags.h>
13561 #include <asm/percpu.h>
13562+#include <asm/cpufeature.h>
13563
13564 #ifdef CONFIG_PARAVIRT
13565 #include <asm/asm-offsets.h>
13566@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13567 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13568 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13569 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13570+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13571+L3_VMALLOC_START = pud_index(VMALLOC_START)
13572+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13573+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13574
13575 .text
13576 __HEAD
13577@@ -85,35 +90,22 @@ startup_64:
13578 */
13579 addq %rbp, init_level4_pgt + 0(%rip)
13580 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13581+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13582+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13583 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13584
13585 addq %rbp, level3_ident_pgt + 0(%rip)
13586+#ifndef CONFIG_XEN
13587+ addq %rbp, level3_ident_pgt + 8(%rip)
13588+#endif
13589
13590- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13591- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13592+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13593
13594- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13595+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13596+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13597
13598- /* Add an Identity mapping if I am above 1G */
13599- leaq _text(%rip), %rdi
13600- andq $PMD_PAGE_MASK, %rdi
13601-
13602- movq %rdi, %rax
13603- shrq $PUD_SHIFT, %rax
13604- andq $(PTRS_PER_PUD - 1), %rax
13605- jz ident_complete
13606-
13607- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13608- leaq level3_ident_pgt(%rip), %rbx
13609- movq %rdx, 0(%rbx, %rax, 8)
13610-
13611- movq %rdi, %rax
13612- shrq $PMD_SHIFT, %rax
13613- andq $(PTRS_PER_PMD - 1), %rax
13614- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13615- leaq level2_spare_pgt(%rip), %rbx
13616- movq %rdx, 0(%rbx, %rax, 8)
13617-ident_complete:
13618+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13619+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13620
13621 /*
13622 * Fixup the kernel text+data virtual addresses. Note that
13623@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13624 * after the boot processor executes this code.
13625 */
13626
13627- /* Enable PAE mode and PGE */
13628- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13629+ /* Enable PAE mode and PSE/PGE */
13630+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13631 movq %rax, %cr4
13632
13633 /* Setup early boot stage 4 level pagetables. */
13634@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13635 movl $MSR_EFER, %ecx
13636 rdmsr
13637 btsl $_EFER_SCE, %eax /* Enable System Call */
13638- btl $20,%edi /* No Execute supported? */
13639+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13640 jnc 1f
13641 btsl $_EFER_NX, %eax
13642+ leaq init_level4_pgt(%rip), %rdi
13643+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13644+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13645+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13646+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13647 1: wrmsr /* Make changes effective */
13648
13649 /* Setup cr0 */
13650@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13651 bad_address:
13652 jmp bad_address
13653
13654- .section ".init.text","ax"
13655+ __INIT
13656 #ifdef CONFIG_EARLY_PRINTK
13657 .globl early_idt_handlers
13658 early_idt_handlers:
13659@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13660 #endif /* EARLY_PRINTK */
13661 1: hlt
13662 jmp 1b
13663+ .previous
13664
13665 #ifdef CONFIG_EARLY_PRINTK
13666+ __INITDATA
13667 early_recursion_flag:
13668 .long 0
13669+ .previous
13670
13671+ .section .rodata,"a",@progbits
13672 early_idt_msg:
13673 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13674 early_idt_ripmsg:
13675 .asciz "RIP %s\n"
13676-#endif /* CONFIG_EARLY_PRINTK */
13677 .previous
13678+#endif /* CONFIG_EARLY_PRINTK */
13679
13680+ .section .rodata,"a",@progbits
13681 #define NEXT_PAGE(name) \
13682 .balign PAGE_SIZE; \
13683 ENTRY(name)
13684@@ -338,7 +340,6 @@ ENTRY(name)
13685 i = i + 1 ; \
13686 .endr
13687
13688- .data
13689 /*
13690 * This default setting generates an ident mapping at address 0x100000
13691 * and a mapping for the kernel that precisely maps virtual address
13692@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13693 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13694 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13695 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13696+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13697+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13698+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13699+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13700 .org init_level4_pgt + L4_START_KERNEL*8, 0
13701 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13702 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13703
13704+#ifdef CONFIG_PAX_PER_CPU_PGD
13705+NEXT_PAGE(cpu_pgd)
13706+ .rept NR_CPUS
13707+ .fill 512,8,0
13708+ .endr
13709+#endif
13710+
13711 NEXT_PAGE(level3_ident_pgt)
13712 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13713+#ifdef CONFIG_XEN
13714 .fill 511,8,0
13715+#else
13716+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13717+ .fill 510,8,0
13718+#endif
13719+
13720+NEXT_PAGE(level3_vmalloc_pgt)
13721+ .fill 512,8,0
13722+
13723+NEXT_PAGE(level3_vmemmap_pgt)
13724+ .fill L3_VMEMMAP_START,8,0
13725+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13726
13727 NEXT_PAGE(level3_kernel_pgt)
13728 .fill L3_START_KERNEL,8,0
13729@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13730 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13731 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13732
13733+NEXT_PAGE(level2_vmemmap_pgt)
13734+ .fill 512,8,0
13735+
13736 NEXT_PAGE(level2_fixmap_pgt)
13737- .fill 506,8,0
13738- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13739- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13740- .fill 5,8,0
13741+ .fill 507,8,0
13742+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13743+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13744+ .fill 4,8,0
13745
13746-NEXT_PAGE(level1_fixmap_pgt)
13747+NEXT_PAGE(level1_vsyscall_pgt)
13748 .fill 512,8,0
13749
13750-NEXT_PAGE(level2_ident_pgt)
13751- /* Since I easily can, map the first 1G.
13752+ /* Since I easily can, map the first 2G.
13753 * Don't set NX because code runs from these pages.
13754 */
13755- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13756+NEXT_PAGE(level2_ident_pgt)
13757+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13758
13759 NEXT_PAGE(level2_kernel_pgt)
13760 /*
13761@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13762 * If you want to increase this then increase MODULES_VADDR
13763 * too.)
13764 */
13765- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13766- KERNEL_IMAGE_SIZE/PMD_SIZE)
13767-
13768-NEXT_PAGE(level2_spare_pgt)
13769- .fill 512, 8, 0
13770+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13771
13772 #undef PMDS
13773 #undef NEXT_PAGE
13774
13775- .data
13776+ .align PAGE_SIZE
13777+ENTRY(cpu_gdt_table)
13778+ .rept NR_CPUS
13779+ .quad 0x0000000000000000 /* NULL descriptor */
13780+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13781+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13782+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13783+ .quad 0x00cffb000000ffff /* __USER32_CS */
13784+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13785+ .quad 0x00affb000000ffff /* __USER_CS */
13786+
13787+#ifdef CONFIG_PAX_KERNEXEC
13788+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13789+#else
13790+ .quad 0x0 /* unused */
13791+#endif
13792+
13793+ .quad 0,0 /* TSS */
13794+ .quad 0,0 /* LDT */
13795+ .quad 0,0,0 /* three TLS descriptors */
13796+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13797+ /* asm/segment.h:GDT_ENTRIES must match this */
13798+
13799+ /* zero the remaining page */
13800+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13801+ .endr
13802+
13803 .align 16
13804 .globl early_gdt_descr
13805 early_gdt_descr:
13806 .word GDT_ENTRIES*8-1
13807 early_gdt_descr_base:
13808- .quad INIT_PER_CPU_VAR(gdt_page)
13809+ .quad cpu_gdt_table
13810
13811 ENTRY(phys_base)
13812 /* This must match the first entry in level2_kernel_pgt */
13813 .quad 0x0000000000000000
13814
13815 #include "../../x86/xen/xen-head.S"
13816-
13817- .section .bss, "aw", @nobits
13818+
13819+ .section .rodata,"a",@progbits
13820 .align L1_CACHE_BYTES
13821 ENTRY(idt_table)
13822- .skip IDT_ENTRIES * 16
13823+ .fill 512,8,0
13824
13825 __PAGE_ALIGNED_BSS
13826 .align PAGE_SIZE
13827diff -urNp linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c
13828--- linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13829+++ linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13830@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13831 EXPORT_SYMBOL(cmpxchg8b_emu);
13832 #endif
13833
13834+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13835+
13836 /* Networking helper routines. */
13837 EXPORT_SYMBOL(csum_partial_copy_generic);
13838+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13839+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13840
13841 EXPORT_SYMBOL(__get_user_1);
13842 EXPORT_SYMBOL(__get_user_2);
13843@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13844
13845 EXPORT_SYMBOL(csum_partial);
13846 EXPORT_SYMBOL(empty_zero_page);
13847+
13848+#ifdef CONFIG_PAX_KERNEXEC
13849+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13850+#endif
13851diff -urNp linux-3.0.7/arch/x86/kernel/i8259.c linux-3.0.7/arch/x86/kernel/i8259.c
13852--- linux-3.0.7/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13853+++ linux-3.0.7/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13854@@ -210,7 +210,7 @@ spurious_8259A_irq:
13855 "spurious 8259A interrupt: IRQ%d.\n", irq);
13856 spurious_irq_mask |= irqmask;
13857 }
13858- atomic_inc(&irq_err_count);
13859+ atomic_inc_unchecked(&irq_err_count);
13860 /*
13861 * Theoretically we do not have to handle this IRQ,
13862 * but in Linux this does not cause problems and is
13863diff -urNp linux-3.0.7/arch/x86/kernel/init_task.c linux-3.0.7/arch/x86/kernel/init_task.c
13864--- linux-3.0.7/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13865+++ linux-3.0.7/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13866@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13867 * way process stacks are handled. This is done by having a special
13868 * "init_task" linker map entry..
13869 */
13870-union thread_union init_thread_union __init_task_data =
13871- { INIT_THREAD_INFO(init_task) };
13872+union thread_union init_thread_union __init_task_data;
13873
13874 /*
13875 * Initial task structure.
13876@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13877 * section. Since TSS's are completely CPU-local, we want them
13878 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13879 */
13880-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13881-
13882+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13883+EXPORT_SYMBOL(init_tss);
13884diff -urNp linux-3.0.7/arch/x86/kernel/ioport.c linux-3.0.7/arch/x86/kernel/ioport.c
13885--- linux-3.0.7/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13886+++ linux-3.0.7/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13887@@ -6,6 +6,7 @@
13888 #include <linux/sched.h>
13889 #include <linux/kernel.h>
13890 #include <linux/capability.h>
13891+#include <linux/security.h>
13892 #include <linux/errno.h>
13893 #include <linux/types.h>
13894 #include <linux/ioport.h>
13895@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13896
13897 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13898 return -EINVAL;
13899+#ifdef CONFIG_GRKERNSEC_IO
13900+ if (turn_on && grsec_disable_privio) {
13901+ gr_handle_ioperm();
13902+ return -EPERM;
13903+ }
13904+#endif
13905 if (turn_on && !capable(CAP_SYS_RAWIO))
13906 return -EPERM;
13907
13908@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13909 * because the ->io_bitmap_max value must match the bitmap
13910 * contents:
13911 */
13912- tss = &per_cpu(init_tss, get_cpu());
13913+ tss = init_tss + get_cpu();
13914
13915 if (turn_on)
13916 bitmap_clear(t->io_bitmap_ptr, from, num);
13917@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13918 return -EINVAL;
13919 /* Trying to gain more privileges? */
13920 if (level > old) {
13921+#ifdef CONFIG_GRKERNSEC_IO
13922+ if (grsec_disable_privio) {
13923+ gr_handle_iopl();
13924+ return -EPERM;
13925+ }
13926+#endif
13927 if (!capable(CAP_SYS_RAWIO))
13928 return -EPERM;
13929 }
13930diff -urNp linux-3.0.7/arch/x86/kernel/irq.c linux-3.0.7/arch/x86/kernel/irq.c
13931--- linux-3.0.7/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13932+++ linux-3.0.7/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13933@@ -17,7 +17,7 @@
13934 #include <asm/mce.h>
13935 #include <asm/hw_irq.h>
13936
13937-atomic_t irq_err_count;
13938+atomic_unchecked_t irq_err_count;
13939
13940 /* Function pointer for generic interrupt vector handling */
13941 void (*x86_platform_ipi_callback)(void) = NULL;
13942@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13943 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13944 seq_printf(p, " Machine check polls\n");
13945 #endif
13946- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13947+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13948 #if defined(CONFIG_X86_IO_APIC)
13949- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13950+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13951 #endif
13952 return 0;
13953 }
13954@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13955
13956 u64 arch_irq_stat(void)
13957 {
13958- u64 sum = atomic_read(&irq_err_count);
13959+ u64 sum = atomic_read_unchecked(&irq_err_count);
13960
13961 #ifdef CONFIG_X86_IO_APIC
13962- sum += atomic_read(&irq_mis_count);
13963+ sum += atomic_read_unchecked(&irq_mis_count);
13964 #endif
13965 return sum;
13966 }
13967diff -urNp linux-3.0.7/arch/x86/kernel/irq_32.c linux-3.0.7/arch/x86/kernel/irq_32.c
13968--- linux-3.0.7/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13969+++ linux-3.0.7/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13970@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13971 __asm__ __volatile__("andl %%esp,%0" :
13972 "=r" (sp) : "0" (THREAD_SIZE - 1));
13973
13974- return sp < (sizeof(struct thread_info) + STACK_WARN);
13975+ return sp < STACK_WARN;
13976 }
13977
13978 static void print_stack_overflow(void)
13979@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13980 * per-CPU IRQ handling contexts (thread information and stack)
13981 */
13982 union irq_ctx {
13983- struct thread_info tinfo;
13984- u32 stack[THREAD_SIZE/sizeof(u32)];
13985+ unsigned long previous_esp;
13986+ u32 stack[THREAD_SIZE/sizeof(u32)];
13987 } __attribute__((aligned(THREAD_SIZE)));
13988
13989 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13990@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13991 static inline int
13992 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13993 {
13994- union irq_ctx *curctx, *irqctx;
13995+ union irq_ctx *irqctx;
13996 u32 *isp, arg1, arg2;
13997
13998- curctx = (union irq_ctx *) current_thread_info();
13999 irqctx = __this_cpu_read(hardirq_ctx);
14000
14001 /*
14002@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14003 * handler) we can't do that and just have to keep using the
14004 * current stack (which is the irq stack already after all)
14005 */
14006- if (unlikely(curctx == irqctx))
14007+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14008 return 0;
14009
14010 /* build the stack frame on the IRQ stack */
14011- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14012- irqctx->tinfo.task = curctx->tinfo.task;
14013- irqctx->tinfo.previous_esp = current_stack_pointer;
14014+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14015+ irqctx->previous_esp = current_stack_pointer;
14016
14017- /*
14018- * Copy the softirq bits in preempt_count so that the
14019- * softirq checks work in the hardirq context.
14020- */
14021- irqctx->tinfo.preempt_count =
14022- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14023- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14024+#ifdef CONFIG_PAX_MEMORY_UDEREF
14025+ __set_fs(MAKE_MM_SEG(0));
14026+#endif
14027
14028 if (unlikely(overflow))
14029 call_on_stack(print_stack_overflow, isp);
14030@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14031 : "0" (irq), "1" (desc), "2" (isp),
14032 "D" (desc->handle_irq)
14033 : "memory", "cc", "ecx");
14034+
14035+#ifdef CONFIG_PAX_MEMORY_UDEREF
14036+ __set_fs(current_thread_info()->addr_limit);
14037+#endif
14038+
14039 return 1;
14040 }
14041
14042@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14043 */
14044 void __cpuinit irq_ctx_init(int cpu)
14045 {
14046- union irq_ctx *irqctx;
14047-
14048 if (per_cpu(hardirq_ctx, cpu))
14049 return;
14050
14051- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14052- THREAD_FLAGS,
14053- THREAD_ORDER));
14054- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14055- irqctx->tinfo.cpu = cpu;
14056- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14057- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14058-
14059- per_cpu(hardirq_ctx, cpu) = irqctx;
14060-
14061- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14062- THREAD_FLAGS,
14063- THREAD_ORDER));
14064- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14065- irqctx->tinfo.cpu = cpu;
14066- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14067-
14068- per_cpu(softirq_ctx, cpu) = irqctx;
14069+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14070+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14071
14072 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14073 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14074@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14075 asmlinkage void do_softirq(void)
14076 {
14077 unsigned long flags;
14078- struct thread_info *curctx;
14079 union irq_ctx *irqctx;
14080 u32 *isp;
14081
14082@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14083 local_irq_save(flags);
14084
14085 if (local_softirq_pending()) {
14086- curctx = current_thread_info();
14087 irqctx = __this_cpu_read(softirq_ctx);
14088- irqctx->tinfo.task = curctx->task;
14089- irqctx->tinfo.previous_esp = current_stack_pointer;
14090+ irqctx->previous_esp = current_stack_pointer;
14091
14092 /* build the stack frame on the softirq stack */
14093- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14094+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14095+
14096+#ifdef CONFIG_PAX_MEMORY_UDEREF
14097+ __set_fs(MAKE_MM_SEG(0));
14098+#endif
14099
14100 call_on_stack(__do_softirq, isp);
14101+
14102+#ifdef CONFIG_PAX_MEMORY_UDEREF
14103+ __set_fs(current_thread_info()->addr_limit);
14104+#endif
14105+
14106 /*
14107 * Shouldn't happen, we returned above if in_interrupt():
14108 */
14109diff -urNp linux-3.0.7/arch/x86/kernel/kgdb.c linux-3.0.7/arch/x86/kernel/kgdb.c
14110--- linux-3.0.7/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
14111+++ linux-3.0.7/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
14112@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14113 #ifdef CONFIG_X86_32
14114 switch (regno) {
14115 case GDB_SS:
14116- if (!user_mode_vm(regs))
14117+ if (!user_mode(regs))
14118 *(unsigned long *)mem = __KERNEL_DS;
14119 break;
14120 case GDB_SP:
14121- if (!user_mode_vm(regs))
14122+ if (!user_mode(regs))
14123 *(unsigned long *)mem = kernel_stack_pointer(regs);
14124 break;
14125 case GDB_GS:
14126@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14127 case 'k':
14128 /* clear the trace bit */
14129 linux_regs->flags &= ~X86_EFLAGS_TF;
14130- atomic_set(&kgdb_cpu_doing_single_step, -1);
14131+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14132
14133 /* set the trace bit if we're stepping */
14134 if (remcomInBuffer[0] == 's') {
14135 linux_regs->flags |= X86_EFLAGS_TF;
14136- atomic_set(&kgdb_cpu_doing_single_step,
14137+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14138 raw_smp_processor_id());
14139 }
14140
14141@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14142 return NOTIFY_DONE;
14143
14144 case DIE_DEBUG:
14145- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14146+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14147 if (user_mode(regs))
14148 return single_step_cont(regs, args);
14149 break;
14150diff -urNp linux-3.0.7/arch/x86/kernel/kprobes.c linux-3.0.7/arch/x86/kernel/kprobes.c
14151--- linux-3.0.7/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
14152+++ linux-3.0.7/arch/x86/kernel/kprobes.c 2011-10-11 10:44:33.000000000 -0400
14153@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
14154 } __attribute__((packed)) *insn;
14155
14156 insn = (struct __arch_relative_insn *)from;
14157+
14158+ pax_open_kernel();
14159 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14160 insn->op = op;
14161+ pax_close_kernel();
14162 }
14163
14164 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14165@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
14166 kprobe_opcode_t opcode;
14167 kprobe_opcode_t *orig_opcodes = opcodes;
14168
14169- if (search_exception_tables((unsigned long)opcodes))
14170+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14171 return 0; /* Page fault may occur on this address. */
14172
14173 retry:
14174@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
14175 }
14176 }
14177 insn_get_length(&insn);
14178+ pax_open_kernel();
14179 memcpy(dest, insn.kaddr, insn.length);
14180+ pax_close_kernel();
14181
14182 #ifdef CONFIG_X86_64
14183 if (insn_rip_relative(&insn)) {
14184@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
14185 (u8 *) dest;
14186 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14187 disp = (u8 *) dest + insn_offset_displacement(&insn);
14188+ pax_open_kernel();
14189 *(s32 *) disp = (s32) newdisp;
14190+ pax_close_kernel();
14191 }
14192 #endif
14193 return insn.length;
14194@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
14195 */
14196 __copy_instruction(p->ainsn.insn, p->addr, 0);
14197
14198- if (can_boost(p->addr))
14199+ if (can_boost(ktla_ktva(p->addr)))
14200 p->ainsn.boostable = 0;
14201 else
14202 p->ainsn.boostable = -1;
14203
14204- p->opcode = *p->addr;
14205+ p->opcode = *(ktla_ktva(p->addr));
14206 }
14207
14208 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14209@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
14210 * nor set current_kprobe, because it doesn't use single
14211 * stepping.
14212 */
14213- regs->ip = (unsigned long)p->ainsn.insn;
14214+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14215 preempt_enable_no_resched();
14216 return;
14217 }
14218@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
14219 if (p->opcode == BREAKPOINT_INSTRUCTION)
14220 regs->ip = (unsigned long)p->addr;
14221 else
14222- regs->ip = (unsigned long)p->ainsn.insn;
14223+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14224 }
14225
14226 /*
14227@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
14228 setup_singlestep(p, regs, kcb, 0);
14229 return 1;
14230 }
14231- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14232+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14233 /*
14234 * The breakpoint instruction was removed right
14235 * after we hit it. Another cpu has removed
14236@@ -680,6 +687,9 @@ static void __used __kprobes kretprobe_t
14237 " movq %rax, 152(%rsp)\n"
14238 RESTORE_REGS_STRING
14239 " popfq\n"
14240+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14241+ " btsq $63,(%rsp)\n"
14242+#endif
14243 #else
14244 " pushf\n"
14245 SAVE_REGS_STRING
14246@@ -817,7 +827,7 @@ static void __kprobes resume_execution(s
14247 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14248 {
14249 unsigned long *tos = stack_addr(regs);
14250- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14251+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14252 unsigned long orig_ip = (unsigned long)p->addr;
14253 kprobe_opcode_t *insn = p->ainsn.insn;
14254
14255@@ -999,7 +1009,7 @@ int __kprobes kprobe_exceptions_notify(s
14256 struct die_args *args = data;
14257 int ret = NOTIFY_DONE;
14258
14259- if (args->regs && user_mode_vm(args->regs))
14260+ if (args->regs && user_mode(args->regs))
14261 return ret;
14262
14263 switch (val) {
14264@@ -1381,7 +1391,7 @@ int __kprobes arch_prepare_optimized_kpr
14265 * Verify if the address gap is in 2GB range, because this uses
14266 * a relative jump.
14267 */
14268- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14269+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14270 if (abs(rel) > 0x7fffffff)
14271 return -ERANGE;
14272
14273@@ -1402,11 +1412,11 @@ int __kprobes arch_prepare_optimized_kpr
14274 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14275
14276 /* Set probe function call */
14277- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14278+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14279
14280 /* Set returning jmp instruction at the tail of out-of-line buffer */
14281 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14282- (u8 *)op->kp.addr + op->optinsn.size);
14283+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14284
14285 flush_icache_range((unsigned long) buf,
14286 (unsigned long) buf + TMPL_END_IDX +
14287@@ -1428,7 +1438,7 @@ static void __kprobes setup_optimize_kpr
14288 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14289
14290 /* Backup instructions which will be replaced by jump address */
14291- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14292+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14293 RELATIVE_ADDR_SIZE);
14294
14295 insn_buf[0] = RELATIVEJUMP_OPCODE;
14296diff -urNp linux-3.0.7/arch/x86/kernel/kvm.c linux-3.0.7/arch/x86/kernel/kvm.c
14297--- linux-3.0.7/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
14298+++ linux-3.0.7/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
14299@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
14300 pv_mmu_ops.set_pud = kvm_set_pud;
14301 #if PAGETABLE_LEVELS == 4
14302 pv_mmu_ops.set_pgd = kvm_set_pgd;
14303+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14304 #endif
14305 #endif
14306 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14307diff -urNp linux-3.0.7/arch/x86/kernel/ldt.c linux-3.0.7/arch/x86/kernel/ldt.c
14308--- linux-3.0.7/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
14309+++ linux-3.0.7/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
14310@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14311 if (reload) {
14312 #ifdef CONFIG_SMP
14313 preempt_disable();
14314- load_LDT(pc);
14315+ load_LDT_nolock(pc);
14316 if (!cpumask_equal(mm_cpumask(current->mm),
14317 cpumask_of(smp_processor_id())))
14318 smp_call_function(flush_ldt, current->mm, 1);
14319 preempt_enable();
14320 #else
14321- load_LDT(pc);
14322+ load_LDT_nolock(pc);
14323 #endif
14324 }
14325 if (oldsize) {
14326@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14327 return err;
14328
14329 for (i = 0; i < old->size; i++)
14330- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14331+ write_ldt_entry(new->ldt, i, old->ldt + i);
14332 return 0;
14333 }
14334
14335@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14336 retval = copy_ldt(&mm->context, &old_mm->context);
14337 mutex_unlock(&old_mm->context.lock);
14338 }
14339+
14340+ if (tsk == current) {
14341+ mm->context.vdso = 0;
14342+
14343+#ifdef CONFIG_X86_32
14344+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14345+ mm->context.user_cs_base = 0UL;
14346+ mm->context.user_cs_limit = ~0UL;
14347+
14348+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14349+ cpus_clear(mm->context.cpu_user_cs_mask);
14350+#endif
14351+
14352+#endif
14353+#endif
14354+
14355+ }
14356+
14357 return retval;
14358 }
14359
14360@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14361 }
14362 }
14363
14364+#ifdef CONFIG_PAX_SEGMEXEC
14365+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14366+ error = -EINVAL;
14367+ goto out_unlock;
14368+ }
14369+#endif
14370+
14371 fill_ldt(&ldt, &ldt_info);
14372 if (oldmode)
14373 ldt.avl = 0;
14374diff -urNp linux-3.0.7/arch/x86/kernel/machine_kexec_32.c linux-3.0.7/arch/x86/kernel/machine_kexec_32.c
14375--- linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
14376+++ linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
14377@@ -27,7 +27,7 @@
14378 #include <asm/cacheflush.h>
14379 #include <asm/debugreg.h>
14380
14381-static void set_idt(void *newidt, __u16 limit)
14382+static void set_idt(struct desc_struct *newidt, __u16 limit)
14383 {
14384 struct desc_ptr curidt;
14385
14386@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14387 }
14388
14389
14390-static void set_gdt(void *newgdt, __u16 limit)
14391+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14392 {
14393 struct desc_ptr curgdt;
14394
14395@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14396 }
14397
14398 control_page = page_address(image->control_code_page);
14399- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14400+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14401
14402 relocate_kernel_ptr = control_page;
14403 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14404diff -urNp linux-3.0.7/arch/x86/kernel/microcode_intel.c linux-3.0.7/arch/x86/kernel/microcode_intel.c
14405--- linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14406+++ linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14407@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14408
14409 static int get_ucode_user(void *to, const void *from, size_t n)
14410 {
14411- return copy_from_user(to, from, n);
14412+ return copy_from_user(to, (const void __force_user *)from, n);
14413 }
14414
14415 static enum ucode_state
14416 request_microcode_user(int cpu, const void __user *buf, size_t size)
14417 {
14418- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14419+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14420 }
14421
14422 static void microcode_fini_cpu(int cpu)
14423diff -urNp linux-3.0.7/arch/x86/kernel/module.c linux-3.0.7/arch/x86/kernel/module.c
14424--- linux-3.0.7/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14425+++ linux-3.0.7/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14426@@ -36,21 +36,66 @@
14427 #define DEBUGP(fmt...)
14428 #endif
14429
14430-void *module_alloc(unsigned long size)
14431+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14432 {
14433 if (PAGE_ALIGN(size) > MODULES_LEN)
14434 return NULL;
14435 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14436- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14437+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14438 -1, __builtin_return_address(0));
14439 }
14440
14441+void *module_alloc(unsigned long size)
14442+{
14443+
14444+#ifdef CONFIG_PAX_KERNEXEC
14445+ return __module_alloc(size, PAGE_KERNEL);
14446+#else
14447+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14448+#endif
14449+
14450+}
14451+
14452 /* Free memory returned from module_alloc */
14453 void module_free(struct module *mod, void *module_region)
14454 {
14455 vfree(module_region);
14456 }
14457
14458+#ifdef CONFIG_PAX_KERNEXEC
14459+#ifdef CONFIG_X86_32
14460+void *module_alloc_exec(unsigned long size)
14461+{
14462+ struct vm_struct *area;
14463+
14464+ if (size == 0)
14465+ return NULL;
14466+
14467+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14468+ return area ? area->addr : NULL;
14469+}
14470+EXPORT_SYMBOL(module_alloc_exec);
14471+
14472+void module_free_exec(struct module *mod, void *module_region)
14473+{
14474+ vunmap(module_region);
14475+}
14476+EXPORT_SYMBOL(module_free_exec);
14477+#else
14478+void module_free_exec(struct module *mod, void *module_region)
14479+{
14480+ module_free(mod, module_region);
14481+}
14482+EXPORT_SYMBOL(module_free_exec);
14483+
14484+void *module_alloc_exec(unsigned long size)
14485+{
14486+ return __module_alloc(size, PAGE_KERNEL_RX);
14487+}
14488+EXPORT_SYMBOL(module_alloc_exec);
14489+#endif
14490+#endif
14491+
14492 /* We don't need anything special. */
14493 int module_frob_arch_sections(Elf_Ehdr *hdr,
14494 Elf_Shdr *sechdrs,
14495@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14496 unsigned int i;
14497 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14498 Elf32_Sym *sym;
14499- uint32_t *location;
14500+ uint32_t *plocation, location;
14501
14502 DEBUGP("Applying relocate section %u to %u\n", relsec,
14503 sechdrs[relsec].sh_info);
14504 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14505 /* This is where to make the change */
14506- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14507- + rel[i].r_offset;
14508+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14509+ location = (uint32_t)plocation;
14510+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14511+ plocation = ktla_ktva((void *)plocation);
14512 /* This is the symbol it is referring to. Note that all
14513 undefined symbols have been resolved. */
14514 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14515@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14516 switch (ELF32_R_TYPE(rel[i].r_info)) {
14517 case R_386_32:
14518 /* We add the value into the location given */
14519- *location += sym->st_value;
14520+ pax_open_kernel();
14521+ *plocation += sym->st_value;
14522+ pax_close_kernel();
14523 break;
14524 case R_386_PC32:
14525 /* Add the value, subtract its postition */
14526- *location += sym->st_value - (uint32_t)location;
14527+ pax_open_kernel();
14528+ *plocation += sym->st_value - location;
14529+ pax_close_kernel();
14530 break;
14531 default:
14532 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14533@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14534 case R_X86_64_NONE:
14535 break;
14536 case R_X86_64_64:
14537+ pax_open_kernel();
14538 *(u64 *)loc = val;
14539+ pax_close_kernel();
14540 break;
14541 case R_X86_64_32:
14542+ pax_open_kernel();
14543 *(u32 *)loc = val;
14544+ pax_close_kernel();
14545 if (val != *(u32 *)loc)
14546 goto overflow;
14547 break;
14548 case R_X86_64_32S:
14549+ pax_open_kernel();
14550 *(s32 *)loc = val;
14551+ pax_close_kernel();
14552 if ((s64)val != *(s32 *)loc)
14553 goto overflow;
14554 break;
14555 case R_X86_64_PC32:
14556 val -= (u64)loc;
14557+ pax_open_kernel();
14558 *(u32 *)loc = val;
14559+ pax_close_kernel();
14560+
14561 #if 0
14562 if ((s64)val != *(s32 *)loc)
14563 goto overflow;
14564diff -urNp linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c
14565--- linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14566+++ linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14567@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14568 arch_spin_lock(lock);
14569 }
14570
14571-struct pv_lock_ops pv_lock_ops = {
14572+struct pv_lock_ops pv_lock_ops __read_only = {
14573 #ifdef CONFIG_SMP
14574 .spin_is_locked = __ticket_spin_is_locked,
14575 .spin_is_contended = __ticket_spin_is_contended,
14576diff -urNp linux-3.0.7/arch/x86/kernel/paravirt.c linux-3.0.7/arch/x86/kernel/paravirt.c
14577--- linux-3.0.7/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14578+++ linux-3.0.7/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14579@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14580 {
14581 return x;
14582 }
14583+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14584+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14585+#endif
14586
14587 void __init default_banner(void)
14588 {
14589@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14590 * corresponding structure. */
14591 static void *get_call_destination(u8 type)
14592 {
14593- struct paravirt_patch_template tmpl = {
14594+ const struct paravirt_patch_template tmpl = {
14595 .pv_init_ops = pv_init_ops,
14596 .pv_time_ops = pv_time_ops,
14597 .pv_cpu_ops = pv_cpu_ops,
14598@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14599 .pv_lock_ops = pv_lock_ops,
14600 #endif
14601 };
14602+
14603+ pax_track_stack();
14604+
14605 return *((void **)&tmpl + type);
14606 }
14607
14608@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14609 if (opfunc == NULL)
14610 /* If there's no function, patch it with a ud2a (BUG) */
14611 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14612- else if (opfunc == _paravirt_nop)
14613+ else if (opfunc == (void *)_paravirt_nop)
14614 /* If the operation is a nop, then nop the callsite */
14615 ret = paravirt_patch_nop();
14616
14617 /* identity functions just return their single argument */
14618- else if (opfunc == _paravirt_ident_32)
14619+ else if (opfunc == (void *)_paravirt_ident_32)
14620 ret = paravirt_patch_ident_32(insnbuf, len);
14621- else if (opfunc == _paravirt_ident_64)
14622+ else if (opfunc == (void *)_paravirt_ident_64)
14623 ret = paravirt_patch_ident_64(insnbuf, len);
14624+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14625+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14626+ ret = paravirt_patch_ident_64(insnbuf, len);
14627+#endif
14628
14629 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14630 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14631@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14632 if (insn_len > len || start == NULL)
14633 insn_len = len;
14634 else
14635- memcpy(insnbuf, start, insn_len);
14636+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14637
14638 return insn_len;
14639 }
14640@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14641 preempt_enable();
14642 }
14643
14644-struct pv_info pv_info = {
14645+struct pv_info pv_info __read_only = {
14646 .name = "bare hardware",
14647 .paravirt_enabled = 0,
14648 .kernel_rpl = 0,
14649 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14650 };
14651
14652-struct pv_init_ops pv_init_ops = {
14653+struct pv_init_ops pv_init_ops __read_only = {
14654 .patch = native_patch,
14655 };
14656
14657-struct pv_time_ops pv_time_ops = {
14658+struct pv_time_ops pv_time_ops __read_only = {
14659 .sched_clock = native_sched_clock,
14660 };
14661
14662-struct pv_irq_ops pv_irq_ops = {
14663+struct pv_irq_ops pv_irq_ops __read_only = {
14664 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14665 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14666 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14667@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14668 #endif
14669 };
14670
14671-struct pv_cpu_ops pv_cpu_ops = {
14672+struct pv_cpu_ops pv_cpu_ops __read_only = {
14673 .cpuid = native_cpuid,
14674 .get_debugreg = native_get_debugreg,
14675 .set_debugreg = native_set_debugreg,
14676@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14677 .end_context_switch = paravirt_nop,
14678 };
14679
14680-struct pv_apic_ops pv_apic_ops = {
14681+struct pv_apic_ops pv_apic_ops __read_only = {
14682 #ifdef CONFIG_X86_LOCAL_APIC
14683 .startup_ipi_hook = paravirt_nop,
14684 #endif
14685 };
14686
14687-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14688+#ifdef CONFIG_X86_32
14689+#ifdef CONFIG_X86_PAE
14690+/* 64-bit pagetable entries */
14691+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14692+#else
14693 /* 32-bit pagetable entries */
14694 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14695+#endif
14696 #else
14697 /* 64-bit pagetable entries */
14698 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14699 #endif
14700
14701-struct pv_mmu_ops pv_mmu_ops = {
14702+struct pv_mmu_ops pv_mmu_ops __read_only = {
14703
14704 .read_cr2 = native_read_cr2,
14705 .write_cr2 = native_write_cr2,
14706@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14707 .make_pud = PTE_IDENT,
14708
14709 .set_pgd = native_set_pgd,
14710+ .set_pgd_batched = native_set_pgd_batched,
14711 #endif
14712 #endif /* PAGETABLE_LEVELS >= 3 */
14713
14714@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14715 },
14716
14717 .set_fixmap = native_set_fixmap,
14718+
14719+#ifdef CONFIG_PAX_KERNEXEC
14720+ .pax_open_kernel = native_pax_open_kernel,
14721+ .pax_close_kernel = native_pax_close_kernel,
14722+#endif
14723+
14724 };
14725
14726 EXPORT_SYMBOL_GPL(pv_time_ops);
14727diff -urNp linux-3.0.7/arch/x86/kernel/pci-iommu_table.c linux-3.0.7/arch/x86/kernel/pci-iommu_table.c
14728--- linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14729+++ linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14730@@ -2,7 +2,7 @@
14731 #include <asm/iommu_table.h>
14732 #include <linux/string.h>
14733 #include <linux/kallsyms.h>
14734-
14735+#include <linux/sched.h>
14736
14737 #define DEBUG 1
14738
14739@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14740 {
14741 struct iommu_table_entry *p, *q, *x;
14742
14743+ pax_track_stack();
14744+
14745 /* Simple cyclic dependency checker. */
14746 for (p = start; p < finish; p++) {
14747 q = find_dependents_of(start, finish, p);
14748diff -urNp linux-3.0.7/arch/x86/kernel/process.c linux-3.0.7/arch/x86/kernel/process.c
14749--- linux-3.0.7/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14750+++ linux-3.0.7/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14751@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14752
14753 void free_thread_info(struct thread_info *ti)
14754 {
14755- free_thread_xstate(ti->task);
14756 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14757 }
14758
14759+static struct kmem_cache *task_struct_cachep;
14760+
14761 void arch_task_cache_init(void)
14762 {
14763- task_xstate_cachep =
14764- kmem_cache_create("task_xstate", xstate_size,
14765+ /* create a slab on which task_structs can be allocated */
14766+ task_struct_cachep =
14767+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14768+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14769+
14770+ task_xstate_cachep =
14771+ kmem_cache_create("task_xstate", xstate_size,
14772 __alignof__(union thread_xstate),
14773- SLAB_PANIC | SLAB_NOTRACK, NULL);
14774+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14775+}
14776+
14777+struct task_struct *alloc_task_struct_node(int node)
14778+{
14779+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14780+}
14781+
14782+void free_task_struct(struct task_struct *task)
14783+{
14784+ free_thread_xstate(task);
14785+ kmem_cache_free(task_struct_cachep, task);
14786 }
14787
14788 /*
14789@@ -70,7 +87,7 @@ void exit_thread(void)
14790 unsigned long *bp = t->io_bitmap_ptr;
14791
14792 if (bp) {
14793- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14794+ struct tss_struct *tss = init_tss + get_cpu();
14795
14796 t->io_bitmap_ptr = NULL;
14797 clear_thread_flag(TIF_IO_BITMAP);
14798@@ -106,7 +123,7 @@ void show_regs_common(void)
14799
14800 printk(KERN_CONT "\n");
14801 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14802- current->pid, current->comm, print_tainted(),
14803+ task_pid_nr(current), current->comm, print_tainted(),
14804 init_utsname()->release,
14805 (int)strcspn(init_utsname()->version, " "),
14806 init_utsname()->version);
14807@@ -120,6 +137,9 @@ void flush_thread(void)
14808 {
14809 struct task_struct *tsk = current;
14810
14811+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14812+ loadsegment(gs, 0);
14813+#endif
14814 flush_ptrace_hw_breakpoint(tsk);
14815 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14816 /*
14817@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14818 regs.di = (unsigned long) arg;
14819
14820 #ifdef CONFIG_X86_32
14821- regs.ds = __USER_DS;
14822- regs.es = __USER_DS;
14823+ regs.ds = __KERNEL_DS;
14824+ regs.es = __KERNEL_DS;
14825 regs.fs = __KERNEL_PERCPU;
14826- regs.gs = __KERNEL_STACK_CANARY;
14827+ savesegment(gs, regs.gs);
14828 #else
14829 regs.ss = __KERNEL_DS;
14830 #endif
14831@@ -403,7 +423,7 @@ void default_idle(void)
14832 EXPORT_SYMBOL(default_idle);
14833 #endif
14834
14835-void stop_this_cpu(void *dummy)
14836+__noreturn void stop_this_cpu(void *dummy)
14837 {
14838 local_irq_disable();
14839 /*
14840@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14841 }
14842 early_param("idle", idle_setup);
14843
14844-unsigned long arch_align_stack(unsigned long sp)
14845+#ifdef CONFIG_PAX_RANDKSTACK
14846+void pax_randomize_kstack(struct pt_regs *regs)
14847 {
14848- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14849- sp -= get_random_int() % 8192;
14850- return sp & ~0xf;
14851-}
14852+ struct thread_struct *thread = &current->thread;
14853+ unsigned long time;
14854
14855-unsigned long arch_randomize_brk(struct mm_struct *mm)
14856-{
14857- unsigned long range_end = mm->brk + 0x02000000;
14858- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14859-}
14860+ if (!randomize_va_space)
14861+ return;
14862+
14863+ if (v8086_mode(regs))
14864+ return;
14865
14866+ rdtscl(time);
14867+
14868+ /* P4 seems to return a 0 LSB, ignore it */
14869+#ifdef CONFIG_MPENTIUM4
14870+ time &= 0x3EUL;
14871+ time <<= 2;
14872+#elif defined(CONFIG_X86_64)
14873+ time &= 0xFUL;
14874+ time <<= 4;
14875+#else
14876+ time &= 0x1FUL;
14877+ time <<= 3;
14878+#endif
14879+
14880+ thread->sp0 ^= time;
14881+ load_sp0(init_tss + smp_processor_id(), thread);
14882+
14883+#ifdef CONFIG_X86_64
14884+ percpu_write(kernel_stack, thread->sp0);
14885+#endif
14886+}
14887+#endif
14888diff -urNp linux-3.0.7/arch/x86/kernel/process_32.c linux-3.0.7/arch/x86/kernel/process_32.c
14889--- linux-3.0.7/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14890+++ linux-3.0.7/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14891@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14892 unsigned long thread_saved_pc(struct task_struct *tsk)
14893 {
14894 return ((unsigned long *)tsk->thread.sp)[3];
14895+//XXX return tsk->thread.eip;
14896 }
14897
14898 #ifndef CONFIG_SMP
14899@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14900 unsigned long sp;
14901 unsigned short ss, gs;
14902
14903- if (user_mode_vm(regs)) {
14904+ if (user_mode(regs)) {
14905 sp = regs->sp;
14906 ss = regs->ss & 0xffff;
14907- gs = get_user_gs(regs);
14908 } else {
14909 sp = kernel_stack_pointer(regs);
14910 savesegment(ss, ss);
14911- savesegment(gs, gs);
14912 }
14913+ gs = get_user_gs(regs);
14914
14915 show_regs_common();
14916
14917@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14918 struct task_struct *tsk;
14919 int err;
14920
14921- childregs = task_pt_regs(p);
14922+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14923 *childregs = *regs;
14924 childregs->ax = 0;
14925 childregs->sp = sp;
14926
14927 p->thread.sp = (unsigned long) childregs;
14928 p->thread.sp0 = (unsigned long) (childregs+1);
14929+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14930
14931 p->thread.ip = (unsigned long) ret_from_fork;
14932
14933@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14934 struct thread_struct *prev = &prev_p->thread,
14935 *next = &next_p->thread;
14936 int cpu = smp_processor_id();
14937- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14938+ struct tss_struct *tss = init_tss + cpu;
14939 bool preload_fpu;
14940
14941 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14942@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14943 */
14944 lazy_save_gs(prev->gs);
14945
14946+#ifdef CONFIG_PAX_MEMORY_UDEREF
14947+ __set_fs(task_thread_info(next_p)->addr_limit);
14948+#endif
14949+
14950 /*
14951 * Load the per-thread Thread-Local Storage descriptor.
14952 */
14953@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14954 */
14955 arch_end_context_switch(next_p);
14956
14957+ percpu_write(current_task, next_p);
14958+ percpu_write(current_tinfo, &next_p->tinfo);
14959+
14960 if (preload_fpu)
14961 __math_state_restore();
14962
14963@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14964 if (prev->gs | next->gs)
14965 lazy_load_gs(next->gs);
14966
14967- percpu_write(current_task, next_p);
14968-
14969 return prev_p;
14970 }
14971
14972@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14973 } while (count++ < 16);
14974 return 0;
14975 }
14976-
14977diff -urNp linux-3.0.7/arch/x86/kernel/process_64.c linux-3.0.7/arch/x86/kernel/process_64.c
14978--- linux-3.0.7/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14979+++ linux-3.0.7/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14980@@ -87,7 +87,7 @@ static void __exit_idle(void)
14981 void exit_idle(void)
14982 {
14983 /* idle loop has pid 0 */
14984- if (current->pid)
14985+ if (task_pid_nr(current))
14986 return;
14987 __exit_idle();
14988 }
14989@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14990 struct pt_regs *childregs;
14991 struct task_struct *me = current;
14992
14993- childregs = ((struct pt_regs *)
14994- (THREAD_SIZE + task_stack_page(p))) - 1;
14995+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14996 *childregs = *regs;
14997
14998 childregs->ax = 0;
14999@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
15000 p->thread.sp = (unsigned long) childregs;
15001 p->thread.sp0 = (unsigned long) (childregs+1);
15002 p->thread.usersp = me->thread.usersp;
15003+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15004
15005 set_tsk_thread_flag(p, TIF_FORK);
15006
15007@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
15008 struct thread_struct *prev = &prev_p->thread;
15009 struct thread_struct *next = &next_p->thread;
15010 int cpu = smp_processor_id();
15011- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15012+ struct tss_struct *tss = init_tss + cpu;
15013 unsigned fsindex, gsindex;
15014 bool preload_fpu;
15015
15016@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
15017 prev->usersp = percpu_read(old_rsp);
15018 percpu_write(old_rsp, next->usersp);
15019 percpu_write(current_task, next_p);
15020+ percpu_write(current_tinfo, &next_p->tinfo);
15021
15022- percpu_write(kernel_stack,
15023- (unsigned long)task_stack_page(next_p) +
15024- THREAD_SIZE - KERNEL_STACK_OFFSET);
15025+ percpu_write(kernel_stack, next->sp0);
15026
15027 /*
15028 * Now maybe reload the debug registers and handle I/O bitmaps
15029@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
15030 if (!p || p == current || p->state == TASK_RUNNING)
15031 return 0;
15032 stack = (unsigned long)task_stack_page(p);
15033- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15034+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15035 return 0;
15036 fp = *(u64 *)(p->thread.sp);
15037 do {
15038- if (fp < (unsigned long)stack ||
15039- fp >= (unsigned long)stack+THREAD_SIZE)
15040+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15041 return 0;
15042 ip = *(u64 *)(fp+8);
15043 if (!in_sched_functions(ip))
15044diff -urNp linux-3.0.7/arch/x86/kernel/ptrace.c linux-3.0.7/arch/x86/kernel/ptrace.c
15045--- linux-3.0.7/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
15046+++ linux-3.0.7/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
15047@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
15048 unsigned long addr, unsigned long data)
15049 {
15050 int ret;
15051- unsigned long __user *datap = (unsigned long __user *)data;
15052+ unsigned long __user *datap = (__force unsigned long __user *)data;
15053
15054 switch (request) {
15055 /* read the word at location addr in the USER area. */
15056@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
15057 if ((int) addr < 0)
15058 return -EIO;
15059 ret = do_get_thread_area(child, addr,
15060- (struct user_desc __user *)data);
15061+ (__force struct user_desc __user *) data);
15062 break;
15063
15064 case PTRACE_SET_THREAD_AREA:
15065 if ((int) addr < 0)
15066 return -EIO;
15067 ret = do_set_thread_area(child, addr,
15068- (struct user_desc __user *)data, 0);
15069+ (__force struct user_desc __user *) data, 0);
15070 break;
15071 #endif
15072
15073@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
15074 memset(info, 0, sizeof(*info));
15075 info->si_signo = SIGTRAP;
15076 info->si_code = si_code;
15077- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15078+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15079 }
15080
15081 void user_single_step_siginfo(struct task_struct *tsk,
15082diff -urNp linux-3.0.7/arch/x86/kernel/pvclock.c linux-3.0.7/arch/x86/kernel/pvclock.c
15083--- linux-3.0.7/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
15084+++ linux-3.0.7/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
15085@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15086 return pv_tsc_khz;
15087 }
15088
15089-static atomic64_t last_value = ATOMIC64_INIT(0);
15090+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15091
15092 void pvclock_resume(void)
15093 {
15094- atomic64_set(&last_value, 0);
15095+ atomic64_set_unchecked(&last_value, 0);
15096 }
15097
15098 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15099@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15100 * updating at the same time, and one of them could be slightly behind,
15101 * making the assumption that last_value always go forward fail to hold.
15102 */
15103- last = atomic64_read(&last_value);
15104+ last = atomic64_read_unchecked(&last_value);
15105 do {
15106 if (ret < last)
15107 return last;
15108- last = atomic64_cmpxchg(&last_value, last, ret);
15109+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15110 } while (unlikely(last != ret));
15111
15112 return ret;
15113diff -urNp linux-3.0.7/arch/x86/kernel/reboot.c linux-3.0.7/arch/x86/kernel/reboot.c
15114--- linux-3.0.7/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
15115+++ linux-3.0.7/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
15116@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15117 EXPORT_SYMBOL(pm_power_off);
15118
15119 static const struct desc_ptr no_idt = {};
15120-static int reboot_mode;
15121+static unsigned short reboot_mode;
15122 enum reboot_type reboot_type = BOOT_ACPI;
15123 int reboot_force;
15124
15125@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15126 extern const unsigned char machine_real_restart_asm[];
15127 extern const u64 machine_real_restart_gdt[3];
15128
15129-void machine_real_restart(unsigned int type)
15130+__noreturn void machine_real_restart(unsigned int type)
15131 {
15132 void *restart_va;
15133 unsigned long restart_pa;
15134- void (*restart_lowmem)(unsigned int);
15135+ void (* __noreturn restart_lowmem)(unsigned int);
15136 u64 *lowmem_gdt;
15137
15138+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15139+ struct desc_struct *gdt;
15140+#endif
15141+
15142 local_irq_disable();
15143
15144 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15145@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15146 boot)". This seems like a fairly standard thing that gets set by
15147 REBOOT.COM programs, and the previous reset routine did this
15148 too. */
15149- *((unsigned short *)0x472) = reboot_mode;
15150+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15151
15152 /* Patch the GDT in the low memory trampoline */
15153 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15154
15155 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15156 restart_pa = virt_to_phys(restart_va);
15157- restart_lowmem = (void (*)(unsigned int))restart_pa;
15158+ restart_lowmem = (void *)restart_pa;
15159
15160 /* GDT[0]: GDT self-pointer */
15161 lowmem_gdt[0] =
15162@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15163 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15164
15165 /* Jump to the identity-mapped low memory code */
15166+
15167+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15168+ gdt = get_cpu_gdt_table(smp_processor_id());
15169+ pax_open_kernel();
15170+#ifdef CONFIG_PAX_MEMORY_UDEREF
15171+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15172+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15173+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15174+#endif
15175+#ifdef CONFIG_PAX_KERNEXEC
15176+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15177+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15178+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15179+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15180+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15181+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15182+#endif
15183+ pax_close_kernel();
15184+#endif
15185+
15186+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15187+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15188+ unreachable();
15189+#else
15190 restart_lowmem(type);
15191+#endif
15192+
15193 }
15194 #ifdef CONFIG_APM_MODULE
15195 EXPORT_SYMBOL(machine_real_restart);
15196@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15197 * try to force a triple fault and then cycle between hitting the keyboard
15198 * controller and doing that
15199 */
15200-static void native_machine_emergency_restart(void)
15201+__noreturn static void native_machine_emergency_restart(void)
15202 {
15203 int i;
15204 int attempt = 0;
15205@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15206 #endif
15207 }
15208
15209-static void __machine_emergency_restart(int emergency)
15210+static __noreturn void __machine_emergency_restart(int emergency)
15211 {
15212 reboot_emergency = emergency;
15213 machine_ops.emergency_restart();
15214 }
15215
15216-static void native_machine_restart(char *__unused)
15217+static __noreturn void native_machine_restart(char *__unused)
15218 {
15219 printk("machine restart\n");
15220
15221@@ -662,7 +692,7 @@ static void native_machine_restart(char
15222 __machine_emergency_restart(0);
15223 }
15224
15225-static void native_machine_halt(void)
15226+static __noreturn void native_machine_halt(void)
15227 {
15228 /* stop other cpus and apics */
15229 machine_shutdown();
15230@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15231 stop_this_cpu(NULL);
15232 }
15233
15234-static void native_machine_power_off(void)
15235+__noreturn static void native_machine_power_off(void)
15236 {
15237 if (pm_power_off) {
15238 if (!reboot_force)
15239@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15240 }
15241 /* a fallback in case there is no PM info available */
15242 tboot_shutdown(TB_SHUTDOWN_HALT);
15243+ unreachable();
15244 }
15245
15246 struct machine_ops machine_ops = {
15247diff -urNp linux-3.0.7/arch/x86/kernel/setup.c linux-3.0.7/arch/x86/kernel/setup.c
15248--- linux-3.0.7/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
15249+++ linux-3.0.7/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
15250@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15251
15252 switch (data->type) {
15253 case SETUP_E820_EXT:
15254- parse_e820_ext(data);
15255+ parse_e820_ext((struct setup_data __force_kernel *)data);
15256 break;
15257 case SETUP_DTB:
15258 add_dtb(pa_data);
15259@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15260 * area (640->1Mb) as ram even though it is not.
15261 * take them out.
15262 */
15263- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15264+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15265 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15266 }
15267
15268@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15269
15270 if (!boot_params.hdr.root_flags)
15271 root_mountflags &= ~MS_RDONLY;
15272- init_mm.start_code = (unsigned long) _text;
15273- init_mm.end_code = (unsigned long) _etext;
15274+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15275+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15276 init_mm.end_data = (unsigned long) _edata;
15277 init_mm.brk = _brk_end;
15278
15279- code_resource.start = virt_to_phys(_text);
15280- code_resource.end = virt_to_phys(_etext)-1;
15281- data_resource.start = virt_to_phys(_etext);
15282+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15283+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15284+ data_resource.start = virt_to_phys(_sdata);
15285 data_resource.end = virt_to_phys(_edata)-1;
15286 bss_resource.start = virt_to_phys(&__bss_start);
15287 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15288diff -urNp linux-3.0.7/arch/x86/kernel/setup_percpu.c linux-3.0.7/arch/x86/kernel/setup_percpu.c
15289--- linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
15290+++ linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
15291@@ -21,19 +21,17 @@
15292 #include <asm/cpu.h>
15293 #include <asm/stackprotector.h>
15294
15295-DEFINE_PER_CPU(int, cpu_number);
15296+#ifdef CONFIG_SMP
15297+DEFINE_PER_CPU(unsigned int, cpu_number);
15298 EXPORT_PER_CPU_SYMBOL(cpu_number);
15299+#endif
15300
15301-#ifdef CONFIG_X86_64
15302 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15303-#else
15304-#define BOOT_PERCPU_OFFSET 0
15305-#endif
15306
15307 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15308 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15309
15310-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15311+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15312 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15313 };
15314 EXPORT_SYMBOL(__per_cpu_offset);
15315@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15316 {
15317 #ifdef CONFIG_X86_32
15318 struct desc_struct gdt;
15319+ unsigned long base = per_cpu_offset(cpu);
15320
15321- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15322- 0x2 | DESCTYPE_S, 0x8);
15323- gdt.s = 1;
15324+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15325+ 0x83 | DESCTYPE_S, 0xC);
15326 write_gdt_entry(get_cpu_gdt_table(cpu),
15327 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15328 #endif
15329@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15330 /* alrighty, percpu areas up and running */
15331 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15332 for_each_possible_cpu(cpu) {
15333+#ifdef CONFIG_CC_STACKPROTECTOR
15334+#ifdef CONFIG_X86_32
15335+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15336+#endif
15337+#endif
15338 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15339 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15340 per_cpu(cpu_number, cpu) = cpu;
15341@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15342 */
15343 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15344 #endif
15345+#ifdef CONFIG_CC_STACKPROTECTOR
15346+#ifdef CONFIG_X86_32
15347+ if (!cpu)
15348+ per_cpu(stack_canary.canary, cpu) = canary;
15349+#endif
15350+#endif
15351 /*
15352 * Up to this point, the boot CPU has been using .init.data
15353 * area. Reload any changed state for the boot CPU.
15354diff -urNp linux-3.0.7/arch/x86/kernel/signal.c linux-3.0.7/arch/x86/kernel/signal.c
15355--- linux-3.0.7/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
15356+++ linux-3.0.7/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
15357@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15358 * Align the stack pointer according to the i386 ABI,
15359 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15360 */
15361- sp = ((sp + 4) & -16ul) - 4;
15362+ sp = ((sp - 12) & -16ul) - 4;
15363 #else /* !CONFIG_X86_32 */
15364 sp = round_down(sp, 16) - 8;
15365 #endif
15366@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15367 * Return an always-bogus address instead so we will die with SIGSEGV.
15368 */
15369 if (onsigstack && !likely(on_sig_stack(sp)))
15370- return (void __user *)-1L;
15371+ return (__force void __user *)-1L;
15372
15373 /* save i387 state */
15374 if (used_math() && save_i387_xstate(*fpstate) < 0)
15375- return (void __user *)-1L;
15376+ return (__force void __user *)-1L;
15377
15378 return (void __user *)sp;
15379 }
15380@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15381 }
15382
15383 if (current->mm->context.vdso)
15384- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15385+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15386 else
15387- restorer = &frame->retcode;
15388+ restorer = (void __user *)&frame->retcode;
15389 if (ka->sa.sa_flags & SA_RESTORER)
15390 restorer = ka->sa.sa_restorer;
15391
15392@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15393 * reasons and because gdb uses it as a signature to notice
15394 * signal handler stack frames.
15395 */
15396- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15397+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15398
15399 if (err)
15400 return -EFAULT;
15401@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15402 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15403
15404 /* Set up to return from userspace. */
15405- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15406+ if (current->mm->context.vdso)
15407+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15408+ else
15409+ restorer = (void __user *)&frame->retcode;
15410 if (ka->sa.sa_flags & SA_RESTORER)
15411 restorer = ka->sa.sa_restorer;
15412 put_user_ex(restorer, &frame->pretcode);
15413@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15414 * reasons and because gdb uses it as a signature to notice
15415 * signal handler stack frames.
15416 */
15417- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15418+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15419 } put_user_catch(err);
15420
15421 if (err)
15422@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15423 int signr;
15424 sigset_t *oldset;
15425
15426+ pax_track_stack();
15427+
15428 /*
15429 * We want the common case to go fast, which is why we may in certain
15430 * cases get here from kernel mode. Just return without doing anything
15431@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15432 * X86_32: vm86 regs switched out by assembly code before reaching
15433 * here, so testing against kernel CS suffices.
15434 */
15435- if (!user_mode(regs))
15436+ if (!user_mode_novm(regs))
15437 return;
15438
15439 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
15440diff -urNp linux-3.0.7/arch/x86/kernel/smpboot.c linux-3.0.7/arch/x86/kernel/smpboot.c
15441--- linux-3.0.7/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15442+++ linux-3.0.7/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15443@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15444 set_idle_for_cpu(cpu, c_idle.idle);
15445 do_rest:
15446 per_cpu(current_task, cpu) = c_idle.idle;
15447+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15448 #ifdef CONFIG_X86_32
15449 /* Stack for startup_32 can be just as for start_secondary onwards */
15450 irq_ctx_init(cpu);
15451 #else
15452 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15453 initial_gs = per_cpu_offset(cpu);
15454- per_cpu(kernel_stack, cpu) =
15455- (unsigned long)task_stack_page(c_idle.idle) -
15456- KERNEL_STACK_OFFSET + THREAD_SIZE;
15457+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15458 #endif
15459+
15460+ pax_open_kernel();
15461 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15462+ pax_close_kernel();
15463+
15464 initial_code = (unsigned long)start_secondary;
15465 stack_start = c_idle.idle->thread.sp;
15466
15467@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15468
15469 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15470
15471+#ifdef CONFIG_PAX_PER_CPU_PGD
15472+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15473+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15474+ KERNEL_PGD_PTRS);
15475+#endif
15476+
15477 err = do_boot_cpu(apicid, cpu);
15478 if (err) {
15479 pr_debug("do_boot_cpu failed %d\n", err);
15480diff -urNp linux-3.0.7/arch/x86/kernel/step.c linux-3.0.7/arch/x86/kernel/step.c
15481--- linux-3.0.7/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15482+++ linux-3.0.7/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15483@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15484 struct desc_struct *desc;
15485 unsigned long base;
15486
15487- seg &= ~7UL;
15488+ seg >>= 3;
15489
15490 mutex_lock(&child->mm->context.lock);
15491- if (unlikely((seg >> 3) >= child->mm->context.size))
15492+ if (unlikely(seg >= child->mm->context.size))
15493 addr = -1L; /* bogus selector, access would fault */
15494 else {
15495 desc = child->mm->context.ldt + seg;
15496@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15497 addr += base;
15498 }
15499 mutex_unlock(&child->mm->context.lock);
15500- }
15501+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15502+ addr = ktla_ktva(addr);
15503
15504 return addr;
15505 }
15506@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15507 unsigned char opcode[15];
15508 unsigned long addr = convert_ip_to_linear(child, regs);
15509
15510+ if (addr == -EINVAL)
15511+ return 0;
15512+
15513 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15514 for (i = 0; i < copied; i++) {
15515 switch (opcode[i]) {
15516@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15517
15518 #ifdef CONFIG_X86_64
15519 case 0x40 ... 0x4f:
15520- if (regs->cs != __USER_CS)
15521+ if ((regs->cs & 0xffff) != __USER_CS)
15522 /* 32-bit mode: register increment */
15523 return 0;
15524 /* 64-bit mode: REX prefix */
15525diff -urNp linux-3.0.7/arch/x86/kernel/sys_i386_32.c linux-3.0.7/arch/x86/kernel/sys_i386_32.c
15526--- linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15527+++ linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15528@@ -24,17 +24,224 @@
15529
15530 #include <asm/syscalls.h>
15531
15532-/*
15533- * Do a system call from kernel instead of calling sys_execve so we
15534- * end up with proper pt_regs.
15535- */
15536-int kernel_execve(const char *filename,
15537- const char *const argv[],
15538- const char *const envp[])
15539+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15540 {
15541- long __res;
15542- asm volatile ("int $0x80"
15543- : "=a" (__res)
15544- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15545- return __res;
15546+ unsigned long pax_task_size = TASK_SIZE;
15547+
15548+#ifdef CONFIG_PAX_SEGMEXEC
15549+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15550+ pax_task_size = SEGMEXEC_TASK_SIZE;
15551+#endif
15552+
15553+ if (len > pax_task_size || addr > pax_task_size - len)
15554+ return -EINVAL;
15555+
15556+ return 0;
15557+}
15558+
15559+unsigned long
15560+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15561+ unsigned long len, unsigned long pgoff, unsigned long flags)
15562+{
15563+ struct mm_struct *mm = current->mm;
15564+ struct vm_area_struct *vma;
15565+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15566+
15567+#ifdef CONFIG_PAX_SEGMEXEC
15568+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15569+ pax_task_size = SEGMEXEC_TASK_SIZE;
15570+#endif
15571+
15572+ pax_task_size -= PAGE_SIZE;
15573+
15574+ if (len > pax_task_size)
15575+ return -ENOMEM;
15576+
15577+ if (flags & MAP_FIXED)
15578+ return addr;
15579+
15580+#ifdef CONFIG_PAX_RANDMMAP
15581+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15582+#endif
15583+
15584+ if (addr) {
15585+ addr = PAGE_ALIGN(addr);
15586+ if (pax_task_size - len >= addr) {
15587+ vma = find_vma(mm, addr);
15588+ if (check_heap_stack_gap(vma, addr, len))
15589+ return addr;
15590+ }
15591+ }
15592+ if (len > mm->cached_hole_size) {
15593+ start_addr = addr = mm->free_area_cache;
15594+ } else {
15595+ start_addr = addr = mm->mmap_base;
15596+ mm->cached_hole_size = 0;
15597+ }
15598+
15599+#ifdef CONFIG_PAX_PAGEEXEC
15600+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15601+ start_addr = 0x00110000UL;
15602+
15603+#ifdef CONFIG_PAX_RANDMMAP
15604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15605+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15606+#endif
15607+
15608+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15609+ start_addr = addr = mm->mmap_base;
15610+ else
15611+ addr = start_addr;
15612+ }
15613+#endif
15614+
15615+full_search:
15616+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15617+ /* At this point: (!vma || addr < vma->vm_end). */
15618+ if (pax_task_size - len < addr) {
15619+ /*
15620+ * Start a new search - just in case we missed
15621+ * some holes.
15622+ */
15623+ if (start_addr != mm->mmap_base) {
15624+ start_addr = addr = mm->mmap_base;
15625+ mm->cached_hole_size = 0;
15626+ goto full_search;
15627+ }
15628+ return -ENOMEM;
15629+ }
15630+ if (check_heap_stack_gap(vma, addr, len))
15631+ break;
15632+ if (addr + mm->cached_hole_size < vma->vm_start)
15633+ mm->cached_hole_size = vma->vm_start - addr;
15634+ addr = vma->vm_end;
15635+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15636+ start_addr = addr = mm->mmap_base;
15637+ mm->cached_hole_size = 0;
15638+ goto full_search;
15639+ }
15640+ }
15641+
15642+ /*
15643+ * Remember the place where we stopped the search:
15644+ */
15645+ mm->free_area_cache = addr + len;
15646+ return addr;
15647+}
15648+
15649+unsigned long
15650+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15651+ const unsigned long len, const unsigned long pgoff,
15652+ const unsigned long flags)
15653+{
15654+ struct vm_area_struct *vma;
15655+ struct mm_struct *mm = current->mm;
15656+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15657+
15658+#ifdef CONFIG_PAX_SEGMEXEC
15659+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15660+ pax_task_size = SEGMEXEC_TASK_SIZE;
15661+#endif
15662+
15663+ pax_task_size -= PAGE_SIZE;
15664+
15665+ /* requested length too big for entire address space */
15666+ if (len > pax_task_size)
15667+ return -ENOMEM;
15668+
15669+ if (flags & MAP_FIXED)
15670+ return addr;
15671+
15672+#ifdef CONFIG_PAX_PAGEEXEC
15673+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15674+ goto bottomup;
15675+#endif
15676+
15677+#ifdef CONFIG_PAX_RANDMMAP
15678+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15679+#endif
15680+
15681+ /* requesting a specific address */
15682+ if (addr) {
15683+ addr = PAGE_ALIGN(addr);
15684+ if (pax_task_size - len >= addr) {
15685+ vma = find_vma(mm, addr);
15686+ if (check_heap_stack_gap(vma, addr, len))
15687+ return addr;
15688+ }
15689+ }
15690+
15691+ /* check if free_area_cache is useful for us */
15692+ if (len <= mm->cached_hole_size) {
15693+ mm->cached_hole_size = 0;
15694+ mm->free_area_cache = mm->mmap_base;
15695+ }
15696+
15697+ /* either no address requested or can't fit in requested address hole */
15698+ addr = mm->free_area_cache;
15699+
15700+ /* make sure it can fit in the remaining address space */
15701+ if (addr > len) {
15702+ vma = find_vma(mm, addr-len);
15703+ if (check_heap_stack_gap(vma, addr - len, len))
15704+ /* remember the address as a hint for next time */
15705+ return (mm->free_area_cache = addr-len);
15706+ }
15707+
15708+ if (mm->mmap_base < len)
15709+ goto bottomup;
15710+
15711+ addr = mm->mmap_base-len;
15712+
15713+ do {
15714+ /*
15715+ * Lookup failure means no vma is above this address,
15716+ * else if new region fits below vma->vm_start,
15717+ * return with success:
15718+ */
15719+ vma = find_vma(mm, addr);
15720+ if (check_heap_stack_gap(vma, addr, len))
15721+ /* remember the address as a hint for next time */
15722+ return (mm->free_area_cache = addr);
15723+
15724+ /* remember the largest hole we saw so far */
15725+ if (addr + mm->cached_hole_size < vma->vm_start)
15726+ mm->cached_hole_size = vma->vm_start - addr;
15727+
15728+ /* try just below the current vma->vm_start */
15729+ addr = skip_heap_stack_gap(vma, len);
15730+ } while (!IS_ERR_VALUE(addr));
15731+
15732+bottomup:
15733+ /*
15734+ * A failed mmap() very likely causes application failure,
15735+ * so fall back to the bottom-up function here. This scenario
15736+ * can happen with large stack limits and large mmap()
15737+ * allocations.
15738+ */
15739+
15740+#ifdef CONFIG_PAX_SEGMEXEC
15741+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15742+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15743+ else
15744+#endif
15745+
15746+ mm->mmap_base = TASK_UNMAPPED_BASE;
15747+
15748+#ifdef CONFIG_PAX_RANDMMAP
15749+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15750+ mm->mmap_base += mm->delta_mmap;
15751+#endif
15752+
15753+ mm->free_area_cache = mm->mmap_base;
15754+ mm->cached_hole_size = ~0UL;
15755+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15756+ /*
15757+ * Restore the topdown base:
15758+ */
15759+ mm->mmap_base = base;
15760+ mm->free_area_cache = base;
15761+ mm->cached_hole_size = ~0UL;
15762+
15763+ return addr;
15764 }
15765diff -urNp linux-3.0.7/arch/x86/kernel/sys_x86_64.c linux-3.0.7/arch/x86/kernel/sys_x86_64.c
15766--- linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15767+++ linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15768@@ -32,8 +32,8 @@ out:
15769 return error;
15770 }
15771
15772-static void find_start_end(unsigned long flags, unsigned long *begin,
15773- unsigned long *end)
15774+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15775+ unsigned long *begin, unsigned long *end)
15776 {
15777 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15778 unsigned long new_begin;
15779@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15780 *begin = new_begin;
15781 }
15782 } else {
15783- *begin = TASK_UNMAPPED_BASE;
15784+ *begin = mm->mmap_base;
15785 *end = TASK_SIZE;
15786 }
15787 }
15788@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15789 if (flags & MAP_FIXED)
15790 return addr;
15791
15792- find_start_end(flags, &begin, &end);
15793+ find_start_end(mm, flags, &begin, &end);
15794
15795 if (len > end)
15796 return -ENOMEM;
15797
15798+#ifdef CONFIG_PAX_RANDMMAP
15799+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15800+#endif
15801+
15802 if (addr) {
15803 addr = PAGE_ALIGN(addr);
15804 vma = find_vma(mm, addr);
15805- if (end - len >= addr &&
15806- (!vma || addr + len <= vma->vm_start))
15807+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15808 return addr;
15809 }
15810 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15811@@ -106,7 +109,7 @@ full_search:
15812 }
15813 return -ENOMEM;
15814 }
15815- if (!vma || addr + len <= vma->vm_start) {
15816+ if (check_heap_stack_gap(vma, addr, len)) {
15817 /*
15818 * Remember the place where we stopped the search:
15819 */
15820@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15821 {
15822 struct vm_area_struct *vma;
15823 struct mm_struct *mm = current->mm;
15824- unsigned long addr = addr0;
15825+ unsigned long base = mm->mmap_base, addr = addr0;
15826
15827 /* requested length too big for entire address space */
15828 if (len > TASK_SIZE)
15829@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15830 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15831 goto bottomup;
15832
15833+#ifdef CONFIG_PAX_RANDMMAP
15834+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15835+#endif
15836+
15837 /* requesting a specific address */
15838 if (addr) {
15839 addr = PAGE_ALIGN(addr);
15840- vma = find_vma(mm, addr);
15841- if (TASK_SIZE - len >= addr &&
15842- (!vma || addr + len <= vma->vm_start))
15843- return addr;
15844+ if (TASK_SIZE - len >= addr) {
15845+ vma = find_vma(mm, addr);
15846+ if (check_heap_stack_gap(vma, addr, len))
15847+ return addr;
15848+ }
15849 }
15850
15851 /* check if free_area_cache is useful for us */
15852@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15853 /* make sure it can fit in the remaining address space */
15854 if (addr > len) {
15855 vma = find_vma(mm, addr-len);
15856- if (!vma || addr <= vma->vm_start)
15857+ if (check_heap_stack_gap(vma, addr - len, len))
15858 /* remember the address as a hint for next time */
15859 return mm->free_area_cache = addr-len;
15860 }
15861@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15862 * return with success:
15863 */
15864 vma = find_vma(mm, addr);
15865- if (!vma || addr+len <= vma->vm_start)
15866+ if (check_heap_stack_gap(vma, addr, len))
15867 /* remember the address as a hint for next time */
15868 return mm->free_area_cache = addr;
15869
15870@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15871 mm->cached_hole_size = vma->vm_start - addr;
15872
15873 /* try just below the current vma->vm_start */
15874- addr = vma->vm_start-len;
15875- } while (len < vma->vm_start);
15876+ addr = skip_heap_stack_gap(vma, len);
15877+ } while (!IS_ERR_VALUE(addr));
15878
15879 bottomup:
15880 /*
15881@@ -198,13 +206,21 @@ bottomup:
15882 * can happen with large stack limits and large mmap()
15883 * allocations.
15884 */
15885+ mm->mmap_base = TASK_UNMAPPED_BASE;
15886+
15887+#ifdef CONFIG_PAX_RANDMMAP
15888+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15889+ mm->mmap_base += mm->delta_mmap;
15890+#endif
15891+
15892+ mm->free_area_cache = mm->mmap_base;
15893 mm->cached_hole_size = ~0UL;
15894- mm->free_area_cache = TASK_UNMAPPED_BASE;
15895 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15896 /*
15897 * Restore the topdown base:
15898 */
15899- mm->free_area_cache = mm->mmap_base;
15900+ mm->mmap_base = base;
15901+ mm->free_area_cache = base;
15902 mm->cached_hole_size = ~0UL;
15903
15904 return addr;
15905diff -urNp linux-3.0.7/arch/x86/kernel/syscall_table_32.S linux-3.0.7/arch/x86/kernel/syscall_table_32.S
15906--- linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15907+++ linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15908@@ -1,3 +1,4 @@
15909+.section .rodata,"a",@progbits
15910 ENTRY(sys_call_table)
15911 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15912 .long sys_exit
15913diff -urNp linux-3.0.7/arch/x86/kernel/tboot.c linux-3.0.7/arch/x86/kernel/tboot.c
15914--- linux-3.0.7/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15915+++ linux-3.0.7/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15916@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15917
15918 void tboot_shutdown(u32 shutdown_type)
15919 {
15920- void (*shutdown)(void);
15921+ void (* __noreturn shutdown)(void);
15922
15923 if (!tboot_enabled())
15924 return;
15925@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15926
15927 switch_to_tboot_pt();
15928
15929- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15930+ shutdown = (void *)tboot->shutdown_entry;
15931 shutdown();
15932
15933 /* should not reach here */
15934@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15935 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15936 }
15937
15938-static atomic_t ap_wfs_count;
15939+static atomic_unchecked_t ap_wfs_count;
15940
15941 static int tboot_wait_for_aps(int num_aps)
15942 {
15943@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15944 {
15945 switch (action) {
15946 case CPU_DYING:
15947- atomic_inc(&ap_wfs_count);
15948+ atomic_inc_unchecked(&ap_wfs_count);
15949 if (num_online_cpus() == 1)
15950- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15951+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15952 return NOTIFY_BAD;
15953 break;
15954 }
15955@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15956
15957 tboot_create_trampoline();
15958
15959- atomic_set(&ap_wfs_count, 0);
15960+ atomic_set_unchecked(&ap_wfs_count, 0);
15961 register_hotcpu_notifier(&tboot_cpu_notifier);
15962 return 0;
15963 }
15964diff -urNp linux-3.0.7/arch/x86/kernel/time.c linux-3.0.7/arch/x86/kernel/time.c
15965--- linux-3.0.7/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15966+++ linux-3.0.7/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15967@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15968 {
15969 unsigned long pc = instruction_pointer(regs);
15970
15971- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15972+ if (!user_mode(regs) && in_lock_functions(pc)) {
15973 #ifdef CONFIG_FRAME_POINTER
15974- return *(unsigned long *)(regs->bp + sizeof(long));
15975+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15976 #else
15977 unsigned long *sp =
15978 (unsigned long *)kernel_stack_pointer(regs);
15979@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15980 * or above a saved flags. Eflags has bits 22-31 zero,
15981 * kernel addresses don't.
15982 */
15983+
15984+#ifdef CONFIG_PAX_KERNEXEC
15985+ return ktla_ktva(sp[0]);
15986+#else
15987 if (sp[0] >> 22)
15988 return sp[0];
15989 if (sp[1] >> 22)
15990 return sp[1];
15991 #endif
15992+
15993+#endif
15994 }
15995 return pc;
15996 }
15997diff -urNp linux-3.0.7/arch/x86/kernel/tls.c linux-3.0.7/arch/x86/kernel/tls.c
15998--- linux-3.0.7/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15999+++ linux-3.0.7/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
16000@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16001 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16002 return -EINVAL;
16003
16004+#ifdef CONFIG_PAX_SEGMEXEC
16005+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16006+ return -EINVAL;
16007+#endif
16008+
16009 set_tls_desc(p, idx, &info, 1);
16010
16011 return 0;
16012diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_32.S linux-3.0.7/arch/x86/kernel/trampoline_32.S
16013--- linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
16014+++ linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
16015@@ -32,6 +32,12 @@
16016 #include <asm/segment.h>
16017 #include <asm/page_types.h>
16018
16019+#ifdef CONFIG_PAX_KERNEXEC
16020+#define ta(X) (X)
16021+#else
16022+#define ta(X) ((X) - __PAGE_OFFSET)
16023+#endif
16024+
16025 #ifdef CONFIG_SMP
16026
16027 .section ".x86_trampoline","a"
16028@@ -62,7 +68,7 @@ r_base = .
16029 inc %ax # protected mode (PE) bit
16030 lmsw %ax # into protected mode
16031 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16032- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16033+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16034
16035 # These need to be in the same 64K segment as the above;
16036 # hence we don't use the boot_gdt_descr defined in head.S
16037diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_64.S linux-3.0.7/arch/x86/kernel/trampoline_64.S
16038--- linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
16039+++ linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
16040@@ -90,7 +90,7 @@ startup_32:
16041 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16042 movl %eax, %ds
16043
16044- movl $X86_CR4_PAE, %eax
16045+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16046 movl %eax, %cr4 # Enable PAE mode
16047
16048 # Setup trampoline 4 level pagetables
16049@@ -138,7 +138,7 @@ tidt:
16050 # so the kernel can live anywhere
16051 .balign 4
16052 tgdt:
16053- .short tgdt_end - tgdt # gdt limit
16054+ .short tgdt_end - tgdt - 1 # gdt limit
16055 .long tgdt - r_base
16056 .short 0
16057 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16058diff -urNp linux-3.0.7/arch/x86/kernel/traps.c linux-3.0.7/arch/x86/kernel/traps.c
16059--- linux-3.0.7/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
16060+++ linux-3.0.7/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
16061@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16062
16063 /* Do we ignore FPU interrupts ? */
16064 char ignore_fpu_irq;
16065-
16066-/*
16067- * The IDT has to be page-aligned to simplify the Pentium
16068- * F0 0F bug workaround.
16069- */
16070-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16071 #endif
16072
16073 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16074@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16075 }
16076
16077 static void __kprobes
16078-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16079+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16080 long error_code, siginfo_t *info)
16081 {
16082 struct task_struct *tsk = current;
16083
16084 #ifdef CONFIG_X86_32
16085- if (regs->flags & X86_VM_MASK) {
16086+ if (v8086_mode(regs)) {
16087 /*
16088 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16089 * On nmi (interrupt 2), do_trap should not be called.
16090@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16091 }
16092 #endif
16093
16094- if (!user_mode(regs))
16095+ if (!user_mode_novm(regs))
16096 goto kernel_trap;
16097
16098 #ifdef CONFIG_X86_32
16099@@ -157,7 +151,7 @@ trap_signal:
16100 printk_ratelimit()) {
16101 printk(KERN_INFO
16102 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16103- tsk->comm, tsk->pid, str,
16104+ tsk->comm, task_pid_nr(tsk), str,
16105 regs->ip, regs->sp, error_code);
16106 print_vma_addr(" in ", regs->ip);
16107 printk("\n");
16108@@ -174,8 +168,20 @@ kernel_trap:
16109 if (!fixup_exception(regs)) {
16110 tsk->thread.error_code = error_code;
16111 tsk->thread.trap_no = trapnr;
16112+
16113+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16114+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16115+ str = "PAX: suspicious stack segment fault";
16116+#endif
16117+
16118 die(str, regs, error_code);
16119 }
16120+
16121+#ifdef CONFIG_PAX_REFCOUNT
16122+ if (trapnr == 4)
16123+ pax_report_refcount_overflow(regs);
16124+#endif
16125+
16126 return;
16127
16128 #ifdef CONFIG_X86_32
16129@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16130 conditional_sti(regs);
16131
16132 #ifdef CONFIG_X86_32
16133- if (regs->flags & X86_VM_MASK)
16134+ if (v8086_mode(regs))
16135 goto gp_in_vm86;
16136 #endif
16137
16138 tsk = current;
16139- if (!user_mode(regs))
16140+ if (!user_mode_novm(regs))
16141 goto gp_in_kernel;
16142
16143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16144+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16145+ struct mm_struct *mm = tsk->mm;
16146+ unsigned long limit;
16147+
16148+ down_write(&mm->mmap_sem);
16149+ limit = mm->context.user_cs_limit;
16150+ if (limit < TASK_SIZE) {
16151+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16152+ up_write(&mm->mmap_sem);
16153+ return;
16154+ }
16155+ up_write(&mm->mmap_sem);
16156+ }
16157+#endif
16158+
16159 tsk->thread.error_code = error_code;
16160 tsk->thread.trap_no = 13;
16161
16162@@ -304,6 +326,13 @@ gp_in_kernel:
16163 if (notify_die(DIE_GPF, "general protection fault", regs,
16164 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16165 return;
16166+
16167+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16168+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16169+ die("PAX: suspicious general protection fault", regs, error_code);
16170+ else
16171+#endif
16172+
16173 die("general protection fault", regs, error_code);
16174 }
16175
16176@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16177 dotraplinkage notrace __kprobes void
16178 do_nmi(struct pt_regs *regs, long error_code)
16179 {
16180+
16181+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16182+ if (!user_mode(regs)) {
16183+ unsigned long cs = regs->cs & 0xFFFF;
16184+ unsigned long ip = ktva_ktla(regs->ip);
16185+
16186+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16187+ regs->ip = ip;
16188+ }
16189+#endif
16190+
16191 nmi_enter();
16192
16193 inc_irq_stat(__nmi_count);
16194@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16195 /* It's safe to allow irq's after DR6 has been saved */
16196 preempt_conditional_sti(regs);
16197
16198- if (regs->flags & X86_VM_MASK) {
16199+ if (v8086_mode(regs)) {
16200 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16201 error_code, 1);
16202 preempt_conditional_cli(regs);
16203@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16204 * We already checked v86 mode above, so we can check for kernel mode
16205 * by just checking the CPL of CS.
16206 */
16207- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16208+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16209 tsk->thread.debugreg6 &= ~DR_STEP;
16210 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16211 regs->flags &= ~X86_EFLAGS_TF;
16212@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16213 return;
16214 conditional_sti(regs);
16215
16216- if (!user_mode_vm(regs))
16217+ if (!user_mode(regs))
16218 {
16219 if (!fixup_exception(regs)) {
16220 task->thread.error_code = error_code;
16221@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16222 void __math_state_restore(void)
16223 {
16224 struct thread_info *thread = current_thread_info();
16225- struct task_struct *tsk = thread->task;
16226+ struct task_struct *tsk = current;
16227
16228 /*
16229 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16230@@ -750,8 +790,7 @@ void __math_state_restore(void)
16231 */
16232 asmlinkage void math_state_restore(void)
16233 {
16234- struct thread_info *thread = current_thread_info();
16235- struct task_struct *tsk = thread->task;
16236+ struct task_struct *tsk = current;
16237
16238 if (!tsk_used_math(tsk)) {
16239 local_irq_enable();
16240diff -urNp linux-3.0.7/arch/x86/kernel/verify_cpu.S linux-3.0.7/arch/x86/kernel/verify_cpu.S
16241--- linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
16242+++ linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
16243@@ -20,6 +20,7 @@
16244 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16245 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16246 * arch/x86/kernel/head_32.S: processor startup
16247+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16248 *
16249 * verify_cpu, returns the status of longmode and SSE in register %eax.
16250 * 0: Success 1: Failure
16251diff -urNp linux-3.0.7/arch/x86/kernel/vm86_32.c linux-3.0.7/arch/x86/kernel/vm86_32.c
16252--- linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
16253+++ linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
16254@@ -41,6 +41,7 @@
16255 #include <linux/ptrace.h>
16256 #include <linux/audit.h>
16257 #include <linux/stddef.h>
16258+#include <linux/grsecurity.h>
16259
16260 #include <asm/uaccess.h>
16261 #include <asm/io.h>
16262@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16263 do_exit(SIGSEGV);
16264 }
16265
16266- tss = &per_cpu(init_tss, get_cpu());
16267+ tss = init_tss + get_cpu();
16268 current->thread.sp0 = current->thread.saved_sp0;
16269 current->thread.sysenter_cs = __KERNEL_CS;
16270 load_sp0(tss, &current->thread);
16271@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16272 struct task_struct *tsk;
16273 int tmp, ret = -EPERM;
16274
16275+#ifdef CONFIG_GRKERNSEC_VM86
16276+ if (!capable(CAP_SYS_RAWIO)) {
16277+ gr_handle_vm86();
16278+ goto out;
16279+ }
16280+#endif
16281+
16282 tsk = current;
16283 if (tsk->thread.saved_sp0)
16284 goto out;
16285@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16286 int tmp, ret;
16287 struct vm86plus_struct __user *v86;
16288
16289+#ifdef CONFIG_GRKERNSEC_VM86
16290+ if (!capable(CAP_SYS_RAWIO)) {
16291+ gr_handle_vm86();
16292+ ret = -EPERM;
16293+ goto out;
16294+ }
16295+#endif
16296+
16297 tsk = current;
16298 switch (cmd) {
16299 case VM86_REQUEST_IRQ:
16300@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16301 tsk->thread.saved_fs = info->regs32->fs;
16302 tsk->thread.saved_gs = get_user_gs(info->regs32);
16303
16304- tss = &per_cpu(init_tss, get_cpu());
16305+ tss = init_tss + get_cpu();
16306 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16307 if (cpu_has_sep)
16308 tsk->thread.sysenter_cs = 0;
16309@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16310 goto cannot_handle;
16311 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16312 goto cannot_handle;
16313- intr_ptr = (unsigned long __user *) (i << 2);
16314+ intr_ptr = (__force unsigned long __user *) (i << 2);
16315 if (get_user(segoffs, intr_ptr))
16316 goto cannot_handle;
16317 if ((segoffs >> 16) == BIOSSEG)
16318diff -urNp linux-3.0.7/arch/x86/kernel/vmlinux.lds.S linux-3.0.7/arch/x86/kernel/vmlinux.lds.S
16319--- linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
16320+++ linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
16321@@ -26,6 +26,13 @@
16322 #include <asm/page_types.h>
16323 #include <asm/cache.h>
16324 #include <asm/boot.h>
16325+#include <asm/segment.h>
16326+
16327+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16328+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16329+#else
16330+#define __KERNEL_TEXT_OFFSET 0
16331+#endif
16332
16333 #undef i386 /* in case the preprocessor is a 32bit one */
16334
16335@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
16336
16337 PHDRS {
16338 text PT_LOAD FLAGS(5); /* R_E */
16339+#ifdef CONFIG_X86_32
16340+ module PT_LOAD FLAGS(5); /* R_E */
16341+#endif
16342+#ifdef CONFIG_XEN
16343+ rodata PT_LOAD FLAGS(5); /* R_E */
16344+#else
16345+ rodata PT_LOAD FLAGS(4); /* R__ */
16346+#endif
16347 data PT_LOAD FLAGS(6); /* RW_ */
16348 #ifdef CONFIG_X86_64
16349 user PT_LOAD FLAGS(5); /* R_E */
16350+#endif
16351+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16352 #ifdef CONFIG_SMP
16353 percpu PT_LOAD FLAGS(6); /* RW_ */
16354 #endif
16355+ text.init PT_LOAD FLAGS(5); /* R_E */
16356+ text.exit PT_LOAD FLAGS(5); /* R_E */
16357 init PT_LOAD FLAGS(7); /* RWE */
16358-#endif
16359 note PT_NOTE FLAGS(0); /* ___ */
16360 }
16361
16362 SECTIONS
16363 {
16364 #ifdef CONFIG_X86_32
16365- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16366- phys_startup_32 = startup_32 - LOAD_OFFSET;
16367+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16368 #else
16369- . = __START_KERNEL;
16370- phys_startup_64 = startup_64 - LOAD_OFFSET;
16371+ . = __START_KERNEL;
16372 #endif
16373
16374 /* Text and read-only data */
16375- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16376- _text = .;
16377+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16378 /* bootstrapping code */
16379+#ifdef CONFIG_X86_32
16380+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16381+#else
16382+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16383+#endif
16384+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16385+ _text = .;
16386 HEAD_TEXT
16387 #ifdef CONFIG_X86_32
16388 . = ALIGN(PAGE_SIZE);
16389@@ -109,13 +131,47 @@ SECTIONS
16390 IRQENTRY_TEXT
16391 *(.fixup)
16392 *(.gnu.warning)
16393- /* End of text section */
16394- _etext = .;
16395 } :text = 0x9090
16396
16397- NOTES :text :note
16398+ . += __KERNEL_TEXT_OFFSET;
16399+
16400+#ifdef CONFIG_X86_32
16401+ . = ALIGN(PAGE_SIZE);
16402+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16403+
16404+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16405+ MODULES_EXEC_VADDR = .;
16406+ BYTE(0)
16407+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16408+ . = ALIGN(HPAGE_SIZE);
16409+ MODULES_EXEC_END = . - 1;
16410+#endif
16411+
16412+ } :module
16413+#endif
16414+
16415+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16416+ /* End of text section */
16417+ _etext = . - __KERNEL_TEXT_OFFSET;
16418+ }
16419+
16420+#ifdef CONFIG_X86_32
16421+ . = ALIGN(PAGE_SIZE);
16422+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16423+ *(.idt)
16424+ . = ALIGN(PAGE_SIZE);
16425+ *(.empty_zero_page)
16426+ *(.initial_pg_fixmap)
16427+ *(.initial_pg_pmd)
16428+ *(.initial_page_table)
16429+ *(.swapper_pg_dir)
16430+ } :rodata
16431+#endif
16432+
16433+ . = ALIGN(PAGE_SIZE);
16434+ NOTES :rodata :note
16435
16436- EXCEPTION_TABLE(16) :text = 0x9090
16437+ EXCEPTION_TABLE(16) :rodata
16438
16439 #if defined(CONFIG_DEBUG_RODATA)
16440 /* .text should occupy whole number of pages */
16441@@ -127,16 +183,20 @@ SECTIONS
16442
16443 /* Data */
16444 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16445+
16446+#ifdef CONFIG_PAX_KERNEXEC
16447+ . = ALIGN(HPAGE_SIZE);
16448+#else
16449+ . = ALIGN(PAGE_SIZE);
16450+#endif
16451+
16452 /* Start of data section */
16453 _sdata = .;
16454
16455 /* init_task */
16456 INIT_TASK_DATA(THREAD_SIZE)
16457
16458-#ifdef CONFIG_X86_32
16459- /* 32 bit has nosave before _edata */
16460 NOSAVE_DATA
16461-#endif
16462
16463 PAGE_ALIGNED_DATA(PAGE_SIZE)
16464
16465@@ -208,12 +268,19 @@ SECTIONS
16466 #endif /* CONFIG_X86_64 */
16467
16468 /* Init code and data - will be freed after init */
16469- . = ALIGN(PAGE_SIZE);
16470 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16471+ BYTE(0)
16472+
16473+#ifdef CONFIG_PAX_KERNEXEC
16474+ . = ALIGN(HPAGE_SIZE);
16475+#else
16476+ . = ALIGN(PAGE_SIZE);
16477+#endif
16478+
16479 __init_begin = .; /* paired with __init_end */
16480- }
16481+ } :init.begin
16482
16483-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16484+#ifdef CONFIG_SMP
16485 /*
16486 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16487 * output PHDR, so the next output section - .init.text - should
16488@@ -222,12 +289,27 @@ SECTIONS
16489 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16490 #endif
16491
16492- INIT_TEXT_SECTION(PAGE_SIZE)
16493-#ifdef CONFIG_X86_64
16494- :init
16495-#endif
16496+ . = ALIGN(PAGE_SIZE);
16497+ init_begin = .;
16498+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16499+ VMLINUX_SYMBOL(_sinittext) = .;
16500+ INIT_TEXT
16501+ VMLINUX_SYMBOL(_einittext) = .;
16502+ . = ALIGN(PAGE_SIZE);
16503+ } :text.init
16504
16505- INIT_DATA_SECTION(16)
16506+ /*
16507+ * .exit.text is discard at runtime, not link time, to deal with
16508+ * references from .altinstructions and .eh_frame
16509+ */
16510+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16511+ EXIT_TEXT
16512+ . = ALIGN(16);
16513+ } :text.exit
16514+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16515+
16516+ . = ALIGN(PAGE_SIZE);
16517+ INIT_DATA_SECTION(16) :init
16518
16519 /*
16520 * Code and data for a variety of lowlevel trampolines, to be
16521@@ -301,19 +383,12 @@ SECTIONS
16522 }
16523
16524 . = ALIGN(8);
16525- /*
16526- * .exit.text is discard at runtime, not link time, to deal with
16527- * references from .altinstructions and .eh_frame
16528- */
16529- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16530- EXIT_TEXT
16531- }
16532
16533 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16534 EXIT_DATA
16535 }
16536
16537-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16538+#ifndef CONFIG_SMP
16539 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16540 #endif
16541
16542@@ -332,16 +407,10 @@ SECTIONS
16543 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16544 __smp_locks = .;
16545 *(.smp_locks)
16546- . = ALIGN(PAGE_SIZE);
16547 __smp_locks_end = .;
16548+ . = ALIGN(PAGE_SIZE);
16549 }
16550
16551-#ifdef CONFIG_X86_64
16552- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16553- NOSAVE_DATA
16554- }
16555-#endif
16556-
16557 /* BSS */
16558 . = ALIGN(PAGE_SIZE);
16559 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16560@@ -357,6 +426,7 @@ SECTIONS
16561 __brk_base = .;
16562 . += 64 * 1024; /* 64k alignment slop space */
16563 *(.brk_reservation) /* areas brk users have reserved */
16564+ . = ALIGN(HPAGE_SIZE);
16565 __brk_limit = .;
16566 }
16567
16568@@ -383,13 +453,12 @@ SECTIONS
16569 * for the boot processor.
16570 */
16571 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16572-INIT_PER_CPU(gdt_page);
16573 INIT_PER_CPU(irq_stack_union);
16574
16575 /*
16576 * Build-time check on the image size:
16577 */
16578-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16579+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16580 "kernel image bigger than KERNEL_IMAGE_SIZE");
16581
16582 #ifdef CONFIG_SMP
16583diff -urNp linux-3.0.7/arch/x86/kernel/vsyscall_64.c linux-3.0.7/arch/x86/kernel/vsyscall_64.c
16584--- linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16585+++ linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16586@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16587 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16588 {
16589 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16590- .sysctl_enabled = 1,
16591+ .sysctl_enabled = 0,
16592 };
16593
16594 void update_vsyscall_tz(void)
16595@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16596 static ctl_table kernel_table2[] = {
16597 { .procname = "vsyscall64",
16598 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16599- .mode = 0644,
16600+ .mode = 0444,
16601 .proc_handler = proc_dointvec },
16602 {}
16603 };
16604diff -urNp linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c
16605--- linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16606+++ linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16607@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16608 EXPORT_SYMBOL(copy_user_generic_string);
16609 EXPORT_SYMBOL(copy_user_generic_unrolled);
16610 EXPORT_SYMBOL(__copy_user_nocache);
16611-EXPORT_SYMBOL(_copy_from_user);
16612-EXPORT_SYMBOL(_copy_to_user);
16613
16614 EXPORT_SYMBOL(copy_page);
16615 EXPORT_SYMBOL(clear_page);
16616diff -urNp linux-3.0.7/arch/x86/kernel/xsave.c linux-3.0.7/arch/x86/kernel/xsave.c
16617--- linux-3.0.7/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16618+++ linux-3.0.7/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16619@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16620 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16621 return -EINVAL;
16622
16623- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16624+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16625 fx_sw_user->extended_size -
16626 FP_XSTATE_MAGIC2_SIZE));
16627 if (err)
16628@@ -267,7 +267,7 @@ fx_only:
16629 * the other extended state.
16630 */
16631 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16632- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16633+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16634 }
16635
16636 /*
16637@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16638 if (use_xsave())
16639 err = restore_user_xstate(buf);
16640 else
16641- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16642+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16643 buf);
16644 if (unlikely(err)) {
16645 /*
16646diff -urNp linux-3.0.7/arch/x86/kvm/emulate.c linux-3.0.7/arch/x86/kvm/emulate.c
16647--- linux-3.0.7/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16648+++ linux-3.0.7/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16649@@ -96,7 +96,7 @@
16650 #define Src2ImmByte (2<<29)
16651 #define Src2One (3<<29)
16652 #define Src2Imm (4<<29)
16653-#define Src2Mask (7<<29)
16654+#define Src2Mask (7U<<29)
16655
16656 #define X2(x...) x, x
16657 #define X3(x...) X2(x), x
16658@@ -207,6 +207,7 @@ struct gprefix {
16659
16660 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16661 do { \
16662+ unsigned long _tmp; \
16663 __asm__ __volatile__ ( \
16664 _PRE_EFLAGS("0", "4", "2") \
16665 _op _suffix " %"_x"3,%1; " \
16666@@ -220,8 +221,6 @@ struct gprefix {
16667 /* Raw emulation: instruction has two explicit operands. */
16668 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16669 do { \
16670- unsigned long _tmp; \
16671- \
16672 switch ((_dst).bytes) { \
16673 case 2: \
16674 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16675@@ -237,7 +236,6 @@ struct gprefix {
16676
16677 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16678 do { \
16679- unsigned long _tmp; \
16680 switch ((_dst).bytes) { \
16681 case 1: \
16682 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16683diff -urNp linux-3.0.7/arch/x86/kvm/lapic.c linux-3.0.7/arch/x86/kvm/lapic.c
16684--- linux-3.0.7/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16685+++ linux-3.0.7/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16686@@ -53,7 +53,7 @@
16687 #define APIC_BUS_CYCLE_NS 1
16688
16689 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16690-#define apic_debug(fmt, arg...)
16691+#define apic_debug(fmt, arg...) do {} while (0)
16692
16693 #define APIC_LVT_NUM 6
16694 /* 14 is the version for Xeon and Pentium 8.4.8*/
16695diff -urNp linux-3.0.7/arch/x86/kvm/mmu.c linux-3.0.7/arch/x86/kvm/mmu.c
16696--- linux-3.0.7/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16697+++ linux-3.0.7/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16698@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16699
16700 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16701
16702- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16703+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16704
16705 /*
16706 * Assume that the pte write on a page table of the same type
16707@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16708 }
16709
16710 spin_lock(&vcpu->kvm->mmu_lock);
16711- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16712+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16713 gentry = 0;
16714 kvm_mmu_free_some_pages(vcpu);
16715 ++vcpu->kvm->stat.mmu_pte_write;
16716diff -urNp linux-3.0.7/arch/x86/kvm/paging_tmpl.h linux-3.0.7/arch/x86/kvm/paging_tmpl.h
16717--- linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16718+++ linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16719@@ -182,7 +182,7 @@ walk:
16720 break;
16721 }
16722
16723- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16724+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16725 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16726 present = false;
16727 break;
16728@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16729 unsigned long mmu_seq;
16730 bool map_writable;
16731
16732+ pax_track_stack();
16733+
16734 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16735
16736 r = mmu_topup_memory_caches(vcpu);
16737@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16738 if (need_flush)
16739 kvm_flush_remote_tlbs(vcpu->kvm);
16740
16741- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16742+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16743
16744 spin_unlock(&vcpu->kvm->mmu_lock);
16745
16746diff -urNp linux-3.0.7/arch/x86/kvm/svm.c linux-3.0.7/arch/x86/kvm/svm.c
16747--- linux-3.0.7/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16748+++ linux-3.0.7/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16749@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16750 int cpu = raw_smp_processor_id();
16751
16752 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16753+
16754+ pax_open_kernel();
16755 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16756+ pax_close_kernel();
16757+
16758 load_TR_desc();
16759 }
16760
16761@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16762 #endif
16763 #endif
16764
16765+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16766+ __set_fs(current_thread_info()->addr_limit);
16767+#endif
16768+
16769 reload_tss(vcpu);
16770
16771 local_irq_disable();
16772diff -urNp linux-3.0.7/arch/x86/kvm/vmx.c linux-3.0.7/arch/x86/kvm/vmx.c
16773--- linux-3.0.7/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16774+++ linux-3.0.7/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16775@@ -797,7 +797,11 @@ static void reload_tss(void)
16776 struct desc_struct *descs;
16777
16778 descs = (void *)gdt->address;
16779+
16780+ pax_open_kernel();
16781 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16782+ pax_close_kernel();
16783+
16784 load_TR_desc();
16785 }
16786
16787@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16788 if (!cpu_has_vmx_flexpriority())
16789 flexpriority_enabled = 0;
16790
16791- if (!cpu_has_vmx_tpr_shadow())
16792- kvm_x86_ops->update_cr8_intercept = NULL;
16793+ if (!cpu_has_vmx_tpr_shadow()) {
16794+ pax_open_kernel();
16795+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16796+ pax_close_kernel();
16797+ }
16798
16799 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16800 kvm_disable_largepages();
16801@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16802 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16803
16804 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16805- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16806+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16807 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16808 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16809 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16810@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16811 "jmp .Lkvm_vmx_return \n\t"
16812 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16813 ".Lkvm_vmx_return: "
16814+
16815+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16816+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16817+ ".Lkvm_vmx_return2: "
16818+#endif
16819+
16820 /* Save guest registers, load host registers, keep flags */
16821 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16822 "pop %0 \n\t"
16823@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16824 #endif
16825 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16826 [wordsize]"i"(sizeof(ulong))
16827+
16828+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16829+ ,[cs]"i"(__KERNEL_CS)
16830+#endif
16831+
16832 : "cc", "memory"
16833 , R"ax", R"bx", R"di", R"si"
16834 #ifdef CONFIG_X86_64
16835@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16836
16837 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16838
16839- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16840+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16841+
16842+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16843+ loadsegment(fs, __KERNEL_PERCPU);
16844+#endif
16845+
16846+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16847+ __set_fs(current_thread_info()->addr_limit);
16848+#endif
16849+
16850 vmx->launched = 1;
16851
16852 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16853diff -urNp linux-3.0.7/arch/x86/kvm/x86.c linux-3.0.7/arch/x86/kvm/x86.c
16854--- linux-3.0.7/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16855+++ linux-3.0.7/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16856@@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16857 {
16858 struct kvm *kvm = vcpu->kvm;
16859 int lm = is_long_mode(vcpu);
16860- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16861- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16862+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16863+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16864 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16865 : kvm->arch.xen_hvm_config.blob_size_32;
16866 u32 page_num = data & ~PAGE_MASK;
16867@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16868 if (n < msr_list.nmsrs)
16869 goto out;
16870 r = -EFAULT;
16871+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16872+ goto out;
16873 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16874 num_msrs_to_save * sizeof(u32)))
16875 goto out;
16876@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16877 struct kvm_cpuid2 *cpuid,
16878 struct kvm_cpuid_entry2 __user *entries)
16879 {
16880- int r;
16881+ int r, i;
16882
16883 r = -E2BIG;
16884 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16885 goto out;
16886 r = -EFAULT;
16887- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16888- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16889+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16890 goto out;
16891+ for (i = 0; i < cpuid->nent; ++i) {
16892+ struct kvm_cpuid_entry2 cpuid_entry;
16893+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16894+ goto out;
16895+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16896+ }
16897 vcpu->arch.cpuid_nent = cpuid->nent;
16898 kvm_apic_set_version(vcpu);
16899 kvm_x86_ops->cpuid_update(vcpu);
16900@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16901 struct kvm_cpuid2 *cpuid,
16902 struct kvm_cpuid_entry2 __user *entries)
16903 {
16904- int r;
16905+ int r, i;
16906
16907 r = -E2BIG;
16908 if (cpuid->nent < vcpu->arch.cpuid_nent)
16909 goto out;
16910 r = -EFAULT;
16911- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16912- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16913+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16914 goto out;
16915+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16916+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16917+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16918+ goto out;
16919+ }
16920 return 0;
16921
16922 out:
16923@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16924 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16925 struct kvm_interrupt *irq)
16926 {
16927- if (irq->irq < 0 || irq->irq >= 256)
16928+ if (irq->irq >= 256)
16929 return -EINVAL;
16930 if (irqchip_in_kernel(vcpu->kvm))
16931 return -ENXIO;
16932@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16933 }
16934 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16935
16936-int kvm_arch_init(void *opaque)
16937+int kvm_arch_init(const void *opaque)
16938 {
16939 int r;
16940 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16941diff -urNp linux-3.0.7/arch/x86/lguest/boot.c linux-3.0.7/arch/x86/lguest/boot.c
16942--- linux-3.0.7/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16943+++ linux-3.0.7/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16944@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16945 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16946 * Launcher to reboot us.
16947 */
16948-static void lguest_restart(char *reason)
16949+static __noreturn void lguest_restart(char *reason)
16950 {
16951 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16952+ BUG();
16953 }
16954
16955 /*G:050
16956diff -urNp linux-3.0.7/arch/x86/lib/atomic64_32.c linux-3.0.7/arch/x86/lib/atomic64_32.c
16957--- linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16958+++ linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16959@@ -8,18 +8,30 @@
16960
16961 long long atomic64_read_cx8(long long, const atomic64_t *v);
16962 EXPORT_SYMBOL(atomic64_read_cx8);
16963+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16964+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16965 long long atomic64_set_cx8(long long, const atomic64_t *v);
16966 EXPORT_SYMBOL(atomic64_set_cx8);
16967+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16968+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16969 long long atomic64_xchg_cx8(long long, unsigned high);
16970 EXPORT_SYMBOL(atomic64_xchg_cx8);
16971 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16972 EXPORT_SYMBOL(atomic64_add_return_cx8);
16973+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16974+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16975 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16976 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16977+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16978+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16979 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16980 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16981+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16982+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16983 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16984 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16985+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16986+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16987 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16988 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16989 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16990@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16991 #ifndef CONFIG_X86_CMPXCHG64
16992 long long atomic64_read_386(long long, const atomic64_t *v);
16993 EXPORT_SYMBOL(atomic64_read_386);
16994+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16995+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16996 long long atomic64_set_386(long long, const atomic64_t *v);
16997 EXPORT_SYMBOL(atomic64_set_386);
16998+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16999+EXPORT_SYMBOL(atomic64_set_unchecked_386);
17000 long long atomic64_xchg_386(long long, unsigned high);
17001 EXPORT_SYMBOL(atomic64_xchg_386);
17002 long long atomic64_add_return_386(long long a, atomic64_t *v);
17003 EXPORT_SYMBOL(atomic64_add_return_386);
17004+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17005+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17006 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17007 EXPORT_SYMBOL(atomic64_sub_return_386);
17008+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17009+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17010 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17011 EXPORT_SYMBOL(atomic64_inc_return_386);
17012+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17013+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17014 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17015 EXPORT_SYMBOL(atomic64_dec_return_386);
17016+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17017+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17018 long long atomic64_add_386(long long a, atomic64_t *v);
17019 EXPORT_SYMBOL(atomic64_add_386);
17020+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17021+EXPORT_SYMBOL(atomic64_add_unchecked_386);
17022 long long atomic64_sub_386(long long a, atomic64_t *v);
17023 EXPORT_SYMBOL(atomic64_sub_386);
17024+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17025+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17026 long long atomic64_inc_386(long long a, atomic64_t *v);
17027 EXPORT_SYMBOL(atomic64_inc_386);
17028+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17029+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17030 long long atomic64_dec_386(long long a, atomic64_t *v);
17031 EXPORT_SYMBOL(atomic64_dec_386);
17032+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17033+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17034 long long atomic64_dec_if_positive_386(atomic64_t *v);
17035 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17036 int atomic64_inc_not_zero_386(atomic64_t *v);
17037diff -urNp linux-3.0.7/arch/x86/lib/atomic64_386_32.S linux-3.0.7/arch/x86/lib/atomic64_386_32.S
17038--- linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
17039+++ linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
17040@@ -48,6 +48,10 @@ BEGIN(read)
17041 movl (v), %eax
17042 movl 4(v), %edx
17043 RET_ENDP
17044+BEGIN(read_unchecked)
17045+ movl (v), %eax
17046+ movl 4(v), %edx
17047+RET_ENDP
17048 #undef v
17049
17050 #define v %esi
17051@@ -55,6 +59,10 @@ BEGIN(set)
17052 movl %ebx, (v)
17053 movl %ecx, 4(v)
17054 RET_ENDP
17055+BEGIN(set_unchecked)
17056+ movl %ebx, (v)
17057+ movl %ecx, 4(v)
17058+RET_ENDP
17059 #undef v
17060
17061 #define v %esi
17062@@ -70,6 +78,20 @@ RET_ENDP
17063 BEGIN(add)
17064 addl %eax, (v)
17065 adcl %edx, 4(v)
17066+
17067+#ifdef CONFIG_PAX_REFCOUNT
17068+ jno 0f
17069+ subl %eax, (v)
17070+ sbbl %edx, 4(v)
17071+ int $4
17072+0:
17073+ _ASM_EXTABLE(0b, 0b)
17074+#endif
17075+
17076+RET_ENDP
17077+BEGIN(add_unchecked)
17078+ addl %eax, (v)
17079+ adcl %edx, 4(v)
17080 RET_ENDP
17081 #undef v
17082
17083@@ -77,6 +99,24 @@ RET_ENDP
17084 BEGIN(add_return)
17085 addl (v), %eax
17086 adcl 4(v), %edx
17087+
17088+#ifdef CONFIG_PAX_REFCOUNT
17089+ into
17090+1234:
17091+ _ASM_EXTABLE(1234b, 2f)
17092+#endif
17093+
17094+ movl %eax, (v)
17095+ movl %edx, 4(v)
17096+
17097+#ifdef CONFIG_PAX_REFCOUNT
17098+2:
17099+#endif
17100+
17101+RET_ENDP
17102+BEGIN(add_return_unchecked)
17103+ addl (v), %eax
17104+ adcl 4(v), %edx
17105 movl %eax, (v)
17106 movl %edx, 4(v)
17107 RET_ENDP
17108@@ -86,6 +126,20 @@ RET_ENDP
17109 BEGIN(sub)
17110 subl %eax, (v)
17111 sbbl %edx, 4(v)
17112+
17113+#ifdef CONFIG_PAX_REFCOUNT
17114+ jno 0f
17115+ addl %eax, (v)
17116+ adcl %edx, 4(v)
17117+ int $4
17118+0:
17119+ _ASM_EXTABLE(0b, 0b)
17120+#endif
17121+
17122+RET_ENDP
17123+BEGIN(sub_unchecked)
17124+ subl %eax, (v)
17125+ sbbl %edx, 4(v)
17126 RET_ENDP
17127 #undef v
17128
17129@@ -96,6 +150,27 @@ BEGIN(sub_return)
17130 sbbl $0, %edx
17131 addl (v), %eax
17132 adcl 4(v), %edx
17133+
17134+#ifdef CONFIG_PAX_REFCOUNT
17135+ into
17136+1234:
17137+ _ASM_EXTABLE(1234b, 2f)
17138+#endif
17139+
17140+ movl %eax, (v)
17141+ movl %edx, 4(v)
17142+
17143+#ifdef CONFIG_PAX_REFCOUNT
17144+2:
17145+#endif
17146+
17147+RET_ENDP
17148+BEGIN(sub_return_unchecked)
17149+ negl %edx
17150+ negl %eax
17151+ sbbl $0, %edx
17152+ addl (v), %eax
17153+ adcl 4(v), %edx
17154 movl %eax, (v)
17155 movl %edx, 4(v)
17156 RET_ENDP
17157@@ -105,6 +180,20 @@ RET_ENDP
17158 BEGIN(inc)
17159 addl $1, (v)
17160 adcl $0, 4(v)
17161+
17162+#ifdef CONFIG_PAX_REFCOUNT
17163+ jno 0f
17164+ subl $1, (v)
17165+ sbbl $0, 4(v)
17166+ int $4
17167+0:
17168+ _ASM_EXTABLE(0b, 0b)
17169+#endif
17170+
17171+RET_ENDP
17172+BEGIN(inc_unchecked)
17173+ addl $1, (v)
17174+ adcl $0, 4(v)
17175 RET_ENDP
17176 #undef v
17177
17178@@ -114,6 +203,26 @@ BEGIN(inc_return)
17179 movl 4(v), %edx
17180 addl $1, %eax
17181 adcl $0, %edx
17182+
17183+#ifdef CONFIG_PAX_REFCOUNT
17184+ into
17185+1234:
17186+ _ASM_EXTABLE(1234b, 2f)
17187+#endif
17188+
17189+ movl %eax, (v)
17190+ movl %edx, 4(v)
17191+
17192+#ifdef CONFIG_PAX_REFCOUNT
17193+2:
17194+#endif
17195+
17196+RET_ENDP
17197+BEGIN(inc_return_unchecked)
17198+ movl (v), %eax
17199+ movl 4(v), %edx
17200+ addl $1, %eax
17201+ adcl $0, %edx
17202 movl %eax, (v)
17203 movl %edx, 4(v)
17204 RET_ENDP
17205@@ -123,6 +232,20 @@ RET_ENDP
17206 BEGIN(dec)
17207 subl $1, (v)
17208 sbbl $0, 4(v)
17209+
17210+#ifdef CONFIG_PAX_REFCOUNT
17211+ jno 0f
17212+ addl $1, (v)
17213+ adcl $0, 4(v)
17214+ int $4
17215+0:
17216+ _ASM_EXTABLE(0b, 0b)
17217+#endif
17218+
17219+RET_ENDP
17220+BEGIN(dec_unchecked)
17221+ subl $1, (v)
17222+ sbbl $0, 4(v)
17223 RET_ENDP
17224 #undef v
17225
17226@@ -132,6 +255,26 @@ BEGIN(dec_return)
17227 movl 4(v), %edx
17228 subl $1, %eax
17229 sbbl $0, %edx
17230+
17231+#ifdef CONFIG_PAX_REFCOUNT
17232+ into
17233+1234:
17234+ _ASM_EXTABLE(1234b, 2f)
17235+#endif
17236+
17237+ movl %eax, (v)
17238+ movl %edx, 4(v)
17239+
17240+#ifdef CONFIG_PAX_REFCOUNT
17241+2:
17242+#endif
17243+
17244+RET_ENDP
17245+BEGIN(dec_return_unchecked)
17246+ movl (v), %eax
17247+ movl 4(v), %edx
17248+ subl $1, %eax
17249+ sbbl $0, %edx
17250 movl %eax, (v)
17251 movl %edx, 4(v)
17252 RET_ENDP
17253@@ -143,6 +286,13 @@ BEGIN(add_unless)
17254 adcl %edx, %edi
17255 addl (v), %eax
17256 adcl 4(v), %edx
17257+
17258+#ifdef CONFIG_PAX_REFCOUNT
17259+ into
17260+1234:
17261+ _ASM_EXTABLE(1234b, 2f)
17262+#endif
17263+
17264 cmpl %eax, %esi
17265 je 3f
17266 1:
17267@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17268 1:
17269 addl $1, %eax
17270 adcl $0, %edx
17271+
17272+#ifdef CONFIG_PAX_REFCOUNT
17273+ into
17274+1234:
17275+ _ASM_EXTABLE(1234b, 2f)
17276+#endif
17277+
17278 movl %eax, (v)
17279 movl %edx, 4(v)
17280 movl $1, %eax
17281@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17282 movl 4(v), %edx
17283 subl $1, %eax
17284 sbbl $0, %edx
17285+
17286+#ifdef CONFIG_PAX_REFCOUNT
17287+ into
17288+1234:
17289+ _ASM_EXTABLE(1234b, 1f)
17290+#endif
17291+
17292 js 1f
17293 movl %eax, (v)
17294 movl %edx, 4(v)
17295diff -urNp linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S
17296--- linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
17297+++ linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
17298@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17299 CFI_STARTPROC
17300
17301 read64 %ecx
17302+ pax_force_retaddr
17303 ret
17304 CFI_ENDPROC
17305 ENDPROC(atomic64_read_cx8)
17306
17307+ENTRY(atomic64_read_unchecked_cx8)
17308+ CFI_STARTPROC
17309+
17310+ read64 %ecx
17311+ pax_force_retaddr
17312+ ret
17313+ CFI_ENDPROC
17314+ENDPROC(atomic64_read_unchecked_cx8)
17315+
17316 ENTRY(atomic64_set_cx8)
17317 CFI_STARTPROC
17318
17319@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17320 cmpxchg8b (%esi)
17321 jne 1b
17322
17323+ pax_force_retaddr
17324 ret
17325 CFI_ENDPROC
17326 ENDPROC(atomic64_set_cx8)
17327
17328+ENTRY(atomic64_set_unchecked_cx8)
17329+ CFI_STARTPROC
17330+
17331+1:
17332+/* we don't need LOCK_PREFIX since aligned 64-bit writes
17333+ * are atomic on 586 and newer */
17334+ cmpxchg8b (%esi)
17335+ jne 1b
17336+
17337+ pax_force_retaddr
17338+ ret
17339+ CFI_ENDPROC
17340+ENDPROC(atomic64_set_unchecked_cx8)
17341+
17342 ENTRY(atomic64_xchg_cx8)
17343 CFI_STARTPROC
17344
17345@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17346 cmpxchg8b (%esi)
17347 jne 1b
17348
17349+ pax_force_retaddr
17350 ret
17351 CFI_ENDPROC
17352 ENDPROC(atomic64_xchg_cx8)
17353
17354-.macro addsub_return func ins insc
17355-ENTRY(atomic64_\func\()_return_cx8)
17356+.macro addsub_return func ins insc unchecked=""
17357+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17358 CFI_STARTPROC
17359 SAVE ebp
17360 SAVE ebx
17361@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17362 movl %edx, %ecx
17363 \ins\()l %esi, %ebx
17364 \insc\()l %edi, %ecx
17365+
17366+.ifb \unchecked
17367+#ifdef CONFIG_PAX_REFCOUNT
17368+ into
17369+2:
17370+ _ASM_EXTABLE(2b, 3f)
17371+#endif
17372+.endif
17373+
17374 LOCK_PREFIX
17375 cmpxchg8b (%ebp)
17376 jne 1b
17377-
17378-10:
17379 movl %ebx, %eax
17380 movl %ecx, %edx
17381+
17382+.ifb \unchecked
17383+#ifdef CONFIG_PAX_REFCOUNT
17384+3:
17385+#endif
17386+.endif
17387+
17388 RESTORE edi
17389 RESTORE esi
17390 RESTORE ebx
17391 RESTORE ebp
17392+ pax_force_retaddr
17393 ret
17394 CFI_ENDPROC
17395-ENDPROC(atomic64_\func\()_return_cx8)
17396+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17397 .endm
17398
17399 addsub_return add add adc
17400 addsub_return sub sub sbb
17401+addsub_return add add adc _unchecked
17402+addsub_return sub sub sbb _unchecked
17403
17404-.macro incdec_return func ins insc
17405-ENTRY(atomic64_\func\()_return_cx8)
17406+.macro incdec_return func ins insc unchecked
17407+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17408 CFI_STARTPROC
17409 SAVE ebx
17410
17411@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17412 movl %edx, %ecx
17413 \ins\()l $1, %ebx
17414 \insc\()l $0, %ecx
17415+
17416+.ifb \unchecked
17417+#ifdef CONFIG_PAX_REFCOUNT
17418+ into
17419+2:
17420+ _ASM_EXTABLE(2b, 3f)
17421+#endif
17422+.endif
17423+
17424 LOCK_PREFIX
17425 cmpxchg8b (%esi)
17426 jne 1b
17427
17428-10:
17429 movl %ebx, %eax
17430 movl %ecx, %edx
17431+
17432+.ifb \unchecked
17433+#ifdef CONFIG_PAX_REFCOUNT
17434+3:
17435+#endif
17436+.endif
17437+
17438 RESTORE ebx
17439+ pax_force_retaddr
17440 ret
17441 CFI_ENDPROC
17442-ENDPROC(atomic64_\func\()_return_cx8)
17443+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17444 .endm
17445
17446 incdec_return inc add adc
17447 incdec_return dec sub sbb
17448+incdec_return inc add adc _unchecked
17449+incdec_return dec sub sbb _unchecked
17450
17451 ENTRY(atomic64_dec_if_positive_cx8)
17452 CFI_STARTPROC
17453@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17454 movl %edx, %ecx
17455 subl $1, %ebx
17456 sbb $0, %ecx
17457+
17458+#ifdef CONFIG_PAX_REFCOUNT
17459+ into
17460+1234:
17461+ _ASM_EXTABLE(1234b, 2f)
17462+#endif
17463+
17464 js 2f
17465 LOCK_PREFIX
17466 cmpxchg8b (%esi)
17467@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17468 movl %ebx, %eax
17469 movl %ecx, %edx
17470 RESTORE ebx
17471+ pax_force_retaddr
17472 ret
17473 CFI_ENDPROC
17474 ENDPROC(atomic64_dec_if_positive_cx8)
17475@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17476 movl %edx, %ecx
17477 addl %esi, %ebx
17478 adcl %edi, %ecx
17479+
17480+#ifdef CONFIG_PAX_REFCOUNT
17481+ into
17482+1234:
17483+ _ASM_EXTABLE(1234b, 3f)
17484+#endif
17485+
17486 LOCK_PREFIX
17487 cmpxchg8b (%ebp)
17488 jne 1b
17489@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17490 CFI_ADJUST_CFA_OFFSET -8
17491 RESTORE ebx
17492 RESTORE ebp
17493+ pax_force_retaddr
17494 ret
17495 4:
17496 cmpl %edx, 4(%esp)
17497@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17498 movl %edx, %ecx
17499 addl $1, %ebx
17500 adcl $0, %ecx
17501+
17502+#ifdef CONFIG_PAX_REFCOUNT
17503+ into
17504+1234:
17505+ _ASM_EXTABLE(1234b, 3f)
17506+#endif
17507+
17508 LOCK_PREFIX
17509 cmpxchg8b (%esi)
17510 jne 1b
17511@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17512 movl $1, %eax
17513 3:
17514 RESTORE ebx
17515+ pax_force_retaddr
17516 ret
17517 4:
17518 testl %edx, %edx
17519diff -urNp linux-3.0.7/arch/x86/lib/checksum_32.S linux-3.0.7/arch/x86/lib/checksum_32.S
17520--- linux-3.0.7/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17521+++ linux-3.0.7/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17522@@ -28,7 +28,8 @@
17523 #include <linux/linkage.h>
17524 #include <asm/dwarf2.h>
17525 #include <asm/errno.h>
17526-
17527+#include <asm/segment.h>
17528+
17529 /*
17530 * computes a partial checksum, e.g. for TCP/UDP fragments
17531 */
17532@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17533
17534 #define ARGBASE 16
17535 #define FP 12
17536-
17537-ENTRY(csum_partial_copy_generic)
17538+
17539+ENTRY(csum_partial_copy_generic_to_user)
17540 CFI_STARTPROC
17541+
17542+#ifdef CONFIG_PAX_MEMORY_UDEREF
17543+ pushl_cfi %gs
17544+ popl_cfi %es
17545+ jmp csum_partial_copy_generic
17546+#endif
17547+
17548+ENTRY(csum_partial_copy_generic_from_user)
17549+
17550+#ifdef CONFIG_PAX_MEMORY_UDEREF
17551+ pushl_cfi %gs
17552+ popl_cfi %ds
17553+#endif
17554+
17555+ENTRY(csum_partial_copy_generic)
17556 subl $4,%esp
17557 CFI_ADJUST_CFA_OFFSET 4
17558 pushl_cfi %edi
17559@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17560 jmp 4f
17561 SRC(1: movw (%esi), %bx )
17562 addl $2, %esi
17563-DST( movw %bx, (%edi) )
17564+DST( movw %bx, %es:(%edi) )
17565 addl $2, %edi
17566 addw %bx, %ax
17567 adcl $0, %eax
17568@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17569 SRC(1: movl (%esi), %ebx )
17570 SRC( movl 4(%esi), %edx )
17571 adcl %ebx, %eax
17572-DST( movl %ebx, (%edi) )
17573+DST( movl %ebx, %es:(%edi) )
17574 adcl %edx, %eax
17575-DST( movl %edx, 4(%edi) )
17576+DST( movl %edx, %es:4(%edi) )
17577
17578 SRC( movl 8(%esi), %ebx )
17579 SRC( movl 12(%esi), %edx )
17580 adcl %ebx, %eax
17581-DST( movl %ebx, 8(%edi) )
17582+DST( movl %ebx, %es:8(%edi) )
17583 adcl %edx, %eax
17584-DST( movl %edx, 12(%edi) )
17585+DST( movl %edx, %es:12(%edi) )
17586
17587 SRC( movl 16(%esi), %ebx )
17588 SRC( movl 20(%esi), %edx )
17589 adcl %ebx, %eax
17590-DST( movl %ebx, 16(%edi) )
17591+DST( movl %ebx, %es:16(%edi) )
17592 adcl %edx, %eax
17593-DST( movl %edx, 20(%edi) )
17594+DST( movl %edx, %es:20(%edi) )
17595
17596 SRC( movl 24(%esi), %ebx )
17597 SRC( movl 28(%esi), %edx )
17598 adcl %ebx, %eax
17599-DST( movl %ebx, 24(%edi) )
17600+DST( movl %ebx, %es:24(%edi) )
17601 adcl %edx, %eax
17602-DST( movl %edx, 28(%edi) )
17603+DST( movl %edx, %es:28(%edi) )
17604
17605 lea 32(%esi), %esi
17606 lea 32(%edi), %edi
17607@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17608 shrl $2, %edx # This clears CF
17609 SRC(3: movl (%esi), %ebx )
17610 adcl %ebx, %eax
17611-DST( movl %ebx, (%edi) )
17612+DST( movl %ebx, %es:(%edi) )
17613 lea 4(%esi), %esi
17614 lea 4(%edi), %edi
17615 dec %edx
17616@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17617 jb 5f
17618 SRC( movw (%esi), %cx )
17619 leal 2(%esi), %esi
17620-DST( movw %cx, (%edi) )
17621+DST( movw %cx, %es:(%edi) )
17622 leal 2(%edi), %edi
17623 je 6f
17624 shll $16,%ecx
17625 SRC(5: movb (%esi), %cl )
17626-DST( movb %cl, (%edi) )
17627+DST( movb %cl, %es:(%edi) )
17628 6: addl %ecx, %eax
17629 adcl $0, %eax
17630 7:
17631@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17632
17633 6001:
17634 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17635- movl $-EFAULT, (%ebx)
17636+ movl $-EFAULT, %ss:(%ebx)
17637
17638 # zero the complete destination - computing the rest
17639 # is too much work
17640@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17641
17642 6002:
17643 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17644- movl $-EFAULT,(%ebx)
17645+ movl $-EFAULT,%ss:(%ebx)
17646 jmp 5000b
17647
17648 .previous
17649
17650+ pushl_cfi %ss
17651+ popl_cfi %ds
17652+ pushl_cfi %ss
17653+ popl_cfi %es
17654 popl_cfi %ebx
17655 CFI_RESTORE ebx
17656 popl_cfi %esi
17657@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17658 popl_cfi %ecx # equivalent to addl $4,%esp
17659 ret
17660 CFI_ENDPROC
17661-ENDPROC(csum_partial_copy_generic)
17662+ENDPROC(csum_partial_copy_generic_to_user)
17663
17664 #else
17665
17666 /* Version for PentiumII/PPro */
17667
17668 #define ROUND1(x) \
17669+ nop; nop; nop; \
17670 SRC(movl x(%esi), %ebx ) ; \
17671 addl %ebx, %eax ; \
17672- DST(movl %ebx, x(%edi) ) ;
17673+ DST(movl %ebx, %es:x(%edi)) ;
17674
17675 #define ROUND(x) \
17676+ nop; nop; nop; \
17677 SRC(movl x(%esi), %ebx ) ; \
17678 adcl %ebx, %eax ; \
17679- DST(movl %ebx, x(%edi) ) ;
17680+ DST(movl %ebx, %es:x(%edi)) ;
17681
17682 #define ARGBASE 12
17683-
17684-ENTRY(csum_partial_copy_generic)
17685+
17686+ENTRY(csum_partial_copy_generic_to_user)
17687 CFI_STARTPROC
17688+
17689+#ifdef CONFIG_PAX_MEMORY_UDEREF
17690+ pushl_cfi %gs
17691+ popl_cfi %es
17692+ jmp csum_partial_copy_generic
17693+#endif
17694+
17695+ENTRY(csum_partial_copy_generic_from_user)
17696+
17697+#ifdef CONFIG_PAX_MEMORY_UDEREF
17698+ pushl_cfi %gs
17699+ popl_cfi %ds
17700+#endif
17701+
17702+ENTRY(csum_partial_copy_generic)
17703 pushl_cfi %ebx
17704 CFI_REL_OFFSET ebx, 0
17705 pushl_cfi %edi
17706@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17707 subl %ebx, %edi
17708 lea -1(%esi),%edx
17709 andl $-32,%edx
17710- lea 3f(%ebx,%ebx), %ebx
17711+ lea 3f(%ebx,%ebx,2), %ebx
17712 testl %esi, %esi
17713 jmp *%ebx
17714 1: addl $64,%esi
17715@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17716 jb 5f
17717 SRC( movw (%esi), %dx )
17718 leal 2(%esi), %esi
17719-DST( movw %dx, (%edi) )
17720+DST( movw %dx, %es:(%edi) )
17721 leal 2(%edi), %edi
17722 je 6f
17723 shll $16,%edx
17724 5:
17725 SRC( movb (%esi), %dl )
17726-DST( movb %dl, (%edi) )
17727+DST( movb %dl, %es:(%edi) )
17728 6: addl %edx, %eax
17729 adcl $0, %eax
17730 7:
17731 .section .fixup, "ax"
17732 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17733- movl $-EFAULT, (%ebx)
17734+ movl $-EFAULT, %ss:(%ebx)
17735 # zero the complete destination (computing the rest is too much work)
17736 movl ARGBASE+8(%esp),%edi # dst
17737 movl ARGBASE+12(%esp),%ecx # len
17738@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17739 rep; stosb
17740 jmp 7b
17741 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17742- movl $-EFAULT, (%ebx)
17743+ movl $-EFAULT, %ss:(%ebx)
17744 jmp 7b
17745 .previous
17746
17747+#ifdef CONFIG_PAX_MEMORY_UDEREF
17748+ pushl_cfi %ss
17749+ popl_cfi %ds
17750+ pushl_cfi %ss
17751+ popl_cfi %es
17752+#endif
17753+
17754 popl_cfi %esi
17755 CFI_RESTORE esi
17756 popl_cfi %edi
17757@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17758 CFI_RESTORE ebx
17759 ret
17760 CFI_ENDPROC
17761-ENDPROC(csum_partial_copy_generic)
17762+ENDPROC(csum_partial_copy_generic_to_user)
17763
17764 #undef ROUND
17765 #undef ROUND1
17766diff -urNp linux-3.0.7/arch/x86/lib/clear_page_64.S linux-3.0.7/arch/x86/lib/clear_page_64.S
17767--- linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17768+++ linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17769@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17770 movl $4096/8,%ecx
17771 xorl %eax,%eax
17772 rep stosq
17773+ pax_force_retaddr
17774 ret
17775 CFI_ENDPROC
17776 ENDPROC(clear_page_c)
17777@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17778 movl $4096,%ecx
17779 xorl %eax,%eax
17780 rep stosb
17781+ pax_force_retaddr
17782 ret
17783 CFI_ENDPROC
17784 ENDPROC(clear_page_c_e)
17785@@ -43,6 +45,7 @@ ENTRY(clear_page)
17786 leaq 64(%rdi),%rdi
17787 jnz .Lloop
17788 nop
17789+ pax_force_retaddr
17790 ret
17791 CFI_ENDPROC
17792 .Lclear_page_end:
17793@@ -58,7 +61,7 @@ ENDPROC(clear_page)
17794
17795 #include <asm/cpufeature.h>
17796
17797- .section .altinstr_replacement,"ax"
17798+ .section .altinstr_replacement,"a"
17799 1: .byte 0xeb /* jmp <disp8> */
17800 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17801 2: .byte 0xeb /* jmp <disp8> */
17802diff -urNp linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S
17803--- linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17804+++ linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17805@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17806
17807 popf
17808 mov $1, %al
17809+ pax_force_retaddr
17810 ret
17811
17812 not_same:
17813 popf
17814 xor %al,%al
17815+ pax_force_retaddr
17816 ret
17817
17818 CFI_ENDPROC
17819diff -urNp linux-3.0.7/arch/x86/lib/copy_page_64.S linux-3.0.7/arch/x86/lib/copy_page_64.S
17820--- linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17821+++ linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17822@@ -2,12 +2,14 @@
17823
17824 #include <linux/linkage.h>
17825 #include <asm/dwarf2.h>
17826+#include <asm/alternative-asm.h>
17827
17828 ALIGN
17829 copy_page_c:
17830 CFI_STARTPROC
17831 movl $4096/8,%ecx
17832 rep movsq
17833+ pax_force_retaddr
17834 ret
17835 CFI_ENDPROC
17836 ENDPROC(copy_page_c)
17837@@ -94,6 +96,7 @@ ENTRY(copy_page)
17838 CFI_RESTORE r13
17839 addq $3*8,%rsp
17840 CFI_ADJUST_CFA_OFFSET -3*8
17841+ pax_force_retaddr
17842 ret
17843 .Lcopy_page_end:
17844 CFI_ENDPROC
17845@@ -104,7 +107,7 @@ ENDPROC(copy_page)
17846
17847 #include <asm/cpufeature.h>
17848
17849- .section .altinstr_replacement,"ax"
17850+ .section .altinstr_replacement,"a"
17851 1: .byte 0xeb /* jmp <disp8> */
17852 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17853 2:
17854diff -urNp linux-3.0.7/arch/x86/lib/copy_user_64.S linux-3.0.7/arch/x86/lib/copy_user_64.S
17855--- linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17856+++ linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17857@@ -16,6 +16,7 @@
17858 #include <asm/thread_info.h>
17859 #include <asm/cpufeature.h>
17860 #include <asm/alternative-asm.h>
17861+#include <asm/pgtable.h>
17862
17863 /*
17864 * By placing feature2 after feature1 in altinstructions section, we logically
17865@@ -29,7 +30,7 @@
17866 .byte 0xe9 /* 32bit jump */
17867 .long \orig-1f /* by default jump to orig */
17868 1:
17869- .section .altinstr_replacement,"ax"
17870+ .section .altinstr_replacement,"a"
17871 2: .byte 0xe9 /* near jump with 32bit immediate */
17872 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17873 3: .byte 0xe9 /* near jump with 32bit immediate */
17874@@ -71,47 +72,20 @@
17875 #endif
17876 .endm
17877
17878-/* Standard copy_to_user with segment limit checking */
17879-ENTRY(_copy_to_user)
17880- CFI_STARTPROC
17881- GET_THREAD_INFO(%rax)
17882- movq %rdi,%rcx
17883- addq %rdx,%rcx
17884- jc bad_to_user
17885- cmpq TI_addr_limit(%rax),%rcx
17886- ja bad_to_user
17887- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17888- copy_user_generic_unrolled,copy_user_generic_string, \
17889- copy_user_enhanced_fast_string
17890- CFI_ENDPROC
17891-ENDPROC(_copy_to_user)
17892-
17893-/* Standard copy_from_user with segment limit checking */
17894-ENTRY(_copy_from_user)
17895- CFI_STARTPROC
17896- GET_THREAD_INFO(%rax)
17897- movq %rsi,%rcx
17898- addq %rdx,%rcx
17899- jc bad_from_user
17900- cmpq TI_addr_limit(%rax),%rcx
17901- ja bad_from_user
17902- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17903- copy_user_generic_unrolled,copy_user_generic_string, \
17904- copy_user_enhanced_fast_string
17905- CFI_ENDPROC
17906-ENDPROC(_copy_from_user)
17907-
17908 .section .fixup,"ax"
17909 /* must zero dest */
17910 ENTRY(bad_from_user)
17911 bad_from_user:
17912 CFI_STARTPROC
17913+ testl %edx,%edx
17914+ js bad_to_user
17915 movl %edx,%ecx
17916 xorl %eax,%eax
17917 rep
17918 stosb
17919 bad_to_user:
17920 movl %edx,%eax
17921+ pax_force_retaddr
17922 ret
17923 CFI_ENDPROC
17924 ENDPROC(bad_from_user)
17925@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17926 decl %ecx
17927 jnz 21b
17928 23: xor %eax,%eax
17929+ pax_force_retaddr
17930 ret
17931
17932 .section .fixup,"ax"
17933@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17934 3: rep
17935 movsb
17936 4: xorl %eax,%eax
17937+ pax_force_retaddr
17938 ret
17939
17940 .section .fixup,"ax"
17941@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17942 1: rep
17943 movsb
17944 2: xorl %eax,%eax
17945+ pax_force_retaddr
17946 ret
17947
17948 .section .fixup,"ax"
17949diff -urNp linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S
17950--- linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17951+++ linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17952@@ -8,12 +8,14 @@
17953
17954 #include <linux/linkage.h>
17955 #include <asm/dwarf2.h>
17956+#include <asm/alternative-asm.h>
17957
17958 #define FIX_ALIGNMENT 1
17959
17960 #include <asm/current.h>
17961 #include <asm/asm-offsets.h>
17962 #include <asm/thread_info.h>
17963+#include <asm/pgtable.h>
17964
17965 .macro ALIGN_DESTINATION
17966 #ifdef FIX_ALIGNMENT
17967@@ -50,6 +52,15 @@
17968 */
17969 ENTRY(__copy_user_nocache)
17970 CFI_STARTPROC
17971+
17972+#ifdef CONFIG_PAX_MEMORY_UDEREF
17973+ mov $PAX_USER_SHADOW_BASE,%rcx
17974+ cmp %rcx,%rsi
17975+ jae 1f
17976+ add %rcx,%rsi
17977+1:
17978+#endif
17979+
17980 cmpl $8,%edx
17981 jb 20f /* less then 8 bytes, go to byte copy loop */
17982 ALIGN_DESTINATION
17983@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17984 jnz 21b
17985 23: xorl %eax,%eax
17986 sfence
17987+ pax_force_retaddr
17988 ret
17989
17990 .section .fixup,"ax"
17991diff -urNp linux-3.0.7/arch/x86/lib/csum-copy_64.S linux-3.0.7/arch/x86/lib/csum-copy_64.S
17992--- linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17993+++ linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17994@@ -8,6 +8,7 @@
17995 #include <linux/linkage.h>
17996 #include <asm/dwarf2.h>
17997 #include <asm/errno.h>
17998+#include <asm/alternative-asm.h>
17999
18000 /*
18001 * Checksum copy with exception handling.
18002@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18003 CFI_RESTORE rbp
18004 addq $7*8, %rsp
18005 CFI_ADJUST_CFA_OFFSET -7*8
18006+ pax_force_retaddr
18007 ret
18008 CFI_RESTORE_STATE
18009
18010diff -urNp linux-3.0.7/arch/x86/lib/csum-wrappers_64.c linux-3.0.7/arch/x86/lib/csum-wrappers_64.c
18011--- linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
18012+++ linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
18013@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18014 len -= 2;
18015 }
18016 }
18017- isum = csum_partial_copy_generic((__force const void *)src,
18018+
18019+#ifdef CONFIG_PAX_MEMORY_UDEREF
18020+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18021+ src += PAX_USER_SHADOW_BASE;
18022+#endif
18023+
18024+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18025 dst, len, isum, errp, NULL);
18026 if (unlikely(*errp))
18027 goto out_err;
18028@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18029 }
18030
18031 *errp = 0;
18032- return csum_partial_copy_generic(src, (void __force *)dst,
18033+
18034+#ifdef CONFIG_PAX_MEMORY_UDEREF
18035+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18036+ dst += PAX_USER_SHADOW_BASE;
18037+#endif
18038+
18039+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18040 len, isum, NULL, errp);
18041 }
18042 EXPORT_SYMBOL(csum_partial_copy_to_user);
18043diff -urNp linux-3.0.7/arch/x86/lib/getuser.S linux-3.0.7/arch/x86/lib/getuser.S
18044--- linux-3.0.7/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
18045+++ linux-3.0.7/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
18046@@ -33,15 +33,38 @@
18047 #include <asm/asm-offsets.h>
18048 #include <asm/thread_info.h>
18049 #include <asm/asm.h>
18050+#include <asm/segment.h>
18051+#include <asm/pgtable.h>
18052+#include <asm/alternative-asm.h>
18053+
18054+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18055+#define __copyuser_seg gs;
18056+#else
18057+#define __copyuser_seg
18058+#endif
18059
18060 .text
18061 ENTRY(__get_user_1)
18062 CFI_STARTPROC
18063+
18064+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18065 GET_THREAD_INFO(%_ASM_DX)
18066 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18067 jae bad_get_user
18068-1: movzb (%_ASM_AX),%edx
18069+
18070+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18071+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18072+ cmp %_ASM_DX,%_ASM_AX
18073+ jae 1234f
18074+ add %_ASM_DX,%_ASM_AX
18075+1234:
18076+#endif
18077+
18078+#endif
18079+
18080+1: __copyuser_seg movzb (%_ASM_AX),%edx
18081 xor %eax,%eax
18082+ pax_force_retaddr
18083 ret
18084 CFI_ENDPROC
18085 ENDPROC(__get_user_1)
18086@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18087 ENTRY(__get_user_2)
18088 CFI_STARTPROC
18089 add $1,%_ASM_AX
18090+
18091+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18092 jc bad_get_user
18093 GET_THREAD_INFO(%_ASM_DX)
18094 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18095 jae bad_get_user
18096-2: movzwl -1(%_ASM_AX),%edx
18097+
18098+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18099+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18100+ cmp %_ASM_DX,%_ASM_AX
18101+ jae 1234f
18102+ add %_ASM_DX,%_ASM_AX
18103+1234:
18104+#endif
18105+
18106+#endif
18107+
18108+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18109 xor %eax,%eax
18110+ pax_force_retaddr
18111 ret
18112 CFI_ENDPROC
18113 ENDPROC(__get_user_2)
18114@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18115 ENTRY(__get_user_4)
18116 CFI_STARTPROC
18117 add $3,%_ASM_AX
18118+
18119+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18120 jc bad_get_user
18121 GET_THREAD_INFO(%_ASM_DX)
18122 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18123 jae bad_get_user
18124-3: mov -3(%_ASM_AX),%edx
18125+
18126+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18127+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18128+ cmp %_ASM_DX,%_ASM_AX
18129+ jae 1234f
18130+ add %_ASM_DX,%_ASM_AX
18131+1234:
18132+#endif
18133+
18134+#endif
18135+
18136+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18137 xor %eax,%eax
18138+ pax_force_retaddr
18139 ret
18140 CFI_ENDPROC
18141 ENDPROC(__get_user_4)
18142@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18143 GET_THREAD_INFO(%_ASM_DX)
18144 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18145 jae bad_get_user
18146+
18147+#ifdef CONFIG_PAX_MEMORY_UDEREF
18148+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18149+ cmp %_ASM_DX,%_ASM_AX
18150+ jae 1234f
18151+ add %_ASM_DX,%_ASM_AX
18152+1234:
18153+#endif
18154+
18155 4: movq -7(%_ASM_AX),%_ASM_DX
18156 xor %eax,%eax
18157+ pax_force_retaddr
18158 ret
18159 CFI_ENDPROC
18160 ENDPROC(__get_user_8)
18161@@ -91,6 +152,7 @@ bad_get_user:
18162 CFI_STARTPROC
18163 xor %edx,%edx
18164 mov $(-EFAULT),%_ASM_AX
18165+ pax_force_retaddr
18166 ret
18167 CFI_ENDPROC
18168 END(bad_get_user)
18169diff -urNp linux-3.0.7/arch/x86/lib/insn.c linux-3.0.7/arch/x86/lib/insn.c
18170--- linux-3.0.7/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
18171+++ linux-3.0.7/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
18172@@ -21,6 +21,11 @@
18173 #include <linux/string.h>
18174 #include <asm/inat.h>
18175 #include <asm/insn.h>
18176+#ifdef __KERNEL__
18177+#include <asm/pgtable_types.h>
18178+#else
18179+#define ktla_ktva(addr) addr
18180+#endif
18181
18182 #define get_next(t, insn) \
18183 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18184@@ -40,8 +45,8 @@
18185 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18186 {
18187 memset(insn, 0, sizeof(*insn));
18188- insn->kaddr = kaddr;
18189- insn->next_byte = kaddr;
18190+ insn->kaddr = ktla_ktva(kaddr);
18191+ insn->next_byte = ktla_ktva(kaddr);
18192 insn->x86_64 = x86_64 ? 1 : 0;
18193 insn->opnd_bytes = 4;
18194 if (x86_64)
18195diff -urNp linux-3.0.7/arch/x86/lib/iomap_copy_64.S linux-3.0.7/arch/x86/lib/iomap_copy_64.S
18196--- linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
18197+++ linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
18198@@ -17,6 +17,7 @@
18199
18200 #include <linux/linkage.h>
18201 #include <asm/dwarf2.h>
18202+#include <asm/alternative-asm.h>
18203
18204 /*
18205 * override generic version in lib/iomap_copy.c
18206@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18207 CFI_STARTPROC
18208 movl %edx,%ecx
18209 rep movsd
18210+ pax_force_retaddr
18211 ret
18212 CFI_ENDPROC
18213 ENDPROC(__iowrite32_copy)
18214diff -urNp linux-3.0.7/arch/x86/lib/memcpy_64.S linux-3.0.7/arch/x86/lib/memcpy_64.S
18215--- linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
18216+++ linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
18217@@ -34,6 +34,7 @@
18218 rep movsq
18219 movl %edx, %ecx
18220 rep movsb
18221+ pax_force_retaddr
18222 ret
18223 .Lmemcpy_e:
18224 .previous
18225@@ -51,6 +52,7 @@
18226
18227 movl %edx, %ecx
18228 rep movsb
18229+ pax_force_retaddr
18230 ret
18231 .Lmemcpy_e_e:
18232 .previous
18233@@ -141,6 +143,7 @@ ENTRY(memcpy)
18234 movq %r9, 1*8(%rdi)
18235 movq %r10, -2*8(%rdi, %rdx)
18236 movq %r11, -1*8(%rdi, %rdx)
18237+ pax_force_retaddr
18238 retq
18239 .p2align 4
18240 .Lless_16bytes:
18241@@ -153,6 +156,7 @@ ENTRY(memcpy)
18242 movq -1*8(%rsi, %rdx), %r9
18243 movq %r8, 0*8(%rdi)
18244 movq %r9, -1*8(%rdi, %rdx)
18245+ pax_force_retaddr
18246 retq
18247 .p2align 4
18248 .Lless_8bytes:
18249@@ -166,6 +170,7 @@ ENTRY(memcpy)
18250 movl -4(%rsi, %rdx), %r8d
18251 movl %ecx, (%rdi)
18252 movl %r8d, -4(%rdi, %rdx)
18253+ pax_force_retaddr
18254 retq
18255 .p2align 4
18256 .Lless_3bytes:
18257@@ -183,6 +188,7 @@ ENTRY(memcpy)
18258 jnz .Lloop_1
18259
18260 .Lend:
18261+ pax_force_retaddr
18262 retq
18263 CFI_ENDPROC
18264 ENDPROC(memcpy)
18265diff -urNp linux-3.0.7/arch/x86/lib/memmove_64.S linux-3.0.7/arch/x86/lib/memmove_64.S
18266--- linux-3.0.7/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
18267+++ linux-3.0.7/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
18268@@ -9,6 +9,7 @@
18269 #include <linux/linkage.h>
18270 #include <asm/dwarf2.h>
18271 #include <asm/cpufeature.h>
18272+#include <asm/alternative-asm.h>
18273
18274 #undef memmove
18275
18276@@ -201,6 +202,7 @@ ENTRY(memmove)
18277 movb (%rsi), %r11b
18278 movb %r11b, (%rdi)
18279 13:
18280+ pax_force_retaddr
18281 retq
18282 CFI_ENDPROC
18283
18284@@ -209,6 +211,7 @@ ENTRY(memmove)
18285 /* Forward moving data. */
18286 movq %rdx, %rcx
18287 rep movsb
18288+ pax_force_retaddr
18289 retq
18290 .Lmemmove_end_forward_efs:
18291 .previous
18292diff -urNp linux-3.0.7/arch/x86/lib/memset_64.S linux-3.0.7/arch/x86/lib/memset_64.S
18293--- linux-3.0.7/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
18294+++ linux-3.0.7/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
18295@@ -31,6 +31,7 @@
18296 movl %r8d,%ecx
18297 rep stosb
18298 movq %r9,%rax
18299+ pax_force_retaddr
18300 ret
18301 .Lmemset_e:
18302 .previous
18303@@ -53,6 +54,7 @@
18304 movl %edx,%ecx
18305 rep stosb
18306 movq %r9,%rax
18307+ pax_force_retaddr
18308 ret
18309 .Lmemset_e_e:
18310 .previous
18311@@ -121,6 +123,7 @@ ENTRY(__memset)
18312
18313 .Lende:
18314 movq %r10,%rax
18315+ pax_force_retaddr
18316 ret
18317
18318 CFI_RESTORE_STATE
18319diff -urNp linux-3.0.7/arch/x86/lib/mmx_32.c linux-3.0.7/arch/x86/lib/mmx_32.c
18320--- linux-3.0.7/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
18321+++ linux-3.0.7/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
18322@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18323 {
18324 void *p;
18325 int i;
18326+ unsigned long cr0;
18327
18328 if (unlikely(in_interrupt()))
18329 return __memcpy(to, from, len);
18330@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18331 kernel_fpu_begin();
18332
18333 __asm__ __volatile__ (
18334- "1: prefetch (%0)\n" /* This set is 28 bytes */
18335- " prefetch 64(%0)\n"
18336- " prefetch 128(%0)\n"
18337- " prefetch 192(%0)\n"
18338- " prefetch 256(%0)\n"
18339+ "1: prefetch (%1)\n" /* This set is 28 bytes */
18340+ " prefetch 64(%1)\n"
18341+ " prefetch 128(%1)\n"
18342+ " prefetch 192(%1)\n"
18343+ " prefetch 256(%1)\n"
18344 "2: \n"
18345 ".section .fixup, \"ax\"\n"
18346- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18347+ "3: \n"
18348+
18349+#ifdef CONFIG_PAX_KERNEXEC
18350+ " movl %%cr0, %0\n"
18351+ " movl %0, %%eax\n"
18352+ " andl $0xFFFEFFFF, %%eax\n"
18353+ " movl %%eax, %%cr0\n"
18354+#endif
18355+
18356+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18357+
18358+#ifdef CONFIG_PAX_KERNEXEC
18359+ " movl %0, %%cr0\n"
18360+#endif
18361+
18362 " jmp 2b\n"
18363 ".previous\n"
18364 _ASM_EXTABLE(1b, 3b)
18365- : : "r" (from));
18366+ : "=&r" (cr0) : "r" (from) : "ax");
18367
18368 for ( ; i > 5; i--) {
18369 __asm__ __volatile__ (
18370- "1: prefetch 320(%0)\n"
18371- "2: movq (%0), %%mm0\n"
18372- " movq 8(%0), %%mm1\n"
18373- " movq 16(%0), %%mm2\n"
18374- " movq 24(%0), %%mm3\n"
18375- " movq %%mm0, (%1)\n"
18376- " movq %%mm1, 8(%1)\n"
18377- " movq %%mm2, 16(%1)\n"
18378- " movq %%mm3, 24(%1)\n"
18379- " movq 32(%0), %%mm0\n"
18380- " movq 40(%0), %%mm1\n"
18381- " movq 48(%0), %%mm2\n"
18382- " movq 56(%0), %%mm3\n"
18383- " movq %%mm0, 32(%1)\n"
18384- " movq %%mm1, 40(%1)\n"
18385- " movq %%mm2, 48(%1)\n"
18386- " movq %%mm3, 56(%1)\n"
18387+ "1: prefetch 320(%1)\n"
18388+ "2: movq (%1), %%mm0\n"
18389+ " movq 8(%1), %%mm1\n"
18390+ " movq 16(%1), %%mm2\n"
18391+ " movq 24(%1), %%mm3\n"
18392+ " movq %%mm0, (%2)\n"
18393+ " movq %%mm1, 8(%2)\n"
18394+ " movq %%mm2, 16(%2)\n"
18395+ " movq %%mm3, 24(%2)\n"
18396+ " movq 32(%1), %%mm0\n"
18397+ " movq 40(%1), %%mm1\n"
18398+ " movq 48(%1), %%mm2\n"
18399+ " movq 56(%1), %%mm3\n"
18400+ " movq %%mm0, 32(%2)\n"
18401+ " movq %%mm1, 40(%2)\n"
18402+ " movq %%mm2, 48(%2)\n"
18403+ " movq %%mm3, 56(%2)\n"
18404 ".section .fixup, \"ax\"\n"
18405- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18406+ "3:\n"
18407+
18408+#ifdef CONFIG_PAX_KERNEXEC
18409+ " movl %%cr0, %0\n"
18410+ " movl %0, %%eax\n"
18411+ " andl $0xFFFEFFFF, %%eax\n"
18412+ " movl %%eax, %%cr0\n"
18413+#endif
18414+
18415+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18416+
18417+#ifdef CONFIG_PAX_KERNEXEC
18418+ " movl %0, %%cr0\n"
18419+#endif
18420+
18421 " jmp 2b\n"
18422 ".previous\n"
18423 _ASM_EXTABLE(1b, 3b)
18424- : : "r" (from), "r" (to) : "memory");
18425+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18426
18427 from += 64;
18428 to += 64;
18429@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18430 static void fast_copy_page(void *to, void *from)
18431 {
18432 int i;
18433+ unsigned long cr0;
18434
18435 kernel_fpu_begin();
18436
18437@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18438 * but that is for later. -AV
18439 */
18440 __asm__ __volatile__(
18441- "1: prefetch (%0)\n"
18442- " prefetch 64(%0)\n"
18443- " prefetch 128(%0)\n"
18444- " prefetch 192(%0)\n"
18445- " prefetch 256(%0)\n"
18446+ "1: prefetch (%1)\n"
18447+ " prefetch 64(%1)\n"
18448+ " prefetch 128(%1)\n"
18449+ " prefetch 192(%1)\n"
18450+ " prefetch 256(%1)\n"
18451 "2: \n"
18452 ".section .fixup, \"ax\"\n"
18453- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18454+ "3: \n"
18455+
18456+#ifdef CONFIG_PAX_KERNEXEC
18457+ " movl %%cr0, %0\n"
18458+ " movl %0, %%eax\n"
18459+ " andl $0xFFFEFFFF, %%eax\n"
18460+ " movl %%eax, %%cr0\n"
18461+#endif
18462+
18463+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18464+
18465+#ifdef CONFIG_PAX_KERNEXEC
18466+ " movl %0, %%cr0\n"
18467+#endif
18468+
18469 " jmp 2b\n"
18470 ".previous\n"
18471- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18472+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18473
18474 for (i = 0; i < (4096-320)/64; i++) {
18475 __asm__ __volatile__ (
18476- "1: prefetch 320(%0)\n"
18477- "2: movq (%0), %%mm0\n"
18478- " movntq %%mm0, (%1)\n"
18479- " movq 8(%0), %%mm1\n"
18480- " movntq %%mm1, 8(%1)\n"
18481- " movq 16(%0), %%mm2\n"
18482- " movntq %%mm2, 16(%1)\n"
18483- " movq 24(%0), %%mm3\n"
18484- " movntq %%mm3, 24(%1)\n"
18485- " movq 32(%0), %%mm4\n"
18486- " movntq %%mm4, 32(%1)\n"
18487- " movq 40(%0), %%mm5\n"
18488- " movntq %%mm5, 40(%1)\n"
18489- " movq 48(%0), %%mm6\n"
18490- " movntq %%mm6, 48(%1)\n"
18491- " movq 56(%0), %%mm7\n"
18492- " movntq %%mm7, 56(%1)\n"
18493+ "1: prefetch 320(%1)\n"
18494+ "2: movq (%1), %%mm0\n"
18495+ " movntq %%mm0, (%2)\n"
18496+ " movq 8(%1), %%mm1\n"
18497+ " movntq %%mm1, 8(%2)\n"
18498+ " movq 16(%1), %%mm2\n"
18499+ " movntq %%mm2, 16(%2)\n"
18500+ " movq 24(%1), %%mm3\n"
18501+ " movntq %%mm3, 24(%2)\n"
18502+ " movq 32(%1), %%mm4\n"
18503+ " movntq %%mm4, 32(%2)\n"
18504+ " movq 40(%1), %%mm5\n"
18505+ " movntq %%mm5, 40(%2)\n"
18506+ " movq 48(%1), %%mm6\n"
18507+ " movntq %%mm6, 48(%2)\n"
18508+ " movq 56(%1), %%mm7\n"
18509+ " movntq %%mm7, 56(%2)\n"
18510 ".section .fixup, \"ax\"\n"
18511- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18512+ "3:\n"
18513+
18514+#ifdef CONFIG_PAX_KERNEXEC
18515+ " movl %%cr0, %0\n"
18516+ " movl %0, %%eax\n"
18517+ " andl $0xFFFEFFFF, %%eax\n"
18518+ " movl %%eax, %%cr0\n"
18519+#endif
18520+
18521+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18522+
18523+#ifdef CONFIG_PAX_KERNEXEC
18524+ " movl %0, %%cr0\n"
18525+#endif
18526+
18527 " jmp 2b\n"
18528 ".previous\n"
18529- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18530+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18531
18532 from += 64;
18533 to += 64;
18534@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18535 static void fast_copy_page(void *to, void *from)
18536 {
18537 int i;
18538+ unsigned long cr0;
18539
18540 kernel_fpu_begin();
18541
18542 __asm__ __volatile__ (
18543- "1: prefetch (%0)\n"
18544- " prefetch 64(%0)\n"
18545- " prefetch 128(%0)\n"
18546- " prefetch 192(%0)\n"
18547- " prefetch 256(%0)\n"
18548+ "1: prefetch (%1)\n"
18549+ " prefetch 64(%1)\n"
18550+ " prefetch 128(%1)\n"
18551+ " prefetch 192(%1)\n"
18552+ " prefetch 256(%1)\n"
18553 "2: \n"
18554 ".section .fixup, \"ax\"\n"
18555- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18556+ "3: \n"
18557+
18558+#ifdef CONFIG_PAX_KERNEXEC
18559+ " movl %%cr0, %0\n"
18560+ " movl %0, %%eax\n"
18561+ " andl $0xFFFEFFFF, %%eax\n"
18562+ " movl %%eax, %%cr0\n"
18563+#endif
18564+
18565+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18566+
18567+#ifdef CONFIG_PAX_KERNEXEC
18568+ " movl %0, %%cr0\n"
18569+#endif
18570+
18571 " jmp 2b\n"
18572 ".previous\n"
18573- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18574+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18575
18576 for (i = 0; i < 4096/64; i++) {
18577 __asm__ __volatile__ (
18578- "1: prefetch 320(%0)\n"
18579- "2: movq (%0), %%mm0\n"
18580- " movq 8(%0), %%mm1\n"
18581- " movq 16(%0), %%mm2\n"
18582- " movq 24(%0), %%mm3\n"
18583- " movq %%mm0, (%1)\n"
18584- " movq %%mm1, 8(%1)\n"
18585- " movq %%mm2, 16(%1)\n"
18586- " movq %%mm3, 24(%1)\n"
18587- " movq 32(%0), %%mm0\n"
18588- " movq 40(%0), %%mm1\n"
18589- " movq 48(%0), %%mm2\n"
18590- " movq 56(%0), %%mm3\n"
18591- " movq %%mm0, 32(%1)\n"
18592- " movq %%mm1, 40(%1)\n"
18593- " movq %%mm2, 48(%1)\n"
18594- " movq %%mm3, 56(%1)\n"
18595+ "1: prefetch 320(%1)\n"
18596+ "2: movq (%1), %%mm0\n"
18597+ " movq 8(%1), %%mm1\n"
18598+ " movq 16(%1), %%mm2\n"
18599+ " movq 24(%1), %%mm3\n"
18600+ " movq %%mm0, (%2)\n"
18601+ " movq %%mm1, 8(%2)\n"
18602+ " movq %%mm2, 16(%2)\n"
18603+ " movq %%mm3, 24(%2)\n"
18604+ " movq 32(%1), %%mm0\n"
18605+ " movq 40(%1), %%mm1\n"
18606+ " movq 48(%1), %%mm2\n"
18607+ " movq 56(%1), %%mm3\n"
18608+ " movq %%mm0, 32(%2)\n"
18609+ " movq %%mm1, 40(%2)\n"
18610+ " movq %%mm2, 48(%2)\n"
18611+ " movq %%mm3, 56(%2)\n"
18612 ".section .fixup, \"ax\"\n"
18613- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18614+ "3:\n"
18615+
18616+#ifdef CONFIG_PAX_KERNEXEC
18617+ " movl %%cr0, %0\n"
18618+ " movl %0, %%eax\n"
18619+ " andl $0xFFFEFFFF, %%eax\n"
18620+ " movl %%eax, %%cr0\n"
18621+#endif
18622+
18623+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18624+
18625+#ifdef CONFIG_PAX_KERNEXEC
18626+ " movl %0, %%cr0\n"
18627+#endif
18628+
18629 " jmp 2b\n"
18630 ".previous\n"
18631 _ASM_EXTABLE(1b, 3b)
18632- : : "r" (from), "r" (to) : "memory");
18633+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18634
18635 from += 64;
18636 to += 64;
18637diff -urNp linux-3.0.7/arch/x86/lib/msr-reg.S linux-3.0.7/arch/x86/lib/msr-reg.S
18638--- linux-3.0.7/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18639+++ linux-3.0.7/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18640@@ -3,6 +3,7 @@
18641 #include <asm/dwarf2.h>
18642 #include <asm/asm.h>
18643 #include <asm/msr.h>
18644+#include <asm/alternative-asm.h>
18645
18646 #ifdef CONFIG_X86_64
18647 /*
18648@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18649 movl %edi, 28(%r10)
18650 popq_cfi %rbp
18651 popq_cfi %rbx
18652+ pax_force_retaddr
18653 ret
18654 3:
18655 CFI_RESTORE_STATE
18656diff -urNp linux-3.0.7/arch/x86/lib/putuser.S linux-3.0.7/arch/x86/lib/putuser.S
18657--- linux-3.0.7/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18658+++ linux-3.0.7/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18659@@ -15,7 +15,9 @@
18660 #include <asm/thread_info.h>
18661 #include <asm/errno.h>
18662 #include <asm/asm.h>
18663-
18664+#include <asm/segment.h>
18665+#include <asm/pgtable.h>
18666+#include <asm/alternative-asm.h>
18667
18668 /*
18669 * __put_user_X
18670@@ -29,52 +31,119 @@
18671 * as they get called from within inline assembly.
18672 */
18673
18674-#define ENTER CFI_STARTPROC ; \
18675- GET_THREAD_INFO(%_ASM_BX)
18676-#define EXIT ret ; \
18677+#define ENTER CFI_STARTPROC
18678+#define EXIT pax_force_retaddr; ret ; \
18679 CFI_ENDPROC
18680
18681+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18682+#define _DEST %_ASM_CX,%_ASM_BX
18683+#else
18684+#define _DEST %_ASM_CX
18685+#endif
18686+
18687+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18688+#define __copyuser_seg gs;
18689+#else
18690+#define __copyuser_seg
18691+#endif
18692+
18693 .text
18694 ENTRY(__put_user_1)
18695 ENTER
18696+
18697+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18698+ GET_THREAD_INFO(%_ASM_BX)
18699 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18700 jae bad_put_user
18701-1: movb %al,(%_ASM_CX)
18702+
18703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18704+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18705+ cmp %_ASM_BX,%_ASM_CX
18706+ jb 1234f
18707+ xor %ebx,%ebx
18708+1234:
18709+#endif
18710+
18711+#endif
18712+
18713+1: __copyuser_seg movb %al,(_DEST)
18714 xor %eax,%eax
18715 EXIT
18716 ENDPROC(__put_user_1)
18717
18718 ENTRY(__put_user_2)
18719 ENTER
18720+
18721+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18722+ GET_THREAD_INFO(%_ASM_BX)
18723 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18724 sub $1,%_ASM_BX
18725 cmp %_ASM_BX,%_ASM_CX
18726 jae bad_put_user
18727-2: movw %ax,(%_ASM_CX)
18728+
18729+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18730+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18731+ cmp %_ASM_BX,%_ASM_CX
18732+ jb 1234f
18733+ xor %ebx,%ebx
18734+1234:
18735+#endif
18736+
18737+#endif
18738+
18739+2: __copyuser_seg movw %ax,(_DEST)
18740 xor %eax,%eax
18741 EXIT
18742 ENDPROC(__put_user_2)
18743
18744 ENTRY(__put_user_4)
18745 ENTER
18746+
18747+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18748+ GET_THREAD_INFO(%_ASM_BX)
18749 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18750 sub $3,%_ASM_BX
18751 cmp %_ASM_BX,%_ASM_CX
18752 jae bad_put_user
18753-3: movl %eax,(%_ASM_CX)
18754+
18755+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18756+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18757+ cmp %_ASM_BX,%_ASM_CX
18758+ jb 1234f
18759+ xor %ebx,%ebx
18760+1234:
18761+#endif
18762+
18763+#endif
18764+
18765+3: __copyuser_seg movl %eax,(_DEST)
18766 xor %eax,%eax
18767 EXIT
18768 ENDPROC(__put_user_4)
18769
18770 ENTRY(__put_user_8)
18771 ENTER
18772+
18773+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18774+ GET_THREAD_INFO(%_ASM_BX)
18775 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18776 sub $7,%_ASM_BX
18777 cmp %_ASM_BX,%_ASM_CX
18778 jae bad_put_user
18779-4: mov %_ASM_AX,(%_ASM_CX)
18780+
18781+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18782+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18783+ cmp %_ASM_BX,%_ASM_CX
18784+ jb 1234f
18785+ xor %ebx,%ebx
18786+1234:
18787+#endif
18788+
18789+#endif
18790+
18791+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18792 #ifdef CONFIG_X86_32
18793-5: movl %edx,4(%_ASM_CX)
18794+5: __copyuser_seg movl %edx,4(_DEST)
18795 #endif
18796 xor %eax,%eax
18797 EXIT
18798diff -urNp linux-3.0.7/arch/x86/lib/rwlock_64.S linux-3.0.7/arch/x86/lib/rwlock_64.S
18799--- linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18800+++ linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18801@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18802 LOCK_PREFIX
18803 subl $RW_LOCK_BIAS,(%rdi)
18804 jnz __write_lock_failed
18805+ pax_force_retaddr
18806 ret
18807 CFI_ENDPROC
18808 END(__write_lock_failed)
18809@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18810 LOCK_PREFIX
18811 decl (%rdi)
18812 js __read_lock_failed
18813+ pax_force_retaddr
18814 ret
18815 CFI_ENDPROC
18816 END(__read_lock_failed)
18817diff -urNp linux-3.0.7/arch/x86/lib/rwsem_64.S linux-3.0.7/arch/x86/lib/rwsem_64.S
18818--- linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18819+++ linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
18820@@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18821 popq_cfi %rdx
18822 CFI_RESTORE rdx
18823 restore_common_regs
18824+ pax_force_retaddr
18825 ret
18826 CFI_ENDPROC
18827 ENDPROC(call_rwsem_down_read_failed)
18828@@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18829 movq %rax,%rdi
18830 call rwsem_down_write_failed
18831 restore_common_regs
18832+ pax_force_retaddr
18833 ret
18834 CFI_ENDPROC
18835 ENDPROC(call_rwsem_down_write_failed)
18836@@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
18837 movq %rax,%rdi
18838 call rwsem_wake
18839 restore_common_regs
18840-1: ret
18841+1: pax_force_retaddr
18842+ ret
18843 CFI_ENDPROC
18844 ENDPROC(call_rwsem_wake)
18845
18846@@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18847 popq_cfi %rdx
18848 CFI_RESTORE rdx
18849 restore_common_regs
18850+ pax_force_retaddr
18851 ret
18852 CFI_ENDPROC
18853 ENDPROC(call_rwsem_downgrade_wake)
18854diff -urNp linux-3.0.7/arch/x86/lib/thunk_64.S linux-3.0.7/arch/x86/lib/thunk_64.S
18855--- linux-3.0.7/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18856+++ linux-3.0.7/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18857@@ -10,7 +10,8 @@
18858 #include <asm/dwarf2.h>
18859 #include <asm/calling.h>
18860 #include <asm/rwlock.h>
18861-
18862+ #include <asm/alternative-asm.h>
18863+
18864 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18865 .macro thunk name,func
18866 .globl \name
18867@@ -50,5 +51,6 @@
18868 SAVE_ARGS
18869 restore:
18870 RESTORE_ARGS
18871- ret
18872+ pax_force_retaddr
18873+ ret
18874 CFI_ENDPROC
18875diff -urNp linux-3.0.7/arch/x86/lib/usercopy_32.c linux-3.0.7/arch/x86/lib/usercopy_32.c
18876--- linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18877+++ linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18878@@ -43,7 +43,7 @@ do { \
18879 __asm__ __volatile__( \
18880 " testl %1,%1\n" \
18881 " jz 2f\n" \
18882- "0: lodsb\n" \
18883+ "0: "__copyuser_seg"lodsb\n" \
18884 " stosb\n" \
18885 " testb %%al,%%al\n" \
18886 " jz 1f\n" \
18887@@ -128,10 +128,12 @@ do { \
18888 int __d0; \
18889 might_fault(); \
18890 __asm__ __volatile__( \
18891+ __COPYUSER_SET_ES \
18892 "0: rep; stosl\n" \
18893 " movl %2,%0\n" \
18894 "1: rep; stosb\n" \
18895 "2:\n" \
18896+ __COPYUSER_RESTORE_ES \
18897 ".section .fixup,\"ax\"\n" \
18898 "3: lea 0(%2,%0,4),%0\n" \
18899 " jmp 2b\n" \
18900@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18901 might_fault();
18902
18903 __asm__ __volatile__(
18904+ __COPYUSER_SET_ES
18905 " testl %0, %0\n"
18906 " jz 3f\n"
18907 " andl %0,%%ecx\n"
18908@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18909 " subl %%ecx,%0\n"
18910 " addl %0,%%eax\n"
18911 "1:\n"
18912+ __COPYUSER_RESTORE_ES
18913 ".section .fixup,\"ax\"\n"
18914 "2: xorl %%eax,%%eax\n"
18915 " jmp 1b\n"
18916@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18917
18918 #ifdef CONFIG_X86_INTEL_USERCOPY
18919 static unsigned long
18920-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18921+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18922 {
18923 int d0, d1;
18924 __asm__ __volatile__(
18925@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18926 " .align 2,0x90\n"
18927 "3: movl 0(%4), %%eax\n"
18928 "4: movl 4(%4), %%edx\n"
18929- "5: movl %%eax, 0(%3)\n"
18930- "6: movl %%edx, 4(%3)\n"
18931+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18932+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18933 "7: movl 8(%4), %%eax\n"
18934 "8: movl 12(%4),%%edx\n"
18935- "9: movl %%eax, 8(%3)\n"
18936- "10: movl %%edx, 12(%3)\n"
18937+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18938+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18939 "11: movl 16(%4), %%eax\n"
18940 "12: movl 20(%4), %%edx\n"
18941- "13: movl %%eax, 16(%3)\n"
18942- "14: movl %%edx, 20(%3)\n"
18943+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18944+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18945 "15: movl 24(%4), %%eax\n"
18946 "16: movl 28(%4), %%edx\n"
18947- "17: movl %%eax, 24(%3)\n"
18948- "18: movl %%edx, 28(%3)\n"
18949+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18950+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18951 "19: movl 32(%4), %%eax\n"
18952 "20: movl 36(%4), %%edx\n"
18953- "21: movl %%eax, 32(%3)\n"
18954- "22: movl %%edx, 36(%3)\n"
18955+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18956+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18957 "23: movl 40(%4), %%eax\n"
18958 "24: movl 44(%4), %%edx\n"
18959- "25: movl %%eax, 40(%3)\n"
18960- "26: movl %%edx, 44(%3)\n"
18961+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18962+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18963 "27: movl 48(%4), %%eax\n"
18964 "28: movl 52(%4), %%edx\n"
18965- "29: movl %%eax, 48(%3)\n"
18966- "30: movl %%edx, 52(%3)\n"
18967+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18968+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18969 "31: movl 56(%4), %%eax\n"
18970 "32: movl 60(%4), %%edx\n"
18971- "33: movl %%eax, 56(%3)\n"
18972- "34: movl %%edx, 60(%3)\n"
18973+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18974+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18975 " addl $-64, %0\n"
18976 " addl $64, %4\n"
18977 " addl $64, %3\n"
18978@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18979 " shrl $2, %0\n"
18980 " andl $3, %%eax\n"
18981 " cld\n"
18982+ __COPYUSER_SET_ES
18983 "99: rep; movsl\n"
18984 "36: movl %%eax, %0\n"
18985 "37: rep; movsb\n"
18986 "100:\n"
18987+ __COPYUSER_RESTORE_ES
18988+ ".section .fixup,\"ax\"\n"
18989+ "101: lea 0(%%eax,%0,4),%0\n"
18990+ " jmp 100b\n"
18991+ ".previous\n"
18992+ ".section __ex_table,\"a\"\n"
18993+ " .align 4\n"
18994+ " .long 1b,100b\n"
18995+ " .long 2b,100b\n"
18996+ " .long 3b,100b\n"
18997+ " .long 4b,100b\n"
18998+ " .long 5b,100b\n"
18999+ " .long 6b,100b\n"
19000+ " .long 7b,100b\n"
19001+ " .long 8b,100b\n"
19002+ " .long 9b,100b\n"
19003+ " .long 10b,100b\n"
19004+ " .long 11b,100b\n"
19005+ " .long 12b,100b\n"
19006+ " .long 13b,100b\n"
19007+ " .long 14b,100b\n"
19008+ " .long 15b,100b\n"
19009+ " .long 16b,100b\n"
19010+ " .long 17b,100b\n"
19011+ " .long 18b,100b\n"
19012+ " .long 19b,100b\n"
19013+ " .long 20b,100b\n"
19014+ " .long 21b,100b\n"
19015+ " .long 22b,100b\n"
19016+ " .long 23b,100b\n"
19017+ " .long 24b,100b\n"
19018+ " .long 25b,100b\n"
19019+ " .long 26b,100b\n"
19020+ " .long 27b,100b\n"
19021+ " .long 28b,100b\n"
19022+ " .long 29b,100b\n"
19023+ " .long 30b,100b\n"
19024+ " .long 31b,100b\n"
19025+ " .long 32b,100b\n"
19026+ " .long 33b,100b\n"
19027+ " .long 34b,100b\n"
19028+ " .long 35b,100b\n"
19029+ " .long 36b,100b\n"
19030+ " .long 37b,100b\n"
19031+ " .long 99b,101b\n"
19032+ ".previous"
19033+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19034+ : "1"(to), "2"(from), "0"(size)
19035+ : "eax", "edx", "memory");
19036+ return size;
19037+}
19038+
19039+static unsigned long
19040+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19041+{
19042+ int d0, d1;
19043+ __asm__ __volatile__(
19044+ " .align 2,0x90\n"
19045+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19046+ " cmpl $67, %0\n"
19047+ " jbe 3f\n"
19048+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19049+ " .align 2,0x90\n"
19050+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19051+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19052+ "5: movl %%eax, 0(%3)\n"
19053+ "6: movl %%edx, 4(%3)\n"
19054+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19055+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19056+ "9: movl %%eax, 8(%3)\n"
19057+ "10: movl %%edx, 12(%3)\n"
19058+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19059+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19060+ "13: movl %%eax, 16(%3)\n"
19061+ "14: movl %%edx, 20(%3)\n"
19062+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19063+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19064+ "17: movl %%eax, 24(%3)\n"
19065+ "18: movl %%edx, 28(%3)\n"
19066+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19067+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19068+ "21: movl %%eax, 32(%3)\n"
19069+ "22: movl %%edx, 36(%3)\n"
19070+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19071+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19072+ "25: movl %%eax, 40(%3)\n"
19073+ "26: movl %%edx, 44(%3)\n"
19074+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19075+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19076+ "29: movl %%eax, 48(%3)\n"
19077+ "30: movl %%edx, 52(%3)\n"
19078+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19079+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19080+ "33: movl %%eax, 56(%3)\n"
19081+ "34: movl %%edx, 60(%3)\n"
19082+ " addl $-64, %0\n"
19083+ " addl $64, %4\n"
19084+ " addl $64, %3\n"
19085+ " cmpl $63, %0\n"
19086+ " ja 1b\n"
19087+ "35: movl %0, %%eax\n"
19088+ " shrl $2, %0\n"
19089+ " andl $3, %%eax\n"
19090+ " cld\n"
19091+ "99: rep; "__copyuser_seg" movsl\n"
19092+ "36: movl %%eax, %0\n"
19093+ "37: rep; "__copyuser_seg" movsb\n"
19094+ "100:\n"
19095 ".section .fixup,\"ax\"\n"
19096 "101: lea 0(%%eax,%0,4),%0\n"
19097 " jmp 100b\n"
19098@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19099 int d0, d1;
19100 __asm__ __volatile__(
19101 " .align 2,0x90\n"
19102- "0: movl 32(%4), %%eax\n"
19103+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19104 " cmpl $67, %0\n"
19105 " jbe 2f\n"
19106- "1: movl 64(%4), %%eax\n"
19107+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19108 " .align 2,0x90\n"
19109- "2: movl 0(%4), %%eax\n"
19110- "21: movl 4(%4), %%edx\n"
19111+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19112+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19113 " movl %%eax, 0(%3)\n"
19114 " movl %%edx, 4(%3)\n"
19115- "3: movl 8(%4), %%eax\n"
19116- "31: movl 12(%4),%%edx\n"
19117+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19118+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19119 " movl %%eax, 8(%3)\n"
19120 " movl %%edx, 12(%3)\n"
19121- "4: movl 16(%4), %%eax\n"
19122- "41: movl 20(%4), %%edx\n"
19123+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19124+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19125 " movl %%eax, 16(%3)\n"
19126 " movl %%edx, 20(%3)\n"
19127- "10: movl 24(%4), %%eax\n"
19128- "51: movl 28(%4), %%edx\n"
19129+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19130+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19131 " movl %%eax, 24(%3)\n"
19132 " movl %%edx, 28(%3)\n"
19133- "11: movl 32(%4), %%eax\n"
19134- "61: movl 36(%4), %%edx\n"
19135+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19136+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19137 " movl %%eax, 32(%3)\n"
19138 " movl %%edx, 36(%3)\n"
19139- "12: movl 40(%4), %%eax\n"
19140- "71: movl 44(%4), %%edx\n"
19141+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19142+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19143 " movl %%eax, 40(%3)\n"
19144 " movl %%edx, 44(%3)\n"
19145- "13: movl 48(%4), %%eax\n"
19146- "81: movl 52(%4), %%edx\n"
19147+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19148+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19149 " movl %%eax, 48(%3)\n"
19150 " movl %%edx, 52(%3)\n"
19151- "14: movl 56(%4), %%eax\n"
19152- "91: movl 60(%4), %%edx\n"
19153+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19154+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19155 " movl %%eax, 56(%3)\n"
19156 " movl %%edx, 60(%3)\n"
19157 " addl $-64, %0\n"
19158@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19159 " shrl $2, %0\n"
19160 " andl $3, %%eax\n"
19161 " cld\n"
19162- "6: rep; movsl\n"
19163+ "6: rep; "__copyuser_seg" movsl\n"
19164 " movl %%eax,%0\n"
19165- "7: rep; movsb\n"
19166+ "7: rep; "__copyuser_seg" movsb\n"
19167 "8:\n"
19168 ".section .fixup,\"ax\"\n"
19169 "9: lea 0(%%eax,%0,4),%0\n"
19170@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19171
19172 __asm__ __volatile__(
19173 " .align 2,0x90\n"
19174- "0: movl 32(%4), %%eax\n"
19175+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19176 " cmpl $67, %0\n"
19177 " jbe 2f\n"
19178- "1: movl 64(%4), %%eax\n"
19179+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19180 " .align 2,0x90\n"
19181- "2: movl 0(%4), %%eax\n"
19182- "21: movl 4(%4), %%edx\n"
19183+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19184+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19185 " movnti %%eax, 0(%3)\n"
19186 " movnti %%edx, 4(%3)\n"
19187- "3: movl 8(%4), %%eax\n"
19188- "31: movl 12(%4),%%edx\n"
19189+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19190+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19191 " movnti %%eax, 8(%3)\n"
19192 " movnti %%edx, 12(%3)\n"
19193- "4: movl 16(%4), %%eax\n"
19194- "41: movl 20(%4), %%edx\n"
19195+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19196+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19197 " movnti %%eax, 16(%3)\n"
19198 " movnti %%edx, 20(%3)\n"
19199- "10: movl 24(%4), %%eax\n"
19200- "51: movl 28(%4), %%edx\n"
19201+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19202+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19203 " movnti %%eax, 24(%3)\n"
19204 " movnti %%edx, 28(%3)\n"
19205- "11: movl 32(%4), %%eax\n"
19206- "61: movl 36(%4), %%edx\n"
19207+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19208+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19209 " movnti %%eax, 32(%3)\n"
19210 " movnti %%edx, 36(%3)\n"
19211- "12: movl 40(%4), %%eax\n"
19212- "71: movl 44(%4), %%edx\n"
19213+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19214+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19215 " movnti %%eax, 40(%3)\n"
19216 " movnti %%edx, 44(%3)\n"
19217- "13: movl 48(%4), %%eax\n"
19218- "81: movl 52(%4), %%edx\n"
19219+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19220+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19221 " movnti %%eax, 48(%3)\n"
19222 " movnti %%edx, 52(%3)\n"
19223- "14: movl 56(%4), %%eax\n"
19224- "91: movl 60(%4), %%edx\n"
19225+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19226+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19227 " movnti %%eax, 56(%3)\n"
19228 " movnti %%edx, 60(%3)\n"
19229 " addl $-64, %0\n"
19230@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19231 " shrl $2, %0\n"
19232 " andl $3, %%eax\n"
19233 " cld\n"
19234- "6: rep; movsl\n"
19235+ "6: rep; "__copyuser_seg" movsl\n"
19236 " movl %%eax,%0\n"
19237- "7: rep; movsb\n"
19238+ "7: rep; "__copyuser_seg" movsb\n"
19239 "8:\n"
19240 ".section .fixup,\"ax\"\n"
19241 "9: lea 0(%%eax,%0,4),%0\n"
19242@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19243
19244 __asm__ __volatile__(
19245 " .align 2,0x90\n"
19246- "0: movl 32(%4), %%eax\n"
19247+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19248 " cmpl $67, %0\n"
19249 " jbe 2f\n"
19250- "1: movl 64(%4), %%eax\n"
19251+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19252 " .align 2,0x90\n"
19253- "2: movl 0(%4), %%eax\n"
19254- "21: movl 4(%4), %%edx\n"
19255+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19256+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19257 " movnti %%eax, 0(%3)\n"
19258 " movnti %%edx, 4(%3)\n"
19259- "3: movl 8(%4), %%eax\n"
19260- "31: movl 12(%4),%%edx\n"
19261+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19262+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19263 " movnti %%eax, 8(%3)\n"
19264 " movnti %%edx, 12(%3)\n"
19265- "4: movl 16(%4), %%eax\n"
19266- "41: movl 20(%4), %%edx\n"
19267+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19268+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19269 " movnti %%eax, 16(%3)\n"
19270 " movnti %%edx, 20(%3)\n"
19271- "10: movl 24(%4), %%eax\n"
19272- "51: movl 28(%4), %%edx\n"
19273+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19274+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19275 " movnti %%eax, 24(%3)\n"
19276 " movnti %%edx, 28(%3)\n"
19277- "11: movl 32(%4), %%eax\n"
19278- "61: movl 36(%4), %%edx\n"
19279+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19280+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19281 " movnti %%eax, 32(%3)\n"
19282 " movnti %%edx, 36(%3)\n"
19283- "12: movl 40(%4), %%eax\n"
19284- "71: movl 44(%4), %%edx\n"
19285+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19286+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19287 " movnti %%eax, 40(%3)\n"
19288 " movnti %%edx, 44(%3)\n"
19289- "13: movl 48(%4), %%eax\n"
19290- "81: movl 52(%4), %%edx\n"
19291+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19292+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19293 " movnti %%eax, 48(%3)\n"
19294 " movnti %%edx, 52(%3)\n"
19295- "14: movl 56(%4), %%eax\n"
19296- "91: movl 60(%4), %%edx\n"
19297+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19298+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19299 " movnti %%eax, 56(%3)\n"
19300 " movnti %%edx, 60(%3)\n"
19301 " addl $-64, %0\n"
19302@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19303 " shrl $2, %0\n"
19304 " andl $3, %%eax\n"
19305 " cld\n"
19306- "6: rep; movsl\n"
19307+ "6: rep; "__copyuser_seg" movsl\n"
19308 " movl %%eax,%0\n"
19309- "7: rep; movsb\n"
19310+ "7: rep; "__copyuser_seg" movsb\n"
19311 "8:\n"
19312 ".section .fixup,\"ax\"\n"
19313 "9: lea 0(%%eax,%0,4),%0\n"
19314@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19315 */
19316 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19317 unsigned long size);
19318-unsigned long __copy_user_intel(void __user *to, const void *from,
19319+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19320+ unsigned long size);
19321+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19322 unsigned long size);
19323 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19324 const void __user *from, unsigned long size);
19325 #endif /* CONFIG_X86_INTEL_USERCOPY */
19326
19327 /* Generic arbitrary sized copy. */
19328-#define __copy_user(to, from, size) \
19329+#define __copy_user(to, from, size, prefix, set, restore) \
19330 do { \
19331 int __d0, __d1, __d2; \
19332 __asm__ __volatile__( \
19333+ set \
19334 " cmp $7,%0\n" \
19335 " jbe 1f\n" \
19336 " movl %1,%0\n" \
19337 " negl %0\n" \
19338 " andl $7,%0\n" \
19339 " subl %0,%3\n" \
19340- "4: rep; movsb\n" \
19341+ "4: rep; "prefix"movsb\n" \
19342 " movl %3,%0\n" \
19343 " shrl $2,%0\n" \
19344 " andl $3,%3\n" \
19345 " .align 2,0x90\n" \
19346- "0: rep; movsl\n" \
19347+ "0: rep; "prefix"movsl\n" \
19348 " movl %3,%0\n" \
19349- "1: rep; movsb\n" \
19350+ "1: rep; "prefix"movsb\n" \
19351 "2:\n" \
19352+ restore \
19353 ".section .fixup,\"ax\"\n" \
19354 "5: addl %3,%0\n" \
19355 " jmp 2b\n" \
19356@@ -682,14 +799,14 @@ do { \
19357 " negl %0\n" \
19358 " andl $7,%0\n" \
19359 " subl %0,%3\n" \
19360- "4: rep; movsb\n" \
19361+ "4: rep; "__copyuser_seg"movsb\n" \
19362 " movl %3,%0\n" \
19363 " shrl $2,%0\n" \
19364 " andl $3,%3\n" \
19365 " .align 2,0x90\n" \
19366- "0: rep; movsl\n" \
19367+ "0: rep; "__copyuser_seg"movsl\n" \
19368 " movl %3,%0\n" \
19369- "1: rep; movsb\n" \
19370+ "1: rep; "__copyuser_seg"movsb\n" \
19371 "2:\n" \
19372 ".section .fixup,\"ax\"\n" \
19373 "5: addl %3,%0\n" \
19374@@ -775,9 +892,9 @@ survive:
19375 }
19376 #endif
19377 if (movsl_is_ok(to, from, n))
19378- __copy_user(to, from, n);
19379+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19380 else
19381- n = __copy_user_intel(to, from, n);
19382+ n = __generic_copy_to_user_intel(to, from, n);
19383 return n;
19384 }
19385 EXPORT_SYMBOL(__copy_to_user_ll);
19386@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19387 unsigned long n)
19388 {
19389 if (movsl_is_ok(to, from, n))
19390- __copy_user(to, from, n);
19391+ __copy_user(to, from, n, __copyuser_seg, "", "");
19392 else
19393- n = __copy_user_intel((void __user *)to,
19394- (const void *)from, n);
19395+ n = __generic_copy_from_user_intel(to, from, n);
19396 return n;
19397 }
19398 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19399@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19400 if (n > 64 && cpu_has_xmm2)
19401 n = __copy_user_intel_nocache(to, from, n);
19402 else
19403- __copy_user(to, from, n);
19404+ __copy_user(to, from, n, __copyuser_seg, "", "");
19405 #else
19406- __copy_user(to, from, n);
19407+ __copy_user(to, from, n, __copyuser_seg, "", "");
19408 #endif
19409 return n;
19410 }
19411 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19412
19413-/**
19414- * copy_to_user: - Copy a block of data into user space.
19415- * @to: Destination address, in user space.
19416- * @from: Source address, in kernel space.
19417- * @n: Number of bytes to copy.
19418- *
19419- * Context: User context only. This function may sleep.
19420- *
19421- * Copy data from kernel space to user space.
19422- *
19423- * Returns number of bytes that could not be copied.
19424- * On success, this will be zero.
19425- */
19426-unsigned long
19427-copy_to_user(void __user *to, const void *from, unsigned long n)
19428+void copy_from_user_overflow(void)
19429 {
19430- if (access_ok(VERIFY_WRITE, to, n))
19431- n = __copy_to_user(to, from, n);
19432- return n;
19433+ WARN(1, "Buffer overflow detected!\n");
19434 }
19435-EXPORT_SYMBOL(copy_to_user);
19436+EXPORT_SYMBOL(copy_from_user_overflow);
19437
19438-/**
19439- * copy_from_user: - Copy a block of data from user space.
19440- * @to: Destination address, in kernel space.
19441- * @from: Source address, in user space.
19442- * @n: Number of bytes to copy.
19443- *
19444- * Context: User context only. This function may sleep.
19445- *
19446- * Copy data from user space to kernel space.
19447- *
19448- * Returns number of bytes that could not be copied.
19449- * On success, this will be zero.
19450- *
19451- * If some data could not be copied, this function will pad the copied
19452- * data to the requested size using zero bytes.
19453- */
19454-unsigned long
19455-_copy_from_user(void *to, const void __user *from, unsigned long n)
19456+void copy_to_user_overflow(void)
19457 {
19458- if (access_ok(VERIFY_READ, from, n))
19459- n = __copy_from_user(to, from, n);
19460- else
19461- memset(to, 0, n);
19462- return n;
19463+ WARN(1, "Buffer overflow detected!\n");
19464 }
19465-EXPORT_SYMBOL(_copy_from_user);
19466+EXPORT_SYMBOL(copy_to_user_overflow);
19467
19468-void copy_from_user_overflow(void)
19469+#ifdef CONFIG_PAX_MEMORY_UDEREF
19470+void __set_fs(mm_segment_t x)
19471 {
19472- WARN(1, "Buffer overflow detected!\n");
19473+ switch (x.seg) {
19474+ case 0:
19475+ loadsegment(gs, 0);
19476+ break;
19477+ case TASK_SIZE_MAX:
19478+ loadsegment(gs, __USER_DS);
19479+ break;
19480+ case -1UL:
19481+ loadsegment(gs, __KERNEL_DS);
19482+ break;
19483+ default:
19484+ BUG();
19485+ }
19486+ return;
19487 }
19488-EXPORT_SYMBOL(copy_from_user_overflow);
19489+EXPORT_SYMBOL(__set_fs);
19490+
19491+void set_fs(mm_segment_t x)
19492+{
19493+ current_thread_info()->addr_limit = x;
19494+ __set_fs(x);
19495+}
19496+EXPORT_SYMBOL(set_fs);
19497+#endif
19498diff -urNp linux-3.0.7/arch/x86/lib/usercopy_64.c linux-3.0.7/arch/x86/lib/usercopy_64.c
19499--- linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19500+++ linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19501@@ -42,6 +42,12 @@ long
19502 __strncpy_from_user(char *dst, const char __user *src, long count)
19503 {
19504 long res;
19505+
19506+#ifdef CONFIG_PAX_MEMORY_UDEREF
19507+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19508+ src += PAX_USER_SHADOW_BASE;
19509+#endif
19510+
19511 __do_strncpy_from_user(dst, src, count, res);
19512 return res;
19513 }
19514@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19515 {
19516 long __d0;
19517 might_fault();
19518+
19519+#ifdef CONFIG_PAX_MEMORY_UDEREF
19520+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19521+ addr += PAX_USER_SHADOW_BASE;
19522+#endif
19523+
19524 /* no memory constraint because it doesn't change any memory gcc knows
19525 about */
19526 asm volatile(
19527@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19528
19529 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19530 {
19531- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19532- return copy_user_generic((__force void *)to, (__force void *)from, len);
19533- }
19534- return len;
19535+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19536+
19537+#ifdef CONFIG_PAX_MEMORY_UDEREF
19538+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19539+ to += PAX_USER_SHADOW_BASE;
19540+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19541+ from += PAX_USER_SHADOW_BASE;
19542+#endif
19543+
19544+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19545+ }
19546+ return len;
19547 }
19548 EXPORT_SYMBOL(copy_in_user);
19549
19550@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19551 * it is not necessary to optimize tail handling.
19552 */
19553 unsigned long
19554-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19555+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19556 {
19557 char c;
19558 unsigned zero_len;
19559diff -urNp linux-3.0.7/arch/x86/mm/extable.c linux-3.0.7/arch/x86/mm/extable.c
19560--- linux-3.0.7/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19561+++ linux-3.0.7/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19562@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19563 const struct exception_table_entry *fixup;
19564
19565 #ifdef CONFIG_PNPBIOS
19566- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19567+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19568 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19569 extern u32 pnp_bios_is_utter_crap;
19570 pnp_bios_is_utter_crap = 1;
19571diff -urNp linux-3.0.7/arch/x86/mm/fault.c linux-3.0.7/arch/x86/mm/fault.c
19572--- linux-3.0.7/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19573+++ linux-3.0.7/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19574@@ -13,10 +13,18 @@
19575 #include <linux/perf_event.h> /* perf_sw_event */
19576 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19577 #include <linux/prefetch.h> /* prefetchw */
19578+#include <linux/unistd.h>
19579+#include <linux/compiler.h>
19580
19581 #include <asm/traps.h> /* dotraplinkage, ... */
19582 #include <asm/pgalloc.h> /* pgd_*(), ... */
19583 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19584+#include <asm/vsyscall.h>
19585+#include <asm/tlbflush.h>
19586+
19587+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19588+#include <asm/stacktrace.h>
19589+#endif
19590
19591 /*
19592 * Page fault error code bits:
19593@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19594 int ret = 0;
19595
19596 /* kprobe_running() needs smp_processor_id() */
19597- if (kprobes_built_in() && !user_mode_vm(regs)) {
19598+ if (kprobes_built_in() && !user_mode(regs)) {
19599 preempt_disable();
19600 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19601 ret = 1;
19602@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19603 return !instr_lo || (instr_lo>>1) == 1;
19604 case 0x00:
19605 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19606- if (probe_kernel_address(instr, opcode))
19607+ if (user_mode(regs)) {
19608+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19609+ return 0;
19610+ } else if (probe_kernel_address(instr, opcode))
19611 return 0;
19612
19613 *prefetch = (instr_lo == 0xF) &&
19614@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19615 while (instr < max_instr) {
19616 unsigned char opcode;
19617
19618- if (probe_kernel_address(instr, opcode))
19619+ if (user_mode(regs)) {
19620+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19621+ break;
19622+ } else if (probe_kernel_address(instr, opcode))
19623 break;
19624
19625 instr++;
19626@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19627 force_sig_info(si_signo, &info, tsk);
19628 }
19629
19630+#ifdef CONFIG_PAX_EMUTRAMP
19631+static int pax_handle_fetch_fault(struct pt_regs *regs);
19632+#endif
19633+
19634+#ifdef CONFIG_PAX_PAGEEXEC
19635+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19636+{
19637+ pgd_t *pgd;
19638+ pud_t *pud;
19639+ pmd_t *pmd;
19640+
19641+ pgd = pgd_offset(mm, address);
19642+ if (!pgd_present(*pgd))
19643+ return NULL;
19644+ pud = pud_offset(pgd, address);
19645+ if (!pud_present(*pud))
19646+ return NULL;
19647+ pmd = pmd_offset(pud, address);
19648+ if (!pmd_present(*pmd))
19649+ return NULL;
19650+ return pmd;
19651+}
19652+#endif
19653+
19654 DEFINE_SPINLOCK(pgd_lock);
19655 LIST_HEAD(pgd_list);
19656
19657@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19658 for (address = VMALLOC_START & PMD_MASK;
19659 address >= TASK_SIZE && address < FIXADDR_TOP;
19660 address += PMD_SIZE) {
19661+
19662+#ifdef CONFIG_PAX_PER_CPU_PGD
19663+ unsigned long cpu;
19664+#else
19665 struct page *page;
19666+#endif
19667
19668 spin_lock(&pgd_lock);
19669+
19670+#ifdef CONFIG_PAX_PER_CPU_PGD
19671+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19672+ pgd_t *pgd = get_cpu_pgd(cpu);
19673+ pmd_t *ret;
19674+#else
19675 list_for_each_entry(page, &pgd_list, lru) {
19676+ pgd_t *pgd = page_address(page);
19677 spinlock_t *pgt_lock;
19678 pmd_t *ret;
19679
19680@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19681 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19682
19683 spin_lock(pgt_lock);
19684- ret = vmalloc_sync_one(page_address(page), address);
19685+#endif
19686+
19687+ ret = vmalloc_sync_one(pgd, address);
19688+
19689+#ifndef CONFIG_PAX_PER_CPU_PGD
19690 spin_unlock(pgt_lock);
19691+#endif
19692
19693 if (!ret)
19694 break;
19695@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19696 * an interrupt in the middle of a task switch..
19697 */
19698 pgd_paddr = read_cr3();
19699+
19700+#ifdef CONFIG_PAX_PER_CPU_PGD
19701+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19702+#endif
19703+
19704 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19705 if (!pmd_k)
19706 return -1;
19707@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19708 * happen within a race in page table update. In the later
19709 * case just flush:
19710 */
19711+
19712+#ifdef CONFIG_PAX_PER_CPU_PGD
19713+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19714+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19715+#else
19716 pgd = pgd_offset(current->active_mm, address);
19717+#endif
19718+
19719 pgd_ref = pgd_offset_k(address);
19720 if (pgd_none(*pgd_ref))
19721 return -1;
19722@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19723 static int is_errata100(struct pt_regs *regs, unsigned long address)
19724 {
19725 #ifdef CONFIG_X86_64
19726- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19727+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19728 return 1;
19729 #endif
19730 return 0;
19731@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19732 }
19733
19734 static const char nx_warning[] = KERN_CRIT
19735-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19736+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19737
19738 static void
19739 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19740@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19741 if (!oops_may_print())
19742 return;
19743
19744- if (error_code & PF_INSTR) {
19745+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19746 unsigned int level;
19747
19748 pte_t *pte = lookup_address(address, &level);
19749
19750 if (pte && pte_present(*pte) && !pte_exec(*pte))
19751- printk(nx_warning, current_uid());
19752+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19753+ }
19754+
19755+#ifdef CONFIG_PAX_KERNEXEC
19756+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19757+ if (current->signal->curr_ip)
19758+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19759+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19760+ else
19761+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19762+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19763 }
19764+#endif
19765
19766 printk(KERN_ALERT "BUG: unable to handle kernel ");
19767 if (address < PAGE_SIZE)
19768@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19769 unsigned long address, int si_code)
19770 {
19771 struct task_struct *tsk = current;
19772+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19773+ struct mm_struct *mm = tsk->mm;
19774+#endif
19775+
19776+#ifdef CONFIG_X86_64
19777+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19778+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19779+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19780+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19781+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19782+ return;
19783+ }
19784+ }
19785+#endif
19786+
19787+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19788+ if (mm && (error_code & PF_USER)) {
19789+ unsigned long ip = regs->ip;
19790+
19791+ if (v8086_mode(regs))
19792+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19793+
19794+ /*
19795+ * It's possible to have interrupts off here:
19796+ */
19797+ local_irq_enable();
19798+
19799+#ifdef CONFIG_PAX_PAGEEXEC
19800+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19801+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19802+
19803+#ifdef CONFIG_PAX_EMUTRAMP
19804+ switch (pax_handle_fetch_fault(regs)) {
19805+ case 2:
19806+ return;
19807+ }
19808+#endif
19809+
19810+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19811+ do_group_exit(SIGKILL);
19812+ }
19813+#endif
19814+
19815+#ifdef CONFIG_PAX_SEGMEXEC
19816+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19817+
19818+#ifdef CONFIG_PAX_EMUTRAMP
19819+ switch (pax_handle_fetch_fault(regs)) {
19820+ case 2:
19821+ return;
19822+ }
19823+#endif
19824+
19825+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19826+ do_group_exit(SIGKILL);
19827+ }
19828+#endif
19829+
19830+ }
19831+#endif
19832
19833 /* User mode accesses just cause a SIGSEGV */
19834 if (error_code & PF_USER) {
19835@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19836 return 1;
19837 }
19838
19839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19840+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19841+{
19842+ pte_t *pte;
19843+ pmd_t *pmd;
19844+ spinlock_t *ptl;
19845+ unsigned char pte_mask;
19846+
19847+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19848+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19849+ return 0;
19850+
19851+ /* PaX: it's our fault, let's handle it if we can */
19852+
19853+ /* PaX: take a look at read faults before acquiring any locks */
19854+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19855+ /* instruction fetch attempt from a protected page in user mode */
19856+ up_read(&mm->mmap_sem);
19857+
19858+#ifdef CONFIG_PAX_EMUTRAMP
19859+ switch (pax_handle_fetch_fault(regs)) {
19860+ case 2:
19861+ return 1;
19862+ }
19863+#endif
19864+
19865+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19866+ do_group_exit(SIGKILL);
19867+ }
19868+
19869+ pmd = pax_get_pmd(mm, address);
19870+ if (unlikely(!pmd))
19871+ return 0;
19872+
19873+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19874+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19875+ pte_unmap_unlock(pte, ptl);
19876+ return 0;
19877+ }
19878+
19879+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19880+ /* write attempt to a protected page in user mode */
19881+ pte_unmap_unlock(pte, ptl);
19882+ return 0;
19883+ }
19884+
19885+#ifdef CONFIG_SMP
19886+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19887+#else
19888+ if (likely(address > get_limit(regs->cs)))
19889+#endif
19890+ {
19891+ set_pte(pte, pte_mkread(*pte));
19892+ __flush_tlb_one(address);
19893+ pte_unmap_unlock(pte, ptl);
19894+ up_read(&mm->mmap_sem);
19895+ return 1;
19896+ }
19897+
19898+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19899+
19900+ /*
19901+ * PaX: fill DTLB with user rights and retry
19902+ */
19903+ __asm__ __volatile__ (
19904+ "orb %2,(%1)\n"
19905+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19906+/*
19907+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19908+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19909+ * page fault when examined during a TLB load attempt. this is true not only
19910+ * for PTEs holding a non-present entry but also present entries that will
19911+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19912+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19913+ * for our target pages since their PTEs are simply not in the TLBs at all.
19914+
19915+ * the best thing in omitting it is that we gain around 15-20% speed in the
19916+ * fast path of the page fault handler and can get rid of tracing since we
19917+ * can no longer flush unintended entries.
19918+ */
19919+ "invlpg (%0)\n"
19920+#endif
19921+ __copyuser_seg"testb $0,(%0)\n"
19922+ "xorb %3,(%1)\n"
19923+ :
19924+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19925+ : "memory", "cc");
19926+ pte_unmap_unlock(pte, ptl);
19927+ up_read(&mm->mmap_sem);
19928+ return 1;
19929+}
19930+#endif
19931+
19932 /*
19933 * Handle a spurious fault caused by a stale TLB entry.
19934 *
19935@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19936 static inline int
19937 access_error(unsigned long error_code, struct vm_area_struct *vma)
19938 {
19939+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19940+ return 1;
19941+
19942 if (error_code & PF_WRITE) {
19943 /* write, present and write, not present: */
19944 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19945@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19946 {
19947 struct vm_area_struct *vma;
19948 struct task_struct *tsk;
19949- unsigned long address;
19950 struct mm_struct *mm;
19951 int fault;
19952 int write = error_code & PF_WRITE;
19953 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19954 (write ? FAULT_FLAG_WRITE : 0);
19955
19956+ /* Get the faulting address: */
19957+ unsigned long address = read_cr2();
19958+
19959+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19960+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19961+ if (!search_exception_tables(regs->ip)) {
19962+ bad_area_nosemaphore(regs, error_code, address);
19963+ return;
19964+ }
19965+ if (address < PAX_USER_SHADOW_BASE) {
19966+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19967+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19968+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19969+ } else
19970+ address -= PAX_USER_SHADOW_BASE;
19971+ }
19972+#endif
19973+
19974 tsk = current;
19975 mm = tsk->mm;
19976
19977- /* Get the faulting address: */
19978- address = read_cr2();
19979-
19980 /*
19981 * Detect and handle instructions that would cause a page fault for
19982 * both a tracked kernel page and a userspace page.
19983@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19984 * User-mode registers count as a user access even for any
19985 * potential system fault or CPU buglet:
19986 */
19987- if (user_mode_vm(regs)) {
19988+ if (user_mode(regs)) {
19989 local_irq_enable();
19990 error_code |= PF_USER;
19991 } else {
19992@@ -1103,6 +1351,11 @@ retry:
19993 might_sleep();
19994 }
19995
19996+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19997+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19998+ return;
19999+#endif
20000+
20001 vma = find_vma(mm, address);
20002 if (unlikely(!vma)) {
20003 bad_area(regs, error_code, address);
20004@@ -1114,18 +1367,24 @@ retry:
20005 bad_area(regs, error_code, address);
20006 return;
20007 }
20008- if (error_code & PF_USER) {
20009- /*
20010- * Accessing the stack below %sp is always a bug.
20011- * The large cushion allows instructions like enter
20012- * and pusha to work. ("enter $65535, $31" pushes
20013- * 32 pointers and then decrements %sp by 65535.)
20014- */
20015- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20016- bad_area(regs, error_code, address);
20017- return;
20018- }
20019+ /*
20020+ * Accessing the stack below %sp is always a bug.
20021+ * The large cushion allows instructions like enter
20022+ * and pusha to work. ("enter $65535, $31" pushes
20023+ * 32 pointers and then decrements %sp by 65535.)
20024+ */
20025+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20026+ bad_area(regs, error_code, address);
20027+ return;
20028 }
20029+
20030+#ifdef CONFIG_PAX_SEGMEXEC
20031+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20032+ bad_area(regs, error_code, address);
20033+ return;
20034+ }
20035+#endif
20036+
20037 if (unlikely(expand_stack(vma, address))) {
20038 bad_area(regs, error_code, address);
20039 return;
20040@@ -1180,3 +1439,199 @@ good_area:
20041
20042 up_read(&mm->mmap_sem);
20043 }
20044+
20045+#ifdef CONFIG_PAX_EMUTRAMP
20046+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20047+{
20048+ int err;
20049+
20050+ do { /* PaX: gcc trampoline emulation #1 */
20051+ unsigned char mov1, mov2;
20052+ unsigned short jmp;
20053+ unsigned int addr1, addr2;
20054+
20055+#ifdef CONFIG_X86_64
20056+ if ((regs->ip + 11) >> 32)
20057+ break;
20058+#endif
20059+
20060+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20061+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20062+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20063+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20064+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20065+
20066+ if (err)
20067+ break;
20068+
20069+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20070+ regs->cx = addr1;
20071+ regs->ax = addr2;
20072+ regs->ip = addr2;
20073+ return 2;
20074+ }
20075+ } while (0);
20076+
20077+ do { /* PaX: gcc trampoline emulation #2 */
20078+ unsigned char mov, jmp;
20079+ unsigned int addr1, addr2;
20080+
20081+#ifdef CONFIG_X86_64
20082+ if ((regs->ip + 9) >> 32)
20083+ break;
20084+#endif
20085+
20086+ err = get_user(mov, (unsigned char __user *)regs->ip);
20087+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20088+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20089+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20090+
20091+ if (err)
20092+ break;
20093+
20094+ if (mov == 0xB9 && jmp == 0xE9) {
20095+ regs->cx = addr1;
20096+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20097+ return 2;
20098+ }
20099+ } while (0);
20100+
20101+ return 1; /* PaX in action */
20102+}
20103+
20104+#ifdef CONFIG_X86_64
20105+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20106+{
20107+ int err;
20108+
20109+ do { /* PaX: gcc trampoline emulation #1 */
20110+ unsigned short mov1, mov2, jmp1;
20111+ unsigned char jmp2;
20112+ unsigned int addr1;
20113+ unsigned long addr2;
20114+
20115+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20116+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20117+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20118+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20119+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20120+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20121+
20122+ if (err)
20123+ break;
20124+
20125+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20126+ regs->r11 = addr1;
20127+ regs->r10 = addr2;
20128+ regs->ip = addr1;
20129+ return 2;
20130+ }
20131+ } while (0);
20132+
20133+ do { /* PaX: gcc trampoline emulation #2 */
20134+ unsigned short mov1, mov2, jmp1;
20135+ unsigned char jmp2;
20136+ unsigned long addr1, addr2;
20137+
20138+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20139+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20140+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20141+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20142+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20143+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20144+
20145+ if (err)
20146+ break;
20147+
20148+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20149+ regs->r11 = addr1;
20150+ regs->r10 = addr2;
20151+ regs->ip = addr1;
20152+ return 2;
20153+ }
20154+ } while (0);
20155+
20156+ return 1; /* PaX in action */
20157+}
20158+#endif
20159+
20160+/*
20161+ * PaX: decide what to do with offenders (regs->ip = fault address)
20162+ *
20163+ * returns 1 when task should be killed
20164+ * 2 when gcc trampoline was detected
20165+ */
20166+static int pax_handle_fetch_fault(struct pt_regs *regs)
20167+{
20168+ if (v8086_mode(regs))
20169+ return 1;
20170+
20171+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20172+ return 1;
20173+
20174+#ifdef CONFIG_X86_32
20175+ return pax_handle_fetch_fault_32(regs);
20176+#else
20177+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20178+ return pax_handle_fetch_fault_32(regs);
20179+ else
20180+ return pax_handle_fetch_fault_64(regs);
20181+#endif
20182+}
20183+#endif
20184+
20185+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20186+void pax_report_insns(void *pc, void *sp)
20187+{
20188+ long i;
20189+
20190+ printk(KERN_ERR "PAX: bytes at PC: ");
20191+ for (i = 0; i < 20; i++) {
20192+ unsigned char c;
20193+ if (get_user(c, (unsigned char __force_user *)pc+i))
20194+ printk(KERN_CONT "?? ");
20195+ else
20196+ printk(KERN_CONT "%02x ", c);
20197+ }
20198+ printk("\n");
20199+
20200+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20201+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20202+ unsigned long c;
20203+ if (get_user(c, (unsigned long __force_user *)sp+i))
20204+#ifdef CONFIG_X86_32
20205+ printk(KERN_CONT "???????? ");
20206+#else
20207+ printk(KERN_CONT "???????????????? ");
20208+#endif
20209+ else
20210+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20211+ }
20212+ printk("\n");
20213+}
20214+#endif
20215+
20216+/**
20217+ * probe_kernel_write(): safely attempt to write to a location
20218+ * @dst: address to write to
20219+ * @src: pointer to the data that shall be written
20220+ * @size: size of the data chunk
20221+ *
20222+ * Safely write to address @dst from the buffer at @src. If a kernel fault
20223+ * happens, handle that and return -EFAULT.
20224+ */
20225+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20226+{
20227+ long ret;
20228+ mm_segment_t old_fs = get_fs();
20229+
20230+ set_fs(KERNEL_DS);
20231+ pagefault_disable();
20232+ pax_open_kernel();
20233+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20234+ pax_close_kernel();
20235+ pagefault_enable();
20236+ set_fs(old_fs);
20237+
20238+ return ret ? -EFAULT : 0;
20239+}
20240diff -urNp linux-3.0.7/arch/x86/mm/gup.c linux-3.0.7/arch/x86/mm/gup.c
20241--- linux-3.0.7/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
20242+++ linux-3.0.7/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
20243@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
20244 addr = start;
20245 len = (unsigned long) nr_pages << PAGE_SHIFT;
20246 end = start + len;
20247- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20248+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20249 (void __user *)start, len)))
20250 return 0;
20251
20252diff -urNp linux-3.0.7/arch/x86/mm/highmem_32.c linux-3.0.7/arch/x86/mm/highmem_32.c
20253--- linux-3.0.7/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
20254+++ linux-3.0.7/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
20255@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20256 idx = type + KM_TYPE_NR*smp_processor_id();
20257 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20258 BUG_ON(!pte_none(*(kmap_pte-idx)));
20259+
20260+ pax_open_kernel();
20261 set_pte(kmap_pte-idx, mk_pte(page, prot));
20262+ pax_close_kernel();
20263
20264 return (void *)vaddr;
20265 }
20266diff -urNp linux-3.0.7/arch/x86/mm/hugetlbpage.c linux-3.0.7/arch/x86/mm/hugetlbpage.c
20267--- linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
20268+++ linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
20269@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20270 struct hstate *h = hstate_file(file);
20271 struct mm_struct *mm = current->mm;
20272 struct vm_area_struct *vma;
20273- unsigned long start_addr;
20274+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20275+
20276+#ifdef CONFIG_PAX_SEGMEXEC
20277+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20278+ pax_task_size = SEGMEXEC_TASK_SIZE;
20279+#endif
20280+
20281+ pax_task_size -= PAGE_SIZE;
20282
20283 if (len > mm->cached_hole_size) {
20284- start_addr = mm->free_area_cache;
20285+ start_addr = mm->free_area_cache;
20286 } else {
20287- start_addr = TASK_UNMAPPED_BASE;
20288- mm->cached_hole_size = 0;
20289+ start_addr = mm->mmap_base;
20290+ mm->cached_hole_size = 0;
20291 }
20292
20293 full_search:
20294@@ -280,26 +287,27 @@ full_search:
20295
20296 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20297 /* At this point: (!vma || addr < vma->vm_end). */
20298- if (TASK_SIZE - len < addr) {
20299+ if (pax_task_size - len < addr) {
20300 /*
20301 * Start a new search - just in case we missed
20302 * some holes.
20303 */
20304- if (start_addr != TASK_UNMAPPED_BASE) {
20305- start_addr = TASK_UNMAPPED_BASE;
20306+ if (start_addr != mm->mmap_base) {
20307+ start_addr = mm->mmap_base;
20308 mm->cached_hole_size = 0;
20309 goto full_search;
20310 }
20311 return -ENOMEM;
20312 }
20313- if (!vma || addr + len <= vma->vm_start) {
20314- mm->free_area_cache = addr + len;
20315- return addr;
20316- }
20317+ if (check_heap_stack_gap(vma, addr, len))
20318+ break;
20319 if (addr + mm->cached_hole_size < vma->vm_start)
20320 mm->cached_hole_size = vma->vm_start - addr;
20321 addr = ALIGN(vma->vm_end, huge_page_size(h));
20322 }
20323+
20324+ mm->free_area_cache = addr + len;
20325+ return addr;
20326 }
20327
20328 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20329@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20330 {
20331 struct hstate *h = hstate_file(file);
20332 struct mm_struct *mm = current->mm;
20333- struct vm_area_struct *vma, *prev_vma;
20334- unsigned long base = mm->mmap_base, addr = addr0;
20335+ struct vm_area_struct *vma;
20336+ unsigned long base = mm->mmap_base, addr;
20337 unsigned long largest_hole = mm->cached_hole_size;
20338- int first_time = 1;
20339
20340 /* don't allow allocations above current base */
20341 if (mm->free_area_cache > base)
20342@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20343 largest_hole = 0;
20344 mm->free_area_cache = base;
20345 }
20346-try_again:
20347+
20348 /* make sure it can fit in the remaining address space */
20349 if (mm->free_area_cache < len)
20350 goto fail;
20351
20352 /* either no address requested or can't fit in requested address hole */
20353- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20354+ addr = (mm->free_area_cache - len);
20355 do {
20356+ addr &= huge_page_mask(h);
20357+ vma = find_vma(mm, addr);
20358 /*
20359 * Lookup failure means no vma is above this address,
20360 * i.e. return with success:
20361- */
20362- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20363- return addr;
20364-
20365- /*
20366 * new region fits between prev_vma->vm_end and
20367 * vma->vm_start, use it:
20368 */
20369- if (addr + len <= vma->vm_start &&
20370- (!prev_vma || (addr >= prev_vma->vm_end))) {
20371+ if (check_heap_stack_gap(vma, addr, len)) {
20372 /* remember the address as a hint for next time */
20373- mm->cached_hole_size = largest_hole;
20374- return (mm->free_area_cache = addr);
20375- } else {
20376- /* pull free_area_cache down to the first hole */
20377- if (mm->free_area_cache == vma->vm_end) {
20378- mm->free_area_cache = vma->vm_start;
20379- mm->cached_hole_size = largest_hole;
20380- }
20381+ mm->cached_hole_size = largest_hole;
20382+ return (mm->free_area_cache = addr);
20383+ }
20384+ /* pull free_area_cache down to the first hole */
20385+ if (mm->free_area_cache == vma->vm_end) {
20386+ mm->free_area_cache = vma->vm_start;
20387+ mm->cached_hole_size = largest_hole;
20388 }
20389
20390 /* remember the largest hole we saw so far */
20391 if (addr + largest_hole < vma->vm_start)
20392- largest_hole = vma->vm_start - addr;
20393+ largest_hole = vma->vm_start - addr;
20394
20395 /* try just below the current vma->vm_start */
20396- addr = (vma->vm_start - len) & huge_page_mask(h);
20397- } while (len <= vma->vm_start);
20398+ addr = skip_heap_stack_gap(vma, len);
20399+ } while (!IS_ERR_VALUE(addr));
20400
20401 fail:
20402 /*
20403- * if hint left us with no space for the requested
20404- * mapping then try again:
20405- */
20406- if (first_time) {
20407- mm->free_area_cache = base;
20408- largest_hole = 0;
20409- first_time = 0;
20410- goto try_again;
20411- }
20412- /*
20413 * A failed mmap() very likely causes application failure,
20414 * so fall back to the bottom-up function here. This scenario
20415 * can happen with large stack limits and large mmap()
20416 * allocations.
20417 */
20418- mm->free_area_cache = TASK_UNMAPPED_BASE;
20419+
20420+#ifdef CONFIG_PAX_SEGMEXEC
20421+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20422+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20423+ else
20424+#endif
20425+
20426+ mm->mmap_base = TASK_UNMAPPED_BASE;
20427+
20428+#ifdef CONFIG_PAX_RANDMMAP
20429+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20430+ mm->mmap_base += mm->delta_mmap;
20431+#endif
20432+
20433+ mm->free_area_cache = mm->mmap_base;
20434 mm->cached_hole_size = ~0UL;
20435 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20436 len, pgoff, flags);
20437@@ -386,6 +392,7 @@ fail:
20438 /*
20439 * Restore the topdown base:
20440 */
20441+ mm->mmap_base = base;
20442 mm->free_area_cache = base;
20443 mm->cached_hole_size = ~0UL;
20444
20445@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20446 struct hstate *h = hstate_file(file);
20447 struct mm_struct *mm = current->mm;
20448 struct vm_area_struct *vma;
20449+ unsigned long pax_task_size = TASK_SIZE;
20450
20451 if (len & ~huge_page_mask(h))
20452 return -EINVAL;
20453- if (len > TASK_SIZE)
20454+
20455+#ifdef CONFIG_PAX_SEGMEXEC
20456+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20457+ pax_task_size = SEGMEXEC_TASK_SIZE;
20458+#endif
20459+
20460+ pax_task_size -= PAGE_SIZE;
20461+
20462+ if (len > pax_task_size)
20463 return -ENOMEM;
20464
20465 if (flags & MAP_FIXED) {
20466@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20467 if (addr) {
20468 addr = ALIGN(addr, huge_page_size(h));
20469 vma = find_vma(mm, addr);
20470- if (TASK_SIZE - len >= addr &&
20471- (!vma || addr + len <= vma->vm_start))
20472+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20473 return addr;
20474 }
20475 if (mm->get_unmapped_area == arch_get_unmapped_area)
20476diff -urNp linux-3.0.7/arch/x86/mm/init.c linux-3.0.7/arch/x86/mm/init.c
20477--- linux-3.0.7/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20478+++ linux-3.0.7/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20479@@ -31,7 +31,7 @@ int direct_gbpages
20480 static void __init find_early_table_space(unsigned long end, int use_pse,
20481 int use_gbpages)
20482 {
20483- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20484+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20485 phys_addr_t base;
20486
20487 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20488@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20489 */
20490 int devmem_is_allowed(unsigned long pagenr)
20491 {
20492- if (pagenr <= 256)
20493+#ifdef CONFIG_GRKERNSEC_KMEM
20494+ /* allow BDA */
20495+ if (!pagenr)
20496+ return 1;
20497+ /* allow EBDA */
20498+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20499+ return 1;
20500+#else
20501+ if (!pagenr)
20502+ return 1;
20503+#ifdef CONFIG_VM86
20504+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20505+ return 1;
20506+#endif
20507+#endif
20508+
20509+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20510 return 1;
20511+#ifdef CONFIG_GRKERNSEC_KMEM
20512+ /* throw out everything else below 1MB */
20513+ if (pagenr <= 256)
20514+ return 0;
20515+#endif
20516 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20517 return 0;
20518 if (!page_is_ram(pagenr))
20519 return 1;
20520+
20521 return 0;
20522 }
20523
20524@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20525
20526 void free_initmem(void)
20527 {
20528+
20529+#ifdef CONFIG_PAX_KERNEXEC
20530+#ifdef CONFIG_X86_32
20531+ /* PaX: limit KERNEL_CS to actual size */
20532+ unsigned long addr, limit;
20533+ struct desc_struct d;
20534+ int cpu;
20535+
20536+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20537+ limit = (limit - 1UL) >> PAGE_SHIFT;
20538+
20539+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20540+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20541+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20542+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20543+ }
20544+
20545+ /* PaX: make KERNEL_CS read-only */
20546+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20547+ if (!paravirt_enabled())
20548+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20549+/*
20550+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20551+ pgd = pgd_offset_k(addr);
20552+ pud = pud_offset(pgd, addr);
20553+ pmd = pmd_offset(pud, addr);
20554+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20555+ }
20556+*/
20557+#ifdef CONFIG_X86_PAE
20558+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20559+/*
20560+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20561+ pgd = pgd_offset_k(addr);
20562+ pud = pud_offset(pgd, addr);
20563+ pmd = pmd_offset(pud, addr);
20564+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20565+ }
20566+*/
20567+#endif
20568+
20569+#ifdef CONFIG_MODULES
20570+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20571+#endif
20572+
20573+#else
20574+ pgd_t *pgd;
20575+ pud_t *pud;
20576+ pmd_t *pmd;
20577+ unsigned long addr, end;
20578+
20579+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20580+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20581+ pgd = pgd_offset_k(addr);
20582+ pud = pud_offset(pgd, addr);
20583+ pmd = pmd_offset(pud, addr);
20584+ if (!pmd_present(*pmd))
20585+ continue;
20586+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20587+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20588+ else
20589+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20590+ }
20591+
20592+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20593+ end = addr + KERNEL_IMAGE_SIZE;
20594+ for (; addr < end; addr += PMD_SIZE) {
20595+ pgd = pgd_offset_k(addr);
20596+ pud = pud_offset(pgd, addr);
20597+ pmd = pmd_offset(pud, addr);
20598+ if (!pmd_present(*pmd))
20599+ continue;
20600+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20601+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20602+ }
20603+#endif
20604+
20605+ flush_tlb_all();
20606+#endif
20607+
20608 free_init_pages("unused kernel memory",
20609 (unsigned long)(&__init_begin),
20610 (unsigned long)(&__init_end));
20611diff -urNp linux-3.0.7/arch/x86/mm/init_32.c linux-3.0.7/arch/x86/mm/init_32.c
20612--- linux-3.0.7/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20613+++ linux-3.0.7/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20614@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20615 }
20616
20617 /*
20618- * Creates a middle page table and puts a pointer to it in the
20619- * given global directory entry. This only returns the gd entry
20620- * in non-PAE compilation mode, since the middle layer is folded.
20621- */
20622-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20623-{
20624- pud_t *pud;
20625- pmd_t *pmd_table;
20626-
20627-#ifdef CONFIG_X86_PAE
20628- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20629- if (after_bootmem)
20630- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20631- else
20632- pmd_table = (pmd_t *)alloc_low_page();
20633- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20634- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20635- pud = pud_offset(pgd, 0);
20636- BUG_ON(pmd_table != pmd_offset(pud, 0));
20637-
20638- return pmd_table;
20639- }
20640-#endif
20641- pud = pud_offset(pgd, 0);
20642- pmd_table = pmd_offset(pud, 0);
20643-
20644- return pmd_table;
20645-}
20646-
20647-/*
20648 * Create a page table and place a pointer to it in a middle page
20649 * directory entry:
20650 */
20651@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20652 page_table = (pte_t *)alloc_low_page();
20653
20654 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20655+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20656+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20657+#else
20658 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20659+#endif
20660 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20661 }
20662
20663 return pte_offset_kernel(pmd, 0);
20664 }
20665
20666+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20667+{
20668+ pud_t *pud;
20669+ pmd_t *pmd_table;
20670+
20671+ pud = pud_offset(pgd, 0);
20672+ pmd_table = pmd_offset(pud, 0);
20673+
20674+ return pmd_table;
20675+}
20676+
20677 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20678 {
20679 int pgd_idx = pgd_index(vaddr);
20680@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20681 int pgd_idx, pmd_idx;
20682 unsigned long vaddr;
20683 pgd_t *pgd;
20684+ pud_t *pud;
20685 pmd_t *pmd;
20686 pte_t *pte = NULL;
20687
20688@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20689 pgd = pgd_base + pgd_idx;
20690
20691 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20692- pmd = one_md_table_init(pgd);
20693- pmd = pmd + pmd_index(vaddr);
20694+ pud = pud_offset(pgd, vaddr);
20695+ pmd = pmd_offset(pud, vaddr);
20696+
20697+#ifdef CONFIG_X86_PAE
20698+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20699+#endif
20700+
20701 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20702 pmd++, pmd_idx++) {
20703 pte = page_table_kmap_check(one_page_table_init(pmd),
20704@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20705 }
20706 }
20707
20708-static inline int is_kernel_text(unsigned long addr)
20709+static inline int is_kernel_text(unsigned long start, unsigned long end)
20710 {
20711- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20712- return 1;
20713- return 0;
20714+ if ((start > ktla_ktva((unsigned long)_etext) ||
20715+ end <= ktla_ktva((unsigned long)_stext)) &&
20716+ (start > ktla_ktva((unsigned long)_einittext) ||
20717+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20718+
20719+#ifdef CONFIG_ACPI_SLEEP
20720+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20721+#endif
20722+
20723+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20724+ return 0;
20725+ return 1;
20726 }
20727
20728 /*
20729@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20730 unsigned long last_map_addr = end;
20731 unsigned long start_pfn, end_pfn;
20732 pgd_t *pgd_base = swapper_pg_dir;
20733- int pgd_idx, pmd_idx, pte_ofs;
20734+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20735 unsigned long pfn;
20736 pgd_t *pgd;
20737+ pud_t *pud;
20738 pmd_t *pmd;
20739 pte_t *pte;
20740 unsigned pages_2m, pages_4k;
20741@@ -281,8 +282,13 @@ repeat:
20742 pfn = start_pfn;
20743 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20744 pgd = pgd_base + pgd_idx;
20745- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20746- pmd = one_md_table_init(pgd);
20747+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20748+ pud = pud_offset(pgd, 0);
20749+ pmd = pmd_offset(pud, 0);
20750+
20751+#ifdef CONFIG_X86_PAE
20752+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20753+#endif
20754
20755 if (pfn >= end_pfn)
20756 continue;
20757@@ -294,14 +300,13 @@ repeat:
20758 #endif
20759 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20760 pmd++, pmd_idx++) {
20761- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20762+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20763
20764 /*
20765 * Map with big pages if possible, otherwise
20766 * create normal page tables:
20767 */
20768 if (use_pse) {
20769- unsigned int addr2;
20770 pgprot_t prot = PAGE_KERNEL_LARGE;
20771 /*
20772 * first pass will use the same initial
20773@@ -311,11 +316,7 @@ repeat:
20774 __pgprot(PTE_IDENT_ATTR |
20775 _PAGE_PSE);
20776
20777- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20778- PAGE_OFFSET + PAGE_SIZE-1;
20779-
20780- if (is_kernel_text(addr) ||
20781- is_kernel_text(addr2))
20782+ if (is_kernel_text(address, address + PMD_SIZE))
20783 prot = PAGE_KERNEL_LARGE_EXEC;
20784
20785 pages_2m++;
20786@@ -332,7 +333,7 @@ repeat:
20787 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20788 pte += pte_ofs;
20789 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20790- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20791+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20792 pgprot_t prot = PAGE_KERNEL;
20793 /*
20794 * first pass will use the same initial
20795@@ -340,7 +341,7 @@ repeat:
20796 */
20797 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20798
20799- if (is_kernel_text(addr))
20800+ if (is_kernel_text(address, address + PAGE_SIZE))
20801 prot = PAGE_KERNEL_EXEC;
20802
20803 pages_4k++;
20804@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20805
20806 pud = pud_offset(pgd, va);
20807 pmd = pmd_offset(pud, va);
20808- if (!pmd_present(*pmd))
20809+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20810 break;
20811
20812 pte = pte_offset_kernel(pmd, va);
20813@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20814
20815 static void __init pagetable_init(void)
20816 {
20817- pgd_t *pgd_base = swapper_pg_dir;
20818-
20819- permanent_kmaps_init(pgd_base);
20820+ permanent_kmaps_init(swapper_pg_dir);
20821 }
20822
20823-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20824+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20825 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20826
20827 /* user-defined highmem size */
20828@@ -757,6 +756,12 @@ void __init mem_init(void)
20829
20830 pci_iommu_alloc();
20831
20832+#ifdef CONFIG_PAX_PER_CPU_PGD
20833+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20834+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20835+ KERNEL_PGD_PTRS);
20836+#endif
20837+
20838 #ifdef CONFIG_FLATMEM
20839 BUG_ON(!mem_map);
20840 #endif
20841@@ -774,7 +779,7 @@ void __init mem_init(void)
20842 set_highmem_pages_init();
20843
20844 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20845- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20846+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20847 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20848
20849 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20850@@ -815,10 +820,10 @@ void __init mem_init(void)
20851 ((unsigned long)&__init_end -
20852 (unsigned long)&__init_begin) >> 10,
20853
20854- (unsigned long)&_etext, (unsigned long)&_edata,
20855- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20856+ (unsigned long)&_sdata, (unsigned long)&_edata,
20857+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20858
20859- (unsigned long)&_text, (unsigned long)&_etext,
20860+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20861 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20862
20863 /*
20864@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20865 if (!kernel_set_to_readonly)
20866 return;
20867
20868+ start = ktla_ktva(start);
20869 pr_debug("Set kernel text: %lx - %lx for read write\n",
20870 start, start+size);
20871
20872@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20873 if (!kernel_set_to_readonly)
20874 return;
20875
20876+ start = ktla_ktva(start);
20877 pr_debug("Set kernel text: %lx - %lx for read only\n",
20878 start, start+size);
20879
20880@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20881 unsigned long start = PFN_ALIGN(_text);
20882 unsigned long size = PFN_ALIGN(_etext) - start;
20883
20884+ start = ktla_ktva(start);
20885 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20886 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20887 size >> 10);
20888diff -urNp linux-3.0.7/arch/x86/mm/init_64.c linux-3.0.7/arch/x86/mm/init_64.c
20889--- linux-3.0.7/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20890+++ linux-3.0.7/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20891@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20892 * around without checking the pgd every time.
20893 */
20894
20895-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20896+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20897 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20898
20899 int force_personality32;
20900@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20901
20902 for (address = start; address <= end; address += PGDIR_SIZE) {
20903 const pgd_t *pgd_ref = pgd_offset_k(address);
20904+
20905+#ifdef CONFIG_PAX_PER_CPU_PGD
20906+ unsigned long cpu;
20907+#else
20908 struct page *page;
20909+#endif
20910
20911 if (pgd_none(*pgd_ref))
20912 continue;
20913
20914 spin_lock(&pgd_lock);
20915+
20916+#ifdef CONFIG_PAX_PER_CPU_PGD
20917+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20918+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20919+#else
20920 list_for_each_entry(page, &pgd_list, lru) {
20921 pgd_t *pgd;
20922 spinlock_t *pgt_lock;
20923@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20924 /* the pgt_lock only for Xen */
20925 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20926 spin_lock(pgt_lock);
20927+#endif
20928
20929 if (pgd_none(*pgd))
20930 set_pgd(pgd, *pgd_ref);
20931@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20932 BUG_ON(pgd_page_vaddr(*pgd)
20933 != pgd_page_vaddr(*pgd_ref));
20934
20935+#ifndef CONFIG_PAX_PER_CPU_PGD
20936 spin_unlock(pgt_lock);
20937+#endif
20938+
20939 }
20940 spin_unlock(&pgd_lock);
20941 }
20942@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20943 pmd = fill_pmd(pud, vaddr);
20944 pte = fill_pte(pmd, vaddr);
20945
20946+ pax_open_kernel();
20947 set_pte(pte, new_pte);
20948+ pax_close_kernel();
20949
20950 /*
20951 * It's enough to flush this one mapping.
20952@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20953 pgd = pgd_offset_k((unsigned long)__va(phys));
20954 if (pgd_none(*pgd)) {
20955 pud = (pud_t *) spp_getpage();
20956- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20957- _PAGE_USER));
20958+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20959 }
20960 pud = pud_offset(pgd, (unsigned long)__va(phys));
20961 if (pud_none(*pud)) {
20962 pmd = (pmd_t *) spp_getpage();
20963- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20964- _PAGE_USER));
20965+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20966 }
20967 pmd = pmd_offset(pud, phys);
20968 BUG_ON(!pmd_none(*pmd));
20969@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20970 if (pfn >= pgt_buf_top)
20971 panic("alloc_low_page: ran out of memory");
20972
20973- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20974+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20975 clear_page(adr);
20976 *phys = pfn * PAGE_SIZE;
20977 return adr;
20978@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20979
20980 phys = __pa(virt);
20981 left = phys & (PAGE_SIZE - 1);
20982- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20983+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20984 adr = (void *)(((unsigned long)adr) | left);
20985
20986 return adr;
20987@@ -693,6 +707,12 @@ void __init mem_init(void)
20988
20989 pci_iommu_alloc();
20990
20991+#ifdef CONFIG_PAX_PER_CPU_PGD
20992+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20993+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20994+ KERNEL_PGD_PTRS);
20995+#endif
20996+
20997 /* clear_bss() already clear the empty_zero_page */
20998
20999 reservedpages = 0;
21000@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21001 static struct vm_area_struct gate_vma = {
21002 .vm_start = VSYSCALL_START,
21003 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21004- .vm_page_prot = PAGE_READONLY_EXEC,
21005- .vm_flags = VM_READ | VM_EXEC
21006+ .vm_page_prot = PAGE_READONLY,
21007+ .vm_flags = VM_READ
21008 };
21009
21010 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21011@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21012
21013 const char *arch_vma_name(struct vm_area_struct *vma)
21014 {
21015- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21016+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21017 return "[vdso]";
21018 if (vma == &gate_vma)
21019 return "[vsyscall]";
21020diff -urNp linux-3.0.7/arch/x86/mm/iomap_32.c linux-3.0.7/arch/x86/mm/iomap_32.c
21021--- linux-3.0.7/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
21022+++ linux-3.0.7/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
21023@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21024 type = kmap_atomic_idx_push();
21025 idx = type + KM_TYPE_NR * smp_processor_id();
21026 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21027+
21028+ pax_open_kernel();
21029 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21030+ pax_close_kernel();
21031+
21032 arch_flush_lazy_mmu_mode();
21033
21034 return (void *)vaddr;
21035diff -urNp linux-3.0.7/arch/x86/mm/ioremap.c linux-3.0.7/arch/x86/mm/ioremap.c
21036--- linux-3.0.7/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
21037+++ linux-3.0.7/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
21038@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21039 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21040 int is_ram = page_is_ram(pfn);
21041
21042- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21043+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21044 return NULL;
21045 WARN_ON_ONCE(is_ram);
21046 }
21047@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21048 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21049
21050 static __initdata int after_paging_init;
21051-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21052+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21053
21054 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21055 {
21056@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21057 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21058
21059 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21060- memset(bm_pte, 0, sizeof(bm_pte));
21061- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21062+ pmd_populate_user(&init_mm, pmd, bm_pte);
21063
21064 /*
21065 * The boot-ioremap range spans multiple pmds, for which
21066diff -urNp linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c
21067--- linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
21068+++ linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
21069@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21070 * memory (e.g. tracked pages)? For now, we need this to avoid
21071 * invoking kmemcheck for PnP BIOS calls.
21072 */
21073- if (regs->flags & X86_VM_MASK)
21074+ if (v8086_mode(regs))
21075 return false;
21076- if (regs->cs != __KERNEL_CS)
21077+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21078 return false;
21079
21080 pte = kmemcheck_pte_lookup(address);
21081diff -urNp linux-3.0.7/arch/x86/mm/mmap.c linux-3.0.7/arch/x86/mm/mmap.c
21082--- linux-3.0.7/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
21083+++ linux-3.0.7/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
21084@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21085 * Leave an at least ~128 MB hole with possible stack randomization.
21086 */
21087 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21088-#define MAX_GAP (TASK_SIZE/6*5)
21089+#define MAX_GAP (pax_task_size/6*5)
21090
21091 /*
21092 * True on X86_32 or when emulating IA32 on X86_64
21093@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21094 return rnd << PAGE_SHIFT;
21095 }
21096
21097-static unsigned long mmap_base(void)
21098+static unsigned long mmap_base(struct mm_struct *mm)
21099 {
21100 unsigned long gap = rlimit(RLIMIT_STACK);
21101+ unsigned long pax_task_size = TASK_SIZE;
21102+
21103+#ifdef CONFIG_PAX_SEGMEXEC
21104+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21105+ pax_task_size = SEGMEXEC_TASK_SIZE;
21106+#endif
21107
21108 if (gap < MIN_GAP)
21109 gap = MIN_GAP;
21110 else if (gap > MAX_GAP)
21111 gap = MAX_GAP;
21112
21113- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21114+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21115 }
21116
21117 /*
21118 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21119 * does, but not when emulating X86_32
21120 */
21121-static unsigned long mmap_legacy_base(void)
21122+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21123 {
21124- if (mmap_is_ia32())
21125+ if (mmap_is_ia32()) {
21126+
21127+#ifdef CONFIG_PAX_SEGMEXEC
21128+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21129+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21130+ else
21131+#endif
21132+
21133 return TASK_UNMAPPED_BASE;
21134- else
21135+ } else
21136 return TASK_UNMAPPED_BASE + mmap_rnd();
21137 }
21138
21139@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21140 void arch_pick_mmap_layout(struct mm_struct *mm)
21141 {
21142 if (mmap_is_legacy()) {
21143- mm->mmap_base = mmap_legacy_base();
21144+ mm->mmap_base = mmap_legacy_base(mm);
21145+
21146+#ifdef CONFIG_PAX_RANDMMAP
21147+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21148+ mm->mmap_base += mm->delta_mmap;
21149+#endif
21150+
21151 mm->get_unmapped_area = arch_get_unmapped_area;
21152 mm->unmap_area = arch_unmap_area;
21153 } else {
21154- mm->mmap_base = mmap_base();
21155+ mm->mmap_base = mmap_base(mm);
21156+
21157+#ifdef CONFIG_PAX_RANDMMAP
21158+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21159+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21160+#endif
21161+
21162 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21163 mm->unmap_area = arch_unmap_area_topdown;
21164 }
21165diff -urNp linux-3.0.7/arch/x86/mm/mmio-mod.c linux-3.0.7/arch/x86/mm/mmio-mod.c
21166--- linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
21167+++ linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
21168@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21169 break;
21170 default:
21171 {
21172- unsigned char *ip = (unsigned char *)instptr;
21173+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21174 my_trace->opcode = MMIO_UNKNOWN_OP;
21175 my_trace->width = 0;
21176 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21177@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21178 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21179 void __iomem *addr)
21180 {
21181- static atomic_t next_id;
21182+ static atomic_unchecked_t next_id;
21183 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21184 /* These are page-unaligned. */
21185 struct mmiotrace_map map = {
21186@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21187 .private = trace
21188 },
21189 .phys = offset,
21190- .id = atomic_inc_return(&next_id)
21191+ .id = atomic_inc_return_unchecked(&next_id)
21192 };
21193 map.map_id = trace->id;
21194
21195diff -urNp linux-3.0.7/arch/x86/mm/pageattr-test.c linux-3.0.7/arch/x86/mm/pageattr-test.c
21196--- linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
21197+++ linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
21198@@ -36,7 +36,7 @@ enum {
21199
21200 static int pte_testbit(pte_t pte)
21201 {
21202- return pte_flags(pte) & _PAGE_UNUSED1;
21203+ return pte_flags(pte) & _PAGE_CPA_TEST;
21204 }
21205
21206 struct split_state {
21207diff -urNp linux-3.0.7/arch/x86/mm/pageattr.c linux-3.0.7/arch/x86/mm/pageattr.c
21208--- linux-3.0.7/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
21209+++ linux-3.0.7/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
21210@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21211 */
21212 #ifdef CONFIG_PCI_BIOS
21213 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21214- pgprot_val(forbidden) |= _PAGE_NX;
21215+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21216 #endif
21217
21218 /*
21219@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21220 * Does not cover __inittext since that is gone later on. On
21221 * 64bit we do not enforce !NX on the low mapping
21222 */
21223- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21224- pgprot_val(forbidden) |= _PAGE_NX;
21225+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21226+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21227
21228+#ifdef CONFIG_DEBUG_RODATA
21229 /*
21230 * The .rodata section needs to be read-only. Using the pfn
21231 * catches all aliases.
21232@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21233 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21234 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21235 pgprot_val(forbidden) |= _PAGE_RW;
21236+#endif
21237
21238 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21239 /*
21240@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21241 }
21242 #endif
21243
21244+#ifdef CONFIG_PAX_KERNEXEC
21245+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21246+ pgprot_val(forbidden) |= _PAGE_RW;
21247+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21248+ }
21249+#endif
21250+
21251 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21252
21253 return prot;
21254@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21255 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21256 {
21257 /* change init_mm */
21258+ pax_open_kernel();
21259 set_pte_atomic(kpte, pte);
21260+
21261 #ifdef CONFIG_X86_32
21262 if (!SHARED_KERNEL_PMD) {
21263+
21264+#ifdef CONFIG_PAX_PER_CPU_PGD
21265+ unsigned long cpu;
21266+#else
21267 struct page *page;
21268+#endif
21269
21270+#ifdef CONFIG_PAX_PER_CPU_PGD
21271+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21272+ pgd_t *pgd = get_cpu_pgd(cpu);
21273+#else
21274 list_for_each_entry(page, &pgd_list, lru) {
21275- pgd_t *pgd;
21276+ pgd_t *pgd = (pgd_t *)page_address(page);
21277+#endif
21278+
21279 pud_t *pud;
21280 pmd_t *pmd;
21281
21282- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21283+ pgd += pgd_index(address);
21284 pud = pud_offset(pgd, address);
21285 pmd = pmd_offset(pud, address);
21286 set_pte_atomic((pte_t *)pmd, pte);
21287 }
21288 }
21289 #endif
21290+ pax_close_kernel();
21291 }
21292
21293 static int
21294diff -urNp linux-3.0.7/arch/x86/mm/pat.c linux-3.0.7/arch/x86/mm/pat.c
21295--- linux-3.0.7/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
21296+++ linux-3.0.7/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
21297@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21298
21299 if (!entry) {
21300 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21301- current->comm, current->pid, start, end);
21302+ current->comm, task_pid_nr(current), start, end);
21303 return -EINVAL;
21304 }
21305
21306@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21307 while (cursor < to) {
21308 if (!devmem_is_allowed(pfn)) {
21309 printk(KERN_INFO
21310- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21311- current->comm, from, to);
21312+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21313+ current->comm, from, to, cursor);
21314 return 0;
21315 }
21316 cursor += PAGE_SIZE;
21317@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21318 printk(KERN_INFO
21319 "%s:%d ioremap_change_attr failed %s "
21320 "for %Lx-%Lx\n",
21321- current->comm, current->pid,
21322+ current->comm, task_pid_nr(current),
21323 cattr_name(flags),
21324 base, (unsigned long long)(base + size));
21325 return -EINVAL;
21326@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21327 if (want_flags != flags) {
21328 printk(KERN_WARNING
21329 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21330- current->comm, current->pid,
21331+ current->comm, task_pid_nr(current),
21332 cattr_name(want_flags),
21333 (unsigned long long)paddr,
21334 (unsigned long long)(paddr + size),
21335@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21336 free_memtype(paddr, paddr + size);
21337 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21338 " for %Lx-%Lx, got %s\n",
21339- current->comm, current->pid,
21340+ current->comm, task_pid_nr(current),
21341 cattr_name(want_flags),
21342 (unsigned long long)paddr,
21343 (unsigned long long)(paddr + size),
21344diff -urNp linux-3.0.7/arch/x86/mm/pf_in.c linux-3.0.7/arch/x86/mm/pf_in.c
21345--- linux-3.0.7/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
21346+++ linux-3.0.7/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
21347@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21348 int i;
21349 enum reason_type rv = OTHERS;
21350
21351- p = (unsigned char *)ins_addr;
21352+ p = (unsigned char *)ktla_ktva(ins_addr);
21353 p += skip_prefix(p, &prf);
21354 p += get_opcode(p, &opcode);
21355
21356@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21357 struct prefix_bits prf;
21358 int i;
21359
21360- p = (unsigned char *)ins_addr;
21361+ p = (unsigned char *)ktla_ktva(ins_addr);
21362 p += skip_prefix(p, &prf);
21363 p += get_opcode(p, &opcode);
21364
21365@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21366 struct prefix_bits prf;
21367 int i;
21368
21369- p = (unsigned char *)ins_addr;
21370+ p = (unsigned char *)ktla_ktva(ins_addr);
21371 p += skip_prefix(p, &prf);
21372 p += get_opcode(p, &opcode);
21373
21374@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21375 struct prefix_bits prf;
21376 int i;
21377
21378- p = (unsigned char *)ins_addr;
21379+ p = (unsigned char *)ktla_ktva(ins_addr);
21380 p += skip_prefix(p, &prf);
21381 p += get_opcode(p, &opcode);
21382 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21383@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21384 struct prefix_bits prf;
21385 int i;
21386
21387- p = (unsigned char *)ins_addr;
21388+ p = (unsigned char *)ktla_ktva(ins_addr);
21389 p += skip_prefix(p, &prf);
21390 p += get_opcode(p, &opcode);
21391 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21392diff -urNp linux-3.0.7/arch/x86/mm/pgtable.c linux-3.0.7/arch/x86/mm/pgtable.c
21393--- linux-3.0.7/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21394+++ linux-3.0.7/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21395@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21396 list_del(&page->lru);
21397 }
21398
21399-#define UNSHARED_PTRS_PER_PGD \
21400- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21401+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21402+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21403
21404+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21405+{
21406+ while (count--)
21407+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21408+}
21409+#endif
21410+
21411+#ifdef CONFIG_PAX_PER_CPU_PGD
21412+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21413+{
21414+ while (count--)
21415+
21416+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21417+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21418+#else
21419+ *dst++ = *src++;
21420+#endif
21421
21422+}
21423+#endif
21424+
21425+#ifdef CONFIG_X86_64
21426+#define pxd_t pud_t
21427+#define pyd_t pgd_t
21428+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21429+#define pxd_free(mm, pud) pud_free((mm), (pud))
21430+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21431+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21432+#define PYD_SIZE PGDIR_SIZE
21433+#else
21434+#define pxd_t pmd_t
21435+#define pyd_t pud_t
21436+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21437+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21438+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21439+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21440+#define PYD_SIZE PUD_SIZE
21441+#endif
21442+
21443+#ifdef CONFIG_PAX_PER_CPU_PGD
21444+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21445+static inline void pgd_dtor(pgd_t *pgd) {}
21446+#else
21447 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21448 {
21449 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21450@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21451 pgd_list_del(pgd);
21452 spin_unlock(&pgd_lock);
21453 }
21454+#endif
21455
21456 /*
21457 * List of all pgd's needed for non-PAE so it can invalidate entries
21458@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21459 * -- wli
21460 */
21461
21462-#ifdef CONFIG_X86_PAE
21463+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21464 /*
21465 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21466 * updating the top-level pagetable entries to guarantee the
21467@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21468 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21469 * and initialize the kernel pmds here.
21470 */
21471-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21472+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21473
21474 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21475 {
21476@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21477 */
21478 flush_tlb_mm(mm);
21479 }
21480+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21481+#define PREALLOCATED_PXDS USER_PGD_PTRS
21482 #else /* !CONFIG_X86_PAE */
21483
21484 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21485-#define PREALLOCATED_PMDS 0
21486+#define PREALLOCATED_PXDS 0
21487
21488 #endif /* CONFIG_X86_PAE */
21489
21490-static void free_pmds(pmd_t *pmds[])
21491+static void free_pxds(pxd_t *pxds[])
21492 {
21493 int i;
21494
21495- for(i = 0; i < PREALLOCATED_PMDS; i++)
21496- if (pmds[i])
21497- free_page((unsigned long)pmds[i]);
21498+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21499+ if (pxds[i])
21500+ free_page((unsigned long)pxds[i]);
21501 }
21502
21503-static int preallocate_pmds(pmd_t *pmds[])
21504+static int preallocate_pxds(pxd_t *pxds[])
21505 {
21506 int i;
21507 bool failed = false;
21508
21509- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21510- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21511- if (pmd == NULL)
21512+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21513+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21514+ if (pxd == NULL)
21515 failed = true;
21516- pmds[i] = pmd;
21517+ pxds[i] = pxd;
21518 }
21519
21520 if (failed) {
21521- free_pmds(pmds);
21522+ free_pxds(pxds);
21523 return -ENOMEM;
21524 }
21525
21526@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21527 * preallocate which never got a corresponding vma will need to be
21528 * freed manually.
21529 */
21530-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21531+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21532 {
21533 int i;
21534
21535- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21536+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21537 pgd_t pgd = pgdp[i];
21538
21539 if (pgd_val(pgd) != 0) {
21540- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21541+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21542
21543- pgdp[i] = native_make_pgd(0);
21544+ set_pgd(pgdp + i, native_make_pgd(0));
21545
21546- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21547- pmd_free(mm, pmd);
21548+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21549+ pxd_free(mm, pxd);
21550 }
21551 }
21552 }
21553
21554-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21555+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21556 {
21557- pud_t *pud;
21558+ pyd_t *pyd;
21559 unsigned long addr;
21560 int i;
21561
21562- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21563+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21564 return;
21565
21566- pud = pud_offset(pgd, 0);
21567+#ifdef CONFIG_X86_64
21568+ pyd = pyd_offset(mm, 0L);
21569+#else
21570+ pyd = pyd_offset(pgd, 0L);
21571+#endif
21572
21573- for (addr = i = 0; i < PREALLOCATED_PMDS;
21574- i++, pud++, addr += PUD_SIZE) {
21575- pmd_t *pmd = pmds[i];
21576+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21577+ i++, pyd++, addr += PYD_SIZE) {
21578+ pxd_t *pxd = pxds[i];
21579
21580 if (i >= KERNEL_PGD_BOUNDARY)
21581- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21582- sizeof(pmd_t) * PTRS_PER_PMD);
21583+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21584+ sizeof(pxd_t) * PTRS_PER_PMD);
21585
21586- pud_populate(mm, pud, pmd);
21587+ pyd_populate(mm, pyd, pxd);
21588 }
21589 }
21590
21591 pgd_t *pgd_alloc(struct mm_struct *mm)
21592 {
21593 pgd_t *pgd;
21594- pmd_t *pmds[PREALLOCATED_PMDS];
21595+ pxd_t *pxds[PREALLOCATED_PXDS];
21596
21597 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21598
21599@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21600
21601 mm->pgd = pgd;
21602
21603- if (preallocate_pmds(pmds) != 0)
21604+ if (preallocate_pxds(pxds) != 0)
21605 goto out_free_pgd;
21606
21607 if (paravirt_pgd_alloc(mm) != 0)
21608- goto out_free_pmds;
21609+ goto out_free_pxds;
21610
21611 /*
21612 * Make sure that pre-populating the pmds is atomic with
21613@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21614 spin_lock(&pgd_lock);
21615
21616 pgd_ctor(mm, pgd);
21617- pgd_prepopulate_pmd(mm, pgd, pmds);
21618+ pgd_prepopulate_pxd(mm, pgd, pxds);
21619
21620 spin_unlock(&pgd_lock);
21621
21622 return pgd;
21623
21624-out_free_pmds:
21625- free_pmds(pmds);
21626+out_free_pxds:
21627+ free_pxds(pxds);
21628 out_free_pgd:
21629 free_page((unsigned long)pgd);
21630 out:
21631@@ -295,7 +344,7 @@ out:
21632
21633 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21634 {
21635- pgd_mop_up_pmds(mm, pgd);
21636+ pgd_mop_up_pxds(mm, pgd);
21637 pgd_dtor(pgd);
21638 paravirt_pgd_free(mm, pgd);
21639 free_page((unsigned long)pgd);
21640diff -urNp linux-3.0.7/arch/x86/mm/pgtable_32.c linux-3.0.7/arch/x86/mm/pgtable_32.c
21641--- linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21642+++ linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21643@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21644 return;
21645 }
21646 pte = pte_offset_kernel(pmd, vaddr);
21647+
21648+ pax_open_kernel();
21649 if (pte_val(pteval))
21650 set_pte_at(&init_mm, vaddr, pte, pteval);
21651 else
21652 pte_clear(&init_mm, vaddr, pte);
21653+ pax_close_kernel();
21654
21655 /*
21656 * It's enough to flush this one mapping.
21657diff -urNp linux-3.0.7/arch/x86/mm/setup_nx.c linux-3.0.7/arch/x86/mm/setup_nx.c
21658--- linux-3.0.7/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21659+++ linux-3.0.7/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21660@@ -5,8 +5,10 @@
21661 #include <asm/pgtable.h>
21662 #include <asm/proto.h>
21663
21664+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21665 static int disable_nx __cpuinitdata;
21666
21667+#ifndef CONFIG_PAX_PAGEEXEC
21668 /*
21669 * noexec = on|off
21670 *
21671@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21672 return 0;
21673 }
21674 early_param("noexec", noexec_setup);
21675+#endif
21676+
21677+#endif
21678
21679 void __cpuinit x86_configure_nx(void)
21680 {
21681+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21682 if (cpu_has_nx && !disable_nx)
21683 __supported_pte_mask |= _PAGE_NX;
21684 else
21685+#endif
21686 __supported_pte_mask &= ~_PAGE_NX;
21687 }
21688
21689diff -urNp linux-3.0.7/arch/x86/mm/tlb.c linux-3.0.7/arch/x86/mm/tlb.c
21690--- linux-3.0.7/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21691+++ linux-3.0.7/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21692@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21693 BUG();
21694 cpumask_clear_cpu(cpu,
21695 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21696+
21697+#ifndef CONFIG_PAX_PER_CPU_PGD
21698 load_cr3(swapper_pg_dir);
21699+#endif
21700+
21701 }
21702 EXPORT_SYMBOL_GPL(leave_mm);
21703
21704diff -urNp linux-3.0.7/arch/x86/net/bpf_jit.S linux-3.0.7/arch/x86/net/bpf_jit.S
21705--- linux-3.0.7/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21706+++ linux-3.0.7/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21707@@ -9,6 +9,7 @@
21708 */
21709 #include <linux/linkage.h>
21710 #include <asm/dwarf2.h>
21711+#include <asm/alternative-asm.h>
21712
21713 /*
21714 * Calling convention :
21715@@ -35,6 +36,7 @@ sk_load_word:
21716 jle bpf_slow_path_word
21717 mov (SKBDATA,%rsi),%eax
21718 bswap %eax /* ntohl() */
21719+ pax_force_retaddr
21720 ret
21721
21722
21723@@ -53,6 +55,7 @@ sk_load_half:
21724 jle bpf_slow_path_half
21725 movzwl (SKBDATA,%rsi),%eax
21726 rol $8,%ax # ntohs()
21727+ pax_force_retaddr
21728 ret
21729
21730 sk_load_byte_ind:
21731@@ -66,6 +69,7 @@ sk_load_byte:
21732 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21733 jle bpf_slow_path_byte
21734 movzbl (SKBDATA,%rsi),%eax
21735+ pax_force_retaddr
21736 ret
21737
21738 /**
21739@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21740 movzbl (SKBDATA,%rsi),%ebx
21741 and $15,%bl
21742 shl $2,%bl
21743+ pax_force_retaddr
21744 ret
21745 CFI_ENDPROC
21746 ENDPROC(sk_load_byte_msh)
21747@@ -91,6 +96,7 @@ bpf_error:
21748 xor %eax,%eax
21749 mov -8(%rbp),%rbx
21750 leaveq
21751+ pax_force_retaddr
21752 ret
21753
21754 /* rsi contains offset and can be scratched */
21755@@ -113,6 +119,7 @@ bpf_slow_path_word:
21756 js bpf_error
21757 mov -12(%rbp),%eax
21758 bswap %eax
21759+ pax_force_retaddr
21760 ret
21761
21762 bpf_slow_path_half:
21763@@ -121,12 +128,14 @@ bpf_slow_path_half:
21764 mov -12(%rbp),%ax
21765 rol $8,%ax
21766 movzwl %ax,%eax
21767+ pax_force_retaddr
21768 ret
21769
21770 bpf_slow_path_byte:
21771 bpf_slow_path_common(1)
21772 js bpf_error
21773 movzbl -12(%rbp),%eax
21774+ pax_force_retaddr
21775 ret
21776
21777 bpf_slow_path_byte_msh:
21778@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
21779 and $15,%al
21780 shl $2,%al
21781 xchg %eax,%ebx
21782+ pax_force_retaddr
21783 ret
21784diff -urNp linux-3.0.7/arch/x86/net/bpf_jit_comp.c linux-3.0.7/arch/x86/net/bpf_jit_comp.c
21785--- linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21786+++ linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21787@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21788 module_free(NULL, image);
21789 return;
21790 }
21791+ pax_open_kernel();
21792 memcpy(image + proglen, temp, ilen);
21793+ pax_close_kernel();
21794 }
21795 proglen += ilen;
21796 addrs[i] = proglen;
21797@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21798 break;
21799 }
21800 if (proglen == oldproglen) {
21801- image = module_alloc(max_t(unsigned int,
21802+ image = module_alloc_exec(max_t(unsigned int,
21803 proglen,
21804 sizeof(struct work_struct)));
21805 if (!image)
21806diff -urNp linux-3.0.7/arch/x86/oprofile/backtrace.c linux-3.0.7/arch/x86/oprofile/backtrace.c
21807--- linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21808+++ linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21809@@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21810 struct stack_frame_ia32 *fp;
21811 unsigned long bytes;
21812
21813- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21814+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21815 if (bytes != sizeof(bufhead))
21816 return NULL;
21817
21818- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21819+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21820
21821 oprofile_add_trace(bufhead[0].return_address);
21822
21823@@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21824 struct stack_frame bufhead[2];
21825 unsigned long bytes;
21826
21827- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21828+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21829 if (bytes != sizeof(bufhead))
21830 return NULL;
21831
21832@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21833 {
21834 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21835
21836- if (!user_mode_vm(regs)) {
21837+ if (!user_mode(regs)) {
21838 unsigned long stack = kernel_stack_pointer(regs);
21839 if (depth)
21840 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21841diff -urNp linux-3.0.7/arch/x86/pci/mrst.c linux-3.0.7/arch/x86/pci/mrst.c
21842--- linux-3.0.7/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21843+++ linux-3.0.7/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21844@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21845 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21846 pci_mmcfg_late_init();
21847 pcibios_enable_irq = mrst_pci_irq_enable;
21848- pci_root_ops = pci_mrst_ops;
21849+ pax_open_kernel();
21850+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21851+ pax_close_kernel();
21852 /* Continue with standard init */
21853 return 1;
21854 }
21855diff -urNp linux-3.0.7/arch/x86/pci/pcbios.c linux-3.0.7/arch/x86/pci/pcbios.c
21856--- linux-3.0.7/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21857+++ linux-3.0.7/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21858@@ -79,50 +79,93 @@ union bios32 {
21859 static struct {
21860 unsigned long address;
21861 unsigned short segment;
21862-} bios32_indirect = { 0, __KERNEL_CS };
21863+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21864
21865 /*
21866 * Returns the entry point for the given service, NULL on error
21867 */
21868
21869-static unsigned long bios32_service(unsigned long service)
21870+static unsigned long __devinit bios32_service(unsigned long service)
21871 {
21872 unsigned char return_code; /* %al */
21873 unsigned long address; /* %ebx */
21874 unsigned long length; /* %ecx */
21875 unsigned long entry; /* %edx */
21876 unsigned long flags;
21877+ struct desc_struct d, *gdt;
21878
21879 local_irq_save(flags);
21880- __asm__("lcall *(%%edi); cld"
21881+
21882+ gdt = get_cpu_gdt_table(smp_processor_id());
21883+
21884+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21885+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21886+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21887+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21888+
21889+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21890 : "=a" (return_code),
21891 "=b" (address),
21892 "=c" (length),
21893 "=d" (entry)
21894 : "0" (service),
21895 "1" (0),
21896- "D" (&bios32_indirect));
21897+ "D" (&bios32_indirect),
21898+ "r"(__PCIBIOS_DS)
21899+ : "memory");
21900+
21901+ pax_open_kernel();
21902+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21903+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21904+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21905+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21906+ pax_close_kernel();
21907+
21908 local_irq_restore(flags);
21909
21910 switch (return_code) {
21911- case 0:
21912- return address + entry;
21913- case 0x80: /* Not present */
21914- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21915- return 0;
21916- default: /* Shouldn't happen */
21917- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21918- service, return_code);
21919+ case 0: {
21920+ int cpu;
21921+ unsigned char flags;
21922+
21923+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21924+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21925+ printk(KERN_WARNING "bios32_service: not valid\n");
21926 return 0;
21927+ }
21928+ address = address + PAGE_OFFSET;
21929+ length += 16UL; /* some BIOSs underreport this... */
21930+ flags = 4;
21931+ if (length >= 64*1024*1024) {
21932+ length >>= PAGE_SHIFT;
21933+ flags |= 8;
21934+ }
21935+
21936+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21937+ gdt = get_cpu_gdt_table(cpu);
21938+ pack_descriptor(&d, address, length, 0x9b, flags);
21939+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21940+ pack_descriptor(&d, address, length, 0x93, flags);
21941+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21942+ }
21943+ return entry;
21944+ }
21945+ case 0x80: /* Not present */
21946+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21947+ return 0;
21948+ default: /* Shouldn't happen */
21949+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21950+ service, return_code);
21951+ return 0;
21952 }
21953 }
21954
21955 static struct {
21956 unsigned long address;
21957 unsigned short segment;
21958-} pci_indirect = { 0, __KERNEL_CS };
21959+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21960
21961-static int pci_bios_present;
21962+static int pci_bios_present __read_only;
21963
21964 static int __devinit check_pcibios(void)
21965 {
21966@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21967 unsigned long flags, pcibios_entry;
21968
21969 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21970- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21971+ pci_indirect.address = pcibios_entry;
21972
21973 local_irq_save(flags);
21974- __asm__(
21975- "lcall *(%%edi); cld\n\t"
21976+ __asm__("movw %w6, %%ds\n\t"
21977+ "lcall *%%ss:(%%edi); cld\n\t"
21978+ "push %%ss\n\t"
21979+ "pop %%ds\n\t"
21980 "jc 1f\n\t"
21981 "xor %%ah, %%ah\n"
21982 "1:"
21983@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21984 "=b" (ebx),
21985 "=c" (ecx)
21986 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21987- "D" (&pci_indirect)
21988+ "D" (&pci_indirect),
21989+ "r" (__PCIBIOS_DS)
21990 : "memory");
21991 local_irq_restore(flags);
21992
21993@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21994
21995 switch (len) {
21996 case 1:
21997- __asm__("lcall *(%%esi); cld\n\t"
21998+ __asm__("movw %w6, %%ds\n\t"
21999+ "lcall *%%ss:(%%esi); cld\n\t"
22000+ "push %%ss\n\t"
22001+ "pop %%ds\n\t"
22002 "jc 1f\n\t"
22003 "xor %%ah, %%ah\n"
22004 "1:"
22005@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
22006 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22007 "b" (bx),
22008 "D" ((long)reg),
22009- "S" (&pci_indirect));
22010+ "S" (&pci_indirect),
22011+ "r" (__PCIBIOS_DS));
22012 /*
22013 * Zero-extend the result beyond 8 bits, do not trust the
22014 * BIOS having done it:
22015@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
22016 *value &= 0xff;
22017 break;
22018 case 2:
22019- __asm__("lcall *(%%esi); cld\n\t"
22020+ __asm__("movw %w6, %%ds\n\t"
22021+ "lcall *%%ss:(%%esi); cld\n\t"
22022+ "push %%ss\n\t"
22023+ "pop %%ds\n\t"
22024 "jc 1f\n\t"
22025 "xor %%ah, %%ah\n"
22026 "1:"
22027@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
22028 : "1" (PCIBIOS_READ_CONFIG_WORD),
22029 "b" (bx),
22030 "D" ((long)reg),
22031- "S" (&pci_indirect));
22032+ "S" (&pci_indirect),
22033+ "r" (__PCIBIOS_DS));
22034 /*
22035 * Zero-extend the result beyond 16 bits, do not trust the
22036 * BIOS having done it:
22037@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
22038 *value &= 0xffff;
22039 break;
22040 case 4:
22041- __asm__("lcall *(%%esi); cld\n\t"
22042+ __asm__("movw %w6, %%ds\n\t"
22043+ "lcall *%%ss:(%%esi); cld\n\t"
22044+ "push %%ss\n\t"
22045+ "pop %%ds\n\t"
22046 "jc 1f\n\t"
22047 "xor %%ah, %%ah\n"
22048 "1:"
22049@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
22050 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22051 "b" (bx),
22052 "D" ((long)reg),
22053- "S" (&pci_indirect));
22054+ "S" (&pci_indirect),
22055+ "r" (__PCIBIOS_DS));
22056 break;
22057 }
22058
22059@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
22060
22061 switch (len) {
22062 case 1:
22063- __asm__("lcall *(%%esi); cld\n\t"
22064+ __asm__("movw %w6, %%ds\n\t"
22065+ "lcall *%%ss:(%%esi); cld\n\t"
22066+ "push %%ss\n\t"
22067+ "pop %%ds\n\t"
22068 "jc 1f\n\t"
22069 "xor %%ah, %%ah\n"
22070 "1:"
22071@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
22072 "c" (value),
22073 "b" (bx),
22074 "D" ((long)reg),
22075- "S" (&pci_indirect));
22076+ "S" (&pci_indirect),
22077+ "r" (__PCIBIOS_DS));
22078 break;
22079 case 2:
22080- __asm__("lcall *(%%esi); cld\n\t"
22081+ __asm__("movw %w6, %%ds\n\t"
22082+ "lcall *%%ss:(%%esi); cld\n\t"
22083+ "push %%ss\n\t"
22084+ "pop %%ds\n\t"
22085 "jc 1f\n\t"
22086 "xor %%ah, %%ah\n"
22087 "1:"
22088@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
22089 "c" (value),
22090 "b" (bx),
22091 "D" ((long)reg),
22092- "S" (&pci_indirect));
22093+ "S" (&pci_indirect),
22094+ "r" (__PCIBIOS_DS));
22095 break;
22096 case 4:
22097- __asm__("lcall *(%%esi); cld\n\t"
22098+ __asm__("movw %w6, %%ds\n\t"
22099+ "lcall *%%ss:(%%esi); cld\n\t"
22100+ "push %%ss\n\t"
22101+ "pop %%ds\n\t"
22102 "jc 1f\n\t"
22103 "xor %%ah, %%ah\n"
22104 "1:"
22105@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
22106 "c" (value),
22107 "b" (bx),
22108 "D" ((long)reg),
22109- "S" (&pci_indirect));
22110+ "S" (&pci_indirect),
22111+ "r" (__PCIBIOS_DS));
22112 break;
22113 }
22114
22115@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
22116
22117 DBG("PCI: Fetching IRQ routing table... ");
22118 __asm__("push %%es\n\t"
22119+ "movw %w8, %%ds\n\t"
22120 "push %%ds\n\t"
22121 "pop %%es\n\t"
22122- "lcall *(%%esi); cld\n\t"
22123+ "lcall *%%ss:(%%esi); cld\n\t"
22124 "pop %%es\n\t"
22125+ "push %%ss\n\t"
22126+ "pop %%ds\n"
22127 "jc 1f\n\t"
22128 "xor %%ah, %%ah\n"
22129 "1:"
22130@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
22131 "1" (0),
22132 "D" ((long) &opt),
22133 "S" (&pci_indirect),
22134- "m" (opt)
22135+ "m" (opt),
22136+ "r" (__PCIBIOS_DS)
22137 : "memory");
22138 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22139 if (ret & 0xff00)
22140@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
22141 {
22142 int ret;
22143
22144- __asm__("lcall *(%%esi); cld\n\t"
22145+ __asm__("movw %w5, %%ds\n\t"
22146+ "lcall *%%ss:(%%esi); cld\n\t"
22147+ "push %%ss\n\t"
22148+ "pop %%ds\n"
22149 "jc 1f\n\t"
22150 "xor %%ah, %%ah\n"
22151 "1:"
22152@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
22153 : "0" (PCIBIOS_SET_PCI_HW_INT),
22154 "b" ((dev->bus->number << 8) | dev->devfn),
22155 "c" ((irq << 8) | (pin + 10)),
22156- "S" (&pci_indirect));
22157+ "S" (&pci_indirect),
22158+ "r" (__PCIBIOS_DS));
22159 return !(ret & 0xff00);
22160 }
22161 EXPORT_SYMBOL(pcibios_set_irq_routing);
22162diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_32.c linux-3.0.7/arch/x86/platform/efi/efi_32.c
22163--- linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
22164+++ linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
22165@@ -38,70 +38,56 @@
22166 */
22167
22168 static unsigned long efi_rt_eflags;
22169-static pgd_t efi_bak_pg_dir_pointer[2];
22170+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22171
22172-void efi_call_phys_prelog(void)
22173+void __init efi_call_phys_prelog(void)
22174 {
22175- unsigned long cr4;
22176- unsigned long temp;
22177 struct desc_ptr gdt_descr;
22178
22179- local_irq_save(efi_rt_eflags);
22180+#ifdef CONFIG_PAX_KERNEXEC
22181+ struct desc_struct d;
22182+#endif
22183
22184- /*
22185- * If I don't have PAE, I should just duplicate two entries in page
22186- * directory. If I have PAE, I just need to duplicate one entry in
22187- * page directory.
22188- */
22189- cr4 = read_cr4_safe();
22190+ local_irq_save(efi_rt_eflags);
22191
22192- if (cr4 & X86_CR4_PAE) {
22193- efi_bak_pg_dir_pointer[0].pgd =
22194- swapper_pg_dir[pgd_index(0)].pgd;
22195- swapper_pg_dir[0].pgd =
22196- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22197- } else {
22198- efi_bak_pg_dir_pointer[0].pgd =
22199- swapper_pg_dir[pgd_index(0)].pgd;
22200- efi_bak_pg_dir_pointer[1].pgd =
22201- swapper_pg_dir[pgd_index(0x400000)].pgd;
22202- swapper_pg_dir[pgd_index(0)].pgd =
22203- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22204- temp = PAGE_OFFSET + 0x400000;
22205- swapper_pg_dir[pgd_index(0x400000)].pgd =
22206- swapper_pg_dir[pgd_index(temp)].pgd;
22207- }
22208+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22209+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22210+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22211
22212 /*
22213 * After the lock is released, the original page table is restored.
22214 */
22215 __flush_tlb_all();
22216
22217+#ifdef CONFIG_PAX_KERNEXEC
22218+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22219+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22220+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22221+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22222+#endif
22223+
22224 gdt_descr.address = __pa(get_cpu_gdt_table(0));
22225 gdt_descr.size = GDT_SIZE - 1;
22226 load_gdt(&gdt_descr);
22227 }
22228
22229-void efi_call_phys_epilog(void)
22230+void __init efi_call_phys_epilog(void)
22231 {
22232- unsigned long cr4;
22233 struct desc_ptr gdt_descr;
22234
22235+#ifdef CONFIG_PAX_KERNEXEC
22236+ struct desc_struct d;
22237+
22238+ memset(&d, 0, sizeof d);
22239+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22240+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22241+#endif
22242+
22243 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22244 gdt_descr.size = GDT_SIZE - 1;
22245 load_gdt(&gdt_descr);
22246
22247- cr4 = read_cr4_safe();
22248-
22249- if (cr4 & X86_CR4_PAE) {
22250- swapper_pg_dir[pgd_index(0)].pgd =
22251- efi_bak_pg_dir_pointer[0].pgd;
22252- } else {
22253- swapper_pg_dir[pgd_index(0)].pgd =
22254- efi_bak_pg_dir_pointer[0].pgd;
22255- swapper_pg_dir[pgd_index(0x400000)].pgd =
22256- efi_bak_pg_dir_pointer[1].pgd;
22257- }
22258+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22259
22260 /*
22261 * After the lock is released, the original page table is restored.
22262diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S
22263--- linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
22264+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
22265@@ -6,7 +6,9 @@
22266 */
22267
22268 #include <linux/linkage.h>
22269+#include <linux/init.h>
22270 #include <asm/page_types.h>
22271+#include <asm/segment.h>
22272
22273 /*
22274 * efi_call_phys(void *, ...) is a function with variable parameters.
22275@@ -20,7 +22,7 @@
22276 * service functions will comply with gcc calling convention, too.
22277 */
22278
22279-.text
22280+__INIT
22281 ENTRY(efi_call_phys)
22282 /*
22283 * 0. The function can only be called in Linux kernel. So CS has been
22284@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22285 * The mapping of lower virtual memory has been created in prelog and
22286 * epilog.
22287 */
22288- movl $1f, %edx
22289- subl $__PAGE_OFFSET, %edx
22290- jmp *%edx
22291+ movl $(__KERNEXEC_EFI_DS), %edx
22292+ mov %edx, %ds
22293+ mov %edx, %es
22294+ mov %edx, %ss
22295+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22296 1:
22297
22298 /*
22299@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22300 * parameter 2, ..., param n. To make things easy, we save the return
22301 * address of efi_call_phys in a global variable.
22302 */
22303- popl %edx
22304- movl %edx, saved_return_addr
22305- /* get the function pointer into ECX*/
22306- popl %ecx
22307- movl %ecx, efi_rt_function_ptr
22308- movl $2f, %edx
22309- subl $__PAGE_OFFSET, %edx
22310- pushl %edx
22311+ popl (saved_return_addr)
22312+ popl (efi_rt_function_ptr)
22313
22314 /*
22315 * 3. Clear PG bit in %CR0.
22316@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22317 /*
22318 * 5. Call the physical function.
22319 */
22320- jmp *%ecx
22321+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22322
22323-2:
22324 /*
22325 * 6. After EFI runtime service returns, control will return to
22326 * following instruction. We'd better readjust stack pointer first.
22327@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22328 movl %cr0, %edx
22329 orl $0x80000000, %edx
22330 movl %edx, %cr0
22331- jmp 1f
22332-1:
22333+
22334 /*
22335 * 8. Now restore the virtual mode from flat mode by
22336 * adding EIP with PAGE_OFFSET.
22337 */
22338- movl $1f, %edx
22339- jmp *%edx
22340+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22341 1:
22342+ movl $(__KERNEL_DS), %edx
22343+ mov %edx, %ds
22344+ mov %edx, %es
22345+ mov %edx, %ss
22346
22347 /*
22348 * 9. Balance the stack. And because EAX contain the return value,
22349 * we'd better not clobber it.
22350 */
22351- leal efi_rt_function_ptr, %edx
22352- movl (%edx), %ecx
22353- pushl %ecx
22354+ pushl (efi_rt_function_ptr)
22355
22356 /*
22357- * 10. Push the saved return address onto the stack and return.
22358+ * 10. Return to the saved return address.
22359 */
22360- leal saved_return_addr, %edx
22361- movl (%edx), %ecx
22362- pushl %ecx
22363- ret
22364+ jmpl *(saved_return_addr)
22365 ENDPROC(efi_call_phys)
22366 .previous
22367
22368-.data
22369+__INITDATA
22370 saved_return_addr:
22371 .long 0
22372 efi_rt_function_ptr:
22373diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S
22374--- linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22375+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22376@@ -7,6 +7,7 @@
22377 */
22378
22379 #include <linux/linkage.h>
22380+#include <asm/alternative-asm.h>
22381
22382 #define SAVE_XMM \
22383 mov %rsp, %rax; \
22384@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22385 call *%rdi
22386 addq $32, %rsp
22387 RESTORE_XMM
22388+ pax_force_retaddr
22389 ret
22390 ENDPROC(efi_call0)
22391
22392@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22393 call *%rdi
22394 addq $32, %rsp
22395 RESTORE_XMM
22396+ pax_force_retaddr
22397 ret
22398 ENDPROC(efi_call1)
22399
22400@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22401 call *%rdi
22402 addq $32, %rsp
22403 RESTORE_XMM
22404+ pax_force_retaddr
22405 ret
22406 ENDPROC(efi_call2)
22407
22408@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22409 call *%rdi
22410 addq $32, %rsp
22411 RESTORE_XMM
22412+ pax_force_retaddr
22413 ret
22414 ENDPROC(efi_call3)
22415
22416@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22417 call *%rdi
22418 addq $32, %rsp
22419 RESTORE_XMM
22420+ pax_force_retaddr
22421 ret
22422 ENDPROC(efi_call4)
22423
22424@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22425 call *%rdi
22426 addq $48, %rsp
22427 RESTORE_XMM
22428+ pax_force_retaddr
22429 ret
22430 ENDPROC(efi_call5)
22431
22432@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22433 call *%rdi
22434 addq $48, %rsp
22435 RESTORE_XMM
22436+ pax_force_retaddr
22437 ret
22438 ENDPROC(efi_call6)
22439diff -urNp linux-3.0.7/arch/x86/platform/mrst/mrst.c linux-3.0.7/arch/x86/platform/mrst/mrst.c
22440--- linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22441+++ linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22442@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22443 }
22444
22445 /* Reboot and power off are handled by the SCU on a MID device */
22446-static void mrst_power_off(void)
22447+static __noreturn void mrst_power_off(void)
22448 {
22449 intel_scu_ipc_simple_command(0xf1, 1);
22450+ BUG();
22451 }
22452
22453-static void mrst_reboot(void)
22454+static __noreturn void mrst_reboot(void)
22455 {
22456 intel_scu_ipc_simple_command(0xf1, 0);
22457+ BUG();
22458 }
22459
22460 /*
22461diff -urNp linux-3.0.7/arch/x86/platform/uv/tlb_uv.c linux-3.0.7/arch/x86/platform/uv/tlb_uv.c
22462--- linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22463+++ linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22464@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22465 cpumask_t mask;
22466 struct reset_args reset_args;
22467
22468+ pax_track_stack();
22469+
22470 reset_args.sender = sender;
22471 cpus_clear(mask);
22472 /* find a single cpu for each uvhub in this distribution mask */
22473diff -urNp linux-3.0.7/arch/x86/power/cpu.c linux-3.0.7/arch/x86/power/cpu.c
22474--- linux-3.0.7/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22475+++ linux-3.0.7/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22476@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22477 static void fix_processor_context(void)
22478 {
22479 int cpu = smp_processor_id();
22480- struct tss_struct *t = &per_cpu(init_tss, cpu);
22481+ struct tss_struct *t = init_tss + cpu;
22482
22483 set_tss_desc(cpu, t); /*
22484 * This just modifies memory; should not be
22485@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22486 */
22487
22488 #ifdef CONFIG_X86_64
22489+ pax_open_kernel();
22490 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22491+ pax_close_kernel();
22492
22493 syscall_init(); /* This sets MSR_*STAR and related */
22494 #endif
22495diff -urNp linux-3.0.7/arch/x86/vdso/Makefile linux-3.0.7/arch/x86/vdso/Makefile
22496--- linux-3.0.7/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22497+++ linux-3.0.7/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22498@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22499 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22500 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22501
22502-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22503+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22504 GCOV_PROFILE := n
22505
22506 #
22507diff -urNp linux-3.0.7/arch/x86/vdso/vdso32-setup.c linux-3.0.7/arch/x86/vdso/vdso32-setup.c
22508--- linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22509+++ linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22510@@ -25,6 +25,7 @@
22511 #include <asm/tlbflush.h>
22512 #include <asm/vdso.h>
22513 #include <asm/proto.h>
22514+#include <asm/mman.h>
22515
22516 enum {
22517 VDSO_DISABLED = 0,
22518@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22519 void enable_sep_cpu(void)
22520 {
22521 int cpu = get_cpu();
22522- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22523+ struct tss_struct *tss = init_tss + cpu;
22524
22525 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22526 put_cpu();
22527@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22528 gate_vma.vm_start = FIXADDR_USER_START;
22529 gate_vma.vm_end = FIXADDR_USER_END;
22530 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22531- gate_vma.vm_page_prot = __P101;
22532+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22533 /*
22534 * Make sure the vDSO gets into every core dump.
22535 * Dumping its contents makes post-mortem fully interpretable later
22536@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22537 if (compat)
22538 addr = VDSO_HIGH_BASE;
22539 else {
22540- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22541+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22542 if (IS_ERR_VALUE(addr)) {
22543 ret = addr;
22544 goto up_fail;
22545 }
22546 }
22547
22548- current->mm->context.vdso = (void *)addr;
22549+ current->mm->context.vdso = addr;
22550
22551 if (compat_uses_vma || !compat) {
22552 /*
22553@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22554 }
22555
22556 current_thread_info()->sysenter_return =
22557- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22558+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22559
22560 up_fail:
22561 if (ret)
22562- current->mm->context.vdso = NULL;
22563+ current->mm->context.vdso = 0;
22564
22565 up_write(&mm->mmap_sem);
22566
22567@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22568
22569 const char *arch_vma_name(struct vm_area_struct *vma)
22570 {
22571- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22572+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22573 return "[vdso]";
22574+
22575+#ifdef CONFIG_PAX_SEGMEXEC
22576+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22577+ return "[vdso]";
22578+#endif
22579+
22580 return NULL;
22581 }
22582
22583@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22584 * Check to see if the corresponding task was created in compat vdso
22585 * mode.
22586 */
22587- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22588+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22589 return &gate_vma;
22590 return NULL;
22591 }
22592diff -urNp linux-3.0.7/arch/x86/vdso/vma.c linux-3.0.7/arch/x86/vdso/vma.c
22593--- linux-3.0.7/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22594+++ linux-3.0.7/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22595@@ -15,18 +15,19 @@
22596 #include <asm/proto.h>
22597 #include <asm/vdso.h>
22598
22599-unsigned int __read_mostly vdso_enabled = 1;
22600-
22601 extern char vdso_start[], vdso_end[];
22602 extern unsigned short vdso_sync_cpuid;
22603+extern char __vsyscall_0;
22604
22605 static struct page **vdso_pages;
22606+static struct page *vsyscall_page;
22607 static unsigned vdso_size;
22608
22609 static int __init init_vdso_vars(void)
22610 {
22611- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22612- int i;
22613+ size_t nbytes = vdso_end - vdso_start;
22614+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22615+ size_t i;
22616
22617 vdso_size = npages << PAGE_SHIFT;
22618 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22619@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22620 goto oom;
22621 for (i = 0; i < npages; i++) {
22622 struct page *p;
22623- p = alloc_page(GFP_KERNEL);
22624+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22625 if (!p)
22626 goto oom;
22627 vdso_pages[i] = p;
22628- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22629+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22630+ nbytes -= PAGE_SIZE;
22631 }
22632+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22633
22634 return 0;
22635
22636 oom:
22637- printk("Cannot allocate vdso\n");
22638- vdso_enabled = 0;
22639- return -ENOMEM;
22640+ panic("Cannot allocate vdso\n");
22641 }
22642 subsys_initcall(init_vdso_vars);
22643
22644@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22645 unsigned long addr;
22646 int ret;
22647
22648- if (!vdso_enabled)
22649- return 0;
22650-
22651 down_write(&mm->mmap_sem);
22652- addr = vdso_addr(mm->start_stack, vdso_size);
22653- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22654+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22655+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22656 if (IS_ERR_VALUE(addr)) {
22657 ret = addr;
22658 goto up_fail;
22659 }
22660
22661- current->mm->context.vdso = (void *)addr;
22662+ mm->context.vdso = addr + PAGE_SIZE;
22663
22664- ret = install_special_mapping(mm, addr, vdso_size,
22665+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
22666 VM_READ|VM_EXEC|
22667- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22668+ VM_MAYREAD|VM_MAYEXEC|
22669 VM_ALWAYSDUMP,
22670- vdso_pages);
22671+ &vsyscall_page);
22672 if (ret) {
22673- current->mm->context.vdso = NULL;
22674+ mm->context.vdso = 0;
22675 goto up_fail;
22676 }
22677
22678+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22679+ VM_READ|VM_EXEC|
22680+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22681+ VM_ALWAYSDUMP,
22682+ vdso_pages);
22683+ if (ret)
22684+ mm->context.vdso = 0;
22685+
22686 up_fail:
22687 up_write(&mm->mmap_sem);
22688 return ret;
22689 }
22690-
22691-static __init int vdso_setup(char *s)
22692-{
22693- vdso_enabled = simple_strtoul(s, NULL, 0);
22694- return 0;
22695-}
22696-__setup("vdso=", vdso_setup);
22697diff -urNp linux-3.0.7/arch/x86/xen/enlighten.c linux-3.0.7/arch/x86/xen/enlighten.c
22698--- linux-3.0.7/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22699+++ linux-3.0.7/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22700@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22701
22702 struct shared_info xen_dummy_shared_info;
22703
22704-void *xen_initial_gdt;
22705-
22706 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22707 __read_mostly int xen_have_vector_callback;
22708 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22709@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22710 #endif
22711 };
22712
22713-static void xen_reboot(int reason)
22714+static __noreturn void xen_reboot(int reason)
22715 {
22716 struct sched_shutdown r = { .reason = reason };
22717
22718@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22719 BUG();
22720 }
22721
22722-static void xen_restart(char *msg)
22723+static __noreturn void xen_restart(char *msg)
22724 {
22725 xen_reboot(SHUTDOWN_reboot);
22726 }
22727
22728-static void xen_emergency_restart(void)
22729+static __noreturn void xen_emergency_restart(void)
22730 {
22731 xen_reboot(SHUTDOWN_reboot);
22732 }
22733
22734-static void xen_machine_halt(void)
22735+static __noreturn void xen_machine_halt(void)
22736 {
22737 xen_reboot(SHUTDOWN_poweroff);
22738 }
22739@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22740 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22741
22742 /* Work out if we support NX */
22743- x86_configure_nx();
22744+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22745+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22746+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22747+ unsigned l, h;
22748+
22749+ __supported_pte_mask |= _PAGE_NX;
22750+ rdmsr(MSR_EFER, l, h);
22751+ l |= EFER_NX;
22752+ wrmsr(MSR_EFER, l, h);
22753+ }
22754+#endif
22755
22756 xen_setup_features();
22757
22758@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22759
22760 machine_ops = xen_machine_ops;
22761
22762- /*
22763- * The only reliable way to retain the initial address of the
22764- * percpu gdt_page is to remember it here, so we can go and
22765- * mark it RW later, when the initial percpu area is freed.
22766- */
22767- xen_initial_gdt = &per_cpu(gdt_page, 0);
22768-
22769 xen_smp_init();
22770
22771 #ifdef CONFIG_ACPI_NUMA
22772diff -urNp linux-3.0.7/arch/x86/xen/mmu.c linux-3.0.7/arch/x86/xen/mmu.c
22773--- linux-3.0.7/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22774+++ linux-3.0.7/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22775@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22776 convert_pfn_mfn(init_level4_pgt);
22777 convert_pfn_mfn(level3_ident_pgt);
22778 convert_pfn_mfn(level3_kernel_pgt);
22779+ convert_pfn_mfn(level3_vmalloc_pgt);
22780+ convert_pfn_mfn(level3_vmemmap_pgt);
22781
22782 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22783 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22784@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22785 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22786 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22787 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22788+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22789+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22790 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22791+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22792 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22793 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22794
22795@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22796 pv_mmu_ops.set_pud = xen_set_pud;
22797 #if PAGETABLE_LEVELS == 4
22798 pv_mmu_ops.set_pgd = xen_set_pgd;
22799+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22800 #endif
22801
22802 /* This will work as long as patching hasn't happened yet
22803@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22804 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22805 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22806 .set_pgd = xen_set_pgd_hyper,
22807+ .set_pgd_batched = xen_set_pgd_hyper,
22808
22809 .alloc_pud = xen_alloc_pmd_init,
22810 .release_pud = xen_release_pmd_init,
22811diff -urNp linux-3.0.7/arch/x86/xen/smp.c linux-3.0.7/arch/x86/xen/smp.c
22812--- linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:54:53.000000000 -0400
22813+++ linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:55:27.000000000 -0400
22814@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
22815 {
22816 BUG_ON(smp_processor_id() != 0);
22817 native_smp_prepare_boot_cpu();
22818-
22819- /* We've switched to the "real" per-cpu gdt, so make sure the
22820- old memory can be recycled */
22821- make_lowmem_page_readwrite(xen_initial_gdt);
22822-
22823 xen_filter_cpu_maps();
22824 xen_setup_vcpu_info_placement();
22825 }
22826@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
22827 gdt = get_cpu_gdt_table(cpu);
22828
22829 ctxt->flags = VGCF_IN_KERNEL;
22830- ctxt->user_regs.ds = __USER_DS;
22831- ctxt->user_regs.es = __USER_DS;
22832+ ctxt->user_regs.ds = __KERNEL_DS;
22833+ ctxt->user_regs.es = __KERNEL_DS;
22834 ctxt->user_regs.ss = __KERNEL_DS;
22835 #ifdef CONFIG_X86_32
22836 ctxt->user_regs.fs = __KERNEL_PERCPU;
22837- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22838+ savesegment(gs, ctxt->user_regs.gs);
22839 #else
22840 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22841 #endif
22842@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
22843 int rc;
22844
22845 per_cpu(current_task, cpu) = idle;
22846+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22847 #ifdef CONFIG_X86_32
22848 irq_ctx_init(cpu);
22849 #else
22850 clear_tsk_thread_flag(idle, TIF_FORK);
22851- per_cpu(kernel_stack, cpu) =
22852- (unsigned long)task_stack_page(idle) -
22853- KERNEL_STACK_OFFSET + THREAD_SIZE;
22854+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22855 #endif
22856 xen_setup_runstate_info(cpu);
22857 xen_setup_timer(cpu);
22858diff -urNp linux-3.0.7/arch/x86/xen/xen-asm_32.S linux-3.0.7/arch/x86/xen/xen-asm_32.S
22859--- linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:54:53.000000000 -0400
22860+++ linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:55:27.000000000 -0400
22861@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22862 ESP_OFFSET=4 # bytes pushed onto stack
22863
22864 /*
22865- * Store vcpu_info pointer for easy access. Do it this way to
22866- * avoid having to reload %fs
22867+ * Store vcpu_info pointer for easy access.
22868 */
22869 #ifdef CONFIG_SMP
22870- GET_THREAD_INFO(%eax)
22871- movl TI_cpu(%eax), %eax
22872- movl __per_cpu_offset(,%eax,4), %eax
22873- mov xen_vcpu(%eax), %eax
22874+ push %fs
22875+ mov $(__KERNEL_PERCPU), %eax
22876+ mov %eax, %fs
22877+ mov PER_CPU_VAR(xen_vcpu), %eax
22878+ pop %fs
22879 #else
22880 movl xen_vcpu, %eax
22881 #endif
22882diff -urNp linux-3.0.7/arch/x86/xen/xen-head.S linux-3.0.7/arch/x86/xen/xen-head.S
22883--- linux-3.0.7/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22884+++ linux-3.0.7/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22885@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22886 #ifdef CONFIG_X86_32
22887 mov %esi,xen_start_info
22888 mov $init_thread_union+THREAD_SIZE,%esp
22889+#ifdef CONFIG_SMP
22890+ movl $cpu_gdt_table,%edi
22891+ movl $__per_cpu_load,%eax
22892+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22893+ rorl $16,%eax
22894+ movb %al,__KERNEL_PERCPU + 4(%edi)
22895+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22896+ movl $__per_cpu_end - 1,%eax
22897+ subl $__per_cpu_start,%eax
22898+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22899+#endif
22900 #else
22901 mov %rsi,xen_start_info
22902 mov $init_thread_union+THREAD_SIZE,%rsp
22903diff -urNp linux-3.0.7/arch/x86/xen/xen-ops.h linux-3.0.7/arch/x86/xen/xen-ops.h
22904--- linux-3.0.7/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22905+++ linux-3.0.7/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22906@@ -10,8 +10,6 @@
22907 extern const char xen_hypervisor_callback[];
22908 extern const char xen_failsafe_callback[];
22909
22910-extern void *xen_initial_gdt;
22911-
22912 struct trap_info;
22913 void xen_copy_trap_info(struct trap_info *traps);
22914
22915diff -urNp linux-3.0.7/block/blk-iopoll.c linux-3.0.7/block/blk-iopoll.c
22916--- linux-3.0.7/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22917+++ linux-3.0.7/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22918@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22919 }
22920 EXPORT_SYMBOL(blk_iopoll_complete);
22921
22922-static void blk_iopoll_softirq(struct softirq_action *h)
22923+static void blk_iopoll_softirq(void)
22924 {
22925 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22926 int rearm = 0, budget = blk_iopoll_budget;
22927diff -urNp linux-3.0.7/block/blk-map.c linux-3.0.7/block/blk-map.c
22928--- linux-3.0.7/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22929+++ linux-3.0.7/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22930@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22931 if (!len || !kbuf)
22932 return -EINVAL;
22933
22934- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22935+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22936 if (do_copy)
22937 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22938 else
22939diff -urNp linux-3.0.7/block/blk-softirq.c linux-3.0.7/block/blk-softirq.c
22940--- linux-3.0.7/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22941+++ linux-3.0.7/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22942@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22943 * Softirq action handler - move entries to local list and loop over them
22944 * while passing them to the queue registered handler.
22945 */
22946-static void blk_done_softirq(struct softirq_action *h)
22947+static void blk_done_softirq(void)
22948 {
22949 struct list_head *cpu_list, local_list;
22950
22951diff -urNp linux-3.0.7/block/bsg.c linux-3.0.7/block/bsg.c
22952--- linux-3.0.7/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22953+++ linux-3.0.7/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22954@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22955 struct sg_io_v4 *hdr, struct bsg_device *bd,
22956 fmode_t has_write_perm)
22957 {
22958+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22959+ unsigned char *cmdptr;
22960+
22961 if (hdr->request_len > BLK_MAX_CDB) {
22962 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22963 if (!rq->cmd)
22964 return -ENOMEM;
22965- }
22966+ cmdptr = rq->cmd;
22967+ } else
22968+ cmdptr = tmpcmd;
22969
22970- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22971+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22972 hdr->request_len))
22973 return -EFAULT;
22974
22975+ if (cmdptr != rq->cmd)
22976+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22977+
22978 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22979 if (blk_verify_command(rq->cmd, has_write_perm))
22980 return -EPERM;
22981@@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22982 struct request *rq, *next_rq = NULL;
22983 int ret, rw;
22984 unsigned int dxfer_len;
22985- void *dxferp = NULL;
22986+ void __user *dxferp = NULL;
22987 struct bsg_class_device *bcd = &q->bsg_dev;
22988
22989 /* if the LLD has been removed then the bsg_unregister_queue will
22990@@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22991 rq->next_rq = next_rq;
22992 next_rq->cmd_type = rq->cmd_type;
22993
22994- dxferp = (void*)(unsigned long)hdr->din_xferp;
22995+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22996 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22997 hdr->din_xfer_len, GFP_KERNEL);
22998 if (ret)
22999@@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
23000
23001 if (hdr->dout_xfer_len) {
23002 dxfer_len = hdr->dout_xfer_len;
23003- dxferp = (void*)(unsigned long)hdr->dout_xferp;
23004+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
23005 } else if (hdr->din_xfer_len) {
23006 dxfer_len = hdr->din_xfer_len;
23007- dxferp = (void*)(unsigned long)hdr->din_xferp;
23008+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
23009 } else
23010 dxfer_len = 0;
23011
23012@@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
23013 int len = min_t(unsigned int, hdr->max_response_len,
23014 rq->sense_len);
23015
23016- ret = copy_to_user((void*)(unsigned long)hdr->response,
23017+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
23018 rq->sense, len);
23019 if (!ret)
23020 hdr->response_len = len;
23021diff -urNp linux-3.0.7/block/compat_ioctl.c linux-3.0.7/block/compat_ioctl.c
23022--- linux-3.0.7/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
23023+++ linux-3.0.7/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
23024@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
23025 err |= __get_user(f->spec1, &uf->spec1);
23026 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23027 err |= __get_user(name, &uf->name);
23028- f->name = compat_ptr(name);
23029+ f->name = (void __force_kernel *)compat_ptr(name);
23030 if (err) {
23031 err = -EFAULT;
23032 goto out;
23033diff -urNp linux-3.0.7/block/scsi_ioctl.c linux-3.0.7/block/scsi_ioctl.c
23034--- linux-3.0.7/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
23035+++ linux-3.0.7/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
23036@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23037 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23038 struct sg_io_hdr *hdr, fmode_t mode)
23039 {
23040- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23041+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23042+ unsigned char *cmdptr;
23043+
23044+ if (rq->cmd != rq->__cmd)
23045+ cmdptr = rq->cmd;
23046+ else
23047+ cmdptr = tmpcmd;
23048+
23049+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23050 return -EFAULT;
23051+
23052+ if (cmdptr != rq->cmd)
23053+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23054+
23055 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23056 return -EPERM;
23057
23058@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23059 int err;
23060 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23061 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23062+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23063+ unsigned char *cmdptr;
23064
23065 if (!sic)
23066 return -EINVAL;
23067@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23068 */
23069 err = -EFAULT;
23070 rq->cmd_len = cmdlen;
23071- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23072+
23073+ if (rq->cmd != rq->__cmd)
23074+ cmdptr = rq->cmd;
23075+ else
23076+ cmdptr = tmpcmd;
23077+
23078+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23079 goto error;
23080
23081+ if (rq->cmd != cmdptr)
23082+ memcpy(rq->cmd, cmdptr, cmdlen);
23083+
23084 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23085 goto error;
23086
23087diff -urNp linux-3.0.7/crypto/cryptd.c linux-3.0.7/crypto/cryptd.c
23088--- linux-3.0.7/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
23089+++ linux-3.0.7/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
23090@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23091
23092 struct cryptd_blkcipher_request_ctx {
23093 crypto_completion_t complete;
23094-};
23095+} __no_const;
23096
23097 struct cryptd_hash_ctx {
23098 struct crypto_shash *child;
23099@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23100
23101 struct cryptd_aead_request_ctx {
23102 crypto_completion_t complete;
23103-};
23104+} __no_const;
23105
23106 static void cryptd_queue_worker(struct work_struct *work);
23107
23108diff -urNp linux-3.0.7/crypto/gf128mul.c linux-3.0.7/crypto/gf128mul.c
23109--- linux-3.0.7/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
23110+++ linux-3.0.7/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
23111@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23112 for (i = 0; i < 7; ++i)
23113 gf128mul_x_lle(&p[i + 1], &p[i]);
23114
23115- memset(r, 0, sizeof(r));
23116+ memset(r, 0, sizeof(*r));
23117 for (i = 0;;) {
23118 u8 ch = ((u8 *)b)[15 - i];
23119
23120@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23121 for (i = 0; i < 7; ++i)
23122 gf128mul_x_bbe(&p[i + 1], &p[i]);
23123
23124- memset(r, 0, sizeof(r));
23125+ memset(r, 0, sizeof(*r));
23126 for (i = 0;;) {
23127 u8 ch = ((u8 *)b)[i];
23128
23129diff -urNp linux-3.0.7/crypto/serpent.c linux-3.0.7/crypto/serpent.c
23130--- linux-3.0.7/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
23131+++ linux-3.0.7/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
23132@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23133 u32 r0,r1,r2,r3,r4;
23134 int i;
23135
23136+ pax_track_stack();
23137+
23138 /* Copy key, add padding */
23139
23140 for (i = 0; i < keylen; ++i)
23141diff -urNp linux-3.0.7/drivers/acpi/apei/cper.c linux-3.0.7/drivers/acpi/apei/cper.c
23142--- linux-3.0.7/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
23143+++ linux-3.0.7/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
23144@@ -38,12 +38,12 @@
23145 */
23146 u64 cper_next_record_id(void)
23147 {
23148- static atomic64_t seq;
23149+ static atomic64_unchecked_t seq;
23150
23151- if (!atomic64_read(&seq))
23152- atomic64_set(&seq, ((u64)get_seconds()) << 32);
23153+ if (!atomic64_read_unchecked(&seq))
23154+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23155
23156- return atomic64_inc_return(&seq);
23157+ return atomic64_inc_return_unchecked(&seq);
23158 }
23159 EXPORT_SYMBOL_GPL(cper_next_record_id);
23160
23161diff -urNp linux-3.0.7/drivers/acpi/ec_sys.c linux-3.0.7/drivers/acpi/ec_sys.c
23162--- linux-3.0.7/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
23163+++ linux-3.0.7/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
23164@@ -11,6 +11,7 @@
23165 #include <linux/kernel.h>
23166 #include <linux/acpi.h>
23167 #include <linux/debugfs.h>
23168+#include <asm/uaccess.h>
23169 #include "internal.h"
23170
23171 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
23172@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23173 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23174 */
23175 unsigned int size = EC_SPACE_SIZE;
23176- u8 *data = (u8 *) buf;
23177+ u8 data;
23178 loff_t init_off = *off;
23179 int err = 0;
23180
23181@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23182 size = count;
23183
23184 while (size) {
23185- err = ec_read(*off, &data[*off - init_off]);
23186+ err = ec_read(*off, &data);
23187 if (err)
23188 return err;
23189+ if (put_user(data, &buf[*off - init_off]))
23190+ return -EFAULT;
23191 *off += 1;
23192 size--;
23193 }
23194@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23195
23196 unsigned int size = count;
23197 loff_t init_off = *off;
23198- u8 *data = (u8 *) buf;
23199 int err = 0;
23200
23201 if (*off >= EC_SPACE_SIZE)
23202@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23203 }
23204
23205 while (size) {
23206- u8 byte_write = data[*off - init_off];
23207+ u8 byte_write;
23208+ if (get_user(byte_write, &buf[*off - init_off]))
23209+ return -EFAULT;
23210 err = ec_write(*off, byte_write);
23211 if (err)
23212 return err;
23213diff -urNp linux-3.0.7/drivers/acpi/proc.c linux-3.0.7/drivers/acpi/proc.c
23214--- linux-3.0.7/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
23215+++ linux-3.0.7/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
23216@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23217 size_t count, loff_t * ppos)
23218 {
23219 struct list_head *node, *next;
23220- char strbuf[5];
23221- char str[5] = "";
23222- unsigned int len = count;
23223-
23224- if (len > 4)
23225- len = 4;
23226- if (len < 0)
23227- return -EFAULT;
23228+ char strbuf[5] = {0};
23229
23230- if (copy_from_user(strbuf, buffer, len))
23231+ if (count > 4)
23232+ count = 4;
23233+ if (copy_from_user(strbuf, buffer, count))
23234 return -EFAULT;
23235- strbuf[len] = '\0';
23236- sscanf(strbuf, "%s", str);
23237+ strbuf[count] = '\0';
23238
23239 mutex_lock(&acpi_device_lock);
23240 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23241@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23242 if (!dev->wakeup.flags.valid)
23243 continue;
23244
23245- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23246+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23247 if (device_can_wakeup(&dev->dev)) {
23248 bool enable = !device_may_wakeup(&dev->dev);
23249 device_set_wakeup_enable(&dev->dev, enable);
23250diff -urNp linux-3.0.7/drivers/acpi/processor_driver.c linux-3.0.7/drivers/acpi/processor_driver.c
23251--- linux-3.0.7/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23252+++ linux-3.0.7/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23253@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23254 return 0;
23255 #endif
23256
23257- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23258+ BUG_ON(pr->id >= nr_cpu_ids);
23259
23260 /*
23261 * Buggy BIOS check
23262diff -urNp linux-3.0.7/drivers/ata/libata-core.c linux-3.0.7/drivers/ata/libata-core.c
23263--- linux-3.0.7/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23264+++ linux-3.0.7/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23265@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23266 struct ata_port *ap;
23267 unsigned int tag;
23268
23269- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23270+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23271 ap = qc->ap;
23272
23273 qc->flags = 0;
23274@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23275 struct ata_port *ap;
23276 struct ata_link *link;
23277
23278- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23279+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23280 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23281 ap = qc->ap;
23282 link = qc->dev->link;
23283@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23284 return;
23285
23286 spin_lock(&lock);
23287+ pax_open_kernel();
23288
23289 for (cur = ops->inherits; cur; cur = cur->inherits) {
23290 void **inherit = (void **)cur;
23291@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23292 if (IS_ERR(*pp))
23293 *pp = NULL;
23294
23295- ops->inherits = NULL;
23296+ *(struct ata_port_operations **)&ops->inherits = NULL;
23297
23298+ pax_close_kernel();
23299 spin_unlock(&lock);
23300 }
23301
23302diff -urNp linux-3.0.7/drivers/ata/libata-eh.c linux-3.0.7/drivers/ata/libata-eh.c
23303--- linux-3.0.7/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23304+++ linux-3.0.7/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23305@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23306 {
23307 struct ata_link *link;
23308
23309+ pax_track_stack();
23310+
23311 ata_for_each_link(link, ap, HOST_FIRST)
23312 ata_eh_link_report(link);
23313 }
23314diff -urNp linux-3.0.7/drivers/ata/pata_arasan_cf.c linux-3.0.7/drivers/ata/pata_arasan_cf.c
23315--- linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23316+++ linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23317@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23318 /* Handle platform specific quirks */
23319 if (pdata->quirk) {
23320 if (pdata->quirk & CF_BROKEN_PIO) {
23321- ap->ops->set_piomode = NULL;
23322+ pax_open_kernel();
23323+ *(void **)&ap->ops->set_piomode = NULL;
23324+ pax_close_kernel();
23325 ap->pio_mask = 0;
23326 }
23327 if (pdata->quirk & CF_BROKEN_MWDMA)
23328diff -urNp linux-3.0.7/drivers/atm/adummy.c linux-3.0.7/drivers/atm/adummy.c
23329--- linux-3.0.7/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23330+++ linux-3.0.7/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23331@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23332 vcc->pop(vcc, skb);
23333 else
23334 dev_kfree_skb_any(skb);
23335- atomic_inc(&vcc->stats->tx);
23336+ atomic_inc_unchecked(&vcc->stats->tx);
23337
23338 return 0;
23339 }
23340diff -urNp linux-3.0.7/drivers/atm/ambassador.c linux-3.0.7/drivers/atm/ambassador.c
23341--- linux-3.0.7/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23342+++ linux-3.0.7/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23343@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23344 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23345
23346 // VC layer stats
23347- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23348+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23349
23350 // free the descriptor
23351 kfree (tx_descr);
23352@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23353 dump_skb ("<<<", vc, skb);
23354
23355 // VC layer stats
23356- atomic_inc(&atm_vcc->stats->rx);
23357+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23358 __net_timestamp(skb);
23359 // end of our responsibility
23360 atm_vcc->push (atm_vcc, skb);
23361@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23362 } else {
23363 PRINTK (KERN_INFO, "dropped over-size frame");
23364 // should we count this?
23365- atomic_inc(&atm_vcc->stats->rx_drop);
23366+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23367 }
23368
23369 } else {
23370@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23371 }
23372
23373 if (check_area (skb->data, skb->len)) {
23374- atomic_inc(&atm_vcc->stats->tx_err);
23375+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23376 return -ENOMEM; // ?
23377 }
23378
23379diff -urNp linux-3.0.7/drivers/atm/atmtcp.c linux-3.0.7/drivers/atm/atmtcp.c
23380--- linux-3.0.7/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23381+++ linux-3.0.7/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23382@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23383 if (vcc->pop) vcc->pop(vcc,skb);
23384 else dev_kfree_skb(skb);
23385 if (dev_data) return 0;
23386- atomic_inc(&vcc->stats->tx_err);
23387+ atomic_inc_unchecked(&vcc->stats->tx_err);
23388 return -ENOLINK;
23389 }
23390 size = skb->len+sizeof(struct atmtcp_hdr);
23391@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23392 if (!new_skb) {
23393 if (vcc->pop) vcc->pop(vcc,skb);
23394 else dev_kfree_skb(skb);
23395- atomic_inc(&vcc->stats->tx_err);
23396+ atomic_inc_unchecked(&vcc->stats->tx_err);
23397 return -ENOBUFS;
23398 }
23399 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23400@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23401 if (vcc->pop) vcc->pop(vcc,skb);
23402 else dev_kfree_skb(skb);
23403 out_vcc->push(out_vcc,new_skb);
23404- atomic_inc(&vcc->stats->tx);
23405- atomic_inc(&out_vcc->stats->rx);
23406+ atomic_inc_unchecked(&vcc->stats->tx);
23407+ atomic_inc_unchecked(&out_vcc->stats->rx);
23408 return 0;
23409 }
23410
23411@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23412 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23413 read_unlock(&vcc_sklist_lock);
23414 if (!out_vcc) {
23415- atomic_inc(&vcc->stats->tx_err);
23416+ atomic_inc_unchecked(&vcc->stats->tx_err);
23417 goto done;
23418 }
23419 skb_pull(skb,sizeof(struct atmtcp_hdr));
23420@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23421 __net_timestamp(new_skb);
23422 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23423 out_vcc->push(out_vcc,new_skb);
23424- atomic_inc(&vcc->stats->tx);
23425- atomic_inc(&out_vcc->stats->rx);
23426+ atomic_inc_unchecked(&vcc->stats->tx);
23427+ atomic_inc_unchecked(&out_vcc->stats->rx);
23428 done:
23429 if (vcc->pop) vcc->pop(vcc,skb);
23430 else dev_kfree_skb(skb);
23431diff -urNp linux-3.0.7/drivers/atm/eni.c linux-3.0.7/drivers/atm/eni.c
23432--- linux-3.0.7/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23433+++ linux-3.0.7/drivers/atm/eni.c 2011-10-11 10:44:33.000000000 -0400
23434@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23435 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23436 vcc->dev->number);
23437 length = 0;
23438- atomic_inc(&vcc->stats->rx_err);
23439+ atomic_inc_unchecked(&vcc->stats->rx_err);
23440 }
23441 else {
23442 length = ATM_CELL_SIZE-1; /* no HEC */
23443@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23444 size);
23445 }
23446 eff = length = 0;
23447- atomic_inc(&vcc->stats->rx_err);
23448+ atomic_inc_unchecked(&vcc->stats->rx_err);
23449 }
23450 else {
23451 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23452@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23453 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23454 vcc->dev->number,vcc->vci,length,size << 2,descr);
23455 length = eff = 0;
23456- atomic_inc(&vcc->stats->rx_err);
23457+ atomic_inc_unchecked(&vcc->stats->rx_err);
23458 }
23459 }
23460 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23461@@ -771,7 +771,7 @@ rx_dequeued++;
23462 vcc->push(vcc,skb);
23463 pushed++;
23464 }
23465- atomic_inc(&vcc->stats->rx);
23466+ atomic_inc_unchecked(&vcc->stats->rx);
23467 }
23468 wake_up(&eni_dev->rx_wait);
23469 }
23470@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23471 PCI_DMA_TODEVICE);
23472 if (vcc->pop) vcc->pop(vcc,skb);
23473 else dev_kfree_skb_irq(skb);
23474- atomic_inc(&vcc->stats->tx);
23475+ atomic_inc_unchecked(&vcc->stats->tx);
23476 wake_up(&eni_dev->tx_wait);
23477 dma_complete++;
23478 }
23479@@ -1568,7 +1568,7 @@ tx_complete++;
23480 /*--------------------------------- entries ---------------------------------*/
23481
23482
23483-static const char *media_name[] __devinitdata = {
23484+static const char *media_name[] __devinitconst = {
23485 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23486 "UTP", "05?", "06?", "07?", /* 4- 7 */
23487 "TAXI","09?", "10?", "11?", /* 8-11 */
23488diff -urNp linux-3.0.7/drivers/atm/firestream.c linux-3.0.7/drivers/atm/firestream.c
23489--- linux-3.0.7/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23490+++ linux-3.0.7/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23491@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23492 }
23493 }
23494
23495- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23496+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23497
23498 fs_dprintk (FS_DEBUG_TXMEM, "i");
23499 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23500@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23501 #endif
23502 skb_put (skb, qe->p1 & 0xffff);
23503 ATM_SKB(skb)->vcc = atm_vcc;
23504- atomic_inc(&atm_vcc->stats->rx);
23505+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23506 __net_timestamp(skb);
23507 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23508 atm_vcc->push (atm_vcc, skb);
23509@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23510 kfree (pe);
23511 }
23512 if (atm_vcc)
23513- atomic_inc(&atm_vcc->stats->rx_drop);
23514+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23515 break;
23516 case 0x1f: /* Reassembly abort: no buffers. */
23517 /* Silently increment error counter. */
23518 if (atm_vcc)
23519- atomic_inc(&atm_vcc->stats->rx_drop);
23520+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23521 break;
23522 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23523 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23524diff -urNp linux-3.0.7/drivers/atm/fore200e.c linux-3.0.7/drivers/atm/fore200e.c
23525--- linux-3.0.7/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23526+++ linux-3.0.7/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23527@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23528 #endif
23529 /* check error condition */
23530 if (*entry->status & STATUS_ERROR)
23531- atomic_inc(&vcc->stats->tx_err);
23532+ atomic_inc_unchecked(&vcc->stats->tx_err);
23533 else
23534- atomic_inc(&vcc->stats->tx);
23535+ atomic_inc_unchecked(&vcc->stats->tx);
23536 }
23537 }
23538
23539@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23540 if (skb == NULL) {
23541 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23542
23543- atomic_inc(&vcc->stats->rx_drop);
23544+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23545 return -ENOMEM;
23546 }
23547
23548@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23549
23550 dev_kfree_skb_any(skb);
23551
23552- atomic_inc(&vcc->stats->rx_drop);
23553+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23554 return -ENOMEM;
23555 }
23556
23557 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23558
23559 vcc->push(vcc, skb);
23560- atomic_inc(&vcc->stats->rx);
23561+ atomic_inc_unchecked(&vcc->stats->rx);
23562
23563 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23564
23565@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23566 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23567 fore200e->atm_dev->number,
23568 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23569- atomic_inc(&vcc->stats->rx_err);
23570+ atomic_inc_unchecked(&vcc->stats->rx_err);
23571 }
23572 }
23573
23574@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23575 goto retry_here;
23576 }
23577
23578- atomic_inc(&vcc->stats->tx_err);
23579+ atomic_inc_unchecked(&vcc->stats->tx_err);
23580
23581 fore200e->tx_sat++;
23582 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23583diff -urNp linux-3.0.7/drivers/atm/he.c linux-3.0.7/drivers/atm/he.c
23584--- linux-3.0.7/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23585+++ linux-3.0.7/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23586@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23587
23588 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23589 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23590- atomic_inc(&vcc->stats->rx_drop);
23591+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23592 goto return_host_buffers;
23593 }
23594
23595@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23596 RBRQ_LEN_ERR(he_dev->rbrq_head)
23597 ? "LEN_ERR" : "",
23598 vcc->vpi, vcc->vci);
23599- atomic_inc(&vcc->stats->rx_err);
23600+ atomic_inc_unchecked(&vcc->stats->rx_err);
23601 goto return_host_buffers;
23602 }
23603
23604@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23605 vcc->push(vcc, skb);
23606 spin_lock(&he_dev->global_lock);
23607
23608- atomic_inc(&vcc->stats->rx);
23609+ atomic_inc_unchecked(&vcc->stats->rx);
23610
23611 return_host_buffers:
23612 ++pdus_assembled;
23613@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23614 tpd->vcc->pop(tpd->vcc, tpd->skb);
23615 else
23616 dev_kfree_skb_any(tpd->skb);
23617- atomic_inc(&tpd->vcc->stats->tx_err);
23618+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23619 }
23620 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23621 return;
23622@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23623 vcc->pop(vcc, skb);
23624 else
23625 dev_kfree_skb_any(skb);
23626- atomic_inc(&vcc->stats->tx_err);
23627+ atomic_inc_unchecked(&vcc->stats->tx_err);
23628 return -EINVAL;
23629 }
23630
23631@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23632 vcc->pop(vcc, skb);
23633 else
23634 dev_kfree_skb_any(skb);
23635- atomic_inc(&vcc->stats->tx_err);
23636+ atomic_inc_unchecked(&vcc->stats->tx_err);
23637 return -EINVAL;
23638 }
23639 #endif
23640@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23641 vcc->pop(vcc, skb);
23642 else
23643 dev_kfree_skb_any(skb);
23644- atomic_inc(&vcc->stats->tx_err);
23645+ atomic_inc_unchecked(&vcc->stats->tx_err);
23646 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23647 return -ENOMEM;
23648 }
23649@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23650 vcc->pop(vcc, skb);
23651 else
23652 dev_kfree_skb_any(skb);
23653- atomic_inc(&vcc->stats->tx_err);
23654+ atomic_inc_unchecked(&vcc->stats->tx_err);
23655 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23656 return -ENOMEM;
23657 }
23658@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23659 __enqueue_tpd(he_dev, tpd, cid);
23660 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23661
23662- atomic_inc(&vcc->stats->tx);
23663+ atomic_inc_unchecked(&vcc->stats->tx);
23664
23665 return 0;
23666 }
23667diff -urNp linux-3.0.7/drivers/atm/horizon.c linux-3.0.7/drivers/atm/horizon.c
23668--- linux-3.0.7/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23669+++ linux-3.0.7/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23670@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23671 {
23672 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23673 // VC layer stats
23674- atomic_inc(&vcc->stats->rx);
23675+ atomic_inc_unchecked(&vcc->stats->rx);
23676 __net_timestamp(skb);
23677 // end of our responsibility
23678 vcc->push (vcc, skb);
23679@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23680 dev->tx_iovec = NULL;
23681
23682 // VC layer stats
23683- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23684+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23685
23686 // free the skb
23687 hrz_kfree_skb (skb);
23688diff -urNp linux-3.0.7/drivers/atm/idt77252.c linux-3.0.7/drivers/atm/idt77252.c
23689--- linux-3.0.7/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23690+++ linux-3.0.7/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23691@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23692 else
23693 dev_kfree_skb(skb);
23694
23695- atomic_inc(&vcc->stats->tx);
23696+ atomic_inc_unchecked(&vcc->stats->tx);
23697 }
23698
23699 atomic_dec(&scq->used);
23700@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23701 if ((sb = dev_alloc_skb(64)) == NULL) {
23702 printk("%s: Can't allocate buffers for aal0.\n",
23703 card->name);
23704- atomic_add(i, &vcc->stats->rx_drop);
23705+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23706 break;
23707 }
23708 if (!atm_charge(vcc, sb->truesize)) {
23709 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23710 card->name);
23711- atomic_add(i - 1, &vcc->stats->rx_drop);
23712+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23713 dev_kfree_skb(sb);
23714 break;
23715 }
23716@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23717 ATM_SKB(sb)->vcc = vcc;
23718 __net_timestamp(sb);
23719 vcc->push(vcc, sb);
23720- atomic_inc(&vcc->stats->rx);
23721+ atomic_inc_unchecked(&vcc->stats->rx);
23722
23723 cell += ATM_CELL_PAYLOAD;
23724 }
23725@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23726 "(CDC: %08x)\n",
23727 card->name, len, rpp->len, readl(SAR_REG_CDC));
23728 recycle_rx_pool_skb(card, rpp);
23729- atomic_inc(&vcc->stats->rx_err);
23730+ atomic_inc_unchecked(&vcc->stats->rx_err);
23731 return;
23732 }
23733 if (stat & SAR_RSQE_CRC) {
23734 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23735 recycle_rx_pool_skb(card, rpp);
23736- atomic_inc(&vcc->stats->rx_err);
23737+ atomic_inc_unchecked(&vcc->stats->rx_err);
23738 return;
23739 }
23740 if (skb_queue_len(&rpp->queue) > 1) {
23741@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23742 RXPRINTK("%s: Can't alloc RX skb.\n",
23743 card->name);
23744 recycle_rx_pool_skb(card, rpp);
23745- atomic_inc(&vcc->stats->rx_err);
23746+ atomic_inc_unchecked(&vcc->stats->rx_err);
23747 return;
23748 }
23749 if (!atm_charge(vcc, skb->truesize)) {
23750@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23751 __net_timestamp(skb);
23752
23753 vcc->push(vcc, skb);
23754- atomic_inc(&vcc->stats->rx);
23755+ atomic_inc_unchecked(&vcc->stats->rx);
23756
23757 return;
23758 }
23759@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23760 __net_timestamp(skb);
23761
23762 vcc->push(vcc, skb);
23763- atomic_inc(&vcc->stats->rx);
23764+ atomic_inc_unchecked(&vcc->stats->rx);
23765
23766 if (skb->truesize > SAR_FB_SIZE_3)
23767 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23768@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23769 if (vcc->qos.aal != ATM_AAL0) {
23770 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23771 card->name, vpi, vci);
23772- atomic_inc(&vcc->stats->rx_drop);
23773+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23774 goto drop;
23775 }
23776
23777 if ((sb = dev_alloc_skb(64)) == NULL) {
23778 printk("%s: Can't allocate buffers for AAL0.\n",
23779 card->name);
23780- atomic_inc(&vcc->stats->rx_err);
23781+ atomic_inc_unchecked(&vcc->stats->rx_err);
23782 goto drop;
23783 }
23784
23785@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23786 ATM_SKB(sb)->vcc = vcc;
23787 __net_timestamp(sb);
23788 vcc->push(vcc, sb);
23789- atomic_inc(&vcc->stats->rx);
23790+ atomic_inc_unchecked(&vcc->stats->rx);
23791
23792 drop:
23793 skb_pull(queue, 64);
23794@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23795
23796 if (vc == NULL) {
23797 printk("%s: NULL connection in send().\n", card->name);
23798- atomic_inc(&vcc->stats->tx_err);
23799+ atomic_inc_unchecked(&vcc->stats->tx_err);
23800 dev_kfree_skb(skb);
23801 return -EINVAL;
23802 }
23803 if (!test_bit(VCF_TX, &vc->flags)) {
23804 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23805- atomic_inc(&vcc->stats->tx_err);
23806+ atomic_inc_unchecked(&vcc->stats->tx_err);
23807 dev_kfree_skb(skb);
23808 return -EINVAL;
23809 }
23810@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23811 break;
23812 default:
23813 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23814- atomic_inc(&vcc->stats->tx_err);
23815+ atomic_inc_unchecked(&vcc->stats->tx_err);
23816 dev_kfree_skb(skb);
23817 return -EINVAL;
23818 }
23819
23820 if (skb_shinfo(skb)->nr_frags != 0) {
23821 printk("%s: No scatter-gather yet.\n", card->name);
23822- atomic_inc(&vcc->stats->tx_err);
23823+ atomic_inc_unchecked(&vcc->stats->tx_err);
23824 dev_kfree_skb(skb);
23825 return -EINVAL;
23826 }
23827@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23828
23829 err = queue_skb(card, vc, skb, oam);
23830 if (err) {
23831- atomic_inc(&vcc->stats->tx_err);
23832+ atomic_inc_unchecked(&vcc->stats->tx_err);
23833 dev_kfree_skb(skb);
23834 return err;
23835 }
23836@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23837 skb = dev_alloc_skb(64);
23838 if (!skb) {
23839 printk("%s: Out of memory in send_oam().\n", card->name);
23840- atomic_inc(&vcc->stats->tx_err);
23841+ atomic_inc_unchecked(&vcc->stats->tx_err);
23842 return -ENOMEM;
23843 }
23844 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23845diff -urNp linux-3.0.7/drivers/atm/iphase.c linux-3.0.7/drivers/atm/iphase.c
23846--- linux-3.0.7/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23847+++ linux-3.0.7/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23848@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23849 status = (u_short) (buf_desc_ptr->desc_mode);
23850 if (status & (RX_CER | RX_PTE | RX_OFL))
23851 {
23852- atomic_inc(&vcc->stats->rx_err);
23853+ atomic_inc_unchecked(&vcc->stats->rx_err);
23854 IF_ERR(printk("IA: bad packet, dropping it");)
23855 if (status & RX_CER) {
23856 IF_ERR(printk(" cause: packet CRC error\n");)
23857@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23858 len = dma_addr - buf_addr;
23859 if (len > iadev->rx_buf_sz) {
23860 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23861- atomic_inc(&vcc->stats->rx_err);
23862+ atomic_inc_unchecked(&vcc->stats->rx_err);
23863 goto out_free_desc;
23864 }
23865
23866@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23867 ia_vcc = INPH_IA_VCC(vcc);
23868 if (ia_vcc == NULL)
23869 {
23870- atomic_inc(&vcc->stats->rx_err);
23871+ atomic_inc_unchecked(&vcc->stats->rx_err);
23872 dev_kfree_skb_any(skb);
23873 atm_return(vcc, atm_guess_pdu2truesize(len));
23874 goto INCR_DLE;
23875@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23876 if ((length > iadev->rx_buf_sz) || (length >
23877 (skb->len - sizeof(struct cpcs_trailer))))
23878 {
23879- atomic_inc(&vcc->stats->rx_err);
23880+ atomic_inc_unchecked(&vcc->stats->rx_err);
23881 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23882 length, skb->len);)
23883 dev_kfree_skb_any(skb);
23884@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23885
23886 IF_RX(printk("rx_dle_intr: skb push");)
23887 vcc->push(vcc,skb);
23888- atomic_inc(&vcc->stats->rx);
23889+ atomic_inc_unchecked(&vcc->stats->rx);
23890 iadev->rx_pkt_cnt++;
23891 }
23892 INCR_DLE:
23893@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23894 {
23895 struct k_sonet_stats *stats;
23896 stats = &PRIV(_ia_dev[board])->sonet_stats;
23897- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23898- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23899- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23900- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23901- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23902- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23903- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23904- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23905- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23906+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23907+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23908+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23909+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23910+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23911+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23912+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23913+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23914+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23915 }
23916 ia_cmds.status = 0;
23917 break;
23918@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23919 if ((desc == 0) || (desc > iadev->num_tx_desc))
23920 {
23921 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23922- atomic_inc(&vcc->stats->tx);
23923+ atomic_inc_unchecked(&vcc->stats->tx);
23924 if (vcc->pop)
23925 vcc->pop(vcc, skb);
23926 else
23927@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23928 ATM_DESC(skb) = vcc->vci;
23929 skb_queue_tail(&iadev->tx_dma_q, skb);
23930
23931- atomic_inc(&vcc->stats->tx);
23932+ atomic_inc_unchecked(&vcc->stats->tx);
23933 iadev->tx_pkt_cnt++;
23934 /* Increment transaction counter */
23935 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23936
23937 #if 0
23938 /* add flow control logic */
23939- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23940+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23941 if (iavcc->vc_desc_cnt > 10) {
23942 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23943 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23944diff -urNp linux-3.0.7/drivers/atm/lanai.c linux-3.0.7/drivers/atm/lanai.c
23945--- linux-3.0.7/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23946+++ linux-3.0.7/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23947@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23948 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23949 lanai_endtx(lanai, lvcc);
23950 lanai_free_skb(lvcc->tx.atmvcc, skb);
23951- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23952+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23953 }
23954
23955 /* Try to fill the buffer - don't call unless there is backlog */
23956@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23957 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23958 __net_timestamp(skb);
23959 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23960- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23961+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23962 out:
23963 lvcc->rx.buf.ptr = end;
23964 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23965@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23966 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23967 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23968 lanai->stats.service_rxnotaal5++;
23969- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23970+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23971 return 0;
23972 }
23973 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23974@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23975 int bytes;
23976 read_unlock(&vcc_sklist_lock);
23977 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23978- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23979+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23980 lvcc->stats.x.aal5.service_trash++;
23981 bytes = (SERVICE_GET_END(s) * 16) -
23982 (((unsigned long) lvcc->rx.buf.ptr) -
23983@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23984 }
23985 if (s & SERVICE_STREAM) {
23986 read_unlock(&vcc_sklist_lock);
23987- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23988+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23989 lvcc->stats.x.aal5.service_stream++;
23990 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23991 "PDU on VCI %d!\n", lanai->number, vci);
23992@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23993 return 0;
23994 }
23995 DPRINTK("got rx crc error on vci %d\n", vci);
23996- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23997+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23998 lvcc->stats.x.aal5.service_rxcrc++;
23999 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24000 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24001diff -urNp linux-3.0.7/drivers/atm/nicstar.c linux-3.0.7/drivers/atm/nicstar.c
24002--- linux-3.0.7/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
24003+++ linux-3.0.7/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
24004@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24005 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24006 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24007 card->index);
24008- atomic_inc(&vcc->stats->tx_err);
24009+ atomic_inc_unchecked(&vcc->stats->tx_err);
24010 dev_kfree_skb_any(skb);
24011 return -EINVAL;
24012 }
24013@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24014 if (!vc->tx) {
24015 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24016 card->index);
24017- atomic_inc(&vcc->stats->tx_err);
24018+ atomic_inc_unchecked(&vcc->stats->tx_err);
24019 dev_kfree_skb_any(skb);
24020 return -EINVAL;
24021 }
24022@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24023 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24024 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24025 card->index);
24026- atomic_inc(&vcc->stats->tx_err);
24027+ atomic_inc_unchecked(&vcc->stats->tx_err);
24028 dev_kfree_skb_any(skb);
24029 return -EINVAL;
24030 }
24031
24032 if (skb_shinfo(skb)->nr_frags != 0) {
24033 printk("nicstar%d: No scatter-gather yet.\n", card->index);
24034- atomic_inc(&vcc->stats->tx_err);
24035+ atomic_inc_unchecked(&vcc->stats->tx_err);
24036 dev_kfree_skb_any(skb);
24037 return -EINVAL;
24038 }
24039@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24040 }
24041
24042 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24043- atomic_inc(&vcc->stats->tx_err);
24044+ atomic_inc_unchecked(&vcc->stats->tx_err);
24045 dev_kfree_skb_any(skb);
24046 return -EIO;
24047 }
24048- atomic_inc(&vcc->stats->tx);
24049+ atomic_inc_unchecked(&vcc->stats->tx);
24050
24051 return 0;
24052 }
24053@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24054 printk
24055 ("nicstar%d: Can't allocate buffers for aal0.\n",
24056 card->index);
24057- atomic_add(i, &vcc->stats->rx_drop);
24058+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24059 break;
24060 }
24061 if (!atm_charge(vcc, sb->truesize)) {
24062 RXPRINTK
24063 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24064 card->index);
24065- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24066+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24067 dev_kfree_skb_any(sb);
24068 break;
24069 }
24070@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24071 ATM_SKB(sb)->vcc = vcc;
24072 __net_timestamp(sb);
24073 vcc->push(vcc, sb);
24074- atomic_inc(&vcc->stats->rx);
24075+ atomic_inc_unchecked(&vcc->stats->rx);
24076 cell += ATM_CELL_PAYLOAD;
24077 }
24078
24079@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24080 if (iovb == NULL) {
24081 printk("nicstar%d: Out of iovec buffers.\n",
24082 card->index);
24083- atomic_inc(&vcc->stats->rx_drop);
24084+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24085 recycle_rx_buf(card, skb);
24086 return;
24087 }
24088@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24089 small or large buffer itself. */
24090 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24091 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24092- atomic_inc(&vcc->stats->rx_err);
24093+ atomic_inc_unchecked(&vcc->stats->rx_err);
24094 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24095 NS_MAX_IOVECS);
24096 NS_PRV_IOVCNT(iovb) = 0;
24097@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24098 ("nicstar%d: Expected a small buffer, and this is not one.\n",
24099 card->index);
24100 which_list(card, skb);
24101- atomic_inc(&vcc->stats->rx_err);
24102+ atomic_inc_unchecked(&vcc->stats->rx_err);
24103 recycle_rx_buf(card, skb);
24104 vc->rx_iov = NULL;
24105 recycle_iov_buf(card, iovb);
24106@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24107 ("nicstar%d: Expected a large buffer, and this is not one.\n",
24108 card->index);
24109 which_list(card, skb);
24110- atomic_inc(&vcc->stats->rx_err);
24111+ atomic_inc_unchecked(&vcc->stats->rx_err);
24112 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24113 NS_PRV_IOVCNT(iovb));
24114 vc->rx_iov = NULL;
24115@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24116 printk(" - PDU size mismatch.\n");
24117 else
24118 printk(".\n");
24119- atomic_inc(&vcc->stats->rx_err);
24120+ atomic_inc_unchecked(&vcc->stats->rx_err);
24121 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24122 NS_PRV_IOVCNT(iovb));
24123 vc->rx_iov = NULL;
24124@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24125 /* skb points to a small buffer */
24126 if (!atm_charge(vcc, skb->truesize)) {
24127 push_rxbufs(card, skb);
24128- atomic_inc(&vcc->stats->rx_drop);
24129+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24130 } else {
24131 skb_put(skb, len);
24132 dequeue_sm_buf(card, skb);
24133@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24134 ATM_SKB(skb)->vcc = vcc;
24135 __net_timestamp(skb);
24136 vcc->push(vcc, skb);
24137- atomic_inc(&vcc->stats->rx);
24138+ atomic_inc_unchecked(&vcc->stats->rx);
24139 }
24140 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24141 struct sk_buff *sb;
24142@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24143 if (len <= NS_SMBUFSIZE) {
24144 if (!atm_charge(vcc, sb->truesize)) {
24145 push_rxbufs(card, sb);
24146- atomic_inc(&vcc->stats->rx_drop);
24147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24148 } else {
24149 skb_put(sb, len);
24150 dequeue_sm_buf(card, sb);
24151@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24152 ATM_SKB(sb)->vcc = vcc;
24153 __net_timestamp(sb);
24154 vcc->push(vcc, sb);
24155- atomic_inc(&vcc->stats->rx);
24156+ atomic_inc_unchecked(&vcc->stats->rx);
24157 }
24158
24159 push_rxbufs(card, skb);
24160@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24161
24162 if (!atm_charge(vcc, skb->truesize)) {
24163 push_rxbufs(card, skb);
24164- atomic_inc(&vcc->stats->rx_drop);
24165+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24166 } else {
24167 dequeue_lg_buf(card, skb);
24168 #ifdef NS_USE_DESTRUCTORS
24169@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24170 ATM_SKB(skb)->vcc = vcc;
24171 __net_timestamp(skb);
24172 vcc->push(vcc, skb);
24173- atomic_inc(&vcc->stats->rx);
24174+ atomic_inc_unchecked(&vcc->stats->rx);
24175 }
24176
24177 push_rxbufs(card, sb);
24178@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24179 printk
24180 ("nicstar%d: Out of huge buffers.\n",
24181 card->index);
24182- atomic_inc(&vcc->stats->rx_drop);
24183+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24184 recycle_iovec_rx_bufs(card,
24185 (struct iovec *)
24186 iovb->data,
24187@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24188 card->hbpool.count++;
24189 } else
24190 dev_kfree_skb_any(hb);
24191- atomic_inc(&vcc->stats->rx_drop);
24192+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24193 } else {
24194 /* Copy the small buffer to the huge buffer */
24195 sb = (struct sk_buff *)iov->iov_base;
24196@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24197 #endif /* NS_USE_DESTRUCTORS */
24198 __net_timestamp(hb);
24199 vcc->push(vcc, hb);
24200- atomic_inc(&vcc->stats->rx);
24201+ atomic_inc_unchecked(&vcc->stats->rx);
24202 }
24203 }
24204
24205diff -urNp linux-3.0.7/drivers/atm/solos-pci.c linux-3.0.7/drivers/atm/solos-pci.c
24206--- linux-3.0.7/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
24207+++ linux-3.0.7/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
24208@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24209 }
24210 atm_charge(vcc, skb->truesize);
24211 vcc->push(vcc, skb);
24212- atomic_inc(&vcc->stats->rx);
24213+ atomic_inc_unchecked(&vcc->stats->rx);
24214 break;
24215
24216 case PKT_STATUS:
24217@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24218 char msg[500];
24219 char item[10];
24220
24221+ pax_track_stack();
24222+
24223 len = buf->len;
24224 for (i = 0; i < len; i++){
24225 if(i % 8 == 0)
24226@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24227 vcc = SKB_CB(oldskb)->vcc;
24228
24229 if (vcc) {
24230- atomic_inc(&vcc->stats->tx);
24231+ atomic_inc_unchecked(&vcc->stats->tx);
24232 solos_pop(vcc, oldskb);
24233 } else
24234 dev_kfree_skb_irq(oldskb);
24235diff -urNp linux-3.0.7/drivers/atm/suni.c linux-3.0.7/drivers/atm/suni.c
24236--- linux-3.0.7/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24237+++ linux-3.0.7/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24238@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24239
24240
24241 #define ADD_LIMITED(s,v) \
24242- atomic_add((v),&stats->s); \
24243- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24244+ atomic_add_unchecked((v),&stats->s); \
24245+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24246
24247
24248 static void suni_hz(unsigned long from_timer)
24249diff -urNp linux-3.0.7/drivers/atm/uPD98402.c linux-3.0.7/drivers/atm/uPD98402.c
24250--- linux-3.0.7/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24251+++ linux-3.0.7/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24252@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24253 struct sonet_stats tmp;
24254 int error = 0;
24255
24256- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24257+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24258 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24259 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24260 if (zero && !error) {
24261@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24262
24263
24264 #define ADD_LIMITED(s,v) \
24265- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24266- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24267- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24268+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24269+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24270+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24271
24272
24273 static void stat_event(struct atm_dev *dev)
24274@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24275 if (reason & uPD98402_INT_PFM) stat_event(dev);
24276 if (reason & uPD98402_INT_PCO) {
24277 (void) GET(PCOCR); /* clear interrupt cause */
24278- atomic_add(GET(HECCT),
24279+ atomic_add_unchecked(GET(HECCT),
24280 &PRIV(dev)->sonet_stats.uncorr_hcs);
24281 }
24282 if ((reason & uPD98402_INT_RFO) &&
24283@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24284 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24285 uPD98402_INT_LOS),PIMR); /* enable them */
24286 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24287- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24288- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24289- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24290+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24291+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24292+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24293 return 0;
24294 }
24295
24296diff -urNp linux-3.0.7/drivers/atm/zatm.c linux-3.0.7/drivers/atm/zatm.c
24297--- linux-3.0.7/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24298+++ linux-3.0.7/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24299@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24300 }
24301 if (!size) {
24302 dev_kfree_skb_irq(skb);
24303- if (vcc) atomic_inc(&vcc->stats->rx_err);
24304+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24305 continue;
24306 }
24307 if (!atm_charge(vcc,skb->truesize)) {
24308@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24309 skb->len = size;
24310 ATM_SKB(skb)->vcc = vcc;
24311 vcc->push(vcc,skb);
24312- atomic_inc(&vcc->stats->rx);
24313+ atomic_inc_unchecked(&vcc->stats->rx);
24314 }
24315 zout(pos & 0xffff,MTA(mbx));
24316 #if 0 /* probably a stupid idea */
24317@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24318 skb_queue_head(&zatm_vcc->backlog,skb);
24319 break;
24320 }
24321- atomic_inc(&vcc->stats->tx);
24322+ atomic_inc_unchecked(&vcc->stats->tx);
24323 wake_up(&zatm_vcc->tx_wait);
24324 }
24325
24326diff -urNp linux-3.0.7/drivers/base/devtmpfs.c linux-3.0.7/drivers/base/devtmpfs.c
24327--- linux-3.0.7/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24328+++ linux-3.0.7/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24329@@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24330 if (!dev_mnt)
24331 return 0;
24332
24333- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24334+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24335 if (err)
24336 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24337 else
24338diff -urNp linux-3.0.7/drivers/base/power/wakeup.c linux-3.0.7/drivers/base/power/wakeup.c
24339--- linux-3.0.7/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24340+++ linux-3.0.7/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24341@@ -29,14 +29,14 @@ bool events_check_enabled;
24342 * They need to be modified together atomically, so it's better to use one
24343 * atomic variable to hold them both.
24344 */
24345-static atomic_t combined_event_count = ATOMIC_INIT(0);
24346+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24347
24348 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24349 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24350
24351 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24352 {
24353- unsigned int comb = atomic_read(&combined_event_count);
24354+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24355
24356 *cnt = (comb >> IN_PROGRESS_BITS);
24357 *inpr = comb & MAX_IN_PROGRESS;
24358@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24359 ws->last_time = ktime_get();
24360
24361 /* Increment the counter of events in progress. */
24362- atomic_inc(&combined_event_count);
24363+ atomic_inc_unchecked(&combined_event_count);
24364 }
24365
24366 /**
24367@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24368 * Increment the counter of registered wakeup events and decrement the
24369 * couter of wakeup events in progress simultaneously.
24370 */
24371- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24372+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24373 }
24374
24375 /**
24376diff -urNp linux-3.0.7/drivers/block/DAC960.c linux-3.0.7/drivers/block/DAC960.c
24377--- linux-3.0.7/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24378+++ linux-3.0.7/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24379@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24380 unsigned long flags;
24381 int Channel, TargetID;
24382
24383+ pax_track_stack();
24384+
24385 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24386 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24387 sizeof(DAC960_SCSI_Inquiry_T) +
24388diff -urNp linux-3.0.7/drivers/block/cciss.c linux-3.0.7/drivers/block/cciss.c
24389--- linux-3.0.7/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24390+++ linux-3.0.7/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24391@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24392 int err;
24393 u32 cp;
24394
24395+ memset(&arg64, 0, sizeof(arg64));
24396+
24397 err = 0;
24398 err |=
24399 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24400@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24401 while (!list_empty(&h->reqQ)) {
24402 c = list_entry(h->reqQ.next, CommandList_struct, list);
24403 /* can't do anything if fifo is full */
24404- if ((h->access.fifo_full(h))) {
24405+ if ((h->access->fifo_full(h))) {
24406 dev_warn(&h->pdev->dev, "fifo full\n");
24407 break;
24408 }
24409@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24410 h->Qdepth--;
24411
24412 /* Tell the controller execute command */
24413- h->access.submit_command(h, c);
24414+ h->access->submit_command(h, c);
24415
24416 /* Put job onto the completed Q */
24417 addQ(&h->cmpQ, c);
24418@@ -3422,17 +3424,17 @@ startio:
24419
24420 static inline unsigned long get_next_completion(ctlr_info_t *h)
24421 {
24422- return h->access.command_completed(h);
24423+ return h->access->command_completed(h);
24424 }
24425
24426 static inline int interrupt_pending(ctlr_info_t *h)
24427 {
24428- return h->access.intr_pending(h);
24429+ return h->access->intr_pending(h);
24430 }
24431
24432 static inline long interrupt_not_for_us(ctlr_info_t *h)
24433 {
24434- return ((h->access.intr_pending(h) == 0) ||
24435+ return ((h->access->intr_pending(h) == 0) ||
24436 (h->interrupts_enabled == 0));
24437 }
24438
24439@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24440 u32 a;
24441
24442 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24443- return h->access.command_completed(h);
24444+ return h->access->command_completed(h);
24445
24446 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24447 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24448@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24449 trans_support & CFGTBL_Trans_use_short_tags);
24450
24451 /* Change the access methods to the performant access methods */
24452- h->access = SA5_performant_access;
24453+ h->access = &SA5_performant_access;
24454 h->transMethod = CFGTBL_Trans_Performant;
24455
24456 return;
24457@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24458 if (prod_index < 0)
24459 return -ENODEV;
24460 h->product_name = products[prod_index].product_name;
24461- h->access = *(products[prod_index].access);
24462+ h->access = products[prod_index].access;
24463
24464 if (cciss_board_disabled(h)) {
24465 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24466@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24467 }
24468
24469 /* make sure the board interrupts are off */
24470- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24471+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24472 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24473 if (rc)
24474 goto clean2;
24475@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24476 * fake ones to scoop up any residual completions.
24477 */
24478 spin_lock_irqsave(&h->lock, flags);
24479- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24480+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24481 spin_unlock_irqrestore(&h->lock, flags);
24482 free_irq(h->intr[PERF_MODE_INT], h);
24483 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24484@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24485 dev_info(&h->pdev->dev, "Board READY.\n");
24486 dev_info(&h->pdev->dev,
24487 "Waiting for stale completions to drain.\n");
24488- h->access.set_intr_mask(h, CCISS_INTR_ON);
24489+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24490 msleep(10000);
24491- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24492+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24493
24494 rc = controller_reset_failed(h->cfgtable);
24495 if (rc)
24496@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24497 cciss_scsi_setup(h);
24498
24499 /* Turn the interrupts on so we can service requests */
24500- h->access.set_intr_mask(h, CCISS_INTR_ON);
24501+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24502
24503 /* Get the firmware version */
24504 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24505@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24506 kfree(flush_buf);
24507 if (return_code != IO_OK)
24508 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24509- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24510+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24511 free_irq(h->intr[PERF_MODE_INT], h);
24512 }
24513
24514diff -urNp linux-3.0.7/drivers/block/cciss.h linux-3.0.7/drivers/block/cciss.h
24515--- linux-3.0.7/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24516+++ linux-3.0.7/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24517@@ -100,7 +100,7 @@ struct ctlr_info
24518 /* information about each logical volume */
24519 drive_info_struct *drv[CISS_MAX_LUN];
24520
24521- struct access_method access;
24522+ struct access_method *access;
24523
24524 /* queue and queue Info */
24525 struct list_head reqQ;
24526diff -urNp linux-3.0.7/drivers/block/cpqarray.c linux-3.0.7/drivers/block/cpqarray.c
24527--- linux-3.0.7/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24528+++ linux-3.0.7/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24529@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24530 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24531 goto Enomem4;
24532 }
24533- hba[i]->access.set_intr_mask(hba[i], 0);
24534+ hba[i]->access->set_intr_mask(hba[i], 0);
24535 if (request_irq(hba[i]->intr, do_ida_intr,
24536 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24537 {
24538@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24539 add_timer(&hba[i]->timer);
24540
24541 /* Enable IRQ now that spinlock and rate limit timer are set up */
24542- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24543+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24544
24545 for(j=0; j<NWD; j++) {
24546 struct gendisk *disk = ida_gendisk[i][j];
24547@@ -694,7 +694,7 @@ DBGINFO(
24548 for(i=0; i<NR_PRODUCTS; i++) {
24549 if (board_id == products[i].board_id) {
24550 c->product_name = products[i].product_name;
24551- c->access = *(products[i].access);
24552+ c->access = products[i].access;
24553 break;
24554 }
24555 }
24556@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24557 hba[ctlr]->intr = intr;
24558 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24559 hba[ctlr]->product_name = products[j].product_name;
24560- hba[ctlr]->access = *(products[j].access);
24561+ hba[ctlr]->access = products[j].access;
24562 hba[ctlr]->ctlr = ctlr;
24563 hba[ctlr]->board_id = board_id;
24564 hba[ctlr]->pci_dev = NULL; /* not PCI */
24565@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24566 struct scatterlist tmp_sg[SG_MAX];
24567 int i, dir, seg;
24568
24569+ pax_track_stack();
24570+
24571 queue_next:
24572 creq = blk_peek_request(q);
24573 if (!creq)
24574@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24575
24576 while((c = h->reqQ) != NULL) {
24577 /* Can't do anything if we're busy */
24578- if (h->access.fifo_full(h) == 0)
24579+ if (h->access->fifo_full(h) == 0)
24580 return;
24581
24582 /* Get the first entry from the request Q */
24583@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24584 h->Qdepth--;
24585
24586 /* Tell the controller to do our bidding */
24587- h->access.submit_command(h, c);
24588+ h->access->submit_command(h, c);
24589
24590 /* Get onto the completion Q */
24591 addQ(&h->cmpQ, c);
24592@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24593 unsigned long flags;
24594 __u32 a,a1;
24595
24596- istat = h->access.intr_pending(h);
24597+ istat = h->access->intr_pending(h);
24598 /* Is this interrupt for us? */
24599 if (istat == 0)
24600 return IRQ_NONE;
24601@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24602 */
24603 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24604 if (istat & FIFO_NOT_EMPTY) {
24605- while((a = h->access.command_completed(h))) {
24606+ while((a = h->access->command_completed(h))) {
24607 a1 = a; a &= ~3;
24608 if ((c = h->cmpQ) == NULL)
24609 {
24610@@ -1449,11 +1451,11 @@ static int sendcmd(
24611 /*
24612 * Disable interrupt
24613 */
24614- info_p->access.set_intr_mask(info_p, 0);
24615+ info_p->access->set_intr_mask(info_p, 0);
24616 /* Make sure there is room in the command FIFO */
24617 /* Actually it should be completely empty at this time. */
24618 for (i = 200000; i > 0; i--) {
24619- temp = info_p->access.fifo_full(info_p);
24620+ temp = info_p->access->fifo_full(info_p);
24621 if (temp != 0) {
24622 break;
24623 }
24624@@ -1466,7 +1468,7 @@ DBG(
24625 /*
24626 * Send the cmd
24627 */
24628- info_p->access.submit_command(info_p, c);
24629+ info_p->access->submit_command(info_p, c);
24630 complete = pollcomplete(ctlr);
24631
24632 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24633@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24634 * we check the new geometry. Then turn interrupts back on when
24635 * we're done.
24636 */
24637- host->access.set_intr_mask(host, 0);
24638+ host->access->set_intr_mask(host, 0);
24639 getgeometry(ctlr);
24640- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24641+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24642
24643 for(i=0; i<NWD; i++) {
24644 struct gendisk *disk = ida_gendisk[ctlr][i];
24645@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24646 /* Wait (up to 2 seconds) for a command to complete */
24647
24648 for (i = 200000; i > 0; i--) {
24649- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24650+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24651 if (done == 0) {
24652 udelay(10); /* a short fixed delay */
24653 } else
24654diff -urNp linux-3.0.7/drivers/block/cpqarray.h linux-3.0.7/drivers/block/cpqarray.h
24655--- linux-3.0.7/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24656+++ linux-3.0.7/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24657@@ -99,7 +99,7 @@ struct ctlr_info {
24658 drv_info_t drv[NWD];
24659 struct proc_dir_entry *proc;
24660
24661- struct access_method access;
24662+ struct access_method *access;
24663
24664 cmdlist_t *reqQ;
24665 cmdlist_t *cmpQ;
24666diff -urNp linux-3.0.7/drivers/block/drbd/drbd_int.h linux-3.0.7/drivers/block/drbd/drbd_int.h
24667--- linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24668+++ linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24669@@ -737,7 +737,7 @@ struct drbd_request;
24670 struct drbd_epoch {
24671 struct list_head list;
24672 unsigned int barrier_nr;
24673- atomic_t epoch_size; /* increased on every request added. */
24674+ atomic_unchecked_t epoch_size; /* increased on every request added. */
24675 atomic_t active; /* increased on every req. added, and dec on every finished. */
24676 unsigned long flags;
24677 };
24678@@ -1109,7 +1109,7 @@ struct drbd_conf {
24679 void *int_dig_in;
24680 void *int_dig_vv;
24681 wait_queue_head_t seq_wait;
24682- atomic_t packet_seq;
24683+ atomic_unchecked_t packet_seq;
24684 unsigned int peer_seq;
24685 spinlock_t peer_seq_lock;
24686 unsigned int minor;
24687@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24688
24689 static inline void drbd_tcp_cork(struct socket *sock)
24690 {
24691- int __user val = 1;
24692+ int val = 1;
24693 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24694- (char __user *)&val, sizeof(val));
24695+ (char __force_user *)&val, sizeof(val));
24696 }
24697
24698 static inline void drbd_tcp_uncork(struct socket *sock)
24699 {
24700- int __user val = 0;
24701+ int val = 0;
24702 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24703- (char __user *)&val, sizeof(val));
24704+ (char __force_user *)&val, sizeof(val));
24705 }
24706
24707 static inline void drbd_tcp_nodelay(struct socket *sock)
24708 {
24709- int __user val = 1;
24710+ int val = 1;
24711 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24712- (char __user *)&val, sizeof(val));
24713+ (char __force_user *)&val, sizeof(val));
24714 }
24715
24716 static inline void drbd_tcp_quickack(struct socket *sock)
24717 {
24718- int __user val = 2;
24719+ int val = 2;
24720 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24721- (char __user *)&val, sizeof(val));
24722+ (char __force_user *)&val, sizeof(val));
24723 }
24724
24725 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
24726diff -urNp linux-3.0.7/drivers/block/drbd/drbd_main.c linux-3.0.7/drivers/block/drbd/drbd_main.c
24727--- linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24728+++ linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24729@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24730 p.sector = sector;
24731 p.block_id = block_id;
24732 p.blksize = blksize;
24733- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24734+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24735
24736 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24737 return false;
24738@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24739 p.sector = cpu_to_be64(req->sector);
24740 p.block_id = (unsigned long)req;
24741 p.seq_num = cpu_to_be32(req->seq_num =
24742- atomic_add_return(1, &mdev->packet_seq));
24743+ atomic_add_return_unchecked(1, &mdev->packet_seq));
24744
24745 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24746
24747@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24748 atomic_set(&mdev->unacked_cnt, 0);
24749 atomic_set(&mdev->local_cnt, 0);
24750 atomic_set(&mdev->net_cnt, 0);
24751- atomic_set(&mdev->packet_seq, 0);
24752+ atomic_set_unchecked(&mdev->packet_seq, 0);
24753 atomic_set(&mdev->pp_in_use, 0);
24754 atomic_set(&mdev->pp_in_use_by_net, 0);
24755 atomic_set(&mdev->rs_sect_in, 0);
24756@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24757 mdev->receiver.t_state);
24758
24759 /* no need to lock it, I'm the only thread alive */
24760- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24761- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24762+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24763+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24764 mdev->al_writ_cnt =
24765 mdev->bm_writ_cnt =
24766 mdev->read_cnt =
24767diff -urNp linux-3.0.7/drivers/block/drbd/drbd_nl.c linux-3.0.7/drivers/block/drbd/drbd_nl.c
24768--- linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24769+++ linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24770@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24771 module_put(THIS_MODULE);
24772 }
24773
24774-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24775+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24776
24777 static unsigned short *
24778 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24779@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24780 cn_reply->id.idx = CN_IDX_DRBD;
24781 cn_reply->id.val = CN_VAL_DRBD;
24782
24783- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24784+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24785 cn_reply->ack = 0; /* not used here. */
24786 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24787 (int)((char *)tl - (char *)reply->tag_list);
24788@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24789 cn_reply->id.idx = CN_IDX_DRBD;
24790 cn_reply->id.val = CN_VAL_DRBD;
24791
24792- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24793+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24794 cn_reply->ack = 0; /* not used here. */
24795 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24796 (int)((char *)tl - (char *)reply->tag_list);
24797@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24798 cn_reply->id.idx = CN_IDX_DRBD;
24799 cn_reply->id.val = CN_VAL_DRBD;
24800
24801- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24802+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24803 cn_reply->ack = 0; // not used here.
24804 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24805 (int)((char*)tl - (char*)reply->tag_list);
24806@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24807 cn_reply->id.idx = CN_IDX_DRBD;
24808 cn_reply->id.val = CN_VAL_DRBD;
24809
24810- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24811+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24812 cn_reply->ack = 0; /* not used here. */
24813 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24814 (int)((char *)tl - (char *)reply->tag_list);
24815diff -urNp linux-3.0.7/drivers/block/drbd/drbd_receiver.c linux-3.0.7/drivers/block/drbd/drbd_receiver.c
24816--- linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24817+++ linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24818@@ -894,7 +894,7 @@ retry:
24819 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24820 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24821
24822- atomic_set(&mdev->packet_seq, 0);
24823+ atomic_set_unchecked(&mdev->packet_seq, 0);
24824 mdev->peer_seq = 0;
24825
24826 drbd_thread_start(&mdev->asender);
24827@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24828 do {
24829 next_epoch = NULL;
24830
24831- epoch_size = atomic_read(&epoch->epoch_size);
24832+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24833
24834 switch (ev & ~EV_CLEANUP) {
24835 case EV_PUT:
24836@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24837 rv = FE_DESTROYED;
24838 } else {
24839 epoch->flags = 0;
24840- atomic_set(&epoch->epoch_size, 0);
24841+ atomic_set_unchecked(&epoch->epoch_size, 0);
24842 /* atomic_set(&epoch->active, 0); is already zero */
24843 if (rv == FE_STILL_LIVE)
24844 rv = FE_RECYCLED;
24845@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24846 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24847 drbd_flush(mdev);
24848
24849- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24850+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24851 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24852 if (epoch)
24853 break;
24854 }
24855
24856 epoch = mdev->current_epoch;
24857- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24858+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24859
24860 D_ASSERT(atomic_read(&epoch->active) == 0);
24861 D_ASSERT(epoch->flags == 0);
24862@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24863 }
24864
24865 epoch->flags = 0;
24866- atomic_set(&epoch->epoch_size, 0);
24867+ atomic_set_unchecked(&epoch->epoch_size, 0);
24868 atomic_set(&epoch->active, 0);
24869
24870 spin_lock(&mdev->epoch_lock);
24871- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24872+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24873 list_add(&epoch->list, &mdev->current_epoch->list);
24874 mdev->current_epoch = epoch;
24875 mdev->epochs++;
24876@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24877 spin_unlock(&mdev->peer_seq_lock);
24878
24879 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24880- atomic_inc(&mdev->current_epoch->epoch_size);
24881+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24882 return drbd_drain_block(mdev, data_size);
24883 }
24884
24885@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24886
24887 spin_lock(&mdev->epoch_lock);
24888 e->epoch = mdev->current_epoch;
24889- atomic_inc(&e->epoch->epoch_size);
24890+ atomic_inc_unchecked(&e->epoch->epoch_size);
24891 atomic_inc(&e->epoch->active);
24892 spin_unlock(&mdev->epoch_lock);
24893
24894@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24895 D_ASSERT(list_empty(&mdev->done_ee));
24896
24897 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24898- atomic_set(&mdev->current_epoch->epoch_size, 0);
24899+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24900 D_ASSERT(list_empty(&mdev->current_epoch->list));
24901 }
24902
24903diff -urNp linux-3.0.7/drivers/block/loop.c linux-3.0.7/drivers/block/loop.c
24904--- linux-3.0.7/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24905+++ linux-3.0.7/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24906@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24907 mm_segment_t old_fs = get_fs();
24908
24909 set_fs(get_ds());
24910- bw = file->f_op->write(file, buf, len, &pos);
24911+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24912 set_fs(old_fs);
24913 if (likely(bw == len))
24914 return 0;
24915diff -urNp linux-3.0.7/drivers/block/nbd.c linux-3.0.7/drivers/block/nbd.c
24916--- linux-3.0.7/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24917+++ linux-3.0.7/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24918@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24919 struct kvec iov;
24920 sigset_t blocked, oldset;
24921
24922+ pax_track_stack();
24923+
24924 if (unlikely(!sock)) {
24925 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24926 lo->disk->disk_name, (send ? "send" : "recv"));
24927@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24928 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24929 unsigned int cmd, unsigned long arg)
24930 {
24931+ pax_track_stack();
24932+
24933 switch (cmd) {
24934 case NBD_DISCONNECT: {
24935 struct request sreq;
24936diff -urNp linux-3.0.7/drivers/char/Kconfig linux-3.0.7/drivers/char/Kconfig
24937--- linux-3.0.7/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24938+++ linux-3.0.7/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24939@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24940
24941 config DEVKMEM
24942 bool "/dev/kmem virtual device support"
24943- default y
24944+ default n
24945+ depends on !GRKERNSEC_KMEM
24946 help
24947 Say Y here if you want to support the /dev/kmem device. The
24948 /dev/kmem device is rarely used, but can be used for certain
24949@@ -596,6 +597,7 @@ config DEVPORT
24950 bool
24951 depends on !M68K
24952 depends on ISA || PCI
24953+ depends on !GRKERNSEC_KMEM
24954 default y
24955
24956 source "drivers/s390/char/Kconfig"
24957diff -urNp linux-3.0.7/drivers/char/agp/frontend.c linux-3.0.7/drivers/char/agp/frontend.c
24958--- linux-3.0.7/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24959+++ linux-3.0.7/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24960@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24961 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24962 return -EFAULT;
24963
24964- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24965+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24966 return -EFAULT;
24967
24968 client = agp_find_client_by_pid(reserve.pid);
24969diff -urNp linux-3.0.7/drivers/char/briq_panel.c linux-3.0.7/drivers/char/briq_panel.c
24970--- linux-3.0.7/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24971+++ linux-3.0.7/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24972@@ -9,6 +9,7 @@
24973 #include <linux/types.h>
24974 #include <linux/errno.h>
24975 #include <linux/tty.h>
24976+#include <linux/mutex.h>
24977 #include <linux/timer.h>
24978 #include <linux/kernel.h>
24979 #include <linux/wait.h>
24980@@ -34,6 +35,7 @@ static int vfd_is_open;
24981 static unsigned char vfd[40];
24982 static int vfd_cursor;
24983 static unsigned char ledpb, led;
24984+static DEFINE_MUTEX(vfd_mutex);
24985
24986 static void update_vfd(void)
24987 {
24988@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24989 if (!vfd_is_open)
24990 return -EBUSY;
24991
24992+ mutex_lock(&vfd_mutex);
24993 for (;;) {
24994 char c;
24995 if (!indx)
24996 break;
24997- if (get_user(c, buf))
24998+ if (get_user(c, buf)) {
24999+ mutex_unlock(&vfd_mutex);
25000 return -EFAULT;
25001+ }
25002 if (esc) {
25003 set_led(c);
25004 esc = 0;
25005@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25006 buf++;
25007 }
25008 update_vfd();
25009+ mutex_unlock(&vfd_mutex);
25010
25011 return len;
25012 }
25013diff -urNp linux-3.0.7/drivers/char/genrtc.c linux-3.0.7/drivers/char/genrtc.c
25014--- linux-3.0.7/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
25015+++ linux-3.0.7/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
25016@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25017 switch (cmd) {
25018
25019 case RTC_PLL_GET:
25020+ memset(&pll, 0, sizeof(pll));
25021 if (get_rtc_pll(&pll))
25022 return -EINVAL;
25023 else
25024diff -urNp linux-3.0.7/drivers/char/hpet.c linux-3.0.7/drivers/char/hpet.c
25025--- linux-3.0.7/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
25026+++ linux-3.0.7/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
25027@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25028 }
25029
25030 static int
25031-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25032+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25033 struct hpet_info *info)
25034 {
25035 struct hpet_timer __iomem *timer;
25036diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c
25037--- linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
25038+++ linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
25039@@ -415,7 +415,7 @@ struct ipmi_smi {
25040 struct proc_dir_entry *proc_dir;
25041 char proc_dir_name[10];
25042
25043- atomic_t stats[IPMI_NUM_STATS];
25044+ atomic_unchecked_t stats[IPMI_NUM_STATS];
25045
25046 /*
25047 * run_to_completion duplicate of smb_info, smi_info
25048@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25049
25050
25051 #define ipmi_inc_stat(intf, stat) \
25052- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25053+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25054 #define ipmi_get_stat(intf, stat) \
25055- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25056+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25057
25058 static int is_lan_addr(struct ipmi_addr *addr)
25059 {
25060@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25061 INIT_LIST_HEAD(&intf->cmd_rcvrs);
25062 init_waitqueue_head(&intf->waitq);
25063 for (i = 0; i < IPMI_NUM_STATS; i++)
25064- atomic_set(&intf->stats[i], 0);
25065+ atomic_set_unchecked(&intf->stats[i], 0);
25066
25067 intf->proc_dir = NULL;
25068
25069@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25070 struct ipmi_smi_msg smi_msg;
25071 struct ipmi_recv_msg recv_msg;
25072
25073+ pax_track_stack();
25074+
25075 si = (struct ipmi_system_interface_addr *) &addr;
25076 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25077 si->channel = IPMI_BMC_CHANNEL;
25078diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c
25079--- linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
25080+++ linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
25081@@ -277,7 +277,7 @@ struct smi_info {
25082 unsigned char slave_addr;
25083
25084 /* Counters and things for the proc filesystem. */
25085- atomic_t stats[SI_NUM_STATS];
25086+ atomic_unchecked_t stats[SI_NUM_STATS];
25087
25088 struct task_struct *thread;
25089
25090@@ -286,9 +286,9 @@ struct smi_info {
25091 };
25092
25093 #define smi_inc_stat(smi, stat) \
25094- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25095+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25096 #define smi_get_stat(smi, stat) \
25097- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25098+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25099
25100 #define SI_MAX_PARMS 4
25101
25102@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25103 atomic_set(&new_smi->req_events, 0);
25104 new_smi->run_to_completion = 0;
25105 for (i = 0; i < SI_NUM_STATS; i++)
25106- atomic_set(&new_smi->stats[i], 0);
25107+ atomic_set_unchecked(&new_smi->stats[i], 0);
25108
25109 new_smi->interrupt_disabled = 1;
25110 atomic_set(&new_smi->stop_operation, 0);
25111diff -urNp linux-3.0.7/drivers/char/mbcs.c linux-3.0.7/drivers/char/mbcs.c
25112--- linux-3.0.7/drivers/char/mbcs.c 2011-07-21 22:17:23.000000000 -0400
25113+++ linux-3.0.7/drivers/char/mbcs.c 2011-10-11 10:44:33.000000000 -0400
25114@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25115 return 0;
25116 }
25117
25118-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25119+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25120 {
25121 .part_num = MBCS_PART_NUM,
25122 .mfg_num = MBCS_MFG_NUM,
25123diff -urNp linux-3.0.7/drivers/char/mem.c linux-3.0.7/drivers/char/mem.c
25124--- linux-3.0.7/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
25125+++ linux-3.0.7/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
25126@@ -18,6 +18,7 @@
25127 #include <linux/raw.h>
25128 #include <linux/tty.h>
25129 #include <linux/capability.h>
25130+#include <linux/security.h>
25131 #include <linux/ptrace.h>
25132 #include <linux/device.h>
25133 #include <linux/highmem.h>
25134@@ -34,6 +35,10 @@
25135 # include <linux/efi.h>
25136 #endif
25137
25138+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25139+extern struct file_operations grsec_fops;
25140+#endif
25141+
25142 static inline unsigned long size_inside_page(unsigned long start,
25143 unsigned long size)
25144 {
25145@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25146
25147 while (cursor < to) {
25148 if (!devmem_is_allowed(pfn)) {
25149+#ifdef CONFIG_GRKERNSEC_KMEM
25150+ gr_handle_mem_readwrite(from, to);
25151+#else
25152 printk(KERN_INFO
25153 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25154 current->comm, from, to);
25155+#endif
25156 return 0;
25157 }
25158 cursor += PAGE_SIZE;
25159@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25160 }
25161 return 1;
25162 }
25163+#elif defined(CONFIG_GRKERNSEC_KMEM)
25164+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25165+{
25166+ return 0;
25167+}
25168 #else
25169 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25170 {
25171@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25172
25173 while (count > 0) {
25174 unsigned long remaining;
25175+ char *temp;
25176
25177 sz = size_inside_page(p, count);
25178
25179@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25180 if (!ptr)
25181 return -EFAULT;
25182
25183- remaining = copy_to_user(buf, ptr, sz);
25184+#ifdef CONFIG_PAX_USERCOPY
25185+ temp = kmalloc(sz, GFP_KERNEL);
25186+ if (!temp) {
25187+ unxlate_dev_mem_ptr(p, ptr);
25188+ return -ENOMEM;
25189+ }
25190+ memcpy(temp, ptr, sz);
25191+#else
25192+ temp = ptr;
25193+#endif
25194+
25195+ remaining = copy_to_user(buf, temp, sz);
25196+
25197+#ifdef CONFIG_PAX_USERCOPY
25198+ kfree(temp);
25199+#endif
25200+
25201 unxlate_dev_mem_ptr(p, ptr);
25202 if (remaining)
25203 return -EFAULT;
25204@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25205 size_t count, loff_t *ppos)
25206 {
25207 unsigned long p = *ppos;
25208- ssize_t low_count, read, sz;
25209+ ssize_t low_count, read, sz, err = 0;
25210 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25211- int err = 0;
25212
25213 read = 0;
25214 if (p < (unsigned long) high_memory) {
25215@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25216 }
25217 #endif
25218 while (low_count > 0) {
25219+ char *temp;
25220+
25221 sz = size_inside_page(p, low_count);
25222
25223 /*
25224@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25225 */
25226 kbuf = xlate_dev_kmem_ptr((char *)p);
25227
25228- if (copy_to_user(buf, kbuf, sz))
25229+#ifdef CONFIG_PAX_USERCOPY
25230+ temp = kmalloc(sz, GFP_KERNEL);
25231+ if (!temp)
25232+ return -ENOMEM;
25233+ memcpy(temp, kbuf, sz);
25234+#else
25235+ temp = kbuf;
25236+#endif
25237+
25238+ err = copy_to_user(buf, temp, sz);
25239+
25240+#ifdef CONFIG_PAX_USERCOPY
25241+ kfree(temp);
25242+#endif
25243+
25244+ if (err)
25245 return -EFAULT;
25246 buf += sz;
25247 p += sz;
25248@@ -866,6 +913,9 @@ static const struct memdev {
25249 #ifdef CONFIG_CRASH_DUMP
25250 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25251 #endif
25252+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25253+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25254+#endif
25255 };
25256
25257 static int memory_open(struct inode *inode, struct file *filp)
25258diff -urNp linux-3.0.7/drivers/char/nvram.c linux-3.0.7/drivers/char/nvram.c
25259--- linux-3.0.7/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25260+++ linux-3.0.7/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25261@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25262
25263 spin_unlock_irq(&rtc_lock);
25264
25265- if (copy_to_user(buf, contents, tmp - contents))
25266+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25267 return -EFAULT;
25268
25269 *ppos = i;
25270diff -urNp linux-3.0.7/drivers/char/random.c linux-3.0.7/drivers/char/random.c
25271--- linux-3.0.7/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
25272+++ linux-3.0.7/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25273@@ -261,8 +261,13 @@
25274 /*
25275 * Configuration information
25276 */
25277+#ifdef CONFIG_GRKERNSEC_RANDNET
25278+#define INPUT_POOL_WORDS 512
25279+#define OUTPUT_POOL_WORDS 128
25280+#else
25281 #define INPUT_POOL_WORDS 128
25282 #define OUTPUT_POOL_WORDS 32
25283+#endif
25284 #define SEC_XFER_SIZE 512
25285 #define EXTRACT_SIZE 10
25286
25287@@ -300,10 +305,17 @@ static struct poolinfo {
25288 int poolwords;
25289 int tap1, tap2, tap3, tap4, tap5;
25290 } poolinfo_table[] = {
25291+#ifdef CONFIG_GRKERNSEC_RANDNET
25292+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25293+ { 512, 411, 308, 208, 104, 1 },
25294+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25295+ { 128, 103, 76, 51, 25, 1 },
25296+#else
25297 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25298 { 128, 103, 76, 51, 25, 1 },
25299 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25300 { 32, 26, 20, 14, 7, 1 },
25301+#endif
25302 #if 0
25303 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25304 { 2048, 1638, 1231, 819, 411, 1 },
25305@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25306
25307 extract_buf(r, tmp);
25308 i = min_t(int, nbytes, EXTRACT_SIZE);
25309- if (copy_to_user(buf, tmp, i)) {
25310+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25311 ret = -EFAULT;
25312 break;
25313 }
25314@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25315 #include <linux/sysctl.h>
25316
25317 static int min_read_thresh = 8, min_write_thresh;
25318-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25319+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25320 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25321 static char sysctl_bootid[16];
25322
25323diff -urNp linux-3.0.7/drivers/char/sonypi.c linux-3.0.7/drivers/char/sonypi.c
25324--- linux-3.0.7/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25325+++ linux-3.0.7/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25326@@ -55,6 +55,7 @@
25327 #include <asm/uaccess.h>
25328 #include <asm/io.h>
25329 #include <asm/system.h>
25330+#include <asm/local.h>
25331
25332 #include <linux/sonypi.h>
25333
25334@@ -491,7 +492,7 @@ static struct sonypi_device {
25335 spinlock_t fifo_lock;
25336 wait_queue_head_t fifo_proc_list;
25337 struct fasync_struct *fifo_async;
25338- int open_count;
25339+ local_t open_count;
25340 int model;
25341 struct input_dev *input_jog_dev;
25342 struct input_dev *input_key_dev;
25343@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25344 static int sonypi_misc_release(struct inode *inode, struct file *file)
25345 {
25346 mutex_lock(&sonypi_device.lock);
25347- sonypi_device.open_count--;
25348+ local_dec(&sonypi_device.open_count);
25349 mutex_unlock(&sonypi_device.lock);
25350 return 0;
25351 }
25352@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25353 {
25354 mutex_lock(&sonypi_device.lock);
25355 /* Flush input queue on first open */
25356- if (!sonypi_device.open_count)
25357+ if (!local_read(&sonypi_device.open_count))
25358 kfifo_reset(&sonypi_device.fifo);
25359- sonypi_device.open_count++;
25360+ local_inc(&sonypi_device.open_count);
25361 mutex_unlock(&sonypi_device.lock);
25362
25363 return 0;
25364diff -urNp linux-3.0.7/drivers/char/tpm/tpm.c linux-3.0.7/drivers/char/tpm/tpm.c
25365--- linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:54:53.000000000 -0400
25366+++ linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:55:27.000000000 -0400
25367@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25368 chip->vendor.req_complete_val)
25369 goto out_recv;
25370
25371- if ((status == chip->vendor.req_canceled)) {
25372+ if (status == chip->vendor.req_canceled) {
25373 dev_err(chip->dev, "Operation Canceled\n");
25374 rc = -ECANCELED;
25375 goto out;
25376@@ -847,6 +847,8 @@ ssize_t tpm_show_pubek(struct device *de
25377
25378 struct tpm_chip *chip = dev_get_drvdata(dev);
25379
25380+ pax_track_stack();
25381+
25382 tpm_cmd.header.in = tpm_readpubek_header;
25383 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25384 "attempting to read the PUBEK");
25385diff -urNp linux-3.0.7/drivers/char/tpm/tpm_bios.c linux-3.0.7/drivers/char/tpm/tpm_bios.c
25386--- linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25387+++ linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25388@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25389 event = addr;
25390
25391 if ((event->event_type == 0 && event->event_size == 0) ||
25392- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25393+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25394 return NULL;
25395
25396 return addr;
25397@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25398 return NULL;
25399
25400 if ((event->event_type == 0 && event->event_size == 0) ||
25401- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25402+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25403 return NULL;
25404
25405 (*pos)++;
25406@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25407 int i;
25408
25409 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25410- seq_putc(m, data[i]);
25411+ if (!seq_putc(m, data[i]))
25412+ return -EFAULT;
25413
25414 return 0;
25415 }
25416@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25417 log->bios_event_log_end = log->bios_event_log + len;
25418
25419 virt = acpi_os_map_memory(start, len);
25420+ if (!virt) {
25421+ kfree(log->bios_event_log);
25422+ log->bios_event_log = NULL;
25423+ return -EFAULT;
25424+ }
25425
25426- memcpy(log->bios_event_log, virt, len);
25427+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25428
25429 acpi_os_unmap_memory(virt, len);
25430 return 0;
25431diff -urNp linux-3.0.7/drivers/char/virtio_console.c linux-3.0.7/drivers/char/virtio_console.c
25432--- linux-3.0.7/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25433+++ linux-3.0.7/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25434@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25435 if (to_user) {
25436 ssize_t ret;
25437
25438- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25439+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25440 if (ret)
25441 return -EFAULT;
25442 } else {
25443@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25444 if (!port_has_data(port) && !port->host_connected)
25445 return 0;
25446
25447- return fill_readbuf(port, ubuf, count, true);
25448+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25449 }
25450
25451 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25452diff -urNp linux-3.0.7/drivers/crypto/hifn_795x.c linux-3.0.7/drivers/crypto/hifn_795x.c
25453--- linux-3.0.7/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25454+++ linux-3.0.7/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25455@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25456 0xCA, 0x34, 0x2B, 0x2E};
25457 struct scatterlist sg;
25458
25459+ pax_track_stack();
25460+
25461 memset(src, 0, sizeof(src));
25462 memset(ctx.key, 0, sizeof(ctx.key));
25463
25464diff -urNp linux-3.0.7/drivers/crypto/padlock-aes.c linux-3.0.7/drivers/crypto/padlock-aes.c
25465--- linux-3.0.7/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25466+++ linux-3.0.7/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25467@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25468 struct crypto_aes_ctx gen_aes;
25469 int cpu;
25470
25471+ pax_track_stack();
25472+
25473 if (key_len % 8) {
25474 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25475 return -EINVAL;
25476diff -urNp linux-3.0.7/drivers/dma/ioat/dma_v3.c linux-3.0.7/drivers/dma/ioat/dma_v3.c
25477--- linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-07-21 22:17:23.000000000 -0400
25478+++ linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-10-11 10:44:33.000000000 -0400
25479@@ -73,10 +73,10 @@
25480 /* provide a lookup table for setting the source address in the base or
25481 * extended descriptor of an xor or pq descriptor
25482 */
25483-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
25484-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
25485-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
25486-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
25487+static const u8 xor_idx_to_desc = 0xd0;
25488+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
25489+static const u8 pq_idx_to_desc = 0xf8;
25490+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
25491
25492 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
25493 {
25494diff -urNp linux-3.0.7/drivers/edac/amd64_edac.c linux-3.0.7/drivers/edac/amd64_edac.c
25495--- linux-3.0.7/drivers/edac/amd64_edac.c 2011-07-21 22:17:23.000000000 -0400
25496+++ linux-3.0.7/drivers/edac/amd64_edac.c 2011-10-11 10:44:33.000000000 -0400
25497@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25498 * PCI core identifies what devices are on a system during boot, and then
25499 * inquiry this table to see if this driver is for a given device found.
25500 */
25501-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25502+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25503 {
25504 .vendor = PCI_VENDOR_ID_AMD,
25505 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25506diff -urNp linux-3.0.7/drivers/edac/amd76x_edac.c linux-3.0.7/drivers/edac/amd76x_edac.c
25507--- linux-3.0.7/drivers/edac/amd76x_edac.c 2011-07-21 22:17:23.000000000 -0400
25508+++ linux-3.0.7/drivers/edac/amd76x_edac.c 2011-10-11 10:44:33.000000000 -0400
25509@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25510 edac_mc_free(mci);
25511 }
25512
25513-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25514+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25515 {
25516 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25517 AMD762},
25518diff -urNp linux-3.0.7/drivers/edac/e752x_edac.c linux-3.0.7/drivers/edac/e752x_edac.c
25519--- linux-3.0.7/drivers/edac/e752x_edac.c 2011-07-21 22:17:23.000000000 -0400
25520+++ linux-3.0.7/drivers/edac/e752x_edac.c 2011-10-11 10:44:33.000000000 -0400
25521@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25522 edac_mc_free(mci);
25523 }
25524
25525-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25526+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25527 {
25528 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25529 E7520},
25530diff -urNp linux-3.0.7/drivers/edac/e7xxx_edac.c linux-3.0.7/drivers/edac/e7xxx_edac.c
25531--- linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-07-21 22:17:23.000000000 -0400
25532+++ linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-10-11 10:44:33.000000000 -0400
25533@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25534 edac_mc_free(mci);
25535 }
25536
25537-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25538+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25539 {
25540 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25541 E7205},
25542diff -urNp linux-3.0.7/drivers/edac/edac_pci_sysfs.c linux-3.0.7/drivers/edac/edac_pci_sysfs.c
25543--- linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25544+++ linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25545@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25546 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25547 static int edac_pci_poll_msec = 1000; /* one second workq period */
25548
25549-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25550-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25551+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25552+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25553
25554 static struct kobject *edac_pci_top_main_kobj;
25555 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25556@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25557 edac_printk(KERN_CRIT, EDAC_PCI,
25558 "Signaled System Error on %s\n",
25559 pci_name(dev));
25560- atomic_inc(&pci_nonparity_count);
25561+ atomic_inc_unchecked(&pci_nonparity_count);
25562 }
25563
25564 if (status & (PCI_STATUS_PARITY)) {
25565@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25566 "Master Data Parity Error on %s\n",
25567 pci_name(dev));
25568
25569- atomic_inc(&pci_parity_count);
25570+ atomic_inc_unchecked(&pci_parity_count);
25571 }
25572
25573 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25574@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25575 "Detected Parity Error on %s\n",
25576 pci_name(dev));
25577
25578- atomic_inc(&pci_parity_count);
25579+ atomic_inc_unchecked(&pci_parity_count);
25580 }
25581 }
25582
25583@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25584 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25585 "Signaled System Error on %s\n",
25586 pci_name(dev));
25587- atomic_inc(&pci_nonparity_count);
25588+ atomic_inc_unchecked(&pci_nonparity_count);
25589 }
25590
25591 if (status & (PCI_STATUS_PARITY)) {
25592@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25593 "Master Data Parity Error on "
25594 "%s\n", pci_name(dev));
25595
25596- atomic_inc(&pci_parity_count);
25597+ atomic_inc_unchecked(&pci_parity_count);
25598 }
25599
25600 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25601@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25602 "Detected Parity Error on %s\n",
25603 pci_name(dev));
25604
25605- atomic_inc(&pci_parity_count);
25606+ atomic_inc_unchecked(&pci_parity_count);
25607 }
25608 }
25609 }
25610@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25611 if (!check_pci_errors)
25612 return;
25613
25614- before_count = atomic_read(&pci_parity_count);
25615+ before_count = atomic_read_unchecked(&pci_parity_count);
25616
25617 /* scan all PCI devices looking for a Parity Error on devices and
25618 * bridges.
25619@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25620 /* Only if operator has selected panic on PCI Error */
25621 if (edac_pci_get_panic_on_pe()) {
25622 /* If the count is different 'after' from 'before' */
25623- if (before_count != atomic_read(&pci_parity_count))
25624+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25625 panic("EDAC: PCI Parity Error");
25626 }
25627 }
25628diff -urNp linux-3.0.7/drivers/edac/i3000_edac.c linux-3.0.7/drivers/edac/i3000_edac.c
25629--- linux-3.0.7/drivers/edac/i3000_edac.c 2011-07-21 22:17:23.000000000 -0400
25630+++ linux-3.0.7/drivers/edac/i3000_edac.c 2011-10-11 10:44:33.000000000 -0400
25631@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25632 edac_mc_free(mci);
25633 }
25634
25635-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25636+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25637 {
25638 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25639 I3000},
25640diff -urNp linux-3.0.7/drivers/edac/i3200_edac.c linux-3.0.7/drivers/edac/i3200_edac.c
25641--- linux-3.0.7/drivers/edac/i3200_edac.c 2011-07-21 22:17:23.000000000 -0400
25642+++ linux-3.0.7/drivers/edac/i3200_edac.c 2011-10-11 10:44:33.000000000 -0400
25643@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25644 edac_mc_free(mci);
25645 }
25646
25647-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25648+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25649 {
25650 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25651 I3200},
25652diff -urNp linux-3.0.7/drivers/edac/i5000_edac.c linux-3.0.7/drivers/edac/i5000_edac.c
25653--- linux-3.0.7/drivers/edac/i5000_edac.c 2011-07-21 22:17:23.000000000 -0400
25654+++ linux-3.0.7/drivers/edac/i5000_edac.c 2011-10-11 10:44:33.000000000 -0400
25655@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25656 *
25657 * The "E500P" device is the first device supported.
25658 */
25659-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25660+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25661 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
25662 .driver_data = I5000P},
25663
25664diff -urNp linux-3.0.7/drivers/edac/i5100_edac.c linux-3.0.7/drivers/edac/i5100_edac.c
25665--- linux-3.0.7/drivers/edac/i5100_edac.c 2011-07-21 22:17:23.000000000 -0400
25666+++ linux-3.0.7/drivers/edac/i5100_edac.c 2011-10-11 10:44:33.000000000 -0400
25667@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
25668 edac_mc_free(mci);
25669 }
25670
25671-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
25672+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
25673 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
25674 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
25675 { 0, }
25676diff -urNp linux-3.0.7/drivers/edac/i5400_edac.c linux-3.0.7/drivers/edac/i5400_edac.c
25677--- linux-3.0.7/drivers/edac/i5400_edac.c 2011-07-21 22:17:23.000000000 -0400
25678+++ linux-3.0.7/drivers/edac/i5400_edac.c 2011-10-11 10:44:33.000000000 -0400
25679@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
25680 *
25681 * The "E500P" device is the first device supported.
25682 */
25683-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
25684+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
25685 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
25686 {0,} /* 0 terminated list. */
25687 };
25688diff -urNp linux-3.0.7/drivers/edac/i7300_edac.c linux-3.0.7/drivers/edac/i7300_edac.c
25689--- linux-3.0.7/drivers/edac/i7300_edac.c 2011-07-21 22:17:23.000000000 -0400
25690+++ linux-3.0.7/drivers/edac/i7300_edac.c 2011-10-11 10:44:33.000000000 -0400
25691@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
25692 *
25693 * Has only 8086:360c PCI ID
25694 */
25695-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
25696+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
25697 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
25698 {0,} /* 0 terminated list. */
25699 };
25700diff -urNp linux-3.0.7/drivers/edac/i7core_edac.c linux-3.0.7/drivers/edac/i7core_edac.c
25701--- linux-3.0.7/drivers/edac/i7core_edac.c 2011-09-02 18:11:26.000000000 -0400
25702+++ linux-3.0.7/drivers/edac/i7core_edac.c 2011-10-11 10:44:33.000000000 -0400
25703@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
25704 /*
25705 * pci_device_id table for which devices we are looking for
25706 */
25707-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
25708+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
25709 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
25710 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
25711 {0,} /* 0 terminated list. */
25712diff -urNp linux-3.0.7/drivers/edac/i82443bxgx_edac.c linux-3.0.7/drivers/edac/i82443bxgx_edac.c
25713--- linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-07-21 22:17:23.000000000 -0400
25714+++ linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-10-11 10:44:33.000000000 -0400
25715@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
25716
25717 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
25718
25719-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
25720+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
25721 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
25722 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
25723 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
25724diff -urNp linux-3.0.7/drivers/edac/i82860_edac.c linux-3.0.7/drivers/edac/i82860_edac.c
25725--- linux-3.0.7/drivers/edac/i82860_edac.c 2011-07-21 22:17:23.000000000 -0400
25726+++ linux-3.0.7/drivers/edac/i82860_edac.c 2011-10-11 10:44:33.000000000 -0400
25727@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
25728 edac_mc_free(mci);
25729 }
25730
25731-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
25732+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
25733 {
25734 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25735 I82860},
25736diff -urNp linux-3.0.7/drivers/edac/i82875p_edac.c linux-3.0.7/drivers/edac/i82875p_edac.c
25737--- linux-3.0.7/drivers/edac/i82875p_edac.c 2011-07-21 22:17:23.000000000 -0400
25738+++ linux-3.0.7/drivers/edac/i82875p_edac.c 2011-10-11 10:44:33.000000000 -0400
25739@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
25740 edac_mc_free(mci);
25741 }
25742
25743-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
25744+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
25745 {
25746 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25747 I82875P},
25748diff -urNp linux-3.0.7/drivers/edac/i82975x_edac.c linux-3.0.7/drivers/edac/i82975x_edac.c
25749--- linux-3.0.7/drivers/edac/i82975x_edac.c 2011-07-21 22:17:23.000000000 -0400
25750+++ linux-3.0.7/drivers/edac/i82975x_edac.c 2011-10-11 10:44:33.000000000 -0400
25751@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
25752 edac_mc_free(mci);
25753 }
25754
25755-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
25756+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
25757 {
25758 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25759 I82975X
25760diff -urNp linux-3.0.7/drivers/edac/mce_amd.h linux-3.0.7/drivers/edac/mce_amd.h
25761--- linux-3.0.7/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25762+++ linux-3.0.7/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25763@@ -83,7 +83,7 @@ struct amd_decoder_ops {
25764 bool (*dc_mce)(u16, u8);
25765 bool (*ic_mce)(u16, u8);
25766 bool (*nb_mce)(u16, u8);
25767-};
25768+} __no_const;
25769
25770 void amd_report_gart_errors(bool);
25771 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25772diff -urNp linux-3.0.7/drivers/edac/r82600_edac.c linux-3.0.7/drivers/edac/r82600_edac.c
25773--- linux-3.0.7/drivers/edac/r82600_edac.c 2011-07-21 22:17:23.000000000 -0400
25774+++ linux-3.0.7/drivers/edac/r82600_edac.c 2011-10-11 10:44:33.000000000 -0400
25775@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
25776 edac_mc_free(mci);
25777 }
25778
25779-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
25780+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
25781 {
25782 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
25783 },
25784diff -urNp linux-3.0.7/drivers/edac/x38_edac.c linux-3.0.7/drivers/edac/x38_edac.c
25785--- linux-3.0.7/drivers/edac/x38_edac.c 2011-07-21 22:17:23.000000000 -0400
25786+++ linux-3.0.7/drivers/edac/x38_edac.c 2011-10-11 10:44:33.000000000 -0400
25787@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
25788 edac_mc_free(mci);
25789 }
25790
25791-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
25792+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
25793 {
25794 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25795 X38},
25796diff -urNp linux-3.0.7/drivers/firewire/core-card.c linux-3.0.7/drivers/firewire/core-card.c
25797--- linux-3.0.7/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25798+++ linux-3.0.7/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25799@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25800
25801 void fw_core_remove_card(struct fw_card *card)
25802 {
25803- struct fw_card_driver dummy_driver = dummy_driver_template;
25804+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
25805
25806 card->driver->update_phy_reg(card, 4,
25807 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25808diff -urNp linux-3.0.7/drivers/firewire/core-cdev.c linux-3.0.7/drivers/firewire/core-cdev.c
25809--- linux-3.0.7/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25810+++ linux-3.0.7/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25811@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25812 int ret;
25813
25814 if ((request->channels == 0 && request->bandwidth == 0) ||
25815- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25816- request->bandwidth < 0)
25817+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25818 return -EINVAL;
25819
25820 r = kmalloc(sizeof(*r), GFP_KERNEL);
25821diff -urNp linux-3.0.7/drivers/firewire/core-transaction.c linux-3.0.7/drivers/firewire/core-transaction.c
25822--- linux-3.0.7/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25823+++ linux-3.0.7/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25824@@ -37,6 +37,7 @@
25825 #include <linux/timer.h>
25826 #include <linux/types.h>
25827 #include <linux/workqueue.h>
25828+#include <linux/sched.h>
25829
25830 #include <asm/byteorder.h>
25831
25832@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25833 struct transaction_callback_data d;
25834 struct fw_transaction t;
25835
25836+ pax_track_stack();
25837+
25838 init_timer_on_stack(&t.split_timeout_timer);
25839 init_completion(&d.done);
25840 d.payload = payload;
25841diff -urNp linux-3.0.7/drivers/firewire/core.h linux-3.0.7/drivers/firewire/core.h
25842--- linux-3.0.7/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25843+++ linux-3.0.7/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25844@@ -101,6 +101,7 @@ struct fw_card_driver {
25845
25846 int (*stop_iso)(struct fw_iso_context *ctx);
25847 };
25848+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25849
25850 void fw_card_initialize(struct fw_card *card,
25851 const struct fw_card_driver *driver, struct device *device);
25852diff -urNp linux-3.0.7/drivers/firmware/dmi_scan.c linux-3.0.7/drivers/firmware/dmi_scan.c
25853--- linux-3.0.7/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25854+++ linux-3.0.7/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25855@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25856 }
25857 }
25858 else {
25859- /*
25860- * no iounmap() for that ioremap(); it would be a no-op, but
25861- * it's so early in setup that sucker gets confused into doing
25862- * what it shouldn't if we actually call it.
25863- */
25864 p = dmi_ioremap(0xF0000, 0x10000);
25865 if (p == NULL)
25866 goto error;
25867@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25868 if (buf == NULL)
25869 return -1;
25870
25871- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25872+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25873
25874 iounmap(buf);
25875 return 0;
25876diff -urNp linux-3.0.7/drivers/gpio/vr41xx_giu.c linux-3.0.7/drivers/gpio/vr41xx_giu.c
25877--- linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25878+++ linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25879@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25880 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25881 maskl, pendl, maskh, pendh);
25882
25883- atomic_inc(&irq_err_count);
25884+ atomic_inc_unchecked(&irq_err_count);
25885
25886 return -EINVAL;
25887 }
25888diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc.c linux-3.0.7/drivers/gpu/drm/drm_crtc.c
25889--- linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25890+++ linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25891@@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25892 */
25893 if ((out_resp->count_modes >= mode_count) && mode_count) {
25894 copied = 0;
25895- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25896+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25897 list_for_each_entry(mode, &connector->modes, head) {
25898 drm_crtc_convert_to_umode(&u_mode, mode);
25899 if (copy_to_user(mode_ptr + copied,
25900@@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25901
25902 if ((out_resp->count_props >= props_count) && props_count) {
25903 copied = 0;
25904- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25905- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25906+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25907+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25908 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25909 if (connector->property_ids[i] != 0) {
25910 if (put_user(connector->property_ids[i],
25911@@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25912
25913 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25914 copied = 0;
25915- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25916+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25917 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25918 if (connector->encoder_ids[i] != 0) {
25919 if (put_user(connector->encoder_ids[i],
25920@@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25921 }
25922
25923 for (i = 0; i < crtc_req->count_connectors; i++) {
25924- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25925+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25926 if (get_user(out_id, &set_connectors_ptr[i])) {
25927 ret = -EFAULT;
25928 goto out;
25929@@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25930 fb = obj_to_fb(obj);
25931
25932 num_clips = r->num_clips;
25933- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25934+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25935
25936 if (!num_clips != !clips_ptr) {
25937 ret = -EINVAL;
25938@@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25939 out_resp->flags = property->flags;
25940
25941 if ((out_resp->count_values >= value_count) && value_count) {
25942- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25943+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25944 for (i = 0; i < value_count; i++) {
25945 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25946 ret = -EFAULT;
25947@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25948 if (property->flags & DRM_MODE_PROP_ENUM) {
25949 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25950 copied = 0;
25951- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25952+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25953 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25954
25955 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25956@@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25957 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25958 copied = 0;
25959 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25960- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25961+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25962
25963 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25964 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25965@@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25966 struct drm_mode_get_blob *out_resp = data;
25967 struct drm_property_blob *blob;
25968 int ret = 0;
25969- void *blob_ptr;
25970+ void __user *blob_ptr;
25971
25972 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25973 return -EINVAL;
25974@@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25975 blob = obj_to_blob(obj);
25976
25977 if (out_resp->length == blob->length) {
25978- blob_ptr = (void *)(unsigned long)out_resp->data;
25979+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
25980 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25981 ret = -EFAULT;
25982 goto done;
25983diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c
25984--- linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25985+++ linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25986@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25987 struct drm_crtc *tmp;
25988 int crtc_mask = 1;
25989
25990- WARN(!crtc, "checking null crtc?\n");
25991+ BUG_ON(!crtc);
25992
25993 dev = crtc->dev;
25994
25995@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25996 struct drm_encoder *encoder;
25997 bool ret = true;
25998
25999+ pax_track_stack();
26000+
26001 crtc->enabled = drm_helper_crtc_in_use(crtc);
26002 if (!crtc->enabled)
26003 return true;
26004diff -urNp linux-3.0.7/drivers/gpu/drm/drm_drv.c linux-3.0.7/drivers/gpu/drm/drm_drv.c
26005--- linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
26006+++ linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
26007@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26008 /**
26009 * Copy and IOCTL return string to user space
26010 */
26011-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26012+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26013 {
26014 int len;
26015
26016@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26017
26018 dev = file_priv->minor->dev;
26019 atomic_inc(&dev->ioctl_count);
26020- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26021+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26022 ++file_priv->ioctl_count;
26023
26024 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26025diff -urNp linux-3.0.7/drivers/gpu/drm/drm_fops.c linux-3.0.7/drivers/gpu/drm/drm_fops.c
26026--- linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
26027+++ linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
26028@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26029 }
26030
26031 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26032- atomic_set(&dev->counts[i], 0);
26033+ atomic_set_unchecked(&dev->counts[i], 0);
26034
26035 dev->sigdata.lock = NULL;
26036
26037@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26038
26039 retcode = drm_open_helper(inode, filp, dev);
26040 if (!retcode) {
26041- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26042- if (!dev->open_count++)
26043+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26044+ if (local_inc_return(&dev->open_count) == 1)
26045 retcode = drm_setup(dev);
26046 }
26047 if (!retcode) {
26048@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26049
26050 mutex_lock(&drm_global_mutex);
26051
26052- DRM_DEBUG("open_count = %d\n", dev->open_count);
26053+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26054
26055 if (dev->driver->preclose)
26056 dev->driver->preclose(dev, file_priv);
26057@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26058 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26059 task_pid_nr(current),
26060 (long)old_encode_dev(file_priv->minor->device),
26061- dev->open_count);
26062+ local_read(&dev->open_count));
26063
26064 /* if the master has gone away we can't do anything with the lock */
26065 if (file_priv->minor->master)
26066@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26067 * End inline drm_release
26068 */
26069
26070- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26071- if (!--dev->open_count) {
26072+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26073+ if (local_dec_and_test(&dev->open_count)) {
26074 if (atomic_read(&dev->ioctl_count)) {
26075 DRM_ERROR("Device busy: %d\n",
26076 atomic_read(&dev->ioctl_count));
26077diff -urNp linux-3.0.7/drivers/gpu/drm/drm_global.c linux-3.0.7/drivers/gpu/drm/drm_global.c
26078--- linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
26079+++ linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
26080@@ -36,7 +36,7 @@
26081 struct drm_global_item {
26082 struct mutex mutex;
26083 void *object;
26084- int refcount;
26085+ atomic_t refcount;
26086 };
26087
26088 static struct drm_global_item glob[DRM_GLOBAL_NUM];
26089@@ -49,7 +49,7 @@ void drm_global_init(void)
26090 struct drm_global_item *item = &glob[i];
26091 mutex_init(&item->mutex);
26092 item->object = NULL;
26093- item->refcount = 0;
26094+ atomic_set(&item->refcount, 0);
26095 }
26096 }
26097
26098@@ -59,7 +59,7 @@ void drm_global_release(void)
26099 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26100 struct drm_global_item *item = &glob[i];
26101 BUG_ON(item->object != NULL);
26102- BUG_ON(item->refcount != 0);
26103+ BUG_ON(atomic_read(&item->refcount) != 0);
26104 }
26105 }
26106
26107@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26108 void *object;
26109
26110 mutex_lock(&item->mutex);
26111- if (item->refcount == 0) {
26112+ if (atomic_read(&item->refcount) == 0) {
26113 item->object = kzalloc(ref->size, GFP_KERNEL);
26114 if (unlikely(item->object == NULL)) {
26115 ret = -ENOMEM;
26116@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26117 goto out_err;
26118
26119 }
26120- ++item->refcount;
26121+ atomic_inc(&item->refcount);
26122 ref->object = item->object;
26123 object = item->object;
26124 mutex_unlock(&item->mutex);
26125@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26126 struct drm_global_item *item = &glob[ref->global_type];
26127
26128 mutex_lock(&item->mutex);
26129- BUG_ON(item->refcount == 0);
26130+ BUG_ON(atomic_read(&item->refcount) == 0);
26131 BUG_ON(ref->object != item->object);
26132- if (--item->refcount == 0) {
26133+ if (atomic_dec_and_test(&item->refcount)) {
26134 ref->release(ref);
26135 item->object = NULL;
26136 }
26137diff -urNp linux-3.0.7/drivers/gpu/drm/drm_info.c linux-3.0.7/drivers/gpu/drm/drm_info.c
26138--- linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
26139+++ linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
26140@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26141 struct drm_local_map *map;
26142 struct drm_map_list *r_list;
26143
26144- /* Hardcoded from _DRM_FRAME_BUFFER,
26145- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26146- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26147- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26148+ static const char * const types[] = {
26149+ [_DRM_FRAME_BUFFER] = "FB",
26150+ [_DRM_REGISTERS] = "REG",
26151+ [_DRM_SHM] = "SHM",
26152+ [_DRM_AGP] = "AGP",
26153+ [_DRM_SCATTER_GATHER] = "SG",
26154+ [_DRM_CONSISTENT] = "PCI",
26155+ [_DRM_GEM] = "GEM" };
26156 const char *type;
26157 int i;
26158
26159@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26160 map = r_list->map;
26161 if (!map)
26162 continue;
26163- if (map->type < 0 || map->type > 5)
26164+ if (map->type >= ARRAY_SIZE(types))
26165 type = "??";
26166 else
26167 type = types[map->type];
26168@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26169 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26170 vma->vm_flags & VM_LOCKED ? 'l' : '-',
26171 vma->vm_flags & VM_IO ? 'i' : '-',
26172+#ifdef CONFIG_GRKERNSEC_HIDESYM
26173+ 0);
26174+#else
26175 vma->vm_pgoff);
26176+#endif
26177
26178 #if defined(__i386__)
26179 pgprot = pgprot_val(vma->vm_page_prot);
26180diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioc32.c linux-3.0.7/drivers/gpu/drm/drm_ioc32.c
26181--- linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26182+++ linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
26183@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26184 request = compat_alloc_user_space(nbytes);
26185 if (!access_ok(VERIFY_WRITE, request, nbytes))
26186 return -EFAULT;
26187- list = (struct drm_buf_desc *) (request + 1);
26188+ list = (struct drm_buf_desc __user *) (request + 1);
26189
26190 if (__put_user(count, &request->count)
26191 || __put_user(list, &request->list))
26192@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26193 request = compat_alloc_user_space(nbytes);
26194 if (!access_ok(VERIFY_WRITE, request, nbytes))
26195 return -EFAULT;
26196- list = (struct drm_buf_pub *) (request + 1);
26197+ list = (struct drm_buf_pub __user *) (request + 1);
26198
26199 if (__put_user(count, &request->count)
26200 || __put_user(list, &request->list))
26201diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioctl.c linux-3.0.7/drivers/gpu/drm/drm_ioctl.c
26202--- linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
26203+++ linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
26204@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26205 stats->data[i].value =
26206 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26207 else
26208- stats->data[i].value = atomic_read(&dev->counts[i]);
26209+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26210 stats->data[i].type = dev->types[i];
26211 }
26212
26213diff -urNp linux-3.0.7/drivers/gpu/drm/drm_lock.c linux-3.0.7/drivers/gpu/drm/drm_lock.c
26214--- linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
26215+++ linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
26216@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26217 if (drm_lock_take(&master->lock, lock->context)) {
26218 master->lock.file_priv = file_priv;
26219 master->lock.lock_time = jiffies;
26220- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26221+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26222 break; /* Got lock */
26223 }
26224
26225@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26226 return -EINVAL;
26227 }
26228
26229- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26230+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26231
26232 if (drm_lock_free(&master->lock, lock->context)) {
26233 /* FIXME: Should really bail out here. */
26234diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c
26235--- linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
26236+++ linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
26237@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26238 dma->buflist[vertex->idx],
26239 vertex->discard, vertex->used);
26240
26241- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26242- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26243+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26244+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26245 sarea_priv->last_enqueue = dev_priv->counter - 1;
26246 sarea_priv->last_dispatch = (int)hw_status[5];
26247
26248@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26249 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26250 mc->last_render);
26251
26252- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26253- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26254+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26255+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26256 sarea_priv->last_enqueue = dev_priv->counter - 1;
26257 sarea_priv->last_dispatch = (int)hw_status[5];
26258
26259diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h
26260--- linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
26261+++ linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
26262@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26263 int page_flipping;
26264
26265 wait_queue_head_t irq_queue;
26266- atomic_t irq_received;
26267- atomic_t irq_emitted;
26268+ atomic_unchecked_t irq_received;
26269+ atomic_unchecked_t irq_emitted;
26270
26271 int front_offset;
26272 } drm_i810_private_t;
26273diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c
26274--- linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
26275+++ linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
26276@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26277 I915_READ(GTIMR));
26278 }
26279 seq_printf(m, "Interrupts received: %d\n",
26280- atomic_read(&dev_priv->irq_received));
26281+ atomic_read_unchecked(&dev_priv->irq_received));
26282 for (i = 0; i < I915_NUM_RINGS; i++) {
26283 if (IS_GEN6(dev)) {
26284 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26285@@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
26286 return ret;
26287
26288 if (opregion->header)
26289- seq_write(m, opregion->header, OPREGION_SIZE);
26290+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26291
26292 mutex_unlock(&dev->struct_mutex);
26293
26294diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c
26295--- linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
26296+++ linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
26297@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
26298 bool can_switch;
26299
26300 spin_lock(&dev->count_lock);
26301- can_switch = (dev->open_count == 0);
26302+ can_switch = (local_read(&dev->open_count) == 0);
26303 spin_unlock(&dev->count_lock);
26304 return can_switch;
26305 }
26306diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h
26307--- linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
26308+++ linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
26309@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
26310 /* render clock increase/decrease */
26311 /* display clock increase/decrease */
26312 /* pll clock increase/decrease */
26313-};
26314+} __no_const;
26315
26316 struct intel_device_info {
26317 u8 gen;
26318@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
26319 int current_page;
26320 int page_flipping;
26321
26322- atomic_t irq_received;
26323+ atomic_unchecked_t irq_received;
26324
26325 /* protects the irq masks */
26326 spinlock_t irq_lock;
26327@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
26328 * will be page flipped away on the next vblank. When it
26329 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26330 */
26331- atomic_t pending_flip;
26332+ atomic_unchecked_t pending_flip;
26333 };
26334
26335 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26336@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
26337 extern void intel_teardown_gmbus(struct drm_device *dev);
26338 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26339 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26340-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26341+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26342 {
26343 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26344 }
26345diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26346--- linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
26347+++ linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
26348@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26349 i915_gem_clflush_object(obj);
26350
26351 if (obj->base.pending_write_domain)
26352- cd->flips |= atomic_read(&obj->pending_flip);
26353+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26354
26355 /* The actual obj->write_domain will be updated with
26356 * pending_write_domain after we emit the accumulated flush for all
26357diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c
26358--- linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
26359+++ linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
26360@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
26361 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26362 struct drm_i915_master_private *master_priv;
26363
26364- atomic_inc(&dev_priv->irq_received);
26365+ atomic_inc_unchecked(&dev_priv->irq_received);
26366
26367 /* disable master interrupt before clearing iir */
26368 de_ier = I915_READ(DEIER);
26369@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
26370 struct drm_i915_master_private *master_priv;
26371 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26372
26373- atomic_inc(&dev_priv->irq_received);
26374+ atomic_inc_unchecked(&dev_priv->irq_received);
26375
26376 if (IS_GEN6(dev))
26377 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26378@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
26379 int ret = IRQ_NONE, pipe;
26380 bool blc_event = false;
26381
26382- atomic_inc(&dev_priv->irq_received);
26383+ atomic_inc_unchecked(&dev_priv->irq_received);
26384
26385 iir = I915_READ(IIR);
26386
26387@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
26388 {
26389 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26390
26391- atomic_set(&dev_priv->irq_received, 0);
26392+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26393
26394 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26395 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26396@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
26397 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26398 int pipe;
26399
26400- atomic_set(&dev_priv->irq_received, 0);
26401+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26402
26403 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26404 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26405diff -urNp linux-3.0.7/drivers/gpu/drm/i915/intel_display.c linux-3.0.7/drivers/gpu/drm/i915/intel_display.c
26406--- linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
26407+++ linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
26408@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26409
26410 wait_event(dev_priv->pending_flip_queue,
26411 atomic_read(&dev_priv->mm.wedged) ||
26412- atomic_read(&obj->pending_flip) == 0);
26413+ atomic_read_unchecked(&obj->pending_flip) == 0);
26414
26415 /* Big Hammer, we also need to ensure that any pending
26416 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26417@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
26418 obj = to_intel_framebuffer(crtc->fb)->obj;
26419 dev_priv = crtc->dev->dev_private;
26420 wait_event(dev_priv->pending_flip_queue,
26421- atomic_read(&obj->pending_flip) == 0);
26422+ atomic_read_unchecked(&obj->pending_flip) == 0);
26423 }
26424
26425 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26426@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
26427
26428 atomic_clear_mask(1 << intel_crtc->plane,
26429 &obj->pending_flip.counter);
26430- if (atomic_read(&obj->pending_flip) == 0)
26431+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26432 wake_up(&dev_priv->pending_flip_queue);
26433
26434 schedule_work(&work->work);
26435@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
26436 /* Block clients from rendering to the new back buffer until
26437 * the flip occurs and the object is no longer visible.
26438 */
26439- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26440+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26441
26442 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26443 if (ret)
26444@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
26445 return 0;
26446
26447 cleanup_pending:
26448- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26449+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26450 cleanup_objs:
26451 drm_gem_object_unreference(&work->old_fb_obj->base);
26452 drm_gem_object_unreference(&obj->base);
26453diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h
26454--- linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
26455+++ linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
26456@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26457 u32 clear_cmd;
26458 u32 maccess;
26459
26460- atomic_t vbl_received; /**< Number of vblanks received. */
26461+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26462 wait_queue_head_t fence_queue;
26463- atomic_t last_fence_retired;
26464+ atomic_unchecked_t last_fence_retired;
26465 u32 next_fence_to_post;
26466
26467 unsigned int fb_cpp;
26468diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c
26469--- linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26470+++ linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26471@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26472 if (crtc != 0)
26473 return 0;
26474
26475- return atomic_read(&dev_priv->vbl_received);
26476+ return atomic_read_unchecked(&dev_priv->vbl_received);
26477 }
26478
26479
26480@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26481 /* VBLANK interrupt */
26482 if (status & MGA_VLINEPEN) {
26483 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26484- atomic_inc(&dev_priv->vbl_received);
26485+ atomic_inc_unchecked(&dev_priv->vbl_received);
26486 drm_handle_vblank(dev, 0);
26487 handled = 1;
26488 }
26489@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26490 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26491 MGA_WRITE(MGA_PRIMEND, prim_end);
26492
26493- atomic_inc(&dev_priv->last_fence_retired);
26494+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26495 DRM_WAKEUP(&dev_priv->fence_queue);
26496 handled = 1;
26497 }
26498@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26499 * using fences.
26500 */
26501 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26502- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26503+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26504 - *sequence) <= (1 << 23)));
26505
26506 *sequence = cur_fence;
26507diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c
26508--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26509+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26510@@ -200,7 +200,7 @@ struct methods {
26511 const char desc[8];
26512 void (*loadbios)(struct drm_device *, uint8_t *);
26513 const bool rw;
26514-};
26515+} __do_const;
26516
26517 static struct methods shadow_methods[] = {
26518 { "PRAMIN", load_vbios_pramin, true },
26519@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26520 struct bit_table {
26521 const char id;
26522 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26523-};
26524+} __no_const;
26525
26526 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26527
26528diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h
26529--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26530+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26531@@ -227,7 +227,7 @@ struct nouveau_channel {
26532 struct list_head pending;
26533 uint32_t sequence;
26534 uint32_t sequence_ack;
26535- atomic_t last_sequence_irq;
26536+ atomic_unchecked_t last_sequence_irq;
26537 } fence;
26538
26539 /* DMA push buffer */
26540@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26541 u32 handle, u16 class);
26542 void (*set_tile_region)(struct drm_device *dev, int i);
26543 void (*tlb_flush)(struct drm_device *, int engine);
26544-};
26545+} __no_const;
26546
26547 struct nouveau_instmem_engine {
26548 void *priv;
26549@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26550 struct nouveau_mc_engine {
26551 int (*init)(struct drm_device *dev);
26552 void (*takedown)(struct drm_device *dev);
26553-};
26554+} __no_const;
26555
26556 struct nouveau_timer_engine {
26557 int (*init)(struct drm_device *dev);
26558 void (*takedown)(struct drm_device *dev);
26559 uint64_t (*read)(struct drm_device *dev);
26560-};
26561+} __no_const;
26562
26563 struct nouveau_fb_engine {
26564 int num_tiles;
26565@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26566 void (*put)(struct drm_device *, struct nouveau_mem **);
26567
26568 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26569-};
26570+} __no_const;
26571
26572 struct nouveau_engine {
26573 struct nouveau_instmem_engine instmem;
26574@@ -640,7 +640,7 @@ struct drm_nouveau_private {
26575 struct drm_global_reference mem_global_ref;
26576 struct ttm_bo_global_ref bo_global_ref;
26577 struct ttm_bo_device bdev;
26578- atomic_t validate_sequence;
26579+ atomic_unchecked_t validate_sequence;
26580 } ttm;
26581
26582 struct {
26583diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c
26584--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26585+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26586@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26587 if (USE_REFCNT(dev))
26588 sequence = nvchan_rd32(chan, 0x48);
26589 else
26590- sequence = atomic_read(&chan->fence.last_sequence_irq);
26591+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26592
26593 if (chan->fence.sequence_ack == sequence)
26594 goto out;
26595@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26596
26597 INIT_LIST_HEAD(&chan->fence.pending);
26598 spin_lock_init(&chan->fence.lock);
26599- atomic_set(&chan->fence.last_sequence_irq, 0);
26600+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26601 return 0;
26602 }
26603
26604diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c
26605--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26606+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26607@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26608 int trycnt = 0;
26609 int ret, i;
26610
26611- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26612+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26613 retry:
26614 if (++trycnt > 100000) {
26615 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26616diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c
26617--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
26618+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
26619@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
26620 bool can_switch;
26621
26622 spin_lock(&dev->count_lock);
26623- can_switch = (dev->open_count == 0);
26624+ can_switch = (local_read(&dev->open_count) == 0);
26625 spin_unlock(&dev->count_lock);
26626 return can_switch;
26627 }
26628diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c
26629--- linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
26630+++ linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
26631@@ -560,7 +560,7 @@ static int
26632 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26633 u32 class, u32 mthd, u32 data)
26634 {
26635- atomic_set(&chan->fence.last_sequence_irq, data);
26636+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26637 return 0;
26638 }
26639
26640diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c
26641--- linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
26642+++ linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
26643@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26644
26645 /* GH: Simple idle check.
26646 */
26647- atomic_set(&dev_priv->idle_count, 0);
26648+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26649
26650 /* We don't support anything other than bus-mastering ring mode,
26651 * but the ring can be in either AGP or PCI space for the ring
26652diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h
26653--- linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
26654+++ linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
26655@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26656 int is_pci;
26657 unsigned long cce_buffers_offset;
26658
26659- atomic_t idle_count;
26660+ atomic_unchecked_t idle_count;
26661
26662 int page_flipping;
26663 int current_page;
26664 u32 crtc_offset;
26665 u32 crtc_offset_cntl;
26666
26667- atomic_t vbl_received;
26668+ atomic_unchecked_t vbl_received;
26669
26670 u32 color_fmt;
26671 unsigned int front_offset;
26672diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c
26673--- linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
26674+++ linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
26675@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
26676 if (crtc != 0)
26677 return 0;
26678
26679- return atomic_read(&dev_priv->vbl_received);
26680+ return atomic_read_unchecked(&dev_priv->vbl_received);
26681 }
26682
26683 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
26684@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
26685 /* VBLANK interrupt */
26686 if (status & R128_CRTC_VBLANK_INT) {
26687 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
26688- atomic_inc(&dev_priv->vbl_received);
26689+ atomic_inc_unchecked(&dev_priv->vbl_received);
26690 drm_handle_vblank(dev, 0);
26691 return IRQ_HANDLED;
26692 }
26693diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_state.c linux-3.0.7/drivers/gpu/drm/r128/r128_state.c
26694--- linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
26695+++ linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
26696@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
26697
26698 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
26699 {
26700- if (atomic_read(&dev_priv->idle_count) == 0)
26701+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
26702 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
26703 else
26704- atomic_set(&dev_priv->idle_count, 0);
26705+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26706 }
26707
26708 #endif
26709diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/atom.c linux-3.0.7/drivers/gpu/drm/radeon/atom.c
26710--- linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
26711+++ linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
26712@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26713 char name[512];
26714 int i;
26715
26716+ pax_track_stack();
26717+
26718 ctx->card = card;
26719 ctx->bios = bios;
26720
26721diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c
26722--- linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26723+++ linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26724@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26725 regex_t mask_rex;
26726 regmatch_t match[4];
26727 char buf[1024];
26728- size_t end;
26729+ long end;
26730 int len;
26731 int done = 0;
26732 int r;
26733 unsigned o;
26734 struct offset *offset;
26735 char last_reg_s[10];
26736- int last_reg;
26737+ unsigned long last_reg;
26738
26739 if (regcomp
26740 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26741diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon.h linux-3.0.7/drivers/gpu/drm/radeon/radeon.h
26742--- linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:54:53.000000000 -0400
26743+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:55:27.000000000 -0400
26744@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26745 */
26746 struct radeon_fence_driver {
26747 uint32_t scratch_reg;
26748- atomic_t seq;
26749+ atomic_unchecked_t seq;
26750 uint32_t last_seq;
26751 unsigned long last_jiffies;
26752 unsigned long last_timeout;
26753@@ -961,7 +961,7 @@ struct radeon_asic {
26754 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26755 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26756 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26757-};
26758+} __no_const;
26759
26760 /*
26761 * Asic structures
26762diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c
26763--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26764+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26765@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26766 struct radeon_gpio_rec gpio;
26767 struct radeon_hpd hpd;
26768
26769+ pax_track_stack();
26770+
26771 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26772 return false;
26773
26774diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c
26775--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26776+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26777@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26778 bool can_switch;
26779
26780 spin_lock(&dev->count_lock);
26781- can_switch = (dev->open_count == 0);
26782+ can_switch = (local_read(&dev->open_count) == 0);
26783 spin_unlock(&dev->count_lock);
26784 return can_switch;
26785 }
26786diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c
26787--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26788+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26789@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26790 uint32_t post_div;
26791 u32 pll_out_min, pll_out_max;
26792
26793+ pax_track_stack();
26794+
26795 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26796 freq = freq * 1000;
26797
26798diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h
26799--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26800+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26801@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26802
26803 /* SW interrupt */
26804 wait_queue_head_t swi_queue;
26805- atomic_t swi_emitted;
26806+ atomic_unchecked_t swi_emitted;
26807 int vblank_crtc;
26808 uint32_t irq_enable_reg;
26809 uint32_t r500_disp_irq_reg;
26810diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c
26811--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26812+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26813@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26814 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26815 return 0;
26816 }
26817- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26818+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26819 if (!rdev->cp.ready)
26820 /* FIXME: cp is not running assume everythings is done right
26821 * away
26822@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26823 return r;
26824 }
26825 radeon_fence_write(rdev, 0);
26826- atomic_set(&rdev->fence_drv.seq, 0);
26827+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26828 INIT_LIST_HEAD(&rdev->fence_drv.created);
26829 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26830 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26831diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c
26832--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26833+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26834@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26835 request = compat_alloc_user_space(sizeof(*request));
26836 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26837 || __put_user(req32.param, &request->param)
26838- || __put_user((void __user *)(unsigned long)req32.value,
26839+ || __put_user((unsigned long)req32.value,
26840 &request->value))
26841 return -EFAULT;
26842
26843diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c
26844--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26845+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26846@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26847 unsigned int ret;
26848 RING_LOCALS;
26849
26850- atomic_inc(&dev_priv->swi_emitted);
26851- ret = atomic_read(&dev_priv->swi_emitted);
26852+ atomic_inc_unchecked(&dev_priv->swi_emitted);
26853+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26854
26855 BEGIN_RING(4);
26856 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26857@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26858 drm_radeon_private_t *dev_priv =
26859 (drm_radeon_private_t *) dev->dev_private;
26860
26861- atomic_set(&dev_priv->swi_emitted, 0);
26862+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26863 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26864
26865 dev->max_vblank_count = 0x001fffff;
26866diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c
26867--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26868+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26869@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26870 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26871 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26872
26873- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26874+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26875 sarea_priv->nbox * sizeof(depth_boxes[0])))
26876 return -EFAULT;
26877
26878@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26879 {
26880 drm_radeon_private_t *dev_priv = dev->dev_private;
26881 drm_radeon_getparam_t *param = data;
26882- int value;
26883+ int value = 0;
26884
26885 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26886
26887diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c
26888--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:54:53.000000000 -0400
26889+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:55:27.000000000 -0400
26890@@ -649,8 +649,10 @@ int radeon_mmap(struct file *filp, struc
26891 }
26892 if (unlikely(ttm_vm_ops == NULL)) {
26893 ttm_vm_ops = vma->vm_ops;
26894- radeon_ttm_vm_ops = *ttm_vm_ops;
26895- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26896+ pax_open_kernel();
26897+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26898+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26899+ pax_close_kernel();
26900 }
26901 vma->vm_ops = &radeon_ttm_vm_ops;
26902 return 0;
26903diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/rs690.c linux-3.0.7/drivers/gpu/drm/radeon/rs690.c
26904--- linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26905+++ linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26906@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26907 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26908 rdev->pm.sideport_bandwidth.full)
26909 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26910- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26911+ read_delay_latency.full = dfixed_const(800 * 1000);
26912 read_delay_latency.full = dfixed_div(read_delay_latency,
26913 rdev->pm.igp_sideport_mclk);
26914+ a.full = dfixed_const(370);
26915+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26916 } else {
26917 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26918 rdev->pm.k8_bandwidth.full)
26919diff -urNp linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c
26920--- linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26921+++ linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26922@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26923 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26924 struct shrink_control *sc)
26925 {
26926- static atomic_t start_pool = ATOMIC_INIT(0);
26927+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26928 unsigned i;
26929- unsigned pool_offset = atomic_add_return(1, &start_pool);
26930+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26931 struct ttm_page_pool *pool;
26932 int shrink_pages = sc->nr_to_scan;
26933
26934diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_drv.h linux-3.0.7/drivers/gpu/drm/via/via_drv.h
26935--- linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26936+++ linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26937@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26938 typedef uint32_t maskarray_t[5];
26939
26940 typedef struct drm_via_irq {
26941- atomic_t irq_received;
26942+ atomic_unchecked_t irq_received;
26943 uint32_t pending_mask;
26944 uint32_t enable_mask;
26945 wait_queue_head_t irq_queue;
26946@@ -75,7 +75,7 @@ typedef struct drm_via_private {
26947 struct timeval last_vblank;
26948 int last_vblank_valid;
26949 unsigned usec_per_vblank;
26950- atomic_t vbl_received;
26951+ atomic_unchecked_t vbl_received;
26952 drm_via_state_t hc_state;
26953 char pci_buf[VIA_PCI_BUF_SIZE];
26954 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26955diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_irq.c linux-3.0.7/drivers/gpu/drm/via/via_irq.c
26956--- linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26957+++ linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26958@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26959 if (crtc != 0)
26960 return 0;
26961
26962- return atomic_read(&dev_priv->vbl_received);
26963+ return atomic_read_unchecked(&dev_priv->vbl_received);
26964 }
26965
26966 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26967@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26968
26969 status = VIA_READ(VIA_REG_INTERRUPT);
26970 if (status & VIA_IRQ_VBLANK_PENDING) {
26971- atomic_inc(&dev_priv->vbl_received);
26972- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26973+ atomic_inc_unchecked(&dev_priv->vbl_received);
26974+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26975 do_gettimeofday(&cur_vblank);
26976 if (dev_priv->last_vblank_valid) {
26977 dev_priv->usec_per_vblank =
26978@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26979 dev_priv->last_vblank = cur_vblank;
26980 dev_priv->last_vblank_valid = 1;
26981 }
26982- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26983+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26984 DRM_DEBUG("US per vblank is: %u\n",
26985 dev_priv->usec_per_vblank);
26986 }
26987@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26988
26989 for (i = 0; i < dev_priv->num_irqs; ++i) {
26990 if (status & cur_irq->pending_mask) {
26991- atomic_inc(&cur_irq->irq_received);
26992+ atomic_inc_unchecked(&cur_irq->irq_received);
26993 DRM_WAKEUP(&cur_irq->irq_queue);
26994 handled = 1;
26995 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26996@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26997 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26998 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26999 masks[irq][4]));
27000- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27001+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27002 } else {
27003 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27004 (((cur_irq_sequence =
27005- atomic_read(&cur_irq->irq_received)) -
27006+ atomic_read_unchecked(&cur_irq->irq_received)) -
27007 *sequence) <= (1 << 23)));
27008 }
27009 *sequence = cur_irq_sequence;
27010@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27011 }
27012
27013 for (i = 0; i < dev_priv->num_irqs; ++i) {
27014- atomic_set(&cur_irq->irq_received, 0);
27015+ atomic_set_unchecked(&cur_irq->irq_received, 0);
27016 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27017 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27018 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27019@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27020 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27021 case VIA_IRQ_RELATIVE:
27022 irqwait->request.sequence +=
27023- atomic_read(&cur_irq->irq_received);
27024+ atomic_read_unchecked(&cur_irq->irq_received);
27025 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27026 case VIA_IRQ_ABSOLUTE:
27027 break;
27028diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27029--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
27030+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
27031@@ -240,7 +240,7 @@ struct vmw_private {
27032 * Fencing and IRQs.
27033 */
27034
27035- atomic_t fence_seq;
27036+ atomic_unchecked_t fence_seq;
27037 wait_queue_head_t fence_queue;
27038 wait_queue_head_t fifo_queue;
27039 atomic_t fence_queue_waiters;
27040diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27041--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
27042+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
27043@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27044 struct drm_vmw_fence_rep fence_rep;
27045 struct drm_vmw_fence_rep __user *user_fence_rep;
27046 int ret;
27047- void *user_cmd;
27048+ void __user *user_cmd;
27049 void *cmd;
27050 uint32_t sequence;
27051 struct vmw_sw_context *sw_context = &dev_priv->ctx;
27052diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27053--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
27054+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
27055@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27056 while (!vmw_lag_lt(queue, us)) {
27057 spin_lock(&queue->lock);
27058 if (list_empty(&queue->head))
27059- sequence = atomic_read(&dev_priv->fence_seq);
27060+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27061 else {
27062 fence = list_first_entry(&queue->head,
27063 struct vmw_fence, head);
27064diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27065--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
27066+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
27067@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27068 (unsigned int) min,
27069 (unsigned int) fifo->capabilities);
27070
27071- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27072+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27073 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27074 vmw_fence_queue_init(&fifo->fence_queue);
27075 return vmw_fifo_send_fence(dev_priv, &dummy);
27076@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27077 if (reserveable)
27078 iowrite32(bytes, fifo_mem +
27079 SVGA_FIFO_RESERVED);
27080- return fifo_mem + (next_cmd >> 2);
27081+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27082 } else {
27083 need_bounce = true;
27084 }
27085@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27086
27087 fm = vmw_fifo_reserve(dev_priv, bytes);
27088 if (unlikely(fm == NULL)) {
27089- *sequence = atomic_read(&dev_priv->fence_seq);
27090+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27091 ret = -ENOMEM;
27092 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27093 false, 3*HZ);
27094@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27095 }
27096
27097 do {
27098- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27099+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27100 } while (*sequence == 0);
27101
27102 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27103diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27104--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
27105+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
27106@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27107 * emitted. Then the fence is stale and signaled.
27108 */
27109
27110- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27111+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27112 > VMW_FENCE_WRAP);
27113
27114 return ret;
27115@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27116
27117 if (fifo_idle)
27118 down_read(&fifo_state->rwsem);
27119- signal_seq = atomic_read(&dev_priv->fence_seq);
27120+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27121 ret = 0;
27122
27123 for (;;) {
27124diff -urNp linux-3.0.7/drivers/hid/hid-core.c linux-3.0.7/drivers/hid/hid-core.c
27125--- linux-3.0.7/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
27126+++ linux-3.0.7/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
27127@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
27128
27129 int hid_add_device(struct hid_device *hdev)
27130 {
27131- static atomic_t id = ATOMIC_INIT(0);
27132+ static atomic_unchecked_t id = ATOMIC_INIT(0);
27133 int ret;
27134
27135 if (WARN_ON(hdev->status & HID_STAT_ADDED))
27136@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
27137 /* XXX hack, any other cleaner solution after the driver core
27138 * is converted to allow more than 20 bytes as the device name? */
27139 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27140- hdev->vendor, hdev->product, atomic_inc_return(&id));
27141+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27142
27143 hid_debug_register(hdev, dev_name(&hdev->dev));
27144 ret = device_add(&hdev->dev);
27145diff -urNp linux-3.0.7/drivers/hid/usbhid/hiddev.c linux-3.0.7/drivers/hid/usbhid/hiddev.c
27146--- linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
27147+++ linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
27148@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27149 break;
27150
27151 case HIDIOCAPPLICATION:
27152- if (arg < 0 || arg >= hid->maxapplication)
27153+ if (arg >= hid->maxapplication)
27154 break;
27155
27156 for (i = 0; i < hid->maxcollection; i++)
27157diff -urNp linux-3.0.7/drivers/hwmon/acpi_power_meter.c linux-3.0.7/drivers/hwmon/acpi_power_meter.c
27158--- linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
27159+++ linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
27160@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27161 return res;
27162
27163 temp /= 1000;
27164- if (temp < 0)
27165- return -EINVAL;
27166
27167 mutex_lock(&resource->lock);
27168 resource->trip[attr->index - 7] = temp;
27169diff -urNp linux-3.0.7/drivers/hwmon/sht15.c linux-3.0.7/drivers/hwmon/sht15.c
27170--- linux-3.0.7/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
27171+++ linux-3.0.7/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
27172@@ -166,7 +166,7 @@ struct sht15_data {
27173 int supply_uV;
27174 bool supply_uV_valid;
27175 struct work_struct update_supply_work;
27176- atomic_t interrupt_handled;
27177+ atomic_unchecked_t interrupt_handled;
27178 };
27179
27180 /**
27181@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27182 return ret;
27183
27184 gpio_direction_input(data->pdata->gpio_data);
27185- atomic_set(&data->interrupt_handled, 0);
27186+ atomic_set_unchecked(&data->interrupt_handled, 0);
27187
27188 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27189 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27190 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27191 /* Only relevant if the interrupt hasn't occurred. */
27192- if (!atomic_read(&data->interrupt_handled))
27193+ if (!atomic_read_unchecked(&data->interrupt_handled))
27194 schedule_work(&data->read_work);
27195 }
27196 ret = wait_event_timeout(data->wait_queue,
27197@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27198
27199 /* First disable the interrupt */
27200 disable_irq_nosync(irq);
27201- atomic_inc(&data->interrupt_handled);
27202+ atomic_inc_unchecked(&data->interrupt_handled);
27203 /* Then schedule a reading work struct */
27204 if (data->state != SHT15_READING_NOTHING)
27205 schedule_work(&data->read_work);
27206@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27207 * If not, then start the interrupt again - care here as could
27208 * have gone low in meantime so verify it hasn't!
27209 */
27210- atomic_set(&data->interrupt_handled, 0);
27211+ atomic_set_unchecked(&data->interrupt_handled, 0);
27212 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27213 /* If still not occurred or another handler has been scheduled */
27214 if (gpio_get_value(data->pdata->gpio_data)
27215- || atomic_read(&data->interrupt_handled))
27216+ || atomic_read_unchecked(&data->interrupt_handled))
27217 return;
27218 }
27219
27220diff -urNp linux-3.0.7/drivers/hwmon/w83791d.c linux-3.0.7/drivers/hwmon/w83791d.c
27221--- linux-3.0.7/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
27222+++ linux-3.0.7/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
27223@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
27224 struct i2c_board_info *info);
27225 static int w83791d_remove(struct i2c_client *client);
27226
27227-static int w83791d_read(struct i2c_client *client, u8 register);
27228-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
27229+static int w83791d_read(struct i2c_client *client, u8 reg);
27230+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
27231 static struct w83791d_data *w83791d_update_device(struct device *dev);
27232
27233 #ifdef DEBUG
27234diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c
27235--- linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
27236+++ linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
27237@@ -43,7 +43,7 @@
27238 extern struct i2c_adapter amd756_smbus;
27239
27240 static struct i2c_adapter *s4882_adapter;
27241-static struct i2c_algorithm *s4882_algo;
27242+static i2c_algorithm_no_const *s4882_algo;
27243
27244 /* Wrapper access functions for multiplexed SMBus */
27245 static DEFINE_MUTEX(amd756_lock);
27246diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c
27247--- linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
27248+++ linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
27249@@ -41,7 +41,7 @@
27250 extern struct i2c_adapter *nforce2_smbus;
27251
27252 static struct i2c_adapter *s4985_adapter;
27253-static struct i2c_algorithm *s4985_algo;
27254+static i2c_algorithm_no_const *s4985_algo;
27255
27256 /* Wrapper access functions for multiplexed SMBus */
27257 static DEFINE_MUTEX(nforce2_lock);
27258diff -urNp linux-3.0.7/drivers/i2c/i2c-mux.c linux-3.0.7/drivers/i2c/i2c-mux.c
27259--- linux-3.0.7/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
27260+++ linux-3.0.7/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
27261@@ -28,7 +28,7 @@
27262 /* multiplexer per channel data */
27263 struct i2c_mux_priv {
27264 struct i2c_adapter adap;
27265- struct i2c_algorithm algo;
27266+ i2c_algorithm_no_const algo;
27267
27268 struct i2c_adapter *parent;
27269 void *mux_dev; /* the mux chip/device */
27270diff -urNp linux-3.0.7/drivers/ide/aec62xx.c linux-3.0.7/drivers/ide/aec62xx.c
27271--- linux-3.0.7/drivers/ide/aec62xx.c 2011-07-21 22:17:23.000000000 -0400
27272+++ linux-3.0.7/drivers/ide/aec62xx.c 2011-10-11 10:44:33.000000000 -0400
27273@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27274 .cable_detect = atp86x_cable_detect,
27275 };
27276
27277-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27278+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27279 { /* 0: AEC6210 */
27280 .name = DRV_NAME,
27281 .init_chipset = init_chipset_aec62xx,
27282diff -urNp linux-3.0.7/drivers/ide/alim15x3.c linux-3.0.7/drivers/ide/alim15x3.c
27283--- linux-3.0.7/drivers/ide/alim15x3.c 2011-07-21 22:17:23.000000000 -0400
27284+++ linux-3.0.7/drivers/ide/alim15x3.c 2011-10-11 10:44:33.000000000 -0400
27285@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27286 .dma_sff_read_status = ide_dma_sff_read_status,
27287 };
27288
27289-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27290+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27291 .name = DRV_NAME,
27292 .init_chipset = init_chipset_ali15x3,
27293 .init_hwif = init_hwif_ali15x3,
27294diff -urNp linux-3.0.7/drivers/ide/amd74xx.c linux-3.0.7/drivers/ide/amd74xx.c
27295--- linux-3.0.7/drivers/ide/amd74xx.c 2011-07-21 22:17:23.000000000 -0400
27296+++ linux-3.0.7/drivers/ide/amd74xx.c 2011-10-11 10:44:33.000000000 -0400
27297@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27298 .udma_mask = udma, \
27299 }
27300
27301-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27302+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27303 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27304 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27305 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27306diff -urNp linux-3.0.7/drivers/ide/atiixp.c linux-3.0.7/drivers/ide/atiixp.c
27307--- linux-3.0.7/drivers/ide/atiixp.c 2011-07-21 22:17:23.000000000 -0400
27308+++ linux-3.0.7/drivers/ide/atiixp.c 2011-10-11 10:44:33.000000000 -0400
27309@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27310 .cable_detect = atiixp_cable_detect,
27311 };
27312
27313-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27314+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27315 { /* 0: IXP200/300/400/700 */
27316 .name = DRV_NAME,
27317 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27318diff -urNp linux-3.0.7/drivers/ide/cmd64x.c linux-3.0.7/drivers/ide/cmd64x.c
27319--- linux-3.0.7/drivers/ide/cmd64x.c 2011-07-21 22:17:23.000000000 -0400
27320+++ linux-3.0.7/drivers/ide/cmd64x.c 2011-10-11 10:44:33.000000000 -0400
27321@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27322 .dma_sff_read_status = ide_dma_sff_read_status,
27323 };
27324
27325-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27326+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27327 { /* 0: CMD643 */
27328 .name = DRV_NAME,
27329 .init_chipset = init_chipset_cmd64x,
27330diff -urNp linux-3.0.7/drivers/ide/cs5520.c linux-3.0.7/drivers/ide/cs5520.c
27331--- linux-3.0.7/drivers/ide/cs5520.c 2011-07-21 22:17:23.000000000 -0400
27332+++ linux-3.0.7/drivers/ide/cs5520.c 2011-10-11 10:44:33.000000000 -0400
27333@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27334 .set_dma_mode = cs5520_set_dma_mode,
27335 };
27336
27337-static const struct ide_port_info cyrix_chipset __devinitdata = {
27338+static const struct ide_port_info cyrix_chipset __devinitconst = {
27339 .name = DRV_NAME,
27340 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27341 .port_ops = &cs5520_port_ops,
27342diff -urNp linux-3.0.7/drivers/ide/cs5530.c linux-3.0.7/drivers/ide/cs5530.c
27343--- linux-3.0.7/drivers/ide/cs5530.c 2011-07-21 22:17:23.000000000 -0400
27344+++ linux-3.0.7/drivers/ide/cs5530.c 2011-10-11 10:44:33.000000000 -0400
27345@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27346 .udma_filter = cs5530_udma_filter,
27347 };
27348
27349-static const struct ide_port_info cs5530_chipset __devinitdata = {
27350+static const struct ide_port_info cs5530_chipset __devinitconst = {
27351 .name = DRV_NAME,
27352 .init_chipset = init_chipset_cs5530,
27353 .init_hwif = init_hwif_cs5530,
27354diff -urNp linux-3.0.7/drivers/ide/cs5535.c linux-3.0.7/drivers/ide/cs5535.c
27355--- linux-3.0.7/drivers/ide/cs5535.c 2011-07-21 22:17:23.000000000 -0400
27356+++ linux-3.0.7/drivers/ide/cs5535.c 2011-10-11 10:44:33.000000000 -0400
27357@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27358 .cable_detect = cs5535_cable_detect,
27359 };
27360
27361-static const struct ide_port_info cs5535_chipset __devinitdata = {
27362+static const struct ide_port_info cs5535_chipset __devinitconst = {
27363 .name = DRV_NAME,
27364 .port_ops = &cs5535_port_ops,
27365 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27366diff -urNp linux-3.0.7/drivers/ide/cy82c693.c linux-3.0.7/drivers/ide/cy82c693.c
27367--- linux-3.0.7/drivers/ide/cy82c693.c 2011-07-21 22:17:23.000000000 -0400
27368+++ linux-3.0.7/drivers/ide/cy82c693.c 2011-10-11 10:44:33.000000000 -0400
27369@@ -161,7 +161,7 @@ static const struct ide_port_ops cy82c69
27370 .set_dma_mode = cy82c693_set_dma_mode,
27371 };
27372
27373-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27374+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27375 .name = DRV_NAME,
27376 .init_iops = init_iops_cy82c693,
27377 .port_ops = &cy82c693_port_ops,
27378diff -urNp linux-3.0.7/drivers/ide/hpt366.c linux-3.0.7/drivers/ide/hpt366.c
27379--- linux-3.0.7/drivers/ide/hpt366.c 2011-07-21 22:17:23.000000000 -0400
27380+++ linux-3.0.7/drivers/ide/hpt366.c 2011-10-11 10:44:33.000000000 -0400
27381@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27382 }
27383 };
27384
27385-static const struct hpt_info hpt36x __devinitdata = {
27386+static const struct hpt_info hpt36x __devinitconst = {
27387 .chip_name = "HPT36x",
27388 .chip_type = HPT36x,
27389 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27390@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27391 .timings = &hpt36x_timings
27392 };
27393
27394-static const struct hpt_info hpt370 __devinitdata = {
27395+static const struct hpt_info hpt370 __devinitconst = {
27396 .chip_name = "HPT370",
27397 .chip_type = HPT370,
27398 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27399@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27400 .timings = &hpt37x_timings
27401 };
27402
27403-static const struct hpt_info hpt370a __devinitdata = {
27404+static const struct hpt_info hpt370a __devinitconst = {
27405 .chip_name = "HPT370A",
27406 .chip_type = HPT370A,
27407 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27408@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27409 .timings = &hpt37x_timings
27410 };
27411
27412-static const struct hpt_info hpt374 __devinitdata = {
27413+static const struct hpt_info hpt374 __devinitconst = {
27414 .chip_name = "HPT374",
27415 .chip_type = HPT374,
27416 .udma_mask = ATA_UDMA5,
27417@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27418 .timings = &hpt37x_timings
27419 };
27420
27421-static const struct hpt_info hpt372 __devinitdata = {
27422+static const struct hpt_info hpt372 __devinitconst = {
27423 .chip_name = "HPT372",
27424 .chip_type = HPT372,
27425 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27426@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27427 .timings = &hpt37x_timings
27428 };
27429
27430-static const struct hpt_info hpt372a __devinitdata = {
27431+static const struct hpt_info hpt372a __devinitconst = {
27432 .chip_name = "HPT372A",
27433 .chip_type = HPT372A,
27434 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27435@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27436 .timings = &hpt37x_timings
27437 };
27438
27439-static const struct hpt_info hpt302 __devinitdata = {
27440+static const struct hpt_info hpt302 __devinitconst = {
27441 .chip_name = "HPT302",
27442 .chip_type = HPT302,
27443 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27444@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27445 .timings = &hpt37x_timings
27446 };
27447
27448-static const struct hpt_info hpt371 __devinitdata = {
27449+static const struct hpt_info hpt371 __devinitconst = {
27450 .chip_name = "HPT371",
27451 .chip_type = HPT371,
27452 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27453@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27454 .timings = &hpt37x_timings
27455 };
27456
27457-static const struct hpt_info hpt372n __devinitdata = {
27458+static const struct hpt_info hpt372n __devinitconst = {
27459 .chip_name = "HPT372N",
27460 .chip_type = HPT372N,
27461 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27462@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27463 .timings = &hpt37x_timings
27464 };
27465
27466-static const struct hpt_info hpt302n __devinitdata = {
27467+static const struct hpt_info hpt302n __devinitconst = {
27468 .chip_name = "HPT302N",
27469 .chip_type = HPT302N,
27470 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27471@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27472 .timings = &hpt37x_timings
27473 };
27474
27475-static const struct hpt_info hpt371n __devinitdata = {
27476+static const struct hpt_info hpt371n __devinitconst = {
27477 .chip_name = "HPT371N",
27478 .chip_type = HPT371N,
27479 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27480@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27481 .dma_sff_read_status = ide_dma_sff_read_status,
27482 };
27483
27484-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27485+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27486 { /* 0: HPT36x */
27487 .name = DRV_NAME,
27488 .init_chipset = init_chipset_hpt366,
27489diff -urNp linux-3.0.7/drivers/ide/ide-cd.c linux-3.0.7/drivers/ide/ide-cd.c
27490--- linux-3.0.7/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
27491+++ linux-3.0.7/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
27492@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27493 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27494 if ((unsigned long)buf & alignment
27495 || blk_rq_bytes(rq) & q->dma_pad_mask
27496- || object_is_on_stack(buf))
27497+ || object_starts_on_stack(buf))
27498 drive->dma = 0;
27499 }
27500 }
27501diff -urNp linux-3.0.7/drivers/ide/ide-floppy.c linux-3.0.7/drivers/ide/ide-floppy.c
27502--- linux-3.0.7/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
27503+++ linux-3.0.7/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
27504@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27505 u8 pc_buf[256], header_len, desc_cnt;
27506 int i, rc = 1, blocks, length;
27507
27508+ pax_track_stack();
27509+
27510 ide_debug_log(IDE_DBG_FUNC, "enter");
27511
27512 drive->bios_cyl = 0;
27513diff -urNp linux-3.0.7/drivers/ide/ide-pci-generic.c linux-3.0.7/drivers/ide/ide-pci-generic.c
27514--- linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-07-21 22:17:23.000000000 -0400
27515+++ linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-10-11 10:44:33.000000000 -0400
27516@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27517 .udma_mask = ATA_UDMA6, \
27518 }
27519
27520-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27521+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27522 /* 0: Unknown */
27523 DECLARE_GENERIC_PCI_DEV(0),
27524
27525diff -urNp linux-3.0.7/drivers/ide/it8172.c linux-3.0.7/drivers/ide/it8172.c
27526--- linux-3.0.7/drivers/ide/it8172.c 2011-07-21 22:17:23.000000000 -0400
27527+++ linux-3.0.7/drivers/ide/it8172.c 2011-10-11 10:44:33.000000000 -0400
27528@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27529 .set_dma_mode = it8172_set_dma_mode,
27530 };
27531
27532-static const struct ide_port_info it8172_port_info __devinitdata = {
27533+static const struct ide_port_info it8172_port_info __devinitconst = {
27534 .name = DRV_NAME,
27535 .port_ops = &it8172_port_ops,
27536 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27537diff -urNp linux-3.0.7/drivers/ide/it8213.c linux-3.0.7/drivers/ide/it8213.c
27538--- linux-3.0.7/drivers/ide/it8213.c 2011-07-21 22:17:23.000000000 -0400
27539+++ linux-3.0.7/drivers/ide/it8213.c 2011-10-11 10:44:33.000000000 -0400
27540@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27541 .cable_detect = it8213_cable_detect,
27542 };
27543
27544-static const struct ide_port_info it8213_chipset __devinitdata = {
27545+static const struct ide_port_info it8213_chipset __devinitconst = {
27546 .name = DRV_NAME,
27547 .enablebits = { {0x41, 0x80, 0x80} },
27548 .port_ops = &it8213_port_ops,
27549diff -urNp linux-3.0.7/drivers/ide/it821x.c linux-3.0.7/drivers/ide/it821x.c
27550--- linux-3.0.7/drivers/ide/it821x.c 2011-07-21 22:17:23.000000000 -0400
27551+++ linux-3.0.7/drivers/ide/it821x.c 2011-10-11 10:44:33.000000000 -0400
27552@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27553 .cable_detect = it821x_cable_detect,
27554 };
27555
27556-static const struct ide_port_info it821x_chipset __devinitdata = {
27557+static const struct ide_port_info it821x_chipset __devinitconst = {
27558 .name = DRV_NAME,
27559 .init_chipset = init_chipset_it821x,
27560 .init_hwif = init_hwif_it821x,
27561diff -urNp linux-3.0.7/drivers/ide/jmicron.c linux-3.0.7/drivers/ide/jmicron.c
27562--- linux-3.0.7/drivers/ide/jmicron.c 2011-07-21 22:17:23.000000000 -0400
27563+++ linux-3.0.7/drivers/ide/jmicron.c 2011-10-11 10:44:33.000000000 -0400
27564@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27565 .cable_detect = jmicron_cable_detect,
27566 };
27567
27568-static const struct ide_port_info jmicron_chipset __devinitdata = {
27569+static const struct ide_port_info jmicron_chipset __devinitconst = {
27570 .name = DRV_NAME,
27571 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27572 .port_ops = &jmicron_port_ops,
27573diff -urNp linux-3.0.7/drivers/ide/ns87415.c linux-3.0.7/drivers/ide/ns87415.c
27574--- linux-3.0.7/drivers/ide/ns87415.c 2011-07-21 22:17:23.000000000 -0400
27575+++ linux-3.0.7/drivers/ide/ns87415.c 2011-10-11 10:44:33.000000000 -0400
27576@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27577 .dma_sff_read_status = superio_dma_sff_read_status,
27578 };
27579
27580-static const struct ide_port_info ns87415_chipset __devinitdata = {
27581+static const struct ide_port_info ns87415_chipset __devinitconst = {
27582 .name = DRV_NAME,
27583 .init_hwif = init_hwif_ns87415,
27584 .tp_ops = &ns87415_tp_ops,
27585diff -urNp linux-3.0.7/drivers/ide/opti621.c linux-3.0.7/drivers/ide/opti621.c
27586--- linux-3.0.7/drivers/ide/opti621.c 2011-07-21 22:17:23.000000000 -0400
27587+++ linux-3.0.7/drivers/ide/opti621.c 2011-10-11 10:44:33.000000000 -0400
27588@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27589 .set_pio_mode = opti621_set_pio_mode,
27590 };
27591
27592-static const struct ide_port_info opti621_chipset __devinitdata = {
27593+static const struct ide_port_info opti621_chipset __devinitconst = {
27594 .name = DRV_NAME,
27595 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27596 .port_ops = &opti621_port_ops,
27597diff -urNp linux-3.0.7/drivers/ide/pdc202xx_new.c linux-3.0.7/drivers/ide/pdc202xx_new.c
27598--- linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-07-21 22:17:23.000000000 -0400
27599+++ linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-10-11 10:44:33.000000000 -0400
27600@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27601 .udma_mask = udma, \
27602 }
27603
27604-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27605+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27606 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27607 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27608 };
27609diff -urNp linux-3.0.7/drivers/ide/pdc202xx_old.c linux-3.0.7/drivers/ide/pdc202xx_old.c
27610--- linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-07-21 22:17:23.000000000 -0400
27611+++ linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-10-11 10:44:33.000000000 -0400
27612@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27613 .max_sectors = sectors, \
27614 }
27615
27616-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27617+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27618 { /* 0: PDC20246 */
27619 .name = DRV_NAME,
27620 .init_chipset = init_chipset_pdc202xx,
27621diff -urNp linux-3.0.7/drivers/ide/piix.c linux-3.0.7/drivers/ide/piix.c
27622--- linux-3.0.7/drivers/ide/piix.c 2011-07-21 22:17:23.000000000 -0400
27623+++ linux-3.0.7/drivers/ide/piix.c 2011-10-11 10:44:33.000000000 -0400
27624@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27625 .udma_mask = udma, \
27626 }
27627
27628-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27629+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27630 /* 0: MPIIX */
27631 { /*
27632 * MPIIX actually has only a single IDE channel mapped to
27633diff -urNp linux-3.0.7/drivers/ide/rz1000.c linux-3.0.7/drivers/ide/rz1000.c
27634--- linux-3.0.7/drivers/ide/rz1000.c 2011-07-21 22:17:23.000000000 -0400
27635+++ linux-3.0.7/drivers/ide/rz1000.c 2011-10-11 10:44:33.000000000 -0400
27636@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27637 }
27638 }
27639
27640-static const struct ide_port_info rz1000_chipset __devinitdata = {
27641+static const struct ide_port_info rz1000_chipset __devinitconst = {
27642 .name = DRV_NAME,
27643 .host_flags = IDE_HFLAG_NO_DMA,
27644 };
27645diff -urNp linux-3.0.7/drivers/ide/sc1200.c linux-3.0.7/drivers/ide/sc1200.c
27646--- linux-3.0.7/drivers/ide/sc1200.c 2011-07-21 22:17:23.000000000 -0400
27647+++ linux-3.0.7/drivers/ide/sc1200.c 2011-10-11 10:44:33.000000000 -0400
27648@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27649 .dma_sff_read_status = ide_dma_sff_read_status,
27650 };
27651
27652-static const struct ide_port_info sc1200_chipset __devinitdata = {
27653+static const struct ide_port_info sc1200_chipset __devinitconst = {
27654 .name = DRV_NAME,
27655 .port_ops = &sc1200_port_ops,
27656 .dma_ops = &sc1200_dma_ops,
27657diff -urNp linux-3.0.7/drivers/ide/scc_pata.c linux-3.0.7/drivers/ide/scc_pata.c
27658--- linux-3.0.7/drivers/ide/scc_pata.c 2011-07-21 22:17:23.000000000 -0400
27659+++ linux-3.0.7/drivers/ide/scc_pata.c 2011-10-11 10:44:33.000000000 -0400
27660@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27661 .dma_sff_read_status = scc_dma_sff_read_status,
27662 };
27663
27664-static const struct ide_port_info scc_chipset __devinitdata = {
27665+static const struct ide_port_info scc_chipset __devinitconst = {
27666 .name = "sccIDE",
27667 .init_iops = init_iops_scc,
27668 .init_dma = scc_init_dma,
27669diff -urNp linux-3.0.7/drivers/ide/serverworks.c linux-3.0.7/drivers/ide/serverworks.c
27670--- linux-3.0.7/drivers/ide/serverworks.c 2011-07-21 22:17:23.000000000 -0400
27671+++ linux-3.0.7/drivers/ide/serverworks.c 2011-10-11 10:44:33.000000000 -0400
27672@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27673 .cable_detect = svwks_cable_detect,
27674 };
27675
27676-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
27677+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
27678 { /* 0: OSB4 */
27679 .name = DRV_NAME,
27680 .init_chipset = init_chipset_svwks,
27681diff -urNp linux-3.0.7/drivers/ide/setup-pci.c linux-3.0.7/drivers/ide/setup-pci.c
27682--- linux-3.0.7/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
27683+++ linux-3.0.7/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
27684@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
27685 int ret, i, n_ports = dev2 ? 4 : 2;
27686 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
27687
27688+ pax_track_stack();
27689+
27690 for (i = 0; i < n_ports / 2; i++) {
27691 ret = ide_setup_pci_controller(pdev[i], d, !i);
27692 if (ret < 0)
27693diff -urNp linux-3.0.7/drivers/ide/siimage.c linux-3.0.7/drivers/ide/siimage.c
27694--- linux-3.0.7/drivers/ide/siimage.c 2011-07-21 22:17:23.000000000 -0400
27695+++ linux-3.0.7/drivers/ide/siimage.c 2011-10-11 10:44:33.000000000 -0400
27696@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
27697 .udma_mask = ATA_UDMA6, \
27698 }
27699
27700-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
27701+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
27702 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
27703 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
27704 };
27705diff -urNp linux-3.0.7/drivers/ide/sis5513.c linux-3.0.7/drivers/ide/sis5513.c
27706--- linux-3.0.7/drivers/ide/sis5513.c 2011-07-21 22:17:23.000000000 -0400
27707+++ linux-3.0.7/drivers/ide/sis5513.c 2011-10-11 10:44:33.000000000 -0400
27708@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
27709 .cable_detect = sis_cable_detect,
27710 };
27711
27712-static const struct ide_port_info sis5513_chipset __devinitdata = {
27713+static const struct ide_port_info sis5513_chipset __devinitconst = {
27714 .name = DRV_NAME,
27715 .init_chipset = init_chipset_sis5513,
27716 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
27717diff -urNp linux-3.0.7/drivers/ide/sl82c105.c linux-3.0.7/drivers/ide/sl82c105.c
27718--- linux-3.0.7/drivers/ide/sl82c105.c 2011-07-21 22:17:23.000000000 -0400
27719+++ linux-3.0.7/drivers/ide/sl82c105.c 2011-10-11 10:44:33.000000000 -0400
27720@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
27721 .dma_sff_read_status = ide_dma_sff_read_status,
27722 };
27723
27724-static const struct ide_port_info sl82c105_chipset __devinitdata = {
27725+static const struct ide_port_info sl82c105_chipset __devinitconst = {
27726 .name = DRV_NAME,
27727 .init_chipset = init_chipset_sl82c105,
27728 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
27729diff -urNp linux-3.0.7/drivers/ide/slc90e66.c linux-3.0.7/drivers/ide/slc90e66.c
27730--- linux-3.0.7/drivers/ide/slc90e66.c 2011-07-21 22:17:23.000000000 -0400
27731+++ linux-3.0.7/drivers/ide/slc90e66.c 2011-10-11 10:44:33.000000000 -0400
27732@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
27733 .cable_detect = slc90e66_cable_detect,
27734 };
27735
27736-static const struct ide_port_info slc90e66_chipset __devinitdata = {
27737+static const struct ide_port_info slc90e66_chipset __devinitconst = {
27738 .name = DRV_NAME,
27739 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
27740 .port_ops = &slc90e66_port_ops,
27741diff -urNp linux-3.0.7/drivers/ide/tc86c001.c linux-3.0.7/drivers/ide/tc86c001.c
27742--- linux-3.0.7/drivers/ide/tc86c001.c 2011-07-21 22:17:23.000000000 -0400
27743+++ linux-3.0.7/drivers/ide/tc86c001.c 2011-10-11 10:44:33.000000000 -0400
27744@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
27745 .dma_sff_read_status = ide_dma_sff_read_status,
27746 };
27747
27748-static const struct ide_port_info tc86c001_chipset __devinitdata = {
27749+static const struct ide_port_info tc86c001_chipset __devinitconst = {
27750 .name = DRV_NAME,
27751 .init_hwif = init_hwif_tc86c001,
27752 .port_ops = &tc86c001_port_ops,
27753diff -urNp linux-3.0.7/drivers/ide/triflex.c linux-3.0.7/drivers/ide/triflex.c
27754--- linux-3.0.7/drivers/ide/triflex.c 2011-07-21 22:17:23.000000000 -0400
27755+++ linux-3.0.7/drivers/ide/triflex.c 2011-10-11 10:44:33.000000000 -0400
27756@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
27757 .set_dma_mode = triflex_set_mode,
27758 };
27759
27760-static const struct ide_port_info triflex_device __devinitdata = {
27761+static const struct ide_port_info triflex_device __devinitconst = {
27762 .name = DRV_NAME,
27763 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
27764 .port_ops = &triflex_port_ops,
27765diff -urNp linux-3.0.7/drivers/ide/trm290.c linux-3.0.7/drivers/ide/trm290.c
27766--- linux-3.0.7/drivers/ide/trm290.c 2011-07-21 22:17:23.000000000 -0400
27767+++ linux-3.0.7/drivers/ide/trm290.c 2011-10-11 10:44:33.000000000 -0400
27768@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
27769 .dma_check = trm290_dma_check,
27770 };
27771
27772-static const struct ide_port_info trm290_chipset __devinitdata = {
27773+static const struct ide_port_info trm290_chipset __devinitconst = {
27774 .name = DRV_NAME,
27775 .init_hwif = init_hwif_trm290,
27776 .tp_ops = &trm290_tp_ops,
27777diff -urNp linux-3.0.7/drivers/ide/via82cxxx.c linux-3.0.7/drivers/ide/via82cxxx.c
27778--- linux-3.0.7/drivers/ide/via82cxxx.c 2011-07-21 22:17:23.000000000 -0400
27779+++ linux-3.0.7/drivers/ide/via82cxxx.c 2011-10-11 10:44:33.000000000 -0400
27780@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
27781 .cable_detect = via82cxxx_cable_detect,
27782 };
27783
27784-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
27785+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
27786 .name = DRV_NAME,
27787 .init_chipset = init_chipset_via82cxxx,
27788 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
27789diff -urNp linux-3.0.7/drivers/infiniband/core/cm.c linux-3.0.7/drivers/infiniband/core/cm.c
27790--- linux-3.0.7/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
27791+++ linux-3.0.7/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
27792@@ -113,7 +113,7 @@ static char const counter_group_names[CM
27793
27794 struct cm_counter_group {
27795 struct kobject obj;
27796- atomic_long_t counter[CM_ATTR_COUNT];
27797+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
27798 };
27799
27800 struct cm_counter_attribute {
27801@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
27802 struct ib_mad_send_buf *msg = NULL;
27803 int ret;
27804
27805- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27806+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27807 counter[CM_REQ_COUNTER]);
27808
27809 /* Quick state check to discard duplicate REQs. */
27810@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
27811 if (!cm_id_priv)
27812 return;
27813
27814- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27815+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27816 counter[CM_REP_COUNTER]);
27817 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
27818 if (ret)
27819@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
27820 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
27821 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
27822 spin_unlock_irq(&cm_id_priv->lock);
27823- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27824+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27825 counter[CM_RTU_COUNTER]);
27826 goto out;
27827 }
27828@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
27829 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
27830 dreq_msg->local_comm_id);
27831 if (!cm_id_priv) {
27832- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27833+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27834 counter[CM_DREQ_COUNTER]);
27835 cm_issue_drep(work->port, work->mad_recv_wc);
27836 return -EINVAL;
27837@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
27838 case IB_CM_MRA_REP_RCVD:
27839 break;
27840 case IB_CM_TIMEWAIT:
27841- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27842+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27843 counter[CM_DREQ_COUNTER]);
27844 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27845 goto unlock;
27846@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
27847 cm_free_msg(msg);
27848 goto deref;
27849 case IB_CM_DREQ_RCVD:
27850- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27851+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27852 counter[CM_DREQ_COUNTER]);
27853 goto unlock;
27854 default:
27855@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
27856 ib_modify_mad(cm_id_priv->av.port->mad_agent,
27857 cm_id_priv->msg, timeout)) {
27858 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
27859- atomic_long_inc(&work->port->
27860+ atomic_long_inc_unchecked(&work->port->
27861 counter_group[CM_RECV_DUPLICATES].
27862 counter[CM_MRA_COUNTER]);
27863 goto out;
27864@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
27865 break;
27866 case IB_CM_MRA_REQ_RCVD:
27867 case IB_CM_MRA_REP_RCVD:
27868- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27869+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27870 counter[CM_MRA_COUNTER]);
27871 /* fall through */
27872 default:
27873@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
27874 case IB_CM_LAP_IDLE:
27875 break;
27876 case IB_CM_MRA_LAP_SENT:
27877- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27878+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27879 counter[CM_LAP_COUNTER]);
27880 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27881 goto unlock;
27882@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
27883 cm_free_msg(msg);
27884 goto deref;
27885 case IB_CM_LAP_RCVD:
27886- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27887+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27888 counter[CM_LAP_COUNTER]);
27889 goto unlock;
27890 default:
27891@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
27892 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
27893 if (cur_cm_id_priv) {
27894 spin_unlock_irq(&cm.lock);
27895- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27896+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27897 counter[CM_SIDR_REQ_COUNTER]);
27898 goto out; /* Duplicate message. */
27899 }
27900@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
27901 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
27902 msg->retries = 1;
27903
27904- atomic_long_add(1 + msg->retries,
27905+ atomic_long_add_unchecked(1 + msg->retries,
27906 &port->counter_group[CM_XMIT].counter[attr_index]);
27907 if (msg->retries)
27908- atomic_long_add(msg->retries,
27909+ atomic_long_add_unchecked(msg->retries,
27910 &port->counter_group[CM_XMIT_RETRIES].
27911 counter[attr_index]);
27912
27913@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
27914 }
27915
27916 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
27917- atomic_long_inc(&port->counter_group[CM_RECV].
27918+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
27919 counter[attr_id - CM_ATTR_ID_OFFSET]);
27920
27921 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
27922@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
27923 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
27924
27925 return sprintf(buf, "%ld\n",
27926- atomic_long_read(&group->counter[cm_attr->index]));
27927+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
27928 }
27929
27930 static const struct sysfs_ops cm_counter_ops = {
27931diff -urNp linux-3.0.7/drivers/infiniband/core/fmr_pool.c linux-3.0.7/drivers/infiniband/core/fmr_pool.c
27932--- linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
27933+++ linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
27934@@ -97,8 +97,8 @@ struct ib_fmr_pool {
27935
27936 struct task_struct *thread;
27937
27938- atomic_t req_ser;
27939- atomic_t flush_ser;
27940+ atomic_unchecked_t req_ser;
27941+ atomic_unchecked_t flush_ser;
27942
27943 wait_queue_head_t force_wait;
27944 };
27945@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
27946 struct ib_fmr_pool *pool = pool_ptr;
27947
27948 do {
27949- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
27950+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
27951 ib_fmr_batch_release(pool);
27952
27953- atomic_inc(&pool->flush_ser);
27954+ atomic_inc_unchecked(&pool->flush_ser);
27955 wake_up_interruptible(&pool->force_wait);
27956
27957 if (pool->flush_function)
27958@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
27959 }
27960
27961 set_current_state(TASK_INTERRUPTIBLE);
27962- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
27963+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
27964 !kthread_should_stop())
27965 schedule();
27966 __set_current_state(TASK_RUNNING);
27967@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
27968 pool->dirty_watermark = params->dirty_watermark;
27969 pool->dirty_len = 0;
27970 spin_lock_init(&pool->pool_lock);
27971- atomic_set(&pool->req_ser, 0);
27972- atomic_set(&pool->flush_ser, 0);
27973+ atomic_set_unchecked(&pool->req_ser, 0);
27974+ atomic_set_unchecked(&pool->flush_ser, 0);
27975 init_waitqueue_head(&pool->force_wait);
27976
27977 pool->thread = kthread_run(ib_fmr_cleanup_thread,
27978@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
27979 }
27980 spin_unlock_irq(&pool->pool_lock);
27981
27982- serial = atomic_inc_return(&pool->req_ser);
27983+ serial = atomic_inc_return_unchecked(&pool->req_ser);
27984 wake_up_process(pool->thread);
27985
27986 if (wait_event_interruptible(pool->force_wait,
27987- atomic_read(&pool->flush_ser) - serial >= 0))
27988+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
27989 return -EINTR;
27990
27991 return 0;
27992@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
27993 } else {
27994 list_add_tail(&fmr->list, &pool->dirty_list);
27995 if (++pool->dirty_len >= pool->dirty_watermark) {
27996- atomic_inc(&pool->req_ser);
27997+ atomic_inc_unchecked(&pool->req_ser);
27998 wake_up_process(pool->thread);
27999 }
28000 }
28001diff -urNp linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c
28002--- linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
28003+++ linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
28004@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28005 int err;
28006 struct fw_ri_tpte tpt;
28007 u32 stag_idx;
28008- static atomic_t key;
28009+ static atomic_unchecked_t key;
28010
28011 if (c4iw_fatal_error(rdev))
28012 return -EIO;
28013@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28014 &rdev->resource.tpt_fifo_lock);
28015 if (!stag_idx)
28016 return -ENOMEM;
28017- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28018+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28019 }
28020 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28021 __func__, stag_state, type, pdid, stag_idx);
28022diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c
28023--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
28024+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
28025@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28026 struct infinipath_counters counters;
28027 struct ipath_devdata *dd;
28028
28029+ pax_track_stack();
28030+
28031 dd = file->f_path.dentry->d_inode->i_private;
28032 dd->ipath_f_read_counters(dd, &counters);
28033
28034diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c
28035--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
28036+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
28037@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28038 struct ib_atomic_eth *ateth;
28039 struct ipath_ack_entry *e;
28040 u64 vaddr;
28041- atomic64_t *maddr;
28042+ atomic64_unchecked_t *maddr;
28043 u64 sdata;
28044 u32 rkey;
28045 u8 next;
28046@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28047 IB_ACCESS_REMOTE_ATOMIC)))
28048 goto nack_acc_unlck;
28049 /* Perform atomic OP and save result. */
28050- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28051+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28052 sdata = be64_to_cpu(ateth->swap_data);
28053 e = &qp->s_ack_queue[qp->r_head_ack_queue];
28054 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28055- (u64) atomic64_add_return(sdata, maddr) - sdata :
28056+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28057 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28058 be64_to_cpu(ateth->compare_data),
28059 sdata);
28060diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c
28061--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
28062+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
28063@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28064 unsigned long flags;
28065 struct ib_wc wc;
28066 u64 sdata;
28067- atomic64_t *maddr;
28068+ atomic64_unchecked_t *maddr;
28069 enum ib_wc_status send_status;
28070
28071 /*
28072@@ -382,11 +382,11 @@ again:
28073 IB_ACCESS_REMOTE_ATOMIC)))
28074 goto acc_err;
28075 /* Perform atomic OP and save result. */
28076- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28077+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28078 sdata = wqe->wr.wr.atomic.compare_add;
28079 *(u64 *) sqp->s_sge.sge.vaddr =
28080 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28081- (u64) atomic64_add_return(sdata, maddr) - sdata :
28082+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28083 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28084 sdata, wqe->wr.wr.atomic.swap);
28085 goto send_comp;
28086diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.c linux-3.0.7/drivers/infiniband/hw/nes/nes.c
28087--- linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
28088+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
28089@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28090 LIST_HEAD(nes_adapter_list);
28091 static LIST_HEAD(nes_dev_list);
28092
28093-atomic_t qps_destroyed;
28094+atomic_unchecked_t qps_destroyed;
28095
28096 static unsigned int ee_flsh_adapter;
28097 static unsigned int sysfs_nonidx_addr;
28098@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28099 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28100 struct nes_adapter *nesadapter = nesdev->nesadapter;
28101
28102- atomic_inc(&qps_destroyed);
28103+ atomic_inc_unchecked(&qps_destroyed);
28104
28105 /* Free the control structures */
28106
28107diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.h linux-3.0.7/drivers/infiniband/hw/nes/nes.h
28108--- linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
28109+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
28110@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28111 extern unsigned int wqm_quanta;
28112 extern struct list_head nes_adapter_list;
28113
28114-extern atomic_t cm_connects;
28115-extern atomic_t cm_accepts;
28116-extern atomic_t cm_disconnects;
28117-extern atomic_t cm_closes;
28118-extern atomic_t cm_connecteds;
28119-extern atomic_t cm_connect_reqs;
28120-extern atomic_t cm_rejects;
28121-extern atomic_t mod_qp_timouts;
28122-extern atomic_t qps_created;
28123-extern atomic_t qps_destroyed;
28124-extern atomic_t sw_qps_destroyed;
28125+extern atomic_unchecked_t cm_connects;
28126+extern atomic_unchecked_t cm_accepts;
28127+extern atomic_unchecked_t cm_disconnects;
28128+extern atomic_unchecked_t cm_closes;
28129+extern atomic_unchecked_t cm_connecteds;
28130+extern atomic_unchecked_t cm_connect_reqs;
28131+extern atomic_unchecked_t cm_rejects;
28132+extern atomic_unchecked_t mod_qp_timouts;
28133+extern atomic_unchecked_t qps_created;
28134+extern atomic_unchecked_t qps_destroyed;
28135+extern atomic_unchecked_t sw_qps_destroyed;
28136 extern u32 mh_detected;
28137 extern u32 mh_pauses_sent;
28138 extern u32 cm_packets_sent;
28139@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28140 extern u32 cm_packets_received;
28141 extern u32 cm_packets_dropped;
28142 extern u32 cm_packets_retrans;
28143-extern atomic_t cm_listens_created;
28144-extern atomic_t cm_listens_destroyed;
28145+extern atomic_unchecked_t cm_listens_created;
28146+extern atomic_unchecked_t cm_listens_destroyed;
28147 extern u32 cm_backlog_drops;
28148-extern atomic_t cm_loopbacks;
28149-extern atomic_t cm_nodes_created;
28150-extern atomic_t cm_nodes_destroyed;
28151-extern atomic_t cm_accel_dropped_pkts;
28152-extern atomic_t cm_resets_recvd;
28153+extern atomic_unchecked_t cm_loopbacks;
28154+extern atomic_unchecked_t cm_nodes_created;
28155+extern atomic_unchecked_t cm_nodes_destroyed;
28156+extern atomic_unchecked_t cm_accel_dropped_pkts;
28157+extern atomic_unchecked_t cm_resets_recvd;
28158
28159 extern u32 int_mod_timer_init;
28160 extern u32 int_mod_cq_depth_256;
28161diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c
28162--- linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
28163+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
28164@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28165 u32 cm_packets_retrans;
28166 u32 cm_packets_created;
28167 u32 cm_packets_received;
28168-atomic_t cm_listens_created;
28169-atomic_t cm_listens_destroyed;
28170+atomic_unchecked_t cm_listens_created;
28171+atomic_unchecked_t cm_listens_destroyed;
28172 u32 cm_backlog_drops;
28173-atomic_t cm_loopbacks;
28174-atomic_t cm_nodes_created;
28175-atomic_t cm_nodes_destroyed;
28176-atomic_t cm_accel_dropped_pkts;
28177-atomic_t cm_resets_recvd;
28178+atomic_unchecked_t cm_loopbacks;
28179+atomic_unchecked_t cm_nodes_created;
28180+atomic_unchecked_t cm_nodes_destroyed;
28181+atomic_unchecked_t cm_accel_dropped_pkts;
28182+atomic_unchecked_t cm_resets_recvd;
28183
28184 static inline int mini_cm_accelerated(struct nes_cm_core *,
28185 struct nes_cm_node *);
28186@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28187
28188 static struct nes_cm_core *g_cm_core;
28189
28190-atomic_t cm_connects;
28191-atomic_t cm_accepts;
28192-atomic_t cm_disconnects;
28193-atomic_t cm_closes;
28194-atomic_t cm_connecteds;
28195-atomic_t cm_connect_reqs;
28196-atomic_t cm_rejects;
28197+atomic_unchecked_t cm_connects;
28198+atomic_unchecked_t cm_accepts;
28199+atomic_unchecked_t cm_disconnects;
28200+atomic_unchecked_t cm_closes;
28201+atomic_unchecked_t cm_connecteds;
28202+atomic_unchecked_t cm_connect_reqs;
28203+atomic_unchecked_t cm_rejects;
28204
28205
28206 /**
28207@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28208 kfree(listener);
28209 listener = NULL;
28210 ret = 0;
28211- atomic_inc(&cm_listens_destroyed);
28212+ atomic_inc_unchecked(&cm_listens_destroyed);
28213 } else {
28214 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28215 }
28216@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28217 cm_node->rem_mac);
28218
28219 add_hte_node(cm_core, cm_node);
28220- atomic_inc(&cm_nodes_created);
28221+ atomic_inc_unchecked(&cm_nodes_created);
28222
28223 return cm_node;
28224 }
28225@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28226 }
28227
28228 atomic_dec(&cm_core->node_cnt);
28229- atomic_inc(&cm_nodes_destroyed);
28230+ atomic_inc_unchecked(&cm_nodes_destroyed);
28231 nesqp = cm_node->nesqp;
28232 if (nesqp) {
28233 nesqp->cm_node = NULL;
28234@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28235
28236 static void drop_packet(struct sk_buff *skb)
28237 {
28238- atomic_inc(&cm_accel_dropped_pkts);
28239+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28240 dev_kfree_skb_any(skb);
28241 }
28242
28243@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28244 {
28245
28246 int reset = 0; /* whether to send reset in case of err.. */
28247- atomic_inc(&cm_resets_recvd);
28248+ atomic_inc_unchecked(&cm_resets_recvd);
28249 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28250 " refcnt=%d\n", cm_node, cm_node->state,
28251 atomic_read(&cm_node->ref_count));
28252@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28253 rem_ref_cm_node(cm_node->cm_core, cm_node);
28254 return NULL;
28255 }
28256- atomic_inc(&cm_loopbacks);
28257+ atomic_inc_unchecked(&cm_loopbacks);
28258 loopbackremotenode->loopbackpartner = cm_node;
28259 loopbackremotenode->tcp_cntxt.rcv_wscale =
28260 NES_CM_DEFAULT_RCV_WND_SCALE;
28261@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28262 add_ref_cm_node(cm_node);
28263 } else if (cm_node->state == NES_CM_STATE_TSA) {
28264 rem_ref_cm_node(cm_core, cm_node);
28265- atomic_inc(&cm_accel_dropped_pkts);
28266+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28267 dev_kfree_skb_any(skb);
28268 break;
28269 }
28270@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28271
28272 if ((cm_id) && (cm_id->event_handler)) {
28273 if (issue_disconn) {
28274- atomic_inc(&cm_disconnects);
28275+ atomic_inc_unchecked(&cm_disconnects);
28276 cm_event.event = IW_CM_EVENT_DISCONNECT;
28277 cm_event.status = disconn_status;
28278 cm_event.local_addr = cm_id->local_addr;
28279@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28280 }
28281
28282 if (issue_close) {
28283- atomic_inc(&cm_closes);
28284+ atomic_inc_unchecked(&cm_closes);
28285 nes_disconnect(nesqp, 1);
28286
28287 cm_id->provider_data = nesqp;
28288@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28289
28290 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28291 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28292- atomic_inc(&cm_accepts);
28293+ atomic_inc_unchecked(&cm_accepts);
28294
28295 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28296 netdev_refcnt_read(nesvnic->netdev));
28297@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28298
28299 struct nes_cm_core *cm_core;
28300
28301- atomic_inc(&cm_rejects);
28302+ atomic_inc_unchecked(&cm_rejects);
28303 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28304 loopback = cm_node->loopbackpartner;
28305 cm_core = cm_node->cm_core;
28306@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28307 ntohl(cm_id->local_addr.sin_addr.s_addr),
28308 ntohs(cm_id->local_addr.sin_port));
28309
28310- atomic_inc(&cm_connects);
28311+ atomic_inc_unchecked(&cm_connects);
28312 nesqp->active_conn = 1;
28313
28314 /* cache the cm_id in the qp */
28315@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28316 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28317 return err;
28318 }
28319- atomic_inc(&cm_listens_created);
28320+ atomic_inc_unchecked(&cm_listens_created);
28321 }
28322
28323 cm_id->add_ref(cm_id);
28324@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28325 if (nesqp->destroyed) {
28326 return;
28327 }
28328- atomic_inc(&cm_connecteds);
28329+ atomic_inc_unchecked(&cm_connecteds);
28330 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28331 " local port 0x%04X. jiffies = %lu.\n",
28332 nesqp->hwqp.qp_id,
28333@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28334
28335 cm_id->add_ref(cm_id);
28336 ret = cm_id->event_handler(cm_id, &cm_event);
28337- atomic_inc(&cm_closes);
28338+ atomic_inc_unchecked(&cm_closes);
28339 cm_event.event = IW_CM_EVENT_CLOSE;
28340 cm_event.status = 0;
28341 cm_event.provider_data = cm_id->provider_data;
28342@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28343 return;
28344 cm_id = cm_node->cm_id;
28345
28346- atomic_inc(&cm_connect_reqs);
28347+ atomic_inc_unchecked(&cm_connect_reqs);
28348 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28349 cm_node, cm_id, jiffies);
28350
28351@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28352 return;
28353 cm_id = cm_node->cm_id;
28354
28355- atomic_inc(&cm_connect_reqs);
28356+ atomic_inc_unchecked(&cm_connect_reqs);
28357 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28358 cm_node, cm_id, jiffies);
28359
28360diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c
28361--- linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
28362+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
28363@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28364 target_stat_values[++index] = mh_detected;
28365 target_stat_values[++index] = mh_pauses_sent;
28366 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28367- target_stat_values[++index] = atomic_read(&cm_connects);
28368- target_stat_values[++index] = atomic_read(&cm_accepts);
28369- target_stat_values[++index] = atomic_read(&cm_disconnects);
28370- target_stat_values[++index] = atomic_read(&cm_connecteds);
28371- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28372- target_stat_values[++index] = atomic_read(&cm_rejects);
28373- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28374- target_stat_values[++index] = atomic_read(&qps_created);
28375- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28376- target_stat_values[++index] = atomic_read(&qps_destroyed);
28377- target_stat_values[++index] = atomic_read(&cm_closes);
28378+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28379+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28380+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28381+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28382+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28383+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28384+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28385+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28386+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28387+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28388+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28389 target_stat_values[++index] = cm_packets_sent;
28390 target_stat_values[++index] = cm_packets_bounced;
28391 target_stat_values[++index] = cm_packets_created;
28392 target_stat_values[++index] = cm_packets_received;
28393 target_stat_values[++index] = cm_packets_dropped;
28394 target_stat_values[++index] = cm_packets_retrans;
28395- target_stat_values[++index] = atomic_read(&cm_listens_created);
28396- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28397+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28398+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28399 target_stat_values[++index] = cm_backlog_drops;
28400- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28401- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28402- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28403- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28404- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28405+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28406+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28407+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28408+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28409+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28410 target_stat_values[++index] = nesadapter->free_4kpbl;
28411 target_stat_values[++index] = nesadapter->free_256pbl;
28412 target_stat_values[++index] = int_mod_timer_init;
28413diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c
28414--- linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
28415+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
28416@@ -46,9 +46,9 @@
28417
28418 #include <rdma/ib_umem.h>
28419
28420-atomic_t mod_qp_timouts;
28421-atomic_t qps_created;
28422-atomic_t sw_qps_destroyed;
28423+atomic_unchecked_t mod_qp_timouts;
28424+atomic_unchecked_t qps_created;
28425+atomic_unchecked_t sw_qps_destroyed;
28426
28427 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28428
28429@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
28430 if (init_attr->create_flags)
28431 return ERR_PTR(-EINVAL);
28432
28433- atomic_inc(&qps_created);
28434+ atomic_inc_unchecked(&qps_created);
28435 switch (init_attr->qp_type) {
28436 case IB_QPT_RC:
28437 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28438@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
28439 struct iw_cm_event cm_event;
28440 int ret;
28441
28442- atomic_inc(&sw_qps_destroyed);
28443+ atomic_inc_unchecked(&sw_qps_destroyed);
28444 nesqp->destroyed = 1;
28445
28446 /* Blow away the connection if it exists. */
28447diff -urNp linux-3.0.7/drivers/infiniband/hw/qib/qib.h linux-3.0.7/drivers/infiniband/hw/qib/qib.h
28448--- linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
28449+++ linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
28450@@ -51,6 +51,7 @@
28451 #include <linux/completion.h>
28452 #include <linux/kref.h>
28453 #include <linux/sched.h>
28454+#include <linux/slab.h>
28455
28456 #include "qib_common.h"
28457 #include "qib_verbs.h"
28458diff -urNp linux-3.0.7/drivers/input/gameport/gameport.c linux-3.0.7/drivers/input/gameport/gameport.c
28459--- linux-3.0.7/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
28460+++ linux-3.0.7/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
28461@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28462 */
28463 static void gameport_init_port(struct gameport *gameport)
28464 {
28465- static atomic_t gameport_no = ATOMIC_INIT(0);
28466+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28467
28468 __module_get(THIS_MODULE);
28469
28470 mutex_init(&gameport->drv_mutex);
28471 device_initialize(&gameport->dev);
28472 dev_set_name(&gameport->dev, "gameport%lu",
28473- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28474+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28475 gameport->dev.bus = &gameport_bus;
28476 gameport->dev.release = gameport_release_port;
28477 if (gameport->parent)
28478diff -urNp linux-3.0.7/drivers/input/input.c linux-3.0.7/drivers/input/input.c
28479--- linux-3.0.7/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
28480+++ linux-3.0.7/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
28481@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28482 */
28483 int input_register_device(struct input_dev *dev)
28484 {
28485- static atomic_t input_no = ATOMIC_INIT(0);
28486+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28487 struct input_handler *handler;
28488 const char *path;
28489 int error;
28490@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28491 dev->setkeycode = input_default_setkeycode;
28492
28493 dev_set_name(&dev->dev, "input%ld",
28494- (unsigned long) atomic_inc_return(&input_no) - 1);
28495+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28496
28497 error = device_add(&dev->dev);
28498 if (error)
28499diff -urNp linux-3.0.7/drivers/input/joystick/sidewinder.c linux-3.0.7/drivers/input/joystick/sidewinder.c
28500--- linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
28501+++ linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
28502@@ -30,6 +30,7 @@
28503 #include <linux/kernel.h>
28504 #include <linux/module.h>
28505 #include <linux/slab.h>
28506+#include <linux/sched.h>
28507 #include <linux/init.h>
28508 #include <linux/input.h>
28509 #include <linux/gameport.h>
28510@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28511 unsigned char buf[SW_LENGTH];
28512 int i;
28513
28514+ pax_track_stack();
28515+
28516 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28517
28518 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28519diff -urNp linux-3.0.7/drivers/input/joystick/xpad.c linux-3.0.7/drivers/input/joystick/xpad.c
28520--- linux-3.0.7/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
28521+++ linux-3.0.7/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
28522@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
28523
28524 static int xpad_led_probe(struct usb_xpad *xpad)
28525 {
28526- static atomic_t led_seq = ATOMIC_INIT(0);
28527+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28528 long led_no;
28529 struct xpad_led *led;
28530 struct led_classdev *led_cdev;
28531@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
28532 if (!led)
28533 return -ENOMEM;
28534
28535- led_no = (long)atomic_inc_return(&led_seq) - 1;
28536+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28537
28538 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28539 led->xpad = xpad;
28540diff -urNp linux-3.0.7/drivers/input/mousedev.c linux-3.0.7/drivers/input/mousedev.c
28541--- linux-3.0.7/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
28542+++ linux-3.0.7/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
28543@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28544
28545 spin_unlock_irq(&client->packet_lock);
28546
28547- if (copy_to_user(buffer, data, count))
28548+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28549 return -EFAULT;
28550
28551 return count;
28552diff -urNp linux-3.0.7/drivers/input/serio/serio.c linux-3.0.7/drivers/input/serio/serio.c
28553--- linux-3.0.7/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
28554+++ linux-3.0.7/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
28555@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28556 */
28557 static void serio_init_port(struct serio *serio)
28558 {
28559- static atomic_t serio_no = ATOMIC_INIT(0);
28560+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28561
28562 __module_get(THIS_MODULE);
28563
28564@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28565 mutex_init(&serio->drv_mutex);
28566 device_initialize(&serio->dev);
28567 dev_set_name(&serio->dev, "serio%ld",
28568- (long)atomic_inc_return(&serio_no) - 1);
28569+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28570 serio->dev.bus = &serio_bus;
28571 serio->dev.release = serio_release_port;
28572 serio->dev.groups = serio_device_attr_groups;
28573diff -urNp linux-3.0.7/drivers/isdn/capi/capi.c linux-3.0.7/drivers/isdn/capi/capi.c
28574--- linux-3.0.7/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
28575+++ linux-3.0.7/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
28576@@ -83,8 +83,8 @@ struct capiminor {
28577
28578 struct capi20_appl *ap;
28579 u32 ncci;
28580- atomic_t datahandle;
28581- atomic_t msgid;
28582+ atomic_unchecked_t datahandle;
28583+ atomic_unchecked_t msgid;
28584
28585 struct tty_port port;
28586 int ttyinstop;
28587@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28588 capimsg_setu16(s, 2, mp->ap->applid);
28589 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28590 capimsg_setu8 (s, 5, CAPI_RESP);
28591- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28592+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28593 capimsg_setu32(s, 8, mp->ncci);
28594 capimsg_setu16(s, 12, datahandle);
28595 }
28596@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28597 mp->outbytes -= len;
28598 spin_unlock_bh(&mp->outlock);
28599
28600- datahandle = atomic_inc_return(&mp->datahandle);
28601+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28602 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28603 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28604 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28605 capimsg_setu16(skb->data, 2, mp->ap->applid);
28606 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28607 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28608- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28609+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28610 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28611 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28612 capimsg_setu16(skb->data, 16, len); /* Data length */
28613diff -urNp linux-3.0.7/drivers/isdn/gigaset/common.c linux-3.0.7/drivers/isdn/gigaset/common.c
28614--- linux-3.0.7/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
28615+++ linux-3.0.7/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
28616@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28617 cs->commands_pending = 0;
28618 cs->cur_at_seq = 0;
28619 cs->gotfwver = -1;
28620- cs->open_count = 0;
28621+ local_set(&cs->open_count, 0);
28622 cs->dev = NULL;
28623 cs->tty = NULL;
28624 cs->tty_dev = NULL;
28625diff -urNp linux-3.0.7/drivers/isdn/gigaset/gigaset.h linux-3.0.7/drivers/isdn/gigaset/gigaset.h
28626--- linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
28627+++ linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
28628@@ -35,6 +35,7 @@
28629 #include <linux/tty_driver.h>
28630 #include <linux/list.h>
28631 #include <asm/atomic.h>
28632+#include <asm/local.h>
28633
28634 #define GIG_VERSION {0, 5, 0, 0}
28635 #define GIG_COMPAT {0, 4, 0, 0}
28636@@ -433,7 +434,7 @@ struct cardstate {
28637 spinlock_t cmdlock;
28638 unsigned curlen, cmdbytes;
28639
28640- unsigned open_count;
28641+ local_t open_count;
28642 struct tty_struct *tty;
28643 struct tasklet_struct if_wake_tasklet;
28644 unsigned control_state;
28645diff -urNp linux-3.0.7/drivers/isdn/gigaset/interface.c linux-3.0.7/drivers/isdn/gigaset/interface.c
28646--- linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
28647+++ linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
28648@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28649 }
28650 tty->driver_data = cs;
28651
28652- ++cs->open_count;
28653-
28654- if (cs->open_count == 1) {
28655+ if (local_inc_return(&cs->open_count) == 1) {
28656 spin_lock_irqsave(&cs->lock, flags);
28657 cs->tty = tty;
28658 spin_unlock_irqrestore(&cs->lock, flags);
28659@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28660
28661 if (!cs->connected)
28662 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28663- else if (!cs->open_count)
28664+ else if (!local_read(&cs->open_count))
28665 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28666 else {
28667- if (!--cs->open_count) {
28668+ if (!local_dec_return(&cs->open_count)) {
28669 spin_lock_irqsave(&cs->lock, flags);
28670 cs->tty = NULL;
28671 spin_unlock_irqrestore(&cs->lock, flags);
28672@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28673 if (!cs->connected) {
28674 gig_dbg(DEBUG_IF, "not connected");
28675 retval = -ENODEV;
28676- } else if (!cs->open_count)
28677+ } else if (!local_read(&cs->open_count))
28678 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28679 else {
28680 retval = 0;
28681@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
28682 retval = -ENODEV;
28683 goto done;
28684 }
28685- if (!cs->open_count) {
28686+ if (!local_read(&cs->open_count)) {
28687 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28688 retval = -ENODEV;
28689 goto done;
28690@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
28691 if (!cs->connected) {
28692 gig_dbg(DEBUG_IF, "not connected");
28693 retval = -ENODEV;
28694- } else if (!cs->open_count)
28695+ } else if (!local_read(&cs->open_count))
28696 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28697 else if (cs->mstate != MS_LOCKED) {
28698 dev_warn(cs->dev, "can't write to unlocked device\n");
28699@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
28700
28701 if (!cs->connected)
28702 gig_dbg(DEBUG_IF, "not connected");
28703- else if (!cs->open_count)
28704+ else if (!local_read(&cs->open_count))
28705 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28706 else if (cs->mstate != MS_LOCKED)
28707 dev_warn(cs->dev, "can't write to unlocked device\n");
28708@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
28709
28710 if (!cs->connected)
28711 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28712- else if (!cs->open_count)
28713+ else if (!local_read(&cs->open_count))
28714 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28715 else
28716 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28717@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
28718
28719 if (!cs->connected)
28720 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28721- else if (!cs->open_count)
28722+ else if (!local_read(&cs->open_count))
28723 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28724 else
28725 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28726@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
28727 goto out;
28728 }
28729
28730- if (!cs->open_count) {
28731+ if (!local_read(&cs->open_count)) {
28732 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28733 goto out;
28734 }
28735diff -urNp linux-3.0.7/drivers/isdn/hardware/avm/b1.c linux-3.0.7/drivers/isdn/hardware/avm/b1.c
28736--- linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
28737+++ linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
28738@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
28739 }
28740 if (left) {
28741 if (t4file->user) {
28742- if (copy_from_user(buf, dp, left))
28743+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28744 return -EFAULT;
28745 } else {
28746 memcpy(buf, dp, left);
28747@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
28748 }
28749 if (left) {
28750 if (config->user) {
28751- if (copy_from_user(buf, dp, left))
28752+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28753 return -EFAULT;
28754 } else {
28755 memcpy(buf, dp, left);
28756diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c
28757--- linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
28758+++ linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
28759@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
28760 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
28761 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
28762
28763+ pax_track_stack();
28764
28765 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
28766 {
28767diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c
28768--- linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
28769+++ linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
28770@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
28771 IDI_SYNC_REQ req;
28772 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28773
28774+ pax_track_stack();
28775+
28776 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28777
28778 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28779diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c
28780--- linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
28781+++ linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
28782@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
28783 IDI_SYNC_REQ req;
28784 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28785
28786+ pax_track_stack();
28787+
28788 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28789
28790 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28791diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c
28792--- linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
28793+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
28794@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
28795 IDI_SYNC_REQ req;
28796 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28797
28798+ pax_track_stack();
28799+
28800 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28801
28802 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28803diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h
28804--- linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
28805+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
28806@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
28807 } diva_didd_add_adapter_t;
28808 typedef struct _diva_didd_remove_adapter {
28809 IDI_CALL p_request;
28810-} diva_didd_remove_adapter_t;
28811+} __no_const diva_didd_remove_adapter_t;
28812 typedef struct _diva_didd_read_adapter_array {
28813 void * buffer;
28814 dword length;
28815diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c
28816--- linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
28817+++ linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
28818@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
28819 IDI_SYNC_REQ req;
28820 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28821
28822+ pax_track_stack();
28823+
28824 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28825
28826 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28827diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/message.c linux-3.0.7/drivers/isdn/hardware/eicon/message.c
28828--- linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
28829+++ linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
28830@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
28831 dword d;
28832 word w;
28833
28834+ pax_track_stack();
28835+
28836 a = plci->adapter;
28837 Id = ((word)plci->Id<<8)|a->Id;
28838 PUT_WORD(&SS_Ind[4],0x0000);
28839@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
28840 word j, n, w;
28841 dword d;
28842
28843+ pax_track_stack();
28844+
28845
28846 for(i=0;i<8;i++) bp_parms[i].length = 0;
28847 for(i=0;i<2;i++) global_config[i].length = 0;
28848@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
28849 const byte llc3[] = {4,3,2,2,6,6,0};
28850 const byte header[] = {0,2,3,3,0,0,0};
28851
28852+ pax_track_stack();
28853+
28854 for(i=0;i<8;i++) bp_parms[i].length = 0;
28855 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
28856 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
28857@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
28858 word appl_number_group_type[MAX_APPL];
28859 PLCI *auxplci;
28860
28861+ pax_track_stack();
28862+
28863 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
28864
28865 if(!a->group_optimization_enabled)
28866diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c
28867--- linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
28868+++ linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
28869@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
28870 IDI_SYNC_REQ req;
28871 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28872
28873+ pax_track_stack();
28874+
28875 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28876
28877 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28878diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h
28879--- linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
28880+++ linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
28881@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
28882 typedef struct _diva_os_idi_adapter_interface {
28883 diva_init_card_proc_t cleanup_adapter_proc;
28884 diva_cmd_card_proc_t cmd_proc;
28885-} diva_os_idi_adapter_interface_t;
28886+} __no_const diva_os_idi_adapter_interface_t;
28887
28888 typedef struct _diva_os_xdi_adapter {
28889 struct list_head link;
28890diff -urNp linux-3.0.7/drivers/isdn/i4l/isdn_common.c linux-3.0.7/drivers/isdn/i4l/isdn_common.c
28891--- linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
28892+++ linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
28893@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
28894 } iocpar;
28895 void __user *argp = (void __user *)arg;
28896
28897+ pax_track_stack();
28898+
28899 #define name iocpar.name
28900 #define bname iocpar.bname
28901 #define iocts iocpar.iocts
28902diff -urNp linux-3.0.7/drivers/isdn/icn/icn.c linux-3.0.7/drivers/isdn/icn/icn.c
28903--- linux-3.0.7/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
28904+++ linux-3.0.7/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
28905@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
28906 if (count > len)
28907 count = len;
28908 if (user) {
28909- if (copy_from_user(msg, buf, count))
28910+ if (count > sizeof msg || copy_from_user(msg, buf, count))
28911 return -EFAULT;
28912 } else
28913 memcpy(msg, buf, count);
28914diff -urNp linux-3.0.7/drivers/lguest/core.c linux-3.0.7/drivers/lguest/core.c
28915--- linux-3.0.7/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
28916+++ linux-3.0.7/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
28917@@ -92,9 +92,17 @@ static __init int map_switcher(void)
28918 * it's worked so far. The end address needs +1 because __get_vm_area
28919 * allocates an extra guard page, so we need space for that.
28920 */
28921+
28922+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28923+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28924+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
28925+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28926+#else
28927 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28928 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
28929 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28930+#endif
28931+
28932 if (!switcher_vma) {
28933 err = -ENOMEM;
28934 printk("lguest: could not map switcher pages high\n");
28935@@ -119,7 +127,7 @@ static __init int map_switcher(void)
28936 * Now the Switcher is mapped at the right address, we can't fail!
28937 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
28938 */
28939- memcpy(switcher_vma->addr, start_switcher_text,
28940+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
28941 end_switcher_text - start_switcher_text);
28942
28943 printk(KERN_INFO "lguest: mapped switcher at %p\n",
28944diff -urNp linux-3.0.7/drivers/lguest/x86/core.c linux-3.0.7/drivers/lguest/x86/core.c
28945--- linux-3.0.7/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
28946+++ linux-3.0.7/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
28947@@ -59,7 +59,7 @@ static struct {
28948 /* Offset from where switcher.S was compiled to where we've copied it */
28949 static unsigned long switcher_offset(void)
28950 {
28951- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
28952+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
28953 }
28954
28955 /* This cpu's struct lguest_pages. */
28956@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
28957 * These copies are pretty cheap, so we do them unconditionally: */
28958 /* Save the current Host top-level page directory.
28959 */
28960+
28961+#ifdef CONFIG_PAX_PER_CPU_PGD
28962+ pages->state.host_cr3 = read_cr3();
28963+#else
28964 pages->state.host_cr3 = __pa(current->mm->pgd);
28965+#endif
28966+
28967 /*
28968 * Set up the Guest's page tables to see this CPU's pages (and no
28969 * other CPU's pages).
28970@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
28971 * compiled-in switcher code and the high-mapped copy we just made.
28972 */
28973 for (i = 0; i < IDT_ENTRIES; i++)
28974- default_idt_entries[i] += switcher_offset();
28975+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
28976
28977 /*
28978 * Set up the Switcher's per-cpu areas.
28979@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
28980 * it will be undisturbed when we switch. To change %cs and jump we
28981 * need this structure to feed to Intel's "lcall" instruction.
28982 */
28983- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
28984+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
28985 lguest_entry.segment = LGUEST_CS;
28986
28987 /*
28988diff -urNp linux-3.0.7/drivers/lguest/x86/switcher_32.S linux-3.0.7/drivers/lguest/x86/switcher_32.S
28989--- linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
28990+++ linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
28991@@ -87,6 +87,7 @@
28992 #include <asm/page.h>
28993 #include <asm/segment.h>
28994 #include <asm/lguest.h>
28995+#include <asm/processor-flags.h>
28996
28997 // We mark the start of the code to copy
28998 // It's placed in .text tho it's never run here
28999@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29000 // Changes type when we load it: damn Intel!
29001 // For after we switch over our page tables
29002 // That entry will be read-only: we'd crash.
29003+
29004+#ifdef CONFIG_PAX_KERNEXEC
29005+ mov %cr0, %edx
29006+ xor $X86_CR0_WP, %edx
29007+ mov %edx, %cr0
29008+#endif
29009+
29010 movl $(GDT_ENTRY_TSS*8), %edx
29011 ltr %dx
29012
29013@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29014 // Let's clear it again for our return.
29015 // The GDT descriptor of the Host
29016 // Points to the table after two "size" bytes
29017- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29018+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29019 // Clear "used" from type field (byte 5, bit 2)
29020- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29021+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29022+
29023+#ifdef CONFIG_PAX_KERNEXEC
29024+ mov %cr0, %eax
29025+ xor $X86_CR0_WP, %eax
29026+ mov %eax, %cr0
29027+#endif
29028
29029 // Once our page table's switched, the Guest is live!
29030 // The Host fades as we run this final step.
29031@@ -295,13 +309,12 @@ deliver_to_host:
29032 // I consulted gcc, and it gave
29033 // These instructions, which I gladly credit:
29034 leal (%edx,%ebx,8), %eax
29035- movzwl (%eax),%edx
29036- movl 4(%eax), %eax
29037- xorw %ax, %ax
29038- orl %eax, %edx
29039+ movl 4(%eax), %edx
29040+ movw (%eax), %dx
29041 // Now the address of the handler's in %edx
29042 // We call it now: its "iret" drops us home.
29043- jmp *%edx
29044+ ljmp $__KERNEL_CS, $1f
29045+1: jmp *%edx
29046
29047 // Every interrupt can come to us here
29048 // But we must truly tell each apart.
29049diff -urNp linux-3.0.7/drivers/macintosh/macio_asic.c linux-3.0.7/drivers/macintosh/macio_asic.c
29050--- linux-3.0.7/drivers/macintosh/macio_asic.c 2011-07-21 22:17:23.000000000 -0400
29051+++ linux-3.0.7/drivers/macintosh/macio_asic.c 2011-10-11 10:44:33.000000000 -0400
29052@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29053 * MacIO is matched against any Apple ID, it's probe() function
29054 * will then decide wether it applies or not
29055 */
29056-static const struct pci_device_id __devinitdata pci_ids [] = { {
29057+static const struct pci_device_id __devinitconst pci_ids [] = { {
29058 .vendor = PCI_VENDOR_ID_APPLE,
29059 .device = PCI_ANY_ID,
29060 .subvendor = PCI_ANY_ID,
29061diff -urNp linux-3.0.7/drivers/md/dm-ioctl.c linux-3.0.7/drivers/md/dm-ioctl.c
29062--- linux-3.0.7/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
29063+++ linux-3.0.7/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
29064@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
29065 cmd == DM_LIST_VERSIONS_CMD)
29066 return 0;
29067
29068- if ((cmd == DM_DEV_CREATE_CMD)) {
29069+ if (cmd == DM_DEV_CREATE_CMD) {
29070 if (!*param->name) {
29071 DMWARN("name not supplied when creating device");
29072 return -EINVAL;
29073diff -urNp linux-3.0.7/drivers/md/dm-raid1.c linux-3.0.7/drivers/md/dm-raid1.c
29074--- linux-3.0.7/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
29075+++ linux-3.0.7/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
29076@@ -40,7 +40,7 @@ enum dm_raid1_error {
29077
29078 struct mirror {
29079 struct mirror_set *ms;
29080- atomic_t error_count;
29081+ atomic_unchecked_t error_count;
29082 unsigned long error_type;
29083 struct dm_dev *dev;
29084 sector_t offset;
29085@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29086 struct mirror *m;
29087
29088 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29089- if (!atomic_read(&m->error_count))
29090+ if (!atomic_read_unchecked(&m->error_count))
29091 return m;
29092
29093 return NULL;
29094@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29095 * simple way to tell if a device has encountered
29096 * errors.
29097 */
29098- atomic_inc(&m->error_count);
29099+ atomic_inc_unchecked(&m->error_count);
29100
29101 if (test_and_set_bit(error_type, &m->error_type))
29102 return;
29103@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29104 struct mirror *m = get_default_mirror(ms);
29105
29106 do {
29107- if (likely(!atomic_read(&m->error_count)))
29108+ if (likely(!atomic_read_unchecked(&m->error_count)))
29109 return m;
29110
29111 if (m-- == ms->mirror)
29112@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29113 {
29114 struct mirror *default_mirror = get_default_mirror(m->ms);
29115
29116- return !atomic_read(&default_mirror->error_count);
29117+ return !atomic_read_unchecked(&default_mirror->error_count);
29118 }
29119
29120 static int mirror_available(struct mirror_set *ms, struct bio *bio)
29121@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29122 */
29123 if (likely(region_in_sync(ms, region, 1)))
29124 m = choose_mirror(ms, bio->bi_sector);
29125- else if (m && atomic_read(&m->error_count))
29126+ else if (m && atomic_read_unchecked(&m->error_count))
29127 m = NULL;
29128
29129 if (likely(m))
29130@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29131 }
29132
29133 ms->mirror[mirror].ms = ms;
29134- atomic_set(&(ms->mirror[mirror].error_count), 0);
29135+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29136 ms->mirror[mirror].error_type = 0;
29137 ms->mirror[mirror].offset = offset;
29138
29139@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29140 */
29141 static char device_status_char(struct mirror *m)
29142 {
29143- if (!atomic_read(&(m->error_count)))
29144+ if (!atomic_read_unchecked(&(m->error_count)))
29145 return 'A';
29146
29147 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29148diff -urNp linux-3.0.7/drivers/md/dm-stripe.c linux-3.0.7/drivers/md/dm-stripe.c
29149--- linux-3.0.7/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
29150+++ linux-3.0.7/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
29151@@ -20,7 +20,7 @@ struct stripe {
29152 struct dm_dev *dev;
29153 sector_t physical_start;
29154
29155- atomic_t error_count;
29156+ atomic_unchecked_t error_count;
29157 };
29158
29159 struct stripe_c {
29160@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29161 kfree(sc);
29162 return r;
29163 }
29164- atomic_set(&(sc->stripe[i].error_count), 0);
29165+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29166 }
29167
29168 ti->private = sc;
29169@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29170 DMEMIT("%d ", sc->stripes);
29171 for (i = 0; i < sc->stripes; i++) {
29172 DMEMIT("%s ", sc->stripe[i].dev->name);
29173- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29174+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29175 'D' : 'A';
29176 }
29177 buffer[i] = '\0';
29178@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29179 */
29180 for (i = 0; i < sc->stripes; i++)
29181 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29182- atomic_inc(&(sc->stripe[i].error_count));
29183- if (atomic_read(&(sc->stripe[i].error_count)) <
29184+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29185+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29186 DM_IO_ERROR_THRESHOLD)
29187 schedule_work(&sc->trigger_event);
29188 }
29189diff -urNp linux-3.0.7/drivers/md/dm-table.c linux-3.0.7/drivers/md/dm-table.c
29190--- linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:09.000000000 -0400
29191+++ linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:19.000000000 -0400
29192@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
29193 if (!dev_size)
29194 return 0;
29195
29196- if ((start >= dev_size) || (start + len > dev_size)) {
29197+ if ((start >= dev_size) || (len > dev_size - start)) {
29198 DMWARN("%s: %s too small for target: "
29199 "start=%llu, len=%llu, dev_size=%llu",
29200 dm_device_name(ti->table->md), bdevname(bdev, b),
29201diff -urNp linux-3.0.7/drivers/md/dm.c linux-3.0.7/drivers/md/dm.c
29202--- linux-3.0.7/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
29203+++ linux-3.0.7/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
29204@@ -164,9 +164,9 @@ struct mapped_device {
29205 /*
29206 * Event handling.
29207 */
29208- atomic_t event_nr;
29209+ atomic_unchecked_t event_nr;
29210 wait_queue_head_t eventq;
29211- atomic_t uevent_seq;
29212+ atomic_unchecked_t uevent_seq;
29213 struct list_head uevent_list;
29214 spinlock_t uevent_lock; /* Protect access to uevent_list */
29215
29216@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
29217 rwlock_init(&md->map_lock);
29218 atomic_set(&md->holders, 1);
29219 atomic_set(&md->open_count, 0);
29220- atomic_set(&md->event_nr, 0);
29221- atomic_set(&md->uevent_seq, 0);
29222+ atomic_set_unchecked(&md->event_nr, 0);
29223+ atomic_set_unchecked(&md->uevent_seq, 0);
29224 INIT_LIST_HEAD(&md->uevent_list);
29225 spin_lock_init(&md->uevent_lock);
29226
29227@@ -1977,7 +1977,7 @@ static void event_callback(void *context
29228
29229 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29230
29231- atomic_inc(&md->event_nr);
29232+ atomic_inc_unchecked(&md->event_nr);
29233 wake_up(&md->eventq);
29234 }
29235
29236@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
29237
29238 uint32_t dm_next_uevent_seq(struct mapped_device *md)
29239 {
29240- return atomic_add_return(1, &md->uevent_seq);
29241+ return atomic_add_return_unchecked(1, &md->uevent_seq);
29242 }
29243
29244 uint32_t dm_get_event_nr(struct mapped_device *md)
29245 {
29246- return atomic_read(&md->event_nr);
29247+ return atomic_read_unchecked(&md->event_nr);
29248 }
29249
29250 int dm_wait_event(struct mapped_device *md, int event_nr)
29251 {
29252 return wait_event_interruptible(md->eventq,
29253- (event_nr != atomic_read(&md->event_nr)));
29254+ (event_nr != atomic_read_unchecked(&md->event_nr)));
29255 }
29256
29257 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29258diff -urNp linux-3.0.7/drivers/md/md.c linux-3.0.7/drivers/md/md.c
29259--- linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:09.000000000 -0400
29260+++ linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:19.000000000 -0400
29261@@ -231,10 +231,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
29262 * start build, activate spare
29263 */
29264 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29265-static atomic_t md_event_count;
29266+static atomic_unchecked_t md_event_count;
29267 void md_new_event(mddev_t *mddev)
29268 {
29269- atomic_inc(&md_event_count);
29270+ atomic_inc_unchecked(&md_event_count);
29271 wake_up(&md_event_waiters);
29272 }
29273 EXPORT_SYMBOL_GPL(md_new_event);
29274@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29275 */
29276 static void md_new_event_inintr(mddev_t *mddev)
29277 {
29278- atomic_inc(&md_event_count);
29279+ atomic_inc_unchecked(&md_event_count);
29280 wake_up(&md_event_waiters);
29281 }
29282
29283@@ -1475,7 +1475,7 @@ static int super_1_load(mdk_rdev_t *rdev
29284
29285 rdev->preferred_minor = 0xffff;
29286 rdev->data_offset = le64_to_cpu(sb->data_offset);
29287- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29288+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29289
29290 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29291 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29292@@ -1653,7 +1653,7 @@ static void super_1_sync(mddev_t *mddev,
29293 else
29294 sb->resync_offset = cpu_to_le64(0);
29295
29296- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29297+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29298
29299 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29300 sb->size = cpu_to_le64(mddev->dev_sectors);
29301@@ -2446,7 +2446,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29302 static ssize_t
29303 errors_show(mdk_rdev_t *rdev, char *page)
29304 {
29305- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29306+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29307 }
29308
29309 static ssize_t
29310@@ -2455,7 +2455,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29311 char *e;
29312 unsigned long n = simple_strtoul(buf, &e, 10);
29313 if (*buf && (*e == 0 || *e == '\n')) {
29314- atomic_set(&rdev->corrected_errors, n);
29315+ atomic_set_unchecked(&rdev->corrected_errors, n);
29316 return len;
29317 }
29318 return -EINVAL;
29319@@ -2811,8 +2811,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
29320 rdev->last_read_error.tv_sec = 0;
29321 rdev->last_read_error.tv_nsec = 0;
29322 atomic_set(&rdev->nr_pending, 0);
29323- atomic_set(&rdev->read_errors, 0);
29324- atomic_set(&rdev->corrected_errors, 0);
29325+ atomic_set_unchecked(&rdev->read_errors, 0);
29326+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29327
29328 INIT_LIST_HEAD(&rdev->same_set);
29329 init_waitqueue_head(&rdev->blocked_wait);
29330@@ -6440,7 +6440,7 @@ static int md_seq_show(struct seq_file *
29331
29332 spin_unlock(&pers_lock);
29333 seq_printf(seq, "\n");
29334- mi->event = atomic_read(&md_event_count);
29335+ mi->event = atomic_read_unchecked(&md_event_count);
29336 return 0;
29337 }
29338 if (v == (void*)2) {
29339@@ -6529,7 +6529,7 @@ static int md_seq_show(struct seq_file *
29340 chunk_kb ? "KB" : "B");
29341 if (bitmap->file) {
29342 seq_printf(seq, ", file: ");
29343- seq_path(seq, &bitmap->file->f_path, " \t\n");
29344+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29345 }
29346
29347 seq_printf(seq, "\n");
29348@@ -6563,7 +6563,7 @@ static int md_seq_open(struct inode *ino
29349 else {
29350 struct seq_file *p = file->private_data;
29351 p->private = mi;
29352- mi->event = atomic_read(&md_event_count);
29353+ mi->event = atomic_read_unchecked(&md_event_count);
29354 }
29355 return error;
29356 }
29357@@ -6579,7 +6579,7 @@ static unsigned int mdstat_poll(struct f
29358 /* always allow read */
29359 mask = POLLIN | POLLRDNORM;
29360
29361- if (mi->event != atomic_read(&md_event_count))
29362+ if (mi->event != atomic_read_unchecked(&md_event_count))
29363 mask |= POLLERR | POLLPRI;
29364 return mask;
29365 }
29366@@ -6623,7 +6623,7 @@ static int is_mddev_idle(mddev_t *mddev,
29367 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29368 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29369 (int)part_stat_read(&disk->part0, sectors[1]) -
29370- atomic_read(&disk->sync_io);
29371+ atomic_read_unchecked(&disk->sync_io);
29372 /* sync IO will cause sync_io to increase before the disk_stats
29373 * as sync_io is counted when a request starts, and
29374 * disk_stats is counted when it completes.
29375diff -urNp linux-3.0.7/drivers/md/md.h linux-3.0.7/drivers/md/md.h
29376--- linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:09.000000000 -0400
29377+++ linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:19.000000000 -0400
29378@@ -97,13 +97,13 @@ struct mdk_rdev_s
29379 * only maintained for arrays that
29380 * support hot removal
29381 */
29382- atomic_t read_errors; /* number of consecutive read errors that
29383+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29384 * we have tried to ignore.
29385 */
29386 struct timespec last_read_error; /* monotonic time since our
29387 * last read error
29388 */
29389- atomic_t corrected_errors; /* number of corrected read errors,
29390+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29391 * for reporting to userspace and storing
29392 * in superblock.
29393 */
29394@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
29395
29396 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29397 {
29398- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29399+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29400 }
29401
29402 struct mdk_personality
29403diff -urNp linux-3.0.7/drivers/md/raid1.c linux-3.0.7/drivers/md/raid1.c
29404--- linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:09.000000000 -0400
29405+++ linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:19.000000000 -0400
29406@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
29407 rdev_dec_pending(rdev, mddev);
29408 md_error(mddev, rdev);
29409 } else
29410- atomic_add(s, &rdev->corrected_errors);
29411+ atomic_add_unchecked(s, &rdev->corrected_errors);
29412 }
29413 d = start;
29414 while (d != r1_bio->read_disk) {
29415@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
29416 /* Well, this device is dead */
29417 md_error(mddev, rdev);
29418 else {
29419- atomic_add(s, &rdev->corrected_errors);
29420+ atomic_add_unchecked(s, &rdev->corrected_errors);
29421 printk(KERN_INFO
29422 "md/raid1:%s: read error corrected "
29423 "(%d sectors at %llu on %s)\n",
29424diff -urNp linux-3.0.7/drivers/md/raid10.c linux-3.0.7/drivers/md/raid10.c
29425--- linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:09.000000000 -0400
29426+++ linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:19.000000000 -0400
29427@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
29428 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
29429 set_bit(R10BIO_Uptodate, &r10_bio->state);
29430 else {
29431- atomic_add(r10_bio->sectors,
29432+ atomic_add_unchecked(r10_bio->sectors,
29433 &conf->mirrors[d].rdev->corrected_errors);
29434 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
29435 md_error(r10_bio->mddev,
29436@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
29437 {
29438 struct timespec cur_time_mon;
29439 unsigned long hours_since_last;
29440- unsigned int read_errors = atomic_read(&rdev->read_errors);
29441+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29442
29443 ktime_get_ts(&cur_time_mon);
29444
29445@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
29446 * overflowing the shift of read_errors by hours_since_last.
29447 */
29448 if (hours_since_last >= 8 * sizeof(read_errors))
29449- atomic_set(&rdev->read_errors, 0);
29450+ atomic_set_unchecked(&rdev->read_errors, 0);
29451 else
29452- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29453+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29454 }
29455
29456 /*
29457@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
29458 return;
29459
29460 check_decay_read_errors(mddev, rdev);
29461- atomic_inc(&rdev->read_errors);
29462- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29463+ atomic_inc_unchecked(&rdev->read_errors);
29464+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29465 char b[BDEVNAME_SIZE];
29466 bdevname(rdev->bdev, b);
29467
29468@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
29469 "md/raid10:%s: %s: Raid device exceeded "
29470 "read_error threshold [cur %d:max %d]\n",
29471 mdname(mddev), b,
29472- atomic_read(&rdev->read_errors), max_read_errors);
29473+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29474 printk(KERN_NOTICE
29475 "md/raid10:%s: %s: Failing raid device\n",
29476 mdname(mddev), b);
29477@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
29478 test_bit(In_sync, &rdev->flags)) {
29479 atomic_inc(&rdev->nr_pending);
29480 rcu_read_unlock();
29481- atomic_add(s, &rdev->corrected_errors);
29482+ atomic_add_unchecked(s, &rdev->corrected_errors);
29483 if (sync_page_io(rdev,
29484 r10_bio->devs[sl].addr +
29485 sect,
29486diff -urNp linux-3.0.7/drivers/md/raid5.c linux-3.0.7/drivers/md/raid5.c
29487--- linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:09.000000000 -0400
29488+++ linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:19.000000000 -0400
29489@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
29490 bi->bi_next = NULL;
29491 if ((rw & WRITE) &&
29492 test_bit(R5_ReWrite, &sh->dev[i].flags))
29493- atomic_add(STRIPE_SECTORS,
29494+ atomic_add_unchecked(STRIPE_SECTORS,
29495 &rdev->corrected_errors);
29496 generic_make_request(bi);
29497 } else {
29498@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
29499 clear_bit(R5_ReadError, &sh->dev[i].flags);
29500 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29501 }
29502- if (atomic_read(&conf->disks[i].rdev->read_errors))
29503- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29504+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29505+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29506 } else {
29507 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29508 int retry = 0;
29509 rdev = conf->disks[i].rdev;
29510
29511 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29512- atomic_inc(&rdev->read_errors);
29513+ atomic_inc_unchecked(&rdev->read_errors);
29514 if (conf->mddev->degraded >= conf->max_degraded)
29515 printk_rl(KERN_WARNING
29516 "md/raid:%s: read error not correctable "
29517@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
29518 (unsigned long long)(sh->sector
29519 + rdev->data_offset),
29520 bdn);
29521- else if (atomic_read(&rdev->read_errors)
29522+ else if (atomic_read_unchecked(&rdev->read_errors)
29523 > conf->max_nr_stripes)
29524 printk(KERN_WARNING
29525 "md/raid:%s: Too many read errors, failing device %s.\n",
29526@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
29527 sector_t r_sector;
29528 struct stripe_head sh2;
29529
29530+ pax_track_stack();
29531
29532 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29533 stripe = new_sector;
29534diff -urNp linux-3.0.7/drivers/media/common/saa7146_hlp.c linux-3.0.7/drivers/media/common/saa7146_hlp.c
29535--- linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
29536+++ linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
29537@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29538
29539 int x[32], y[32], w[32], h[32];
29540
29541+ pax_track_stack();
29542+
29543 /* clear out memory */
29544 memset(&line_list[0], 0x00, sizeof(u32)*32);
29545 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29546diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29547--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
29548+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
29549@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29550 u8 buf[HOST_LINK_BUF_SIZE];
29551 int i;
29552
29553+ pax_track_stack();
29554+
29555 dprintk("%s\n", __func__);
29556
29557 /* check if we have space for a link buf in the rx_buffer */
29558@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29559 unsigned long timeout;
29560 int written;
29561
29562+ pax_track_stack();
29563+
29564 dprintk("%s\n", __func__);
29565
29566 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29567diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h
29568--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
29569+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
29570@@ -68,12 +68,12 @@ struct dvb_demux_feed {
29571 union {
29572 struct dmx_ts_feed ts;
29573 struct dmx_section_feed sec;
29574- } feed;
29575+ } __no_const feed;
29576
29577 union {
29578 dmx_ts_cb ts;
29579 dmx_section_cb sec;
29580- } cb;
29581+ } __no_const cb;
29582
29583 struct dvb_demux *demux;
29584 void *priv;
29585diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c
29586--- linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
29587+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
29588@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29589 const struct dvb_device *template, void *priv, int type)
29590 {
29591 struct dvb_device *dvbdev;
29592- struct file_operations *dvbdevfops;
29593+ file_operations_no_const *dvbdevfops;
29594 struct device *clsdev;
29595 int minor;
29596 int id;
29597diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c
29598--- linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
29599+++ linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
29600@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29601 struct dib0700_adapter_state {
29602 int (*set_param_save) (struct dvb_frontend *,
29603 struct dvb_frontend_parameters *);
29604-};
29605+} __no_const;
29606
29607 static int dib7070_set_param_override(struct dvb_frontend *fe,
29608 struct dvb_frontend_parameters *fep)
29609diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c
29610--- linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
29611+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
29612@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
29613 if (!buf)
29614 return -ENOMEM;
29615
29616+ pax_track_stack();
29617+
29618 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29619 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29620 hx.addr, hx.len, hx.chk);
29621diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h
29622--- linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
29623+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
29624@@ -97,7 +97,7 @@
29625 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
29626
29627 struct dibusb_state {
29628- struct dib_fe_xfer_ops ops;
29629+ dib_fe_xfer_ops_no_const ops;
29630 int mt2060_present;
29631 u8 tuner_addr;
29632 };
29633diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c
29634--- linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
29635+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
29636@@ -95,7 +95,7 @@ struct su3000_state {
29637
29638 struct s6x0_state {
29639 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29640-};
29641+} __no_const;
29642
29643 /* debug */
29644 static int dvb_usb_dw2102_debug;
29645diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c
29646--- linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
29647+++ linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
29648@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29649 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29650 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29651
29652+ pax_track_stack();
29653
29654 data[0] = 0x8a;
29655 len_in = 1;
29656@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29657 int ret = 0, len_in;
29658 u8 data[512] = {0};
29659
29660+ pax_track_stack();
29661+
29662 data[0] = 0x0a;
29663 len_in = 1;
29664 info("FRM Firmware Cold Reset");
29665diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000.h linux-3.0.7/drivers/media/dvb/frontends/dib3000.h
29666--- linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
29667+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
29668@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
29669 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
29670 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
29671 };
29672+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
29673
29674 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
29675 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29676- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
29677+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
29678 #else
29679 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29680 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29681diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c
29682--- linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
29683+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
29684@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
29685 static struct dvb_frontend_ops dib3000mb_ops;
29686
29687 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29688- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29689+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
29690 {
29691 struct dib3000_state* state = NULL;
29692
29693diff -urNp linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c
29694--- linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
29695+++ linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
29696@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
29697 int ret = -1;
29698 int sync;
29699
29700+ pax_track_stack();
29701+
29702 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
29703
29704 fcp = 3000;
29705diff -urNp linux-3.0.7/drivers/media/dvb/frontends/or51211.c linux-3.0.7/drivers/media/dvb/frontends/or51211.c
29706--- linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
29707+++ linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
29708@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
29709 u8 tudata[585];
29710 int i;
29711
29712+ pax_track_stack();
29713+
29714 dprintk("Firmware is %zd bytes\n",fw->size);
29715
29716 /* Get eprom data */
29717diff -urNp linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c
29718--- linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-07-21 22:17:23.000000000 -0400
29719+++ linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-10-11 10:44:33.000000000 -0400
29720@@ -379,7 +379,7 @@ static struct ngene_info ngene_info_m780
29721
29722 /****************************************************************************/
29723
29724-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
29725+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
29726 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
29727 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
29728 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
29729diff -urNp linux-3.0.7/drivers/media/video/cx18/cx18-driver.c linux-3.0.7/drivers/media/video/cx18/cx18-driver.c
29730--- linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
29731+++ linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
29732@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
29733 struct i2c_client c;
29734 u8 eedata[256];
29735
29736+ pax_track_stack();
29737+
29738 memset(&c, 0, sizeof(c));
29739 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
29740 c.adapter = &cx->i2c_adap[0];
29741diff -urNp linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c
29742--- linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
29743+++ linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
29744@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
29745 bool handle = false;
29746 struct ir_raw_event ir_core_event[64];
29747
29748+ pax_track_stack();
29749+
29750 do {
29751 num = 0;
29752 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
29753diff -urNp linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c
29754--- linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-07-21 22:17:23.000000000 -0400
29755+++ linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-10-11 10:44:33.000000000 -0400
29756@@ -764,7 +764,7 @@ static struct snd_kcontrol_new snd_cx88_
29757 * Only boards with eeprom and byte 1 at eeprom=1 have it
29758 */
29759
29760-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
29761+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
29762 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29763 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29764 {0, }
29765diff -urNp linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
29766--- linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
29767+++ linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
29768@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
29769 u8 *eeprom;
29770 struct tveeprom tvdata;
29771
29772+ pax_track_stack();
29773+
29774 memset(&tvdata,0,sizeof(tvdata));
29775
29776 eeprom = pvr2_eeprom_fetch(hdw);
29777diff -urNp linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c
29778--- linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
29779+++ linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
29780@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
29781 unsigned char localPAT[256];
29782 unsigned char localPMT[256];
29783
29784+ pax_track_stack();
29785+
29786 /* Set video format - must be done first as it resets other settings */
29787 set_reg8(client, 0x41, h->video_format);
29788
29789diff -urNp linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c
29790--- linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
29791+++ linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
29792@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
29793 u8 tmp[512];
29794 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29795
29796+ pax_track_stack();
29797+
29798 /* While any outstand message on the bus exists... */
29799 do {
29800
29801@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
29802 u8 tmp[512];
29803 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29804
29805+ pax_track_stack();
29806+
29807 while (loop) {
29808
29809 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
29810diff -urNp linux-3.0.7/drivers/media/video/timblogiw.c linux-3.0.7/drivers/media/video/timblogiw.c
29811--- linux-3.0.7/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
29812+++ linux-3.0.7/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
29813@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
29814
29815 /* Platform device functions */
29816
29817-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
29818+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
29819 .vidioc_querycap = timblogiw_querycap,
29820 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
29821 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
29822diff -urNp linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c
29823--- linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
29824+++ linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
29825@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
29826 unsigned char rv, gv, bv;
29827 static unsigned char *Y, *U, *V;
29828
29829+ pax_track_stack();
29830+
29831 frame = usbvision->cur_frame;
29832 image_size = frame->frmwidth * frame->frmheight;
29833 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
29834diff -urNp linux-3.0.7/drivers/media/video/videobuf-dma-sg.c linux-3.0.7/drivers/media/video/videobuf-dma-sg.c
29835--- linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
29836+++ linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
29837@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
29838 {
29839 struct videobuf_queue q;
29840
29841+ pax_track_stack();
29842+
29843 /* Required to make generic handler to call __videobuf_alloc */
29844 q.int_ops = &sg_ops;
29845
29846diff -urNp linux-3.0.7/drivers/message/fusion/mptbase.c linux-3.0.7/drivers/message/fusion/mptbase.c
29847--- linux-3.0.7/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
29848+++ linux-3.0.7/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
29849@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
29850 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
29851 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
29852
29853+#ifdef CONFIG_GRKERNSEC_HIDESYM
29854+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
29855+#else
29856 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
29857 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
29858+#endif
29859+
29860 /*
29861 * Rounding UP to nearest 4-kB boundary here...
29862 */
29863diff -urNp linux-3.0.7/drivers/message/fusion/mptsas.c linux-3.0.7/drivers/message/fusion/mptsas.c
29864--- linux-3.0.7/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
29865+++ linux-3.0.7/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
29866@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
29867 return 0;
29868 }
29869
29870+static inline void
29871+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29872+{
29873+ if (phy_info->port_details) {
29874+ phy_info->port_details->rphy = rphy;
29875+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29876+ ioc->name, rphy));
29877+ }
29878+
29879+ if (rphy) {
29880+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29881+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29882+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29883+ ioc->name, rphy, rphy->dev.release));
29884+ }
29885+}
29886+
29887 /* no mutex */
29888 static void
29889 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
29890@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
29891 return NULL;
29892 }
29893
29894-static inline void
29895-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29896-{
29897- if (phy_info->port_details) {
29898- phy_info->port_details->rphy = rphy;
29899- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29900- ioc->name, rphy));
29901- }
29902-
29903- if (rphy) {
29904- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29905- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29906- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29907- ioc->name, rphy, rphy->dev.release));
29908- }
29909-}
29910-
29911 static inline struct sas_port *
29912 mptsas_get_port(struct mptsas_phyinfo *phy_info)
29913 {
29914diff -urNp linux-3.0.7/drivers/message/fusion/mptscsih.c linux-3.0.7/drivers/message/fusion/mptscsih.c
29915--- linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
29916+++ linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
29917@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
29918
29919 h = shost_priv(SChost);
29920
29921- if (h) {
29922- if (h->info_kbuf == NULL)
29923- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29924- return h->info_kbuf;
29925- h->info_kbuf[0] = '\0';
29926+ if (!h)
29927+ return NULL;
29928
29929- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29930- h->info_kbuf[size-1] = '\0';
29931- }
29932+ if (h->info_kbuf == NULL)
29933+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29934+ return h->info_kbuf;
29935+ h->info_kbuf[0] = '\0';
29936+
29937+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29938+ h->info_kbuf[size-1] = '\0';
29939
29940 return h->info_kbuf;
29941 }
29942diff -urNp linux-3.0.7/drivers/message/i2o/i2o_config.c linux-3.0.7/drivers/message/i2o/i2o_config.c
29943--- linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
29944+++ linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
29945@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
29946 struct i2o_message *msg;
29947 unsigned int iop;
29948
29949+ pax_track_stack();
29950+
29951 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
29952 return -EFAULT;
29953
29954diff -urNp linux-3.0.7/drivers/message/i2o/i2o_proc.c linux-3.0.7/drivers/message/i2o/i2o_proc.c
29955--- linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
29956+++ linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
29957@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
29958 "Array Controller Device"
29959 };
29960
29961-static char *chtostr(u8 * chars, int n)
29962-{
29963- char tmp[256];
29964- tmp[0] = 0;
29965- return strncat(tmp, (char *)chars, n);
29966-}
29967-
29968 static int i2o_report_query_status(struct seq_file *seq, int block_status,
29969 char *group)
29970 {
29971@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
29972
29973 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
29974 seq_printf(seq, "%-#8x", ddm_table.module_id);
29975- seq_printf(seq, "%-29s",
29976- chtostr(ddm_table.module_name_version, 28));
29977+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
29978 seq_printf(seq, "%9d ", ddm_table.data_size);
29979 seq_printf(seq, "%8d", ddm_table.code_size);
29980
29981@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
29982
29983 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
29984 seq_printf(seq, "%-#8x", dst->module_id);
29985- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
29986- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
29987+ seq_printf(seq, "%-.28s", dst->module_name_version);
29988+ seq_printf(seq, "%-.8s", dst->date);
29989 seq_printf(seq, "%8d ", dst->module_size);
29990 seq_printf(seq, "%8d ", dst->mpb_size);
29991 seq_printf(seq, "0x%04x", dst->module_flags);
29992@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
29993 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
29994 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
29995 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
29996- seq_printf(seq, "Vendor info : %s\n",
29997- chtostr((u8 *) (work32 + 2), 16));
29998- seq_printf(seq, "Product info : %s\n",
29999- chtostr((u8 *) (work32 + 6), 16));
30000- seq_printf(seq, "Description : %s\n",
30001- chtostr((u8 *) (work32 + 10), 16));
30002- seq_printf(seq, "Product rev. : %s\n",
30003- chtostr((u8 *) (work32 + 14), 8));
30004+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30005+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30006+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30007+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30008
30009 seq_printf(seq, "Serial number : ");
30010 print_serial_number(seq, (u8 *) (work32 + 16),
30011@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30012 }
30013
30014 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30015- seq_printf(seq, "Module name : %s\n",
30016- chtostr(result.module_name, 24));
30017- seq_printf(seq, "Module revision : %s\n",
30018- chtostr(result.module_rev, 8));
30019+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
30020+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30021
30022 seq_printf(seq, "Serial number : ");
30023 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30024@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30025 return 0;
30026 }
30027
30028- seq_printf(seq, "Device name : %s\n",
30029- chtostr(result.device_name, 64));
30030- seq_printf(seq, "Service name : %s\n",
30031- chtostr(result.service_name, 64));
30032- seq_printf(seq, "Physical name : %s\n",
30033- chtostr(result.physical_location, 64));
30034- seq_printf(seq, "Instance number : %s\n",
30035- chtostr(result.instance_number, 4));
30036+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
30037+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
30038+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30039+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30040
30041 return 0;
30042 }
30043diff -urNp linux-3.0.7/drivers/message/i2o/iop.c linux-3.0.7/drivers/message/i2o/iop.c
30044--- linux-3.0.7/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
30045+++ linux-3.0.7/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
30046@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30047
30048 spin_lock_irqsave(&c->context_list_lock, flags);
30049
30050- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30051- atomic_inc(&c->context_list_counter);
30052+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30053+ atomic_inc_unchecked(&c->context_list_counter);
30054
30055- entry->context = atomic_read(&c->context_list_counter);
30056+ entry->context = atomic_read_unchecked(&c->context_list_counter);
30057
30058 list_add(&entry->list, &c->context_list);
30059
30060@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30061
30062 #if BITS_PER_LONG == 64
30063 spin_lock_init(&c->context_list_lock);
30064- atomic_set(&c->context_list_counter, 0);
30065+ atomic_set_unchecked(&c->context_list_counter, 0);
30066 INIT_LIST_HEAD(&c->context_list);
30067 #endif
30068
30069diff -urNp linux-3.0.7/drivers/mfd/ab3100-core.c linux-3.0.7/drivers/mfd/ab3100-core.c
30070--- linux-3.0.7/drivers/mfd/ab3100-core.c 2011-07-21 22:17:23.000000000 -0400
30071+++ linux-3.0.7/drivers/mfd/ab3100-core.c 2011-10-11 10:44:33.000000000 -0400
30072@@ -809,7 +809,7 @@ struct ab_family_id {
30073 char *name;
30074 };
30075
30076-static const struct ab_family_id ids[] __devinitdata = {
30077+static const struct ab_family_id ids[] __devinitconst = {
30078 /* AB3100 */
30079 {
30080 .id = 0xc0,
30081diff -urNp linux-3.0.7/drivers/mfd/abx500-core.c linux-3.0.7/drivers/mfd/abx500-core.c
30082--- linux-3.0.7/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
30083+++ linux-3.0.7/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
30084@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30085
30086 struct abx500_device_entry {
30087 struct list_head list;
30088- struct abx500_ops ops;
30089+ abx500_ops_no_const ops;
30090 struct device *dev;
30091 };
30092
30093diff -urNp linux-3.0.7/drivers/mfd/janz-cmodio.c linux-3.0.7/drivers/mfd/janz-cmodio.c
30094--- linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
30095+++ linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
30096@@ -13,6 +13,7 @@
30097
30098 #include <linux/kernel.h>
30099 #include <linux/module.h>
30100+#include <linux/slab.h>
30101 #include <linux/init.h>
30102 #include <linux/pci.h>
30103 #include <linux/interrupt.h>
30104diff -urNp linux-3.0.7/drivers/mfd/wm8350-i2c.c linux-3.0.7/drivers/mfd/wm8350-i2c.c
30105--- linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
30106+++ linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
30107@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30108 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30109 int ret;
30110
30111+ pax_track_stack();
30112+
30113 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30114 return -EINVAL;
30115
30116diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c
30117--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:09.000000000 -0400
30118+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:19.000000000 -0400
30119@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30120 * the lid is closed. This leads to interrupts as soon as a little move
30121 * is done.
30122 */
30123- atomic_inc(&lis3_dev.count);
30124+ atomic_inc_unchecked(&lis3_dev.count);
30125
30126 wake_up_interruptible(&lis3_dev.misc_wait);
30127 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30128@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30129 if (lis3_dev.pm_dev)
30130 pm_runtime_get_sync(lis3_dev.pm_dev);
30131
30132- atomic_set(&lis3_dev.count, 0);
30133+ atomic_set_unchecked(&lis3_dev.count, 0);
30134 return 0;
30135 }
30136
30137@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30138 add_wait_queue(&lis3_dev.misc_wait, &wait);
30139 while (true) {
30140 set_current_state(TASK_INTERRUPTIBLE);
30141- data = atomic_xchg(&lis3_dev.count, 0);
30142+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30143 if (data)
30144 break;
30145
30146@@ -585,7 +585,7 @@ out:
30147 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30148 {
30149 poll_wait(file, &lis3_dev.misc_wait, wait);
30150- if (atomic_read(&lis3_dev.count))
30151+ if (atomic_read_unchecked(&lis3_dev.count))
30152 return POLLIN | POLLRDNORM;
30153 return 0;
30154 }
30155diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h
30156--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
30157+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
30158@@ -265,7 +265,7 @@ struct lis3lv02d {
30159 struct input_polled_dev *idev; /* input device */
30160 struct platform_device *pdev; /* platform device */
30161 struct regulator_bulk_data regulators[2];
30162- atomic_t count; /* interrupt count after last read */
30163+ atomic_unchecked_t count; /* interrupt count after last read */
30164 union axis_conversion ac; /* hw -> logical axis */
30165 int mapped_btns[3];
30166
30167diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c
30168--- linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
30169+++ linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
30170@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30171 unsigned long nsec;
30172
30173 nsec = CLKS2NSEC(clks);
30174- atomic_long_inc(&mcs_op_statistics[op].count);
30175- atomic_long_add(nsec, &mcs_op_statistics[op].total);
30176+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30177+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30178 if (mcs_op_statistics[op].max < nsec)
30179 mcs_op_statistics[op].max = nsec;
30180 }
30181diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c
30182--- linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
30183+++ linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
30184@@ -32,9 +32,9 @@
30185
30186 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30187
30188-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30189+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30190 {
30191- unsigned long val = atomic_long_read(v);
30192+ unsigned long val = atomic_long_read_unchecked(v);
30193
30194 seq_printf(s, "%16lu %s\n", val, id);
30195 }
30196@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30197
30198 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30199 for (op = 0; op < mcsop_last; op++) {
30200- count = atomic_long_read(&mcs_op_statistics[op].count);
30201- total = atomic_long_read(&mcs_op_statistics[op].total);
30202+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30203+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30204 max = mcs_op_statistics[op].max;
30205 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30206 count ? total / count : 0, max);
30207diff -urNp linux-3.0.7/drivers/misc/sgi-gru/grutables.h linux-3.0.7/drivers/misc/sgi-gru/grutables.h
30208--- linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
30209+++ linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
30210@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30211 * GRU statistics.
30212 */
30213 struct gru_stats_s {
30214- atomic_long_t vdata_alloc;
30215- atomic_long_t vdata_free;
30216- atomic_long_t gts_alloc;
30217- atomic_long_t gts_free;
30218- atomic_long_t gms_alloc;
30219- atomic_long_t gms_free;
30220- atomic_long_t gts_double_allocate;
30221- atomic_long_t assign_context;
30222- atomic_long_t assign_context_failed;
30223- atomic_long_t free_context;
30224- atomic_long_t load_user_context;
30225- atomic_long_t load_kernel_context;
30226- atomic_long_t lock_kernel_context;
30227- atomic_long_t unlock_kernel_context;
30228- atomic_long_t steal_user_context;
30229- atomic_long_t steal_kernel_context;
30230- atomic_long_t steal_context_failed;
30231- atomic_long_t nopfn;
30232- atomic_long_t asid_new;
30233- atomic_long_t asid_next;
30234- atomic_long_t asid_wrap;
30235- atomic_long_t asid_reuse;
30236- atomic_long_t intr;
30237- atomic_long_t intr_cbr;
30238- atomic_long_t intr_tfh;
30239- atomic_long_t intr_spurious;
30240- atomic_long_t intr_mm_lock_failed;
30241- atomic_long_t call_os;
30242- atomic_long_t call_os_wait_queue;
30243- atomic_long_t user_flush_tlb;
30244- atomic_long_t user_unload_context;
30245- atomic_long_t user_exception;
30246- atomic_long_t set_context_option;
30247- atomic_long_t check_context_retarget_intr;
30248- atomic_long_t check_context_unload;
30249- atomic_long_t tlb_dropin;
30250- atomic_long_t tlb_preload_page;
30251- atomic_long_t tlb_dropin_fail_no_asid;
30252- atomic_long_t tlb_dropin_fail_upm;
30253- atomic_long_t tlb_dropin_fail_invalid;
30254- atomic_long_t tlb_dropin_fail_range_active;
30255- atomic_long_t tlb_dropin_fail_idle;
30256- atomic_long_t tlb_dropin_fail_fmm;
30257- atomic_long_t tlb_dropin_fail_no_exception;
30258- atomic_long_t tfh_stale_on_fault;
30259- atomic_long_t mmu_invalidate_range;
30260- atomic_long_t mmu_invalidate_page;
30261- atomic_long_t flush_tlb;
30262- atomic_long_t flush_tlb_gru;
30263- atomic_long_t flush_tlb_gru_tgh;
30264- atomic_long_t flush_tlb_gru_zero_asid;
30265-
30266- atomic_long_t copy_gpa;
30267- atomic_long_t read_gpa;
30268-
30269- atomic_long_t mesq_receive;
30270- atomic_long_t mesq_receive_none;
30271- atomic_long_t mesq_send;
30272- atomic_long_t mesq_send_failed;
30273- atomic_long_t mesq_noop;
30274- atomic_long_t mesq_send_unexpected_error;
30275- atomic_long_t mesq_send_lb_overflow;
30276- atomic_long_t mesq_send_qlimit_reached;
30277- atomic_long_t mesq_send_amo_nacked;
30278- atomic_long_t mesq_send_put_nacked;
30279- atomic_long_t mesq_page_overflow;
30280- atomic_long_t mesq_qf_locked;
30281- atomic_long_t mesq_qf_noop_not_full;
30282- atomic_long_t mesq_qf_switch_head_failed;
30283- atomic_long_t mesq_qf_unexpected_error;
30284- atomic_long_t mesq_noop_unexpected_error;
30285- atomic_long_t mesq_noop_lb_overflow;
30286- atomic_long_t mesq_noop_qlimit_reached;
30287- atomic_long_t mesq_noop_amo_nacked;
30288- atomic_long_t mesq_noop_put_nacked;
30289- atomic_long_t mesq_noop_page_overflow;
30290+ atomic_long_unchecked_t vdata_alloc;
30291+ atomic_long_unchecked_t vdata_free;
30292+ atomic_long_unchecked_t gts_alloc;
30293+ atomic_long_unchecked_t gts_free;
30294+ atomic_long_unchecked_t gms_alloc;
30295+ atomic_long_unchecked_t gms_free;
30296+ atomic_long_unchecked_t gts_double_allocate;
30297+ atomic_long_unchecked_t assign_context;
30298+ atomic_long_unchecked_t assign_context_failed;
30299+ atomic_long_unchecked_t free_context;
30300+ atomic_long_unchecked_t load_user_context;
30301+ atomic_long_unchecked_t load_kernel_context;
30302+ atomic_long_unchecked_t lock_kernel_context;
30303+ atomic_long_unchecked_t unlock_kernel_context;
30304+ atomic_long_unchecked_t steal_user_context;
30305+ atomic_long_unchecked_t steal_kernel_context;
30306+ atomic_long_unchecked_t steal_context_failed;
30307+ atomic_long_unchecked_t nopfn;
30308+ atomic_long_unchecked_t asid_new;
30309+ atomic_long_unchecked_t asid_next;
30310+ atomic_long_unchecked_t asid_wrap;
30311+ atomic_long_unchecked_t asid_reuse;
30312+ atomic_long_unchecked_t intr;
30313+ atomic_long_unchecked_t intr_cbr;
30314+ atomic_long_unchecked_t intr_tfh;
30315+ atomic_long_unchecked_t intr_spurious;
30316+ atomic_long_unchecked_t intr_mm_lock_failed;
30317+ atomic_long_unchecked_t call_os;
30318+ atomic_long_unchecked_t call_os_wait_queue;
30319+ atomic_long_unchecked_t user_flush_tlb;
30320+ atomic_long_unchecked_t user_unload_context;
30321+ atomic_long_unchecked_t user_exception;
30322+ atomic_long_unchecked_t set_context_option;
30323+ atomic_long_unchecked_t check_context_retarget_intr;
30324+ atomic_long_unchecked_t check_context_unload;
30325+ atomic_long_unchecked_t tlb_dropin;
30326+ atomic_long_unchecked_t tlb_preload_page;
30327+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30328+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30329+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30330+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30331+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30332+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30333+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30334+ atomic_long_unchecked_t tfh_stale_on_fault;
30335+ atomic_long_unchecked_t mmu_invalidate_range;
30336+ atomic_long_unchecked_t mmu_invalidate_page;
30337+ atomic_long_unchecked_t flush_tlb;
30338+ atomic_long_unchecked_t flush_tlb_gru;
30339+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30340+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30341+
30342+ atomic_long_unchecked_t copy_gpa;
30343+ atomic_long_unchecked_t read_gpa;
30344+
30345+ atomic_long_unchecked_t mesq_receive;
30346+ atomic_long_unchecked_t mesq_receive_none;
30347+ atomic_long_unchecked_t mesq_send;
30348+ atomic_long_unchecked_t mesq_send_failed;
30349+ atomic_long_unchecked_t mesq_noop;
30350+ atomic_long_unchecked_t mesq_send_unexpected_error;
30351+ atomic_long_unchecked_t mesq_send_lb_overflow;
30352+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30353+ atomic_long_unchecked_t mesq_send_amo_nacked;
30354+ atomic_long_unchecked_t mesq_send_put_nacked;
30355+ atomic_long_unchecked_t mesq_page_overflow;
30356+ atomic_long_unchecked_t mesq_qf_locked;
30357+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30358+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30359+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30360+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30361+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30362+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30363+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30364+ atomic_long_unchecked_t mesq_noop_put_nacked;
30365+ atomic_long_unchecked_t mesq_noop_page_overflow;
30366
30367 };
30368
30369@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30370 tghop_invalidate, mcsop_last};
30371
30372 struct mcs_op_statistic {
30373- atomic_long_t count;
30374- atomic_long_t total;
30375+ atomic_long_unchecked_t count;
30376+ atomic_long_unchecked_t total;
30377 unsigned long max;
30378 };
30379
30380@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30381
30382 #define STAT(id) do { \
30383 if (gru_options & OPT_STATS) \
30384- atomic_long_inc(&gru_stats.id); \
30385+ atomic_long_inc_unchecked(&gru_stats.id); \
30386 } while (0)
30387
30388 #ifdef CONFIG_SGI_GRU_DEBUG
30389diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xp.h linux-3.0.7/drivers/misc/sgi-xp/xp.h
30390--- linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
30391+++ linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
30392@@ -289,7 +289,7 @@ struct xpc_interface {
30393 xpc_notify_func, void *);
30394 void (*received) (short, int, void *);
30395 enum xp_retval (*partid_to_nasids) (short, void *);
30396-};
30397+} __no_const;
30398
30399 extern struct xpc_interface xpc_interface;
30400
30401diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc.h linux-3.0.7/drivers/misc/sgi-xp/xpc.h
30402--- linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-07-21 22:17:23.000000000 -0400
30403+++ linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-10-11 10:44:33.000000000 -0400
30404@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30405 void (*received_payload) (struct xpc_channel *, void *);
30406 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30407 };
30408+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30409
30410 /* struct xpc_partition act_state values (for XPC HB) */
30411
30412@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30413 /* found in xpc_main.c */
30414 extern struct device *xpc_part;
30415 extern struct device *xpc_chan;
30416-extern struct xpc_arch_operations xpc_arch_ops;
30417+extern xpc_arch_operations_no_const xpc_arch_ops;
30418 extern int xpc_disengage_timelimit;
30419 extern int xpc_disengage_timedout;
30420 extern int xpc_activate_IRQ_rcvd;
30421diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c
30422--- linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-07-21 22:17:23.000000000 -0400
30423+++ linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-10-11 10:44:33.000000000 -0400
30424@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30425 .notifier_call = xpc_system_die,
30426 };
30427
30428-struct xpc_arch_operations xpc_arch_ops;
30429+xpc_arch_operations_no_const xpc_arch_ops;
30430
30431 /*
30432 * Timer function to enforce the timelimit on the partition disengage.
30433diff -urNp linux-3.0.7/drivers/mmc/host/sdhci-pci.c linux-3.0.7/drivers/mmc/host/sdhci-pci.c
30434--- linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-07-21 22:17:23.000000000 -0400
30435+++ linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-10-11 10:44:33.000000000 -0400
30436@@ -524,7 +524,7 @@ static const struct sdhci_pci_fixes sdhc
30437 .probe = via_probe,
30438 };
30439
30440-static const struct pci_device_id pci_ids[] __devinitdata = {
30441+static const struct pci_device_id pci_ids[] __devinitconst = {
30442 {
30443 .vendor = PCI_VENDOR_ID_RICOH,
30444 .device = PCI_DEVICE_ID_RICOH_R5C822,
30445diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c
30446--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
30447+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
30448@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30449 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30450 unsigned long timeo = jiffies + HZ;
30451
30452+ pax_track_stack();
30453+
30454 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30455 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30456 goto sleep;
30457@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30458 unsigned long initial_adr;
30459 int initial_len = len;
30460
30461+ pax_track_stack();
30462+
30463 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30464 adr += chip->start;
30465 initial_adr = adr;
30466@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30467 int retries = 3;
30468 int ret;
30469
30470+ pax_track_stack();
30471+
30472 adr += chip->start;
30473
30474 retry:
30475diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c
30476--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
30477+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
30478@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30479 unsigned long cmd_addr;
30480 struct cfi_private *cfi = map->fldrv_priv;
30481
30482+ pax_track_stack();
30483+
30484 adr += chip->start;
30485
30486 /* Ensure cmd read/writes are aligned. */
30487@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30488 DECLARE_WAITQUEUE(wait, current);
30489 int wbufsize, z;
30490
30491+ pax_track_stack();
30492+
30493 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30494 if (adr & (map_bankwidth(map)-1))
30495 return -EINVAL;
30496@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30497 DECLARE_WAITQUEUE(wait, current);
30498 int ret = 0;
30499
30500+ pax_track_stack();
30501+
30502 adr += chip->start;
30503
30504 /* Let's determine this according to the interleave only once */
30505@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30506 unsigned long timeo = jiffies + HZ;
30507 DECLARE_WAITQUEUE(wait, current);
30508
30509+ pax_track_stack();
30510+
30511 adr += chip->start;
30512
30513 /* Let's determine this according to the interleave only once */
30514@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30515 unsigned long timeo = jiffies + HZ;
30516 DECLARE_WAITQUEUE(wait, current);
30517
30518+ pax_track_stack();
30519+
30520 adr += chip->start;
30521
30522 /* Let's determine this according to the interleave only once */
30523diff -urNp linux-3.0.7/drivers/mtd/devices/doc2000.c linux-3.0.7/drivers/mtd/devices/doc2000.c
30524--- linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
30525+++ linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
30526@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30527
30528 /* The ECC will not be calculated correctly if less than 512 is written */
30529 /* DBB-
30530- if (len != 0x200 && eccbuf)
30531+ if (len != 0x200)
30532 printk(KERN_WARNING
30533 "ECC needs a full sector write (adr: %lx size %lx)\n",
30534 (long) to, (long) len);
30535diff -urNp linux-3.0.7/drivers/mtd/devices/doc2001.c linux-3.0.7/drivers/mtd/devices/doc2001.c
30536--- linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
30537+++ linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
30538@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30539 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30540
30541 /* Don't allow read past end of device */
30542- if (from >= this->totlen)
30543+ if (from >= this->totlen || !len)
30544 return -EINVAL;
30545
30546 /* Don't allow a single read to cross a 512-byte block boundary */
30547diff -urNp linux-3.0.7/drivers/mtd/ftl.c linux-3.0.7/drivers/mtd/ftl.c
30548--- linux-3.0.7/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
30549+++ linux-3.0.7/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
30550@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30551 loff_t offset;
30552 uint16_t srcunitswap = cpu_to_le16(srcunit);
30553
30554+ pax_track_stack();
30555+
30556 eun = &part->EUNInfo[srcunit];
30557 xfer = &part->XferInfo[xferunit];
30558 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30559diff -urNp linux-3.0.7/drivers/mtd/inftlcore.c linux-3.0.7/drivers/mtd/inftlcore.c
30560--- linux-3.0.7/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
30561+++ linux-3.0.7/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
30562@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30563 struct inftl_oob oob;
30564 size_t retlen;
30565
30566+ pax_track_stack();
30567+
30568 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30569 "pending=%d)\n", inftl, thisVUC, pendingblock);
30570
30571diff -urNp linux-3.0.7/drivers/mtd/inftlmount.c linux-3.0.7/drivers/mtd/inftlmount.c
30572--- linux-3.0.7/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
30573+++ linux-3.0.7/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
30574@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30575 struct INFTLPartition *ip;
30576 size_t retlen;
30577
30578+ pax_track_stack();
30579+
30580 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30581
30582 /*
30583diff -urNp linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c
30584--- linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
30585+++ linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
30586@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30587 {
30588 map_word pfow_val[4];
30589
30590+ pax_track_stack();
30591+
30592 /* Check identification string */
30593 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30594 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30595diff -urNp linux-3.0.7/drivers/mtd/mtdchar.c linux-3.0.7/drivers/mtd/mtdchar.c
30596--- linux-3.0.7/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
30597+++ linux-3.0.7/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
30598@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
30599 u_long size;
30600 struct mtd_info_user info;
30601
30602+ pax_track_stack();
30603+
30604 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30605
30606 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30607diff -urNp linux-3.0.7/drivers/mtd/nand/denali.c linux-3.0.7/drivers/mtd/nand/denali.c
30608--- linux-3.0.7/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
30609+++ linux-3.0.7/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
30610@@ -26,6 +26,7 @@
30611 #include <linux/pci.h>
30612 #include <linux/mtd/mtd.h>
30613 #include <linux/module.h>
30614+#include <linux/slab.h>
30615
30616 #include "denali.h"
30617
30618diff -urNp linux-3.0.7/drivers/mtd/nftlcore.c linux-3.0.7/drivers/mtd/nftlcore.c
30619--- linux-3.0.7/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
30620+++ linux-3.0.7/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
30621@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30622 int inplace = 1;
30623 size_t retlen;
30624
30625+ pax_track_stack();
30626+
30627 memset(BlockMap, 0xff, sizeof(BlockMap));
30628 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30629
30630diff -urNp linux-3.0.7/drivers/mtd/nftlmount.c linux-3.0.7/drivers/mtd/nftlmount.c
30631--- linux-3.0.7/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
30632+++ linux-3.0.7/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
30633@@ -24,6 +24,7 @@
30634 #include <asm/errno.h>
30635 #include <linux/delay.h>
30636 #include <linux/slab.h>
30637+#include <linux/sched.h>
30638 #include <linux/mtd/mtd.h>
30639 #include <linux/mtd/nand.h>
30640 #include <linux/mtd/nftl.h>
30641@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
30642 struct mtd_info *mtd = nftl->mbd.mtd;
30643 unsigned int i;
30644
30645+ pax_track_stack();
30646+
30647 /* Assume logical EraseSize == physical erasesize for starting the scan.
30648 We'll sort it out later if we find a MediaHeader which says otherwise */
30649 /* Actually, we won't. The new DiskOnChip driver has already scanned
30650diff -urNp linux-3.0.7/drivers/mtd/ubi/build.c linux-3.0.7/drivers/mtd/ubi/build.c
30651--- linux-3.0.7/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
30652+++ linux-3.0.7/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
30653@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
30654 static int __init bytes_str_to_int(const char *str)
30655 {
30656 char *endp;
30657- unsigned long result;
30658+ unsigned long result, scale = 1;
30659
30660 result = simple_strtoul(str, &endp, 0);
30661 if (str == endp || result >= INT_MAX) {
30662@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
30663
30664 switch (*endp) {
30665 case 'G':
30666- result *= 1024;
30667+ scale *= 1024;
30668 case 'M':
30669- result *= 1024;
30670+ scale *= 1024;
30671 case 'K':
30672- result *= 1024;
30673+ scale *= 1024;
30674 if (endp[1] == 'i' && endp[2] == 'B')
30675 endp += 2;
30676 case '\0':
30677@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
30678 return -EINVAL;
30679 }
30680
30681- return result;
30682+ if ((intoverflow_t)result*scale >= INT_MAX) {
30683+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
30684+ str);
30685+ return -EINVAL;
30686+ }
30687+
30688+ return result*scale;
30689 }
30690
30691 /**
30692diff -urNp linux-3.0.7/drivers/net/atlx/atl2.c linux-3.0.7/drivers/net/atlx/atl2.c
30693--- linux-3.0.7/drivers/net/atlx/atl2.c 2011-07-21 22:17:23.000000000 -0400
30694+++ linux-3.0.7/drivers/net/atlx/atl2.c 2011-10-11 10:44:33.000000000 -0400
30695@@ -2840,7 +2840,7 @@ static void atl2_force_ps(struct atl2_hw
30696 */
30697
30698 #define ATL2_PARAM(X, desc) \
30699- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30700+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30701 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
30702 MODULE_PARM_DESC(X, desc);
30703 #else
30704diff -urNp linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c
30705--- linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
30706+++ linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
30707@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
30708 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
30709 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
30710
30711-static struct bfa_ioc_hwif nw_hwif_ct;
30712+static struct bfa_ioc_hwif nw_hwif_ct = {
30713+ .ioc_pll_init = bfa_ioc_ct_pll_init,
30714+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
30715+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
30716+ .ioc_reg_init = bfa_ioc_ct_reg_init,
30717+ .ioc_map_port = bfa_ioc_ct_map_port,
30718+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
30719+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
30720+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
30721+ .ioc_sync_start = bfa_ioc_ct_sync_start,
30722+ .ioc_sync_join = bfa_ioc_ct_sync_join,
30723+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
30724+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
30725+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
30726+};
30727
30728 /**
30729 * Called from bfa_ioc_attach() to map asic specific calls.
30730@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
30731 void
30732 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
30733 {
30734- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
30735- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
30736- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
30737- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
30738- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
30739- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
30740- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
30741- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
30742- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
30743- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
30744- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
30745- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
30746- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
30747-
30748 ioc->ioc_hwif = &nw_hwif_ct;
30749 }
30750
30751diff -urNp linux-3.0.7/drivers/net/bna/bnad.c linux-3.0.7/drivers/net/bna/bnad.c
30752--- linux-3.0.7/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
30753+++ linux-3.0.7/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
30754@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30755 struct bna_intr_info *intr_info =
30756 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
30757 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
30758- struct bna_tx_event_cbfn tx_cbfn;
30759+ static struct bna_tx_event_cbfn tx_cbfn = {
30760+ /* Initialize the tx event handlers */
30761+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
30762+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
30763+ .tx_stall_cbfn = bnad_cb_tx_stall,
30764+ .tx_resume_cbfn = bnad_cb_tx_resume,
30765+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
30766+ };
30767 struct bna_tx *tx;
30768 unsigned long flags;
30769
30770@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30771 tx_config->txq_depth = bnad->txq_depth;
30772 tx_config->tx_type = BNA_TX_T_REGULAR;
30773
30774- /* Initialize the tx event handlers */
30775- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
30776- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
30777- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
30778- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
30779- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
30780-
30781 /* Get BNA's resource requirement for one tx object */
30782 spin_lock_irqsave(&bnad->bna_lock, flags);
30783 bna_tx_res_req(bnad->num_txq_per_tx,
30784@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
30785 struct bna_intr_info *intr_info =
30786 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
30787 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
30788- struct bna_rx_event_cbfn rx_cbfn;
30789+ static struct bna_rx_event_cbfn rx_cbfn = {
30790+ /* Initialize the Rx event handlers */
30791+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
30792+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
30793+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
30794+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
30795+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
30796+ .rx_post_cbfn = bnad_cb_rx_post
30797+ };
30798 struct bna_rx *rx;
30799 unsigned long flags;
30800
30801 /* Initialize the Rx object configuration */
30802 bnad_init_rx_config(bnad, rx_config);
30803
30804- /* Initialize the Rx event handlers */
30805- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
30806- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
30807- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
30808- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
30809- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
30810- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
30811-
30812 /* Get BNA's resource requirement for one Rx object */
30813 spin_lock_irqsave(&bnad->bna_lock, flags);
30814 bna_rx_res_req(rx_config, res_info);
30815diff -urNp linux-3.0.7/drivers/net/bnx2.c linux-3.0.7/drivers/net/bnx2.c
30816--- linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:54:54.000000000 -0400
30817+++ linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:55:27.000000000 -0400
30818@@ -5831,6 +5831,8 @@ bnx2_test_nvram(struct bnx2 *bp)
30819 int rc = 0;
30820 u32 magic, csum;
30821
30822+ pax_track_stack();
30823+
30824 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
30825 goto test_nvram_done;
30826
30827diff -urNp linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c
30828--- linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30829+++ linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
30830@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
30831 int i, rc;
30832 u32 magic, crc;
30833
30834+ pax_track_stack();
30835+
30836 if (BP_NOMCP(bp))
30837 return 0;
30838
30839diff -urNp linux-3.0.7/drivers/net/can/mscan/mscan.c linux-3.0.7/drivers/net/can/mscan/mscan.c
30840--- linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-07-21 22:17:23.000000000 -0400
30841+++ linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-10-17 02:51:46.000000000 -0400
30842@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(stru
30843 void __iomem *data = &regs->tx.dsr1_0;
30844 u16 *payload = (u16 *)frame->data;
30845
30846- /* It is safe to write into dsr[dlc+1] */
30847- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30848+ for (i = 0; i < frame->can_dlc / 2; i++) {
30849 out_be16(data, *payload++);
30850 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30851 }
30852+ /* write remaining byte if necessary */
30853+ if (frame->can_dlc & 1)
30854+ out_8(data, frame->data[frame->can_dlc - 1]);
30855 }
30856
30857 out_8(&regs->tx.dlr, frame->can_dlc);
30858@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct ne
30859 void __iomem *data = &regs->rx.dsr1_0;
30860 u16 *payload = (u16 *)frame->data;
30861
30862- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30863+ for (i = 0; i < frame->can_dlc / 2; i++) {
30864 *payload++ = in_be16(data);
30865 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30866 }
30867+ /* read remaining byte if necessary */
30868+ if (frame->can_dlc & 1)
30869+ frame->data[frame->can_dlc - 1] = in_8(data);
30870 }
30871
30872 out_8(&regs->canrflg, MSCAN_RXF);
30873diff -urNp linux-3.0.7/drivers/net/cxgb3/l2t.h linux-3.0.7/drivers/net/cxgb3/l2t.h
30874--- linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:54:54.000000000 -0400
30875+++ linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:55:27.000000000 -0400
30876@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
30877 */
30878 struct l2t_skb_cb {
30879 arp_failure_handler_func arp_failure_handler;
30880-};
30881+} __no_const;
30882
30883 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
30884
30885diff -urNp linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c
30886--- linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
30887+++ linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
30888@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
30889 unsigned int nchan = adap->params.nports;
30890 struct msix_entry entries[MAX_INGQ + 1];
30891
30892+ pax_track_stack();
30893+
30894 for (i = 0; i < ARRAY_SIZE(entries); ++i)
30895 entries[i].entry = i;
30896
30897diff -urNp linux-3.0.7/drivers/net/cxgb4/t4_hw.c linux-3.0.7/drivers/net/cxgb4/t4_hw.c
30898--- linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
30899+++ linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
30900@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
30901 u8 vpd[VPD_LEN], csum;
30902 unsigned int vpdr_len, kw_offset, id_len;
30903
30904+ pax_track_stack();
30905+
30906 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
30907 if (ret < 0)
30908 return ret;
30909diff -urNp linux-3.0.7/drivers/net/e1000e/82571.c linux-3.0.7/drivers/net/e1000e/82571.c
30910--- linux-3.0.7/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
30911+++ linux-3.0.7/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
30912@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
30913 {
30914 struct e1000_hw *hw = &adapter->hw;
30915 struct e1000_mac_info *mac = &hw->mac;
30916- struct e1000_mac_operations *func = &mac->ops;
30917+ e1000_mac_operations_no_const *func = &mac->ops;
30918 u32 swsm = 0;
30919 u32 swsm2 = 0;
30920 bool force_clear_smbi = false;
30921diff -urNp linux-3.0.7/drivers/net/e1000e/es2lan.c linux-3.0.7/drivers/net/e1000e/es2lan.c
30922--- linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
30923+++ linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
30924@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
30925 {
30926 struct e1000_hw *hw = &adapter->hw;
30927 struct e1000_mac_info *mac = &hw->mac;
30928- struct e1000_mac_operations *func = &mac->ops;
30929+ e1000_mac_operations_no_const *func = &mac->ops;
30930
30931 /* Set media type */
30932 switch (adapter->pdev->device) {
30933diff -urNp linux-3.0.7/drivers/net/e1000e/hw.h linux-3.0.7/drivers/net/e1000e/hw.h
30934--- linux-3.0.7/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
30935+++ linux-3.0.7/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
30936@@ -776,6 +776,7 @@ struct e1000_mac_operations {
30937 void (*write_vfta)(struct e1000_hw *, u32, u32);
30938 s32 (*read_mac_addr)(struct e1000_hw *);
30939 };
30940+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30941
30942 /* Function pointers for the PHY. */
30943 struct e1000_phy_operations {
30944@@ -799,6 +800,7 @@ struct e1000_phy_operations {
30945 void (*power_up)(struct e1000_hw *);
30946 void (*power_down)(struct e1000_hw *);
30947 };
30948+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30949
30950 /* Function pointers for the NVM. */
30951 struct e1000_nvm_operations {
30952@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
30953 s32 (*validate)(struct e1000_hw *);
30954 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
30955 };
30956+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30957
30958 struct e1000_mac_info {
30959- struct e1000_mac_operations ops;
30960+ e1000_mac_operations_no_const ops;
30961 u8 addr[ETH_ALEN];
30962 u8 perm_addr[ETH_ALEN];
30963
30964@@ -853,7 +856,7 @@ struct e1000_mac_info {
30965 };
30966
30967 struct e1000_phy_info {
30968- struct e1000_phy_operations ops;
30969+ e1000_phy_operations_no_const ops;
30970
30971 enum e1000_phy_type type;
30972
30973@@ -887,7 +890,7 @@ struct e1000_phy_info {
30974 };
30975
30976 struct e1000_nvm_info {
30977- struct e1000_nvm_operations ops;
30978+ e1000_nvm_operations_no_const ops;
30979
30980 enum e1000_nvm_type type;
30981 enum e1000_nvm_override override;
30982diff -urNp linux-3.0.7/drivers/net/fealnx.c linux-3.0.7/drivers/net/fealnx.c
30983--- linux-3.0.7/drivers/net/fealnx.c 2011-07-21 22:17:23.000000000 -0400
30984+++ linux-3.0.7/drivers/net/fealnx.c 2011-10-11 10:44:33.000000000 -0400
30985@@ -150,7 +150,7 @@ struct chip_info {
30986 int flags;
30987 };
30988
30989-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
30990+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
30991 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30992 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
30993 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30994diff -urNp linux-3.0.7/drivers/net/hamradio/6pack.c linux-3.0.7/drivers/net/hamradio/6pack.c
30995--- linux-3.0.7/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
30996+++ linux-3.0.7/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
30997@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
30998 unsigned char buf[512];
30999 int count1;
31000
31001+ pax_track_stack();
31002+
31003 if (!count)
31004 return;
31005
31006diff -urNp linux-3.0.7/drivers/net/igb/e1000_hw.h linux-3.0.7/drivers/net/igb/e1000_hw.h
31007--- linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
31008+++ linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
31009@@ -314,6 +314,7 @@ struct e1000_mac_operations {
31010 s32 (*read_mac_addr)(struct e1000_hw *);
31011 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31012 };
31013+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31014
31015 struct e1000_phy_operations {
31016 s32 (*acquire)(struct e1000_hw *);
31017@@ -330,6 +331,7 @@ struct e1000_phy_operations {
31018 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31019 s32 (*write_reg)(struct e1000_hw *, u32, u16);
31020 };
31021+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31022
31023 struct e1000_nvm_operations {
31024 s32 (*acquire)(struct e1000_hw *);
31025@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31026 s32 (*update)(struct e1000_hw *);
31027 s32 (*validate)(struct e1000_hw *);
31028 };
31029+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31030
31031 struct e1000_info {
31032 s32 (*get_invariants)(struct e1000_hw *);
31033@@ -350,7 +353,7 @@ struct e1000_info {
31034 extern const struct e1000_info e1000_82575_info;
31035
31036 struct e1000_mac_info {
31037- struct e1000_mac_operations ops;
31038+ e1000_mac_operations_no_const ops;
31039
31040 u8 addr[6];
31041 u8 perm_addr[6];
31042@@ -388,7 +391,7 @@ struct e1000_mac_info {
31043 };
31044
31045 struct e1000_phy_info {
31046- struct e1000_phy_operations ops;
31047+ e1000_phy_operations_no_const ops;
31048
31049 enum e1000_phy_type type;
31050
31051@@ -423,7 +426,7 @@ struct e1000_phy_info {
31052 };
31053
31054 struct e1000_nvm_info {
31055- struct e1000_nvm_operations ops;
31056+ e1000_nvm_operations_no_const ops;
31057 enum e1000_nvm_type type;
31058 enum e1000_nvm_override override;
31059
31060@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31061 s32 (*check_for_ack)(struct e1000_hw *, u16);
31062 s32 (*check_for_rst)(struct e1000_hw *, u16);
31063 };
31064+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31065
31066 struct e1000_mbx_stats {
31067 u32 msgs_tx;
31068@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31069 };
31070
31071 struct e1000_mbx_info {
31072- struct e1000_mbx_operations ops;
31073+ e1000_mbx_operations_no_const ops;
31074 struct e1000_mbx_stats stats;
31075 u32 timeout;
31076 u32 usec_delay;
31077diff -urNp linux-3.0.7/drivers/net/igbvf/vf.h linux-3.0.7/drivers/net/igbvf/vf.h
31078--- linux-3.0.7/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
31079+++ linux-3.0.7/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
31080@@ -189,9 +189,10 @@ struct e1000_mac_operations {
31081 s32 (*read_mac_addr)(struct e1000_hw *);
31082 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31083 };
31084+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31085
31086 struct e1000_mac_info {
31087- struct e1000_mac_operations ops;
31088+ e1000_mac_operations_no_const ops;
31089 u8 addr[6];
31090 u8 perm_addr[6];
31091
31092@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31093 s32 (*check_for_ack)(struct e1000_hw *);
31094 s32 (*check_for_rst)(struct e1000_hw *);
31095 };
31096+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31097
31098 struct e1000_mbx_stats {
31099 u32 msgs_tx;
31100@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31101 };
31102
31103 struct e1000_mbx_info {
31104- struct e1000_mbx_operations ops;
31105+ e1000_mbx_operations_no_const ops;
31106 struct e1000_mbx_stats stats;
31107 u32 timeout;
31108 u32 usec_delay;
31109diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_main.c linux-3.0.7/drivers/net/ixgb/ixgb_main.c
31110--- linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
31111+++ linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
31112@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31113 u32 rctl;
31114 int i;
31115
31116+ pax_track_stack();
31117+
31118 /* Check for Promiscuous and All Multicast modes */
31119
31120 rctl = IXGB_READ_REG(hw, RCTL);
31121diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_param.c linux-3.0.7/drivers/net/ixgb/ixgb_param.c
31122--- linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
31123+++ linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
31124@@ -261,6 +261,9 @@ void __devinit
31125 ixgb_check_options(struct ixgb_adapter *adapter)
31126 {
31127 int bd = adapter->bd_number;
31128+
31129+ pax_track_stack();
31130+
31131 if (bd >= IXGB_MAX_NIC) {
31132 pr_notice("Warning: no configuration for board #%i\n", bd);
31133 pr_notice("Using defaults for all values\n");
31134diff -urNp linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h
31135--- linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
31136+++ linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
31137@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
31138 s32 (*update_checksum)(struct ixgbe_hw *);
31139 u16 (*calc_checksum)(struct ixgbe_hw *);
31140 };
31141+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31142
31143 struct ixgbe_mac_operations {
31144 s32 (*init_hw)(struct ixgbe_hw *);
31145@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
31146 /* Flow Control */
31147 s32 (*fc_enable)(struct ixgbe_hw *, s32);
31148 };
31149+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31150
31151 struct ixgbe_phy_operations {
31152 s32 (*identify)(struct ixgbe_hw *);
31153@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
31154 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31155 s32 (*check_overtemp)(struct ixgbe_hw *);
31156 };
31157+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31158
31159 struct ixgbe_eeprom_info {
31160- struct ixgbe_eeprom_operations ops;
31161+ ixgbe_eeprom_operations_no_const ops;
31162 enum ixgbe_eeprom_type type;
31163 u32 semaphore_delay;
31164 u16 word_size;
31165@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
31166
31167 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31168 struct ixgbe_mac_info {
31169- struct ixgbe_mac_operations ops;
31170+ ixgbe_mac_operations_no_const ops;
31171 enum ixgbe_mac_type type;
31172 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31173 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31174@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
31175 };
31176
31177 struct ixgbe_phy_info {
31178- struct ixgbe_phy_operations ops;
31179+ ixgbe_phy_operations_no_const ops;
31180 struct mdio_if_info mdio;
31181 enum ixgbe_phy_type type;
31182 u32 id;
31183@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
31184 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31185 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31186 };
31187+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31188
31189 struct ixgbe_mbx_stats {
31190 u32 msgs_tx;
31191@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
31192 };
31193
31194 struct ixgbe_mbx_info {
31195- struct ixgbe_mbx_operations ops;
31196+ ixgbe_mbx_operations_no_const ops;
31197 struct ixgbe_mbx_stats stats;
31198 u32 timeout;
31199 u32 usec_delay;
31200diff -urNp linux-3.0.7/drivers/net/ixgbevf/vf.h linux-3.0.7/drivers/net/ixgbevf/vf.h
31201--- linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
31202+++ linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
31203@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31204 s32 (*clear_vfta)(struct ixgbe_hw *);
31205 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31206 };
31207+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31208
31209 enum ixgbe_mac_type {
31210 ixgbe_mac_unknown = 0,
31211@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31212 };
31213
31214 struct ixgbe_mac_info {
31215- struct ixgbe_mac_operations ops;
31216+ ixgbe_mac_operations_no_const ops;
31217 u8 addr[6];
31218 u8 perm_addr[6];
31219
31220@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31221 s32 (*check_for_ack)(struct ixgbe_hw *);
31222 s32 (*check_for_rst)(struct ixgbe_hw *);
31223 };
31224+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31225
31226 struct ixgbe_mbx_stats {
31227 u32 msgs_tx;
31228@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31229 };
31230
31231 struct ixgbe_mbx_info {
31232- struct ixgbe_mbx_operations ops;
31233+ ixgbe_mbx_operations_no_const ops;
31234 struct ixgbe_mbx_stats stats;
31235 u32 timeout;
31236 u32 udelay;
31237diff -urNp linux-3.0.7/drivers/net/ksz884x.c linux-3.0.7/drivers/net/ksz884x.c
31238--- linux-3.0.7/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
31239+++ linux-3.0.7/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
31240@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
31241 int rc;
31242 u64 counter[TOTAL_PORT_COUNTER_NUM];
31243
31244+ pax_track_stack();
31245+
31246 mutex_lock(&hw_priv->lock);
31247 n = SWITCH_PORT_NUM;
31248 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31249diff -urNp linux-3.0.7/drivers/net/mlx4/main.c linux-3.0.7/drivers/net/mlx4/main.c
31250--- linux-3.0.7/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
31251+++ linux-3.0.7/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
31252@@ -40,6 +40,7 @@
31253 #include <linux/dma-mapping.h>
31254 #include <linux/slab.h>
31255 #include <linux/io-mapping.h>
31256+#include <linux/sched.h>
31257
31258 #include <linux/mlx4/device.h>
31259 #include <linux/mlx4/doorbell.h>
31260@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
31261 u64 icm_size;
31262 int err;
31263
31264+ pax_track_stack();
31265+
31266 err = mlx4_QUERY_FW(dev);
31267 if (err) {
31268 if (err == -EACCES)
31269diff -urNp linux-3.0.7/drivers/net/niu.c linux-3.0.7/drivers/net/niu.c
31270--- linux-3.0.7/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
31271+++ linux-3.0.7/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
31272@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
31273 int i, num_irqs, err;
31274 u8 first_ldg;
31275
31276+ pax_track_stack();
31277+
31278 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31279 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31280 ldg_num_map[i] = first_ldg + i;
31281diff -urNp linux-3.0.7/drivers/net/pcnet32.c linux-3.0.7/drivers/net/pcnet32.c
31282--- linux-3.0.7/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
31283+++ linux-3.0.7/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
31284@@ -82,7 +82,7 @@ static int cards_found;
31285 /*
31286 * VLB I/O addresses
31287 */
31288-static unsigned int pcnet32_portlist[] __initdata =
31289+static unsigned int pcnet32_portlist[] __devinitdata =
31290 { 0x300, 0x320, 0x340, 0x360, 0 };
31291
31292 static int pcnet32_debug;
31293@@ -270,7 +270,7 @@ struct pcnet32_private {
31294 struct sk_buff **rx_skbuff;
31295 dma_addr_t *tx_dma_addr;
31296 dma_addr_t *rx_dma_addr;
31297- struct pcnet32_access a;
31298+ struct pcnet32_access *a;
31299 spinlock_t lock; /* Guard lock */
31300 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31301 unsigned int rx_ring_size; /* current rx ring size */
31302@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31303 u16 val;
31304
31305 netif_wake_queue(dev);
31306- val = lp->a.read_csr(ioaddr, CSR3);
31307+ val = lp->a->read_csr(ioaddr, CSR3);
31308 val &= 0x00ff;
31309- lp->a.write_csr(ioaddr, CSR3, val);
31310+ lp->a->write_csr(ioaddr, CSR3, val);
31311 napi_enable(&lp->napi);
31312 }
31313
31314@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31315 r = mii_link_ok(&lp->mii_if);
31316 } else if (lp->chip_version >= PCNET32_79C970A) {
31317 ulong ioaddr = dev->base_addr; /* card base I/O address */
31318- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31319+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31320 } else { /* can not detect link on really old chips */
31321 r = 1;
31322 }
31323@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31324 pcnet32_netif_stop(dev);
31325
31326 spin_lock_irqsave(&lp->lock, flags);
31327- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31328+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31329
31330 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31331
31332@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31333 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31334 {
31335 struct pcnet32_private *lp = netdev_priv(dev);
31336- struct pcnet32_access *a = &lp->a; /* access to registers */
31337+ struct pcnet32_access *a = lp->a; /* access to registers */
31338 ulong ioaddr = dev->base_addr; /* card base I/O address */
31339 struct sk_buff *skb; /* sk buff */
31340 int x, i; /* counters */
31341@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31342 pcnet32_netif_stop(dev);
31343
31344 spin_lock_irqsave(&lp->lock, flags);
31345- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31346+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31347
31348 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31349
31350 /* Reset the PCNET32 */
31351- lp->a.reset(ioaddr);
31352- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31353+ lp->a->reset(ioaddr);
31354+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31355
31356 /* switch pcnet32 to 32bit mode */
31357- lp->a.write_bcr(ioaddr, 20, 2);
31358+ lp->a->write_bcr(ioaddr, 20, 2);
31359
31360 /* purge & init rings but don't actually restart */
31361 pcnet32_restart(dev, 0x0000);
31362
31363- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31364+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31365
31366 /* Initialize Transmit buffers. */
31367 size = data_len + 15;
31368@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31369
31370 /* set int loopback in CSR15 */
31371 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31372- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31373+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31374
31375 teststatus = cpu_to_le16(0x8000);
31376- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31377+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31378
31379 /* Check status of descriptors */
31380 for (x = 0; x < numbuffs; x++) {
31381@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31382 }
31383 }
31384
31385- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31386+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31387 wmb();
31388 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31389 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31390@@ -1015,7 +1015,7 @@ clean_up:
31391 pcnet32_restart(dev, CSR0_NORMAL);
31392 } else {
31393 pcnet32_purge_rx_ring(dev);
31394- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31395+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31396 }
31397 spin_unlock_irqrestore(&lp->lock, flags);
31398
31399@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31400 enum ethtool_phys_id_state state)
31401 {
31402 struct pcnet32_private *lp = netdev_priv(dev);
31403- struct pcnet32_access *a = &lp->a;
31404+ struct pcnet32_access *a = lp->a;
31405 ulong ioaddr = dev->base_addr;
31406 unsigned long flags;
31407 int i;
31408@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31409 {
31410 int csr5;
31411 struct pcnet32_private *lp = netdev_priv(dev);
31412- struct pcnet32_access *a = &lp->a;
31413+ struct pcnet32_access *a = lp->a;
31414 ulong ioaddr = dev->base_addr;
31415 int ticks;
31416
31417@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31418 spin_lock_irqsave(&lp->lock, flags);
31419 if (pcnet32_tx(dev)) {
31420 /* reset the chip to clear the error condition, then restart */
31421- lp->a.reset(ioaddr);
31422- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31423+ lp->a->reset(ioaddr);
31424+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31425 pcnet32_restart(dev, CSR0_START);
31426 netif_wake_queue(dev);
31427 }
31428@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31429 __napi_complete(napi);
31430
31431 /* clear interrupt masks */
31432- val = lp->a.read_csr(ioaddr, CSR3);
31433+ val = lp->a->read_csr(ioaddr, CSR3);
31434 val &= 0x00ff;
31435- lp->a.write_csr(ioaddr, CSR3, val);
31436+ lp->a->write_csr(ioaddr, CSR3, val);
31437
31438 /* Set interrupt enable. */
31439- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31440+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31441
31442 spin_unlock_irqrestore(&lp->lock, flags);
31443 }
31444@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31445 int i, csr0;
31446 u16 *buff = ptr;
31447 struct pcnet32_private *lp = netdev_priv(dev);
31448- struct pcnet32_access *a = &lp->a;
31449+ struct pcnet32_access *a = lp->a;
31450 ulong ioaddr = dev->base_addr;
31451 unsigned long flags;
31452
31453@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31454 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31455 if (lp->phymask & (1 << j)) {
31456 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31457- lp->a.write_bcr(ioaddr, 33,
31458+ lp->a->write_bcr(ioaddr, 33,
31459 (j << 5) | i);
31460- *buff++ = lp->a.read_bcr(ioaddr, 34);
31461+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31462 }
31463 }
31464 }
31465@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31466 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31467 lp->options |= PCNET32_PORT_FD;
31468
31469- lp->a = *a;
31470+ lp->a = a;
31471
31472 /* prior to register_netdev, dev->name is not yet correct */
31473 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31474@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31475 if (lp->mii) {
31476 /* lp->phycount and lp->phymask are set to 0 by memset above */
31477
31478- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31479+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31480 /* scan for PHYs */
31481 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31482 unsigned short id1, id2;
31483@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31484 pr_info("Found PHY %04x:%04x at address %d\n",
31485 id1, id2, i);
31486 }
31487- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31488+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31489 if (lp->phycount > 1)
31490 lp->options |= PCNET32_PORT_MII;
31491 }
31492@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31493 }
31494
31495 /* Reset the PCNET32 */
31496- lp->a.reset(ioaddr);
31497+ lp->a->reset(ioaddr);
31498
31499 /* switch pcnet32 to 32bit mode */
31500- lp->a.write_bcr(ioaddr, 20, 2);
31501+ lp->a->write_bcr(ioaddr, 20, 2);
31502
31503 netif_printk(lp, ifup, KERN_DEBUG, dev,
31504 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31505@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31506 (u32) (lp->init_dma_addr));
31507
31508 /* set/reset autoselect bit */
31509- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31510+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31511 if (lp->options & PCNET32_PORT_ASEL)
31512 val |= 2;
31513- lp->a.write_bcr(ioaddr, 2, val);
31514+ lp->a->write_bcr(ioaddr, 2, val);
31515
31516 /* handle full duplex setting */
31517 if (lp->mii_if.full_duplex) {
31518- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31519+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31520 if (lp->options & PCNET32_PORT_FD) {
31521 val |= 1;
31522 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31523@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31524 if (lp->chip_version == 0x2627)
31525 val |= 3;
31526 }
31527- lp->a.write_bcr(ioaddr, 9, val);
31528+ lp->a->write_bcr(ioaddr, 9, val);
31529 }
31530
31531 /* set/reset GPSI bit in test register */
31532- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31533+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31534 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31535 val |= 0x10;
31536- lp->a.write_csr(ioaddr, 124, val);
31537+ lp->a->write_csr(ioaddr, 124, val);
31538
31539 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31540 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31541@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31542 * duplex, and/or enable auto negotiation, and clear DANAS
31543 */
31544 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31545- lp->a.write_bcr(ioaddr, 32,
31546- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31547+ lp->a->write_bcr(ioaddr, 32,
31548+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31549 /* disable Auto Negotiation, set 10Mpbs, HD */
31550- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31551+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31552 if (lp->options & PCNET32_PORT_FD)
31553 val |= 0x10;
31554 if (lp->options & PCNET32_PORT_100)
31555 val |= 0x08;
31556- lp->a.write_bcr(ioaddr, 32, val);
31557+ lp->a->write_bcr(ioaddr, 32, val);
31558 } else {
31559 if (lp->options & PCNET32_PORT_ASEL) {
31560- lp->a.write_bcr(ioaddr, 32,
31561- lp->a.read_bcr(ioaddr,
31562+ lp->a->write_bcr(ioaddr, 32,
31563+ lp->a->read_bcr(ioaddr,
31564 32) | 0x0080);
31565 /* enable auto negotiate, setup, disable fd */
31566- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31567+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31568 val |= 0x20;
31569- lp->a.write_bcr(ioaddr, 32, val);
31570+ lp->a->write_bcr(ioaddr, 32, val);
31571 }
31572 }
31573 } else {
31574@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31575 * There is really no good other way to handle multiple PHYs
31576 * other than turning off all automatics
31577 */
31578- val = lp->a.read_bcr(ioaddr, 2);
31579- lp->a.write_bcr(ioaddr, 2, val & ~2);
31580- val = lp->a.read_bcr(ioaddr, 32);
31581- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31582+ val = lp->a->read_bcr(ioaddr, 2);
31583+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31584+ val = lp->a->read_bcr(ioaddr, 32);
31585+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31586
31587 if (!(lp->options & PCNET32_PORT_ASEL)) {
31588 /* setup ecmd */
31589@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31590 ethtool_cmd_speed_set(&ecmd,
31591 (lp->options & PCNET32_PORT_100) ?
31592 SPEED_100 : SPEED_10);
31593- bcr9 = lp->a.read_bcr(ioaddr, 9);
31594+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31595
31596 if (lp->options & PCNET32_PORT_FD) {
31597 ecmd.duplex = DUPLEX_FULL;
31598@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31599 ecmd.duplex = DUPLEX_HALF;
31600 bcr9 |= ~(1 << 0);
31601 }
31602- lp->a.write_bcr(ioaddr, 9, bcr9);
31603+ lp->a->write_bcr(ioaddr, 9, bcr9);
31604 }
31605
31606 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31607@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31608
31609 #ifdef DO_DXSUFLO
31610 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31611- val = lp->a.read_csr(ioaddr, CSR3);
31612+ val = lp->a->read_csr(ioaddr, CSR3);
31613 val |= 0x40;
31614- lp->a.write_csr(ioaddr, CSR3, val);
31615+ lp->a->write_csr(ioaddr, CSR3, val);
31616 }
31617 #endif
31618
31619@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31620 napi_enable(&lp->napi);
31621
31622 /* Re-initialize the PCNET32, and start it when done. */
31623- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31624- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31625+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31626+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31627
31628- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31629- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31630+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31631+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31632
31633 netif_start_queue(dev);
31634
31635@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31636
31637 i = 0;
31638 while (i++ < 100)
31639- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31640+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31641 break;
31642 /*
31643 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31644 * reports that doing so triggers a bug in the '974.
31645 */
31646- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31647+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31648
31649 netif_printk(lp, ifup, KERN_DEBUG, dev,
31650 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31651 i,
31652 (u32) (lp->init_dma_addr),
31653- lp->a.read_csr(ioaddr, CSR0));
31654+ lp->a->read_csr(ioaddr, CSR0));
31655
31656 spin_unlock_irqrestore(&lp->lock, flags);
31657
31658@@ -2218,7 +2218,7 @@ err_free_ring:
31659 * Switch back to 16bit mode to avoid problems with dumb
31660 * DOS packet driver after a warm reboot
31661 */
31662- lp->a.write_bcr(ioaddr, 20, 4);
31663+ lp->a->write_bcr(ioaddr, 20, 4);
31664
31665 err_free_irq:
31666 spin_unlock_irqrestore(&lp->lock, flags);
31667@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
31668
31669 /* wait for stop */
31670 for (i = 0; i < 100; i++)
31671- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
31672+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
31673 break;
31674
31675 if (i >= 100)
31676@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
31677 return;
31678
31679 /* ReInit Ring */
31680- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31681+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31682 i = 0;
31683 while (i++ < 1000)
31684- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31685+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31686 break;
31687
31688- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
31689+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
31690 }
31691
31692 static void pcnet32_tx_timeout(struct net_device *dev)
31693@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
31694 /* Transmitter timeout, serious problems. */
31695 if (pcnet32_debug & NETIF_MSG_DRV)
31696 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
31697- dev->name, lp->a.read_csr(ioaddr, CSR0));
31698- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31699+ dev->name, lp->a->read_csr(ioaddr, CSR0));
31700+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31701 dev->stats.tx_errors++;
31702 if (netif_msg_tx_err(lp)) {
31703 int i;
31704@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31705
31706 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
31707 "%s() called, csr0 %4.4x\n",
31708- __func__, lp->a.read_csr(ioaddr, CSR0));
31709+ __func__, lp->a->read_csr(ioaddr, CSR0));
31710
31711 /* Default status -- will not enable Successful-TxDone
31712 * interrupt when that option is available to us.
31713@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31714 dev->stats.tx_bytes += skb->len;
31715
31716 /* Trigger an immediate send poll. */
31717- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31718+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31719
31720 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
31721 lp->tx_full = 1;
31722@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
31723
31724 spin_lock(&lp->lock);
31725
31726- csr0 = lp->a.read_csr(ioaddr, CSR0);
31727+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31728 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
31729 if (csr0 == 0xffff)
31730 break; /* PCMCIA remove happened */
31731 /* Acknowledge all of the current interrupt sources ASAP. */
31732- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31733+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31734
31735 netif_printk(lp, intr, KERN_DEBUG, dev,
31736 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
31737- csr0, lp->a.read_csr(ioaddr, CSR0));
31738+ csr0, lp->a->read_csr(ioaddr, CSR0));
31739
31740 /* Log misc errors. */
31741 if (csr0 & 0x4000)
31742@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
31743 if (napi_schedule_prep(&lp->napi)) {
31744 u16 val;
31745 /* set interrupt masks */
31746- val = lp->a.read_csr(ioaddr, CSR3);
31747+ val = lp->a->read_csr(ioaddr, CSR3);
31748 val |= 0x5f00;
31749- lp->a.write_csr(ioaddr, CSR3, val);
31750+ lp->a->write_csr(ioaddr, CSR3, val);
31751
31752 __napi_schedule(&lp->napi);
31753 break;
31754 }
31755- csr0 = lp->a.read_csr(ioaddr, CSR0);
31756+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31757 }
31758
31759 netif_printk(lp, intr, KERN_DEBUG, dev,
31760 "exiting interrupt, csr0=%#4.4x\n",
31761- lp->a.read_csr(ioaddr, CSR0));
31762+ lp->a->read_csr(ioaddr, CSR0));
31763
31764 spin_unlock(&lp->lock);
31765
31766@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
31767
31768 spin_lock_irqsave(&lp->lock, flags);
31769
31770- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31771+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31772
31773 netif_printk(lp, ifdown, KERN_DEBUG, dev,
31774 "Shutting down ethercard, status was %2.2x\n",
31775- lp->a.read_csr(ioaddr, CSR0));
31776+ lp->a->read_csr(ioaddr, CSR0));
31777
31778 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
31779- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31780+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31781
31782 /*
31783 * Switch back to 16bit mode to avoid problems with dumb
31784 * DOS packet driver after a warm reboot
31785 */
31786- lp->a.write_bcr(ioaddr, 20, 4);
31787+ lp->a->write_bcr(ioaddr, 20, 4);
31788
31789 spin_unlock_irqrestore(&lp->lock, flags);
31790
31791@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
31792 unsigned long flags;
31793
31794 spin_lock_irqsave(&lp->lock, flags);
31795- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31796+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31797 spin_unlock_irqrestore(&lp->lock, flags);
31798
31799 return &dev->stats;
31800@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
31801 if (dev->flags & IFF_ALLMULTI) {
31802 ib->filter[0] = cpu_to_le32(~0U);
31803 ib->filter[1] = cpu_to_le32(~0U);
31804- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31805- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31806- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31807- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31808+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31809+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31810+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31811+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31812 return;
31813 }
31814 /* clear the multicast filter */
31815@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
31816 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
31817 }
31818 for (i = 0; i < 4; i++)
31819- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
31820+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
31821 le16_to_cpu(mcast_table[i]));
31822 }
31823
31824@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
31825
31826 spin_lock_irqsave(&lp->lock, flags);
31827 suspended = pcnet32_suspend(dev, &flags, 0);
31828- csr15 = lp->a.read_csr(ioaddr, CSR15);
31829+ csr15 = lp->a->read_csr(ioaddr, CSR15);
31830 if (dev->flags & IFF_PROMISC) {
31831 /* Log any net taps. */
31832 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
31833 lp->init_block->mode =
31834 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
31835 7);
31836- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
31837+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
31838 } else {
31839 lp->init_block->mode =
31840 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
31841- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31842+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31843 pcnet32_load_multicast(dev);
31844 }
31845
31846 if (suspended) {
31847 int csr5;
31848 /* clear SUSPEND (SPND) - CSR5 bit 0 */
31849- csr5 = lp->a.read_csr(ioaddr, CSR5);
31850- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31851+ csr5 = lp->a->read_csr(ioaddr, CSR5);
31852+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31853 } else {
31854- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31855+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31856 pcnet32_restart(dev, CSR0_NORMAL);
31857 netif_wake_queue(dev);
31858 }
31859@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
31860 if (!lp->mii)
31861 return 0;
31862
31863- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31864- val_out = lp->a.read_bcr(ioaddr, 34);
31865+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31866+ val_out = lp->a->read_bcr(ioaddr, 34);
31867
31868 return val_out;
31869 }
31870@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
31871 if (!lp->mii)
31872 return;
31873
31874- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31875- lp->a.write_bcr(ioaddr, 34, val);
31876+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31877+ lp->a->write_bcr(ioaddr, 34, val);
31878 }
31879
31880 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
31881@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
31882 curr_link = mii_link_ok(&lp->mii_if);
31883 } else {
31884 ulong ioaddr = dev->base_addr; /* card base I/O address */
31885- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31886+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31887 }
31888 if (!curr_link) {
31889 if (prev_link || verbose) {
31890@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
31891 (ecmd.duplex == DUPLEX_FULL)
31892 ? "full" : "half");
31893 }
31894- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
31895+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
31896 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
31897 if (lp->mii_if.full_duplex)
31898 bcr9 |= (1 << 0);
31899 else
31900 bcr9 &= ~(1 << 0);
31901- lp->a.write_bcr(dev->base_addr, 9, bcr9);
31902+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
31903 }
31904 } else {
31905 netif_info(lp, link, dev, "link up\n");
31906diff -urNp linux-3.0.7/drivers/net/ppp_generic.c linux-3.0.7/drivers/net/ppp_generic.c
31907--- linux-3.0.7/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
31908+++ linux-3.0.7/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
31909@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
31910 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
31911 struct ppp_stats stats;
31912 struct ppp_comp_stats cstats;
31913- char *vers;
31914
31915 switch (cmd) {
31916 case SIOCGPPPSTATS:
31917@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
31918 break;
31919
31920 case SIOCGPPPVER:
31921- vers = PPP_VERSION;
31922- if (copy_to_user(addr, vers, strlen(vers) + 1))
31923+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
31924 break;
31925 err = 0;
31926 break;
31927diff -urNp linux-3.0.7/drivers/net/r8169.c linux-3.0.7/drivers/net/r8169.c
31928--- linux-3.0.7/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
31929+++ linux-3.0.7/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
31930@@ -645,12 +645,12 @@ struct rtl8169_private {
31931 struct mdio_ops {
31932 void (*write)(void __iomem *, int, int);
31933 int (*read)(void __iomem *, int);
31934- } mdio_ops;
31935+ } __no_const mdio_ops;
31936
31937 struct pll_power_ops {
31938 void (*down)(struct rtl8169_private *);
31939 void (*up)(struct rtl8169_private *);
31940- } pll_power_ops;
31941+ } __no_const pll_power_ops;
31942
31943 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
31944 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
31945diff -urNp linux-3.0.7/drivers/net/sis190.c linux-3.0.7/drivers/net/sis190.c
31946--- linux-3.0.7/drivers/net/sis190.c 2011-09-02 18:11:21.000000000 -0400
31947+++ linux-3.0.7/drivers/net/sis190.c 2011-10-11 10:44:33.000000000 -0400
31948@@ -1623,7 +1623,7 @@ static int __devinit sis190_get_mac_addr
31949 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
31950 struct net_device *dev)
31951 {
31952- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
31953+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
31954 struct sis190_private *tp = netdev_priv(dev);
31955 struct pci_dev *isa_bridge;
31956 u8 reg, tmp8;
31957diff -urNp linux-3.0.7/drivers/net/sundance.c linux-3.0.7/drivers/net/sundance.c
31958--- linux-3.0.7/drivers/net/sundance.c 2011-07-21 22:17:23.000000000 -0400
31959+++ linux-3.0.7/drivers/net/sundance.c 2011-10-11 10:44:33.000000000 -0400
31960@@ -218,7 +218,7 @@ enum {
31961 struct pci_id_info {
31962 const char *name;
31963 };
31964-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31965+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31966 {"D-Link DFE-550TX FAST Ethernet Adapter"},
31967 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
31968 {"D-Link DFE-580TX 4 port Server Adapter"},
31969diff -urNp linux-3.0.7/drivers/net/tg3.h linux-3.0.7/drivers/net/tg3.h
31970--- linux-3.0.7/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
31971+++ linux-3.0.7/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
31972@@ -134,6 +134,7 @@
31973 #define CHIPREV_ID_5750_A0 0x4000
31974 #define CHIPREV_ID_5750_A1 0x4001
31975 #define CHIPREV_ID_5750_A3 0x4003
31976+#define CHIPREV_ID_5750_C1 0x4201
31977 #define CHIPREV_ID_5750_C2 0x4202
31978 #define CHIPREV_ID_5752_A0_HW 0x5000
31979 #define CHIPREV_ID_5752_A0 0x6000
31980diff -urNp linux-3.0.7/drivers/net/tokenring/abyss.c linux-3.0.7/drivers/net/tokenring/abyss.c
31981--- linux-3.0.7/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
31982+++ linux-3.0.7/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
31983@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
31984
31985 static int __init abyss_init (void)
31986 {
31987- abyss_netdev_ops = tms380tr_netdev_ops;
31988+ pax_open_kernel();
31989+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31990
31991- abyss_netdev_ops.ndo_open = abyss_open;
31992- abyss_netdev_ops.ndo_stop = abyss_close;
31993+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
31994+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
31995+ pax_close_kernel();
31996
31997 return pci_register_driver(&abyss_driver);
31998 }
31999diff -urNp linux-3.0.7/drivers/net/tokenring/madgemc.c linux-3.0.7/drivers/net/tokenring/madgemc.c
32000--- linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
32001+++ linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
32002@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32003
32004 static int __init madgemc_init (void)
32005 {
32006- madgemc_netdev_ops = tms380tr_netdev_ops;
32007- madgemc_netdev_ops.ndo_open = madgemc_open;
32008- madgemc_netdev_ops.ndo_stop = madgemc_close;
32009+ pax_open_kernel();
32010+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32011+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32012+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32013+ pax_close_kernel();
32014
32015 return mca_register_driver (&madgemc_driver);
32016 }
32017diff -urNp linux-3.0.7/drivers/net/tokenring/proteon.c linux-3.0.7/drivers/net/tokenring/proteon.c
32018--- linux-3.0.7/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
32019+++ linux-3.0.7/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
32020@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32021 struct platform_device *pdev;
32022 int i, num = 0, err = 0;
32023
32024- proteon_netdev_ops = tms380tr_netdev_ops;
32025- proteon_netdev_ops.ndo_open = proteon_open;
32026- proteon_netdev_ops.ndo_stop = tms380tr_close;
32027+ pax_open_kernel();
32028+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32029+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32030+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32031+ pax_close_kernel();
32032
32033 err = platform_driver_register(&proteon_driver);
32034 if (err)
32035diff -urNp linux-3.0.7/drivers/net/tokenring/skisa.c linux-3.0.7/drivers/net/tokenring/skisa.c
32036--- linux-3.0.7/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
32037+++ linux-3.0.7/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
32038@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32039 struct platform_device *pdev;
32040 int i, num = 0, err = 0;
32041
32042- sk_isa_netdev_ops = tms380tr_netdev_ops;
32043- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32044- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32045+ pax_open_kernel();
32046+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32047+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32048+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32049+ pax_close_kernel();
32050
32051 err = platform_driver_register(&sk_isa_driver);
32052 if (err)
32053diff -urNp linux-3.0.7/drivers/net/tulip/de2104x.c linux-3.0.7/drivers/net/tulip/de2104x.c
32054--- linux-3.0.7/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
32055+++ linux-3.0.7/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
32056@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
32057 struct de_srom_info_leaf *il;
32058 void *bufp;
32059
32060+ pax_track_stack();
32061+
32062 /* download entire eeprom */
32063 for (i = 0; i < DE_EEPROM_WORDS; i++)
32064 ((__le16 *)ee_data)[i] =
32065diff -urNp linux-3.0.7/drivers/net/tulip/de4x5.c linux-3.0.7/drivers/net/tulip/de4x5.c
32066--- linux-3.0.7/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
32067+++ linux-3.0.7/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
32068@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
32069 for (i=0; i<ETH_ALEN; i++) {
32070 tmp.addr[i] = dev->dev_addr[i];
32071 }
32072- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32073+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32074 break;
32075
32076 case DE4X5_SET_HWADDR: /* Set the hardware address */
32077@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
32078 spin_lock_irqsave(&lp->lock, flags);
32079 memcpy(&statbuf, &lp->pktStats, ioc->len);
32080 spin_unlock_irqrestore(&lp->lock, flags);
32081- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32082+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32083 return -EFAULT;
32084 break;
32085 }
32086diff -urNp linux-3.0.7/drivers/net/tulip/eeprom.c linux-3.0.7/drivers/net/tulip/eeprom.c
32087--- linux-3.0.7/drivers/net/tulip/eeprom.c 2011-07-21 22:17:23.000000000 -0400
32088+++ linux-3.0.7/drivers/net/tulip/eeprom.c 2011-10-11 10:44:33.000000000 -0400
32089@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32090 {NULL}};
32091
32092
32093-static const char *block_name[] __devinitdata = {
32094+static const char *block_name[] __devinitconst = {
32095 "21140 non-MII",
32096 "21140 MII PHY",
32097 "21142 Serial PHY",
32098diff -urNp linux-3.0.7/drivers/net/tulip/winbond-840.c linux-3.0.7/drivers/net/tulip/winbond-840.c
32099--- linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-07-21 22:17:23.000000000 -0400
32100+++ linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-10-11 10:44:33.000000000 -0400
32101@@ -236,7 +236,7 @@ struct pci_id_info {
32102 int drv_flags; /* Driver use, intended as capability flags. */
32103 };
32104
32105-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32106+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32107 { /* Sometime a Level-One switch card. */
32108 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32109 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32110diff -urNp linux-3.0.7/drivers/net/usb/hso.c linux-3.0.7/drivers/net/usb/hso.c
32111--- linux-3.0.7/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
32112+++ linux-3.0.7/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
32113@@ -71,7 +71,7 @@
32114 #include <asm/byteorder.h>
32115 #include <linux/serial_core.h>
32116 #include <linux/serial.h>
32117-
32118+#include <asm/local.h>
32119
32120 #define MOD_AUTHOR "Option Wireless"
32121 #define MOD_DESCRIPTION "USB High Speed Option driver"
32122@@ -257,7 +257,7 @@ struct hso_serial {
32123
32124 /* from usb_serial_port */
32125 struct tty_struct *tty;
32126- int open_count;
32127+ local_t open_count;
32128 spinlock_t serial_lock;
32129
32130 int (*write_data) (struct hso_serial *serial);
32131@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32132 struct urb *urb;
32133
32134 urb = serial->rx_urb[0];
32135- if (serial->open_count > 0) {
32136+ if (local_read(&serial->open_count) > 0) {
32137 count = put_rxbuf_data(urb, serial);
32138 if (count == -1)
32139 return;
32140@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32141 DUMP1(urb->transfer_buffer, urb->actual_length);
32142
32143 /* Anyone listening? */
32144- if (serial->open_count == 0)
32145+ if (local_read(&serial->open_count) == 0)
32146 return;
32147
32148 if (status == 0) {
32149@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32150 spin_unlock_irq(&serial->serial_lock);
32151
32152 /* check for port already opened, if not set the termios */
32153- serial->open_count++;
32154- if (serial->open_count == 1) {
32155+ if (local_inc_return(&serial->open_count) == 1) {
32156 serial->rx_state = RX_IDLE;
32157 /* Force default termio settings */
32158 _hso_serial_set_termios(tty, NULL);
32159@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32160 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32161 if (result) {
32162 hso_stop_serial_device(serial->parent);
32163- serial->open_count--;
32164+ local_dec(&serial->open_count);
32165 kref_put(&serial->parent->ref, hso_serial_ref_free);
32166 }
32167 } else {
32168@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32169
32170 /* reset the rts and dtr */
32171 /* do the actual close */
32172- serial->open_count--;
32173+ local_dec(&serial->open_count);
32174
32175- if (serial->open_count <= 0) {
32176- serial->open_count = 0;
32177+ if (local_read(&serial->open_count) <= 0) {
32178+ local_set(&serial->open_count, 0);
32179 spin_lock_irq(&serial->serial_lock);
32180 if (serial->tty == tty) {
32181 serial->tty->driver_data = NULL;
32182@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32183
32184 /* the actual setup */
32185 spin_lock_irqsave(&serial->serial_lock, flags);
32186- if (serial->open_count)
32187+ if (local_read(&serial->open_count))
32188 _hso_serial_set_termios(tty, old);
32189 else
32190 tty->termios = old;
32191@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32192 D1("Pending read interrupt on port %d\n", i);
32193 spin_lock(&serial->serial_lock);
32194 if (serial->rx_state == RX_IDLE &&
32195- serial->open_count > 0) {
32196+ local_read(&serial->open_count) > 0) {
32197 /* Setup and send a ctrl req read on
32198 * port i */
32199 if (!serial->rx_urb_filled[0]) {
32200@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32201 /* Start all serial ports */
32202 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32203 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32204- if (dev2ser(serial_table[i])->open_count) {
32205+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32206 result =
32207 hso_start_serial_device(serial_table[i], GFP_NOIO);
32208 hso_kick_transmit(dev2ser(serial_table[i]));
32209diff -urNp linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c
32210--- linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
32211+++ linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
32212@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
32213 * Return with error code if any of the queue indices
32214 * is out of range
32215 */
32216- if (p->ring_index[i] < 0 ||
32217- p->ring_index[i] >= adapter->num_rx_queues)
32218+ if (p->ring_index[i] >= adapter->num_rx_queues)
32219 return -EINVAL;
32220 }
32221
32222diff -urNp linux-3.0.7/drivers/net/vxge/vxge-config.h linux-3.0.7/drivers/net/vxge/vxge-config.h
32223--- linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
32224+++ linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
32225@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
32226 void (*link_down)(struct __vxge_hw_device *devh);
32227 void (*crit_err)(struct __vxge_hw_device *devh,
32228 enum vxge_hw_event type, u64 ext_data);
32229-};
32230+} __no_const;
32231
32232 /*
32233 * struct __vxge_hw_blockpool_entry - Block private data structure
32234diff -urNp linux-3.0.7/drivers/net/vxge/vxge-main.c linux-3.0.7/drivers/net/vxge/vxge-main.c
32235--- linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
32236+++ linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
32237@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32238 struct sk_buff *completed[NR_SKB_COMPLETED];
32239 int more;
32240
32241+ pax_track_stack();
32242+
32243 do {
32244 more = 0;
32245 skb_ptr = completed;
32246@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
32247 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32248 int index;
32249
32250+ pax_track_stack();
32251+
32252 /*
32253 * Filling
32254 * - itable with bucket numbers
32255diff -urNp linux-3.0.7/drivers/net/vxge/vxge-traffic.h linux-3.0.7/drivers/net/vxge/vxge-traffic.h
32256--- linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
32257+++ linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
32258@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32259 struct vxge_hw_mempool_dma *dma_object,
32260 u32 index,
32261 u32 is_last);
32262-};
32263+} __no_const;
32264
32265 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32266 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32267diff -urNp linux-3.0.7/drivers/net/wan/cycx_x25.c linux-3.0.7/drivers/net/wan/cycx_x25.c
32268--- linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
32269+++ linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
32270@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
32271 unsigned char hex[1024],
32272 * phex = hex;
32273
32274+ pax_track_stack();
32275+
32276 if (len >= (sizeof(hex) / 2))
32277 len = (sizeof(hex) / 2) - 1;
32278
32279diff -urNp linux-3.0.7/drivers/net/wan/hdlc_x25.c linux-3.0.7/drivers/net/wan/hdlc_x25.c
32280--- linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
32281+++ linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
32282@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32283
32284 static int x25_open(struct net_device *dev)
32285 {
32286- struct lapb_register_struct cb;
32287+ static struct lapb_register_struct cb = {
32288+ .connect_confirmation = x25_connected,
32289+ .connect_indication = x25_connected,
32290+ .disconnect_confirmation = x25_disconnected,
32291+ .disconnect_indication = x25_disconnected,
32292+ .data_indication = x25_data_indication,
32293+ .data_transmit = x25_data_transmit
32294+ };
32295 int result;
32296
32297- cb.connect_confirmation = x25_connected;
32298- cb.connect_indication = x25_connected;
32299- cb.disconnect_confirmation = x25_disconnected;
32300- cb.disconnect_indication = x25_disconnected;
32301- cb.data_indication = x25_data_indication;
32302- cb.data_transmit = x25_data_transmit;
32303-
32304 result = lapb_register(dev, &cb);
32305 if (result != LAPB_OK)
32306 return result;
32307diff -urNp linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c
32308--- linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
32309+++ linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
32310@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32311 int do_autopm = 1;
32312 DECLARE_COMPLETION_ONSTACK(notif_completion);
32313
32314+ pax_track_stack();
32315+
32316 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32317 i2400m, ack, ack_size);
32318 BUG_ON(_ack == i2400m->bm_ack_buf);
32319diff -urNp linux-3.0.7/drivers/net/wireless/airo.c linux-3.0.7/drivers/net/wireless/airo.c
32320--- linux-3.0.7/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
32321+++ linux-3.0.7/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
32322@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32323 BSSListElement * loop_net;
32324 BSSListElement * tmp_net;
32325
32326+ pax_track_stack();
32327+
32328 /* Blow away current list of scan results */
32329 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32330 list_move_tail (&loop_net->list, &ai->network_free_list);
32331@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32332 WepKeyRid wkr;
32333 int rc;
32334
32335+ pax_track_stack();
32336+
32337 memset( &mySsid, 0, sizeof( mySsid ) );
32338 kfree (ai->flash);
32339 ai->flash = NULL;
32340@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32341 __le32 *vals = stats.vals;
32342 int len;
32343
32344+ pax_track_stack();
32345+
32346 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32347 return -ENOMEM;
32348 data = file->private_data;
32349@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32350 /* If doLoseSync is not 1, we won't do a Lose Sync */
32351 int doLoseSync = -1;
32352
32353+ pax_track_stack();
32354+
32355 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32356 return -ENOMEM;
32357 data = file->private_data;
32358@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32359 int i;
32360 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32361
32362+ pax_track_stack();
32363+
32364 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32365 if (!qual)
32366 return -ENOMEM;
32367@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32368 CapabilityRid cap_rid;
32369 __le32 *vals = stats_rid.vals;
32370
32371+ pax_track_stack();
32372+
32373 /* Get stats out of the card */
32374 clear_bit(JOB_WSTATS, &local->jobs);
32375 if (local->power.event) {
32376diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath.h linux-3.0.7/drivers/net/wireless/ath/ath.h
32377--- linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
32378+++ linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
32379@@ -121,6 +121,7 @@ struct ath_ops {
32380 void (*write_flush) (void *);
32381 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32382 };
32383+typedef struct ath_ops __no_const ath_ops_no_const;
32384
32385 struct ath_common;
32386 struct ath_bus_ops;
32387diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c
32388--- linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
32389+++ linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
32390@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
32391 unsigned int v;
32392 u64 tsf;
32393
32394+ pax_track_stack();
32395+
32396 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32397 len += snprintf(buf+len, sizeof(buf)-len,
32398 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32399@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
32400 unsigned int len = 0;
32401 unsigned int i;
32402
32403+ pax_track_stack();
32404+
32405 len += snprintf(buf+len, sizeof(buf)-len,
32406 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32407
32408@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
32409 unsigned int i;
32410 unsigned int v;
32411
32412+ pax_track_stack();
32413+
32414 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
32415 sc->ah->ah_ant_mode);
32416 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
32417@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
32418 unsigned int len = 0;
32419 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
32420
32421+ pax_track_stack();
32422+
32423 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
32424 sc->bssidmask);
32425 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
32426@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
32427 unsigned int len = 0;
32428 int i;
32429
32430+ pax_track_stack();
32431+
32432 len += snprintf(buf+len, sizeof(buf)-len,
32433 "RX\n---------------------\n");
32434 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
32435@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
32436 char buf[700];
32437 unsigned int len = 0;
32438
32439+ pax_track_stack();
32440+
32441 len += snprintf(buf+len, sizeof(buf)-len,
32442 "HW has PHY error counters:\t%s\n",
32443 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
32444@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32445 struct ath5k_buf *bf, *bf0;
32446 int i, n;
32447
32448+ pax_track_stack();
32449+
32450 len += snprintf(buf+len, sizeof(buf)-len,
32451 "available txbuffers: %d\n", sc->txbuf_len);
32452
32453diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32454--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
32455+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
32456@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32457 int i, im, j;
32458 int nmeasurement;
32459
32460+ pax_track_stack();
32461+
32462 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32463 if (ah->txchainmask & (1 << i))
32464 num_chains++;
32465diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32466--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
32467+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
32468@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
32469 int theta_low_bin = 0;
32470 int i;
32471
32472+ pax_track_stack();
32473+
32474 /* disregard any bin that contains <= 16 samples */
32475 thresh_accum_cnt = 16;
32476 scale_factor = 5;
32477diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c
32478--- linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
32479+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
32480@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
32481 char buf[512];
32482 unsigned int len = 0;
32483
32484+ pax_track_stack();
32485+
32486 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32487 len += snprintf(buf + len, sizeof(buf) - len,
32488 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32489@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
32490 u8 addr[ETH_ALEN];
32491 u32 tmp;
32492
32493+ pax_track_stack();
32494+
32495 len += snprintf(buf + len, sizeof(buf) - len,
32496 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32497 wiphy_name(sc->hw->wiphy),
32498diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32499--- linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
32500+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
32501@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32502 unsigned int len = 0;
32503 int ret = 0;
32504
32505+ pax_track_stack();
32506+
32507 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32508
32509 ath9k_htc_ps_wakeup(priv);
32510@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32511 unsigned int len = 0;
32512 int ret = 0;
32513
32514+ pax_track_stack();
32515+
32516 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32517
32518 ath9k_htc_ps_wakeup(priv);
32519@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32520 unsigned int len = 0;
32521 int ret = 0;
32522
32523+ pax_track_stack();
32524+
32525 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32526
32527 ath9k_htc_ps_wakeup(priv);
32528@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32529 char buf[512];
32530 unsigned int len = 0;
32531
32532+ pax_track_stack();
32533+
32534 len += snprintf(buf + len, sizeof(buf) - len,
32535 "%20s : %10u\n", "Buffers queued",
32536 priv->debug.tx_stats.buf_queued);
32537@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32538 char buf[512];
32539 unsigned int len = 0;
32540
32541+ pax_track_stack();
32542+
32543 spin_lock_bh(&priv->tx.tx_lock);
32544
32545 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32546@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32547 char buf[512];
32548 unsigned int len = 0;
32549
32550+ pax_track_stack();
32551+
32552 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32553 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32554
32555diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h
32556--- linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
32557+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
32558@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
32559
32560 /* ANI */
32561 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32562-};
32563+} __no_const;
32564
32565 /**
32566 * struct ath_hw_ops - callbacks used by hardware code and driver code
32567@@ -637,7 +637,7 @@ struct ath_hw_ops {
32568 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32569 struct ath_hw_antcomb_conf *antconf);
32570
32571-};
32572+} __no_const;
32573
32574 struct ath_nf_limits {
32575 s16 max;
32576@@ -650,7 +650,7 @@ struct ath_nf_limits {
32577 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32578
32579 struct ath_hw {
32580- struct ath_ops reg_ops;
32581+ ath_ops_no_const reg_ops;
32582
32583 struct ieee80211_hw *hw;
32584 struct ath_common common;
32585diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c
32586--- linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
32587+++ linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
32588@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
32589 int err;
32590 DECLARE_SSID_BUF(ssid);
32591
32592+ pax_track_stack();
32593+
32594 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32595
32596 if (ssid_len)
32597@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
32598 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32599 int err;
32600
32601+ pax_track_stack();
32602+
32603 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32604 idx, keylen, len);
32605
32606diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c
32607--- linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
32608+++ linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
32609@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32610 unsigned long flags;
32611 DECLARE_SSID_BUF(ssid);
32612
32613+ pax_track_stack();
32614+
32615 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32616 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32617 print_ssid(ssid, info_element->data, info_element->len),
32618diff -urNp linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c
32619--- linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:54:54.000000000 -0400
32620+++ linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:55:27.000000000 -0400
32621@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
32622 */
32623 if (iwl3945_mod_params.disable_hw_scan) {
32624 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32625- iwl3945_hw_ops.hw_scan = NULL;
32626+ pax_open_kernel();
32627+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32628+ pax_close_kernel();
32629 }
32630
32631 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32632diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32633--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
32634+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
32635@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
32636 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32637 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32638
32639+ pax_track_stack();
32640+
32641 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32642
32643 /* Treat uninitialized rate scaling data same as non-existing. */
32644@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
32645 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32646 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32647
32648+ pax_track_stack();
32649+
32650 /* Override starting rate (index 0) if needed for debug purposes */
32651 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32652
32653diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h
32654--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
32655+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
32656@@ -68,8 +68,8 @@ do {
32657 } while (0)
32658
32659 #else
32660-#define IWL_DEBUG(__priv, level, fmt, args...)
32661-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32662+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32663+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32664 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32665 const void *p, u32 len)
32666 {}
32667diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32668--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
32669+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
32670@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
32671 int pos = 0;
32672 const size_t bufsz = sizeof(buf);
32673
32674+ pax_track_stack();
32675+
32676 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32677 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32678 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32679@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32680 char buf[256 * NUM_IWL_RXON_CTX];
32681 const size_t bufsz = sizeof(buf);
32682
32683+ pax_track_stack();
32684+
32685 for_each_context(priv, ctx) {
32686 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32687 ctx->ctxid);
32688diff -urNp linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c
32689--- linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
32690+++ linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
32691@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32692 int buf_len = 512;
32693 size_t len = 0;
32694
32695+ pax_track_stack();
32696+
32697 if (*ppos != 0)
32698 return 0;
32699 if (count < sizeof(buf))
32700diff -urNp linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c
32701--- linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
32702+++ linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
32703@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
32704 return -EINVAL;
32705
32706 if (fake_hw_scan) {
32707- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32708- mac80211_hwsim_ops.sw_scan_start = NULL;
32709- mac80211_hwsim_ops.sw_scan_complete = NULL;
32710+ pax_open_kernel();
32711+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32712+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
32713+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
32714+ pax_close_kernel();
32715 }
32716
32717 spin_lock_init(&hwsim_radio_lock);
32718diff -urNp linux-3.0.7/drivers/net/wireless/rndis_wlan.c linux-3.0.7/drivers/net/wireless/rndis_wlan.c
32719--- linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
32720+++ linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
32721@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
32722
32723 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
32724
32725- if (rts_threshold < 0 || rts_threshold > 2347)
32726+ if (rts_threshold > 2347)
32727 rts_threshold = 2347;
32728
32729 tmp = cpu_to_le32(rts_threshold);
32730diff -urNp linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
32731--- linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
32732+++ linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
32733@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
32734 u8 rfpath;
32735 u8 num_total_rfpath = rtlphy->num_total_rfpath;
32736
32737+ pax_track_stack();
32738+
32739 precommoncmdcnt = 0;
32740 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
32741 MAX_PRECMD_CNT,
32742diff -urNp linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h
32743--- linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
32744+++ linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
32745@@ -266,7 +266,7 @@ struct wl1251_if_operations {
32746 void (*reset)(struct wl1251 *wl);
32747 void (*enable_irq)(struct wl1251 *wl);
32748 void (*disable_irq)(struct wl1251 *wl);
32749-};
32750+} __no_const;
32751
32752 struct wl1251 {
32753 struct ieee80211_hw *hw;
32754diff -urNp linux-3.0.7/drivers/net/wireless/wl12xx/spi.c linux-3.0.7/drivers/net/wireless/wl12xx/spi.c
32755--- linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
32756+++ linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
32757@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
32758 u32 chunk_len;
32759 int i;
32760
32761+ pax_track_stack();
32762+
32763 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
32764
32765 spi_message_init(&m);
32766diff -urNp linux-3.0.7/drivers/oprofile/buffer_sync.c linux-3.0.7/drivers/oprofile/buffer_sync.c
32767--- linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
32768+++ linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
32769@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
32770 if (cookie == NO_COOKIE)
32771 offset = pc;
32772 if (cookie == INVALID_COOKIE) {
32773- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32774+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32775 offset = pc;
32776 }
32777 if (cookie != last_cookie) {
32778@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
32779 /* add userspace sample */
32780
32781 if (!mm) {
32782- atomic_inc(&oprofile_stats.sample_lost_no_mm);
32783+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32784 return 0;
32785 }
32786
32787 cookie = lookup_dcookie(mm, s->eip, &offset);
32788
32789 if (cookie == INVALID_COOKIE) {
32790- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32791+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32792 return 0;
32793 }
32794
32795@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
32796 /* ignore backtraces if failed to add a sample */
32797 if (state == sb_bt_start) {
32798 state = sb_bt_ignore;
32799- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32800+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32801 }
32802 }
32803 release_mm(mm);
32804diff -urNp linux-3.0.7/drivers/oprofile/event_buffer.c linux-3.0.7/drivers/oprofile/event_buffer.c
32805--- linux-3.0.7/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
32806+++ linux-3.0.7/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
32807@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32808 }
32809
32810 if (buffer_pos == buffer_size) {
32811- atomic_inc(&oprofile_stats.event_lost_overflow);
32812+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32813 return;
32814 }
32815
32816diff -urNp linux-3.0.7/drivers/oprofile/oprof.c linux-3.0.7/drivers/oprofile/oprof.c
32817--- linux-3.0.7/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
32818+++ linux-3.0.7/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
32819@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32820 if (oprofile_ops.switch_events())
32821 return;
32822
32823- atomic_inc(&oprofile_stats.multiplex_counter);
32824+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32825 start_switch_worker();
32826 }
32827
32828diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.c linux-3.0.7/drivers/oprofile/oprofile_stats.c
32829--- linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
32830+++ linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
32831@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32832 cpu_buf->sample_invalid_eip = 0;
32833 }
32834
32835- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32836- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32837- atomic_set(&oprofile_stats.event_lost_overflow, 0);
32838- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32839- atomic_set(&oprofile_stats.multiplex_counter, 0);
32840+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32841+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32842+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32843+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32844+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32845 }
32846
32847
32848diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.h linux-3.0.7/drivers/oprofile/oprofile_stats.h
32849--- linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
32850+++ linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
32851@@ -13,11 +13,11 @@
32852 #include <asm/atomic.h>
32853
32854 struct oprofile_stat_struct {
32855- atomic_t sample_lost_no_mm;
32856- atomic_t sample_lost_no_mapping;
32857- atomic_t bt_lost_no_mapping;
32858- atomic_t event_lost_overflow;
32859- atomic_t multiplex_counter;
32860+ atomic_unchecked_t sample_lost_no_mm;
32861+ atomic_unchecked_t sample_lost_no_mapping;
32862+ atomic_unchecked_t bt_lost_no_mapping;
32863+ atomic_unchecked_t event_lost_overflow;
32864+ atomic_unchecked_t multiplex_counter;
32865 };
32866
32867 extern struct oprofile_stat_struct oprofile_stats;
32868diff -urNp linux-3.0.7/drivers/oprofile/oprofilefs.c linux-3.0.7/drivers/oprofile/oprofilefs.c
32869--- linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
32870+++ linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
32871@@ -186,7 +186,7 @@ static const struct file_operations atom
32872
32873
32874 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32875- char const *name, atomic_t *val)
32876+ char const *name, atomic_unchecked_t *val)
32877 {
32878 return __oprofilefs_create_file(sb, root, name,
32879 &atomic_ro_fops, 0444, val);
32880diff -urNp linux-3.0.7/drivers/parport/procfs.c linux-3.0.7/drivers/parport/procfs.c
32881--- linux-3.0.7/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
32882+++ linux-3.0.7/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
32883@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32884
32885 *ppos += len;
32886
32887- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32888+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32889 }
32890
32891 #ifdef CONFIG_PARPORT_1284
32892@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32893
32894 *ppos += len;
32895
32896- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32897+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32898 }
32899 #endif /* IEEE1284.3 support. */
32900
32901diff -urNp linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h
32902--- linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
32903+++ linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
32904@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
32905 int (*hardware_test) (struct slot* slot, u32 value);
32906 u8 (*get_power) (struct slot* slot);
32907 int (*set_power) (struct slot* slot, int value);
32908-};
32909+} __no_const;
32910
32911 struct cpci_hp_controller {
32912 unsigned int irq;
32913diff -urNp linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c
32914--- linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
32915+++ linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
32916@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32917
32918 void compaq_nvram_init (void __iomem *rom_start)
32919 {
32920+
32921+#ifndef CONFIG_PAX_KERNEXEC
32922 if (rom_start) {
32923 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32924 }
32925+#endif
32926+
32927 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32928
32929 /* initialize our int15 lock */
32930diff -urNp linux-3.0.7/drivers/pci/pcie/aspm.c linux-3.0.7/drivers/pci/pcie/aspm.c
32931--- linux-3.0.7/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
32932+++ linux-3.0.7/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
32933@@ -27,9 +27,9 @@
32934 #define MODULE_PARAM_PREFIX "pcie_aspm."
32935
32936 /* Note: those are not register definitions */
32937-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32938-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32939-#define ASPM_STATE_L1 (4) /* L1 state */
32940+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32941+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32942+#define ASPM_STATE_L1 (4U) /* L1 state */
32943 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32944 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32945
32946diff -urNp linux-3.0.7/drivers/pci/probe.c linux-3.0.7/drivers/pci/probe.c
32947--- linux-3.0.7/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
32948+++ linux-3.0.7/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
32949@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
32950 u32 l, sz, mask;
32951 u16 orig_cmd;
32952
32953- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
32954+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
32955
32956 if (!dev->mmio_always_on) {
32957 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
32958diff -urNp linux-3.0.7/drivers/pci/proc.c linux-3.0.7/drivers/pci/proc.c
32959--- linux-3.0.7/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
32960+++ linux-3.0.7/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
32961@@ -476,7 +476,16 @@ static const struct file_operations proc
32962 static int __init pci_proc_init(void)
32963 {
32964 struct pci_dev *dev = NULL;
32965+
32966+#ifdef CONFIG_GRKERNSEC_PROC_ADD
32967+#ifdef CONFIG_GRKERNSEC_PROC_USER
32968+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32969+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32970+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32971+#endif
32972+#else
32973 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32974+#endif
32975 proc_create("devices", 0, proc_bus_pci_dir,
32976 &proc_bus_pci_dev_operations);
32977 proc_initialized = 1;
32978diff -urNp linux-3.0.7/drivers/pci/xen-pcifront.c linux-3.0.7/drivers/pci/xen-pcifront.c
32979--- linux-3.0.7/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
32980+++ linux-3.0.7/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
32981@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
32982 struct pcifront_sd *sd = bus->sysdata;
32983 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32984
32985+ pax_track_stack();
32986+
32987 if (verbose_request)
32988 dev_info(&pdev->xdev->dev,
32989 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
32990@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
32991 struct pcifront_sd *sd = bus->sysdata;
32992 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32993
32994+ pax_track_stack();
32995+
32996 if (verbose_request)
32997 dev_info(&pdev->xdev->dev,
32998 "write dev=%04x:%02x:%02x.%01x - "
32999@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33000 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33001 struct msi_desc *entry;
33002
33003+ pax_track_stack();
33004+
33005 if (nvec > SH_INFO_MAX_VEC) {
33006 dev_err(&dev->dev, "too much vector for pci frontend: %x."
33007 " Increase SH_INFO_MAX_VEC.\n", nvec);
33008@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33009 struct pcifront_sd *sd = dev->bus->sysdata;
33010 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33011
33012+ pax_track_stack();
33013+
33014 err = do_pci_op(pdev, &op);
33015
33016 /* What should do for error ? */
33017@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33018 struct pcifront_sd *sd = dev->bus->sysdata;
33019 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33020
33021+ pax_track_stack();
33022+
33023 err = do_pci_op(pdev, &op);
33024 if (likely(!err)) {
33025 vector[0] = op.value;
33026diff -urNp linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c
33027--- linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
33028+++ linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
33029@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33030 return 0;
33031 }
33032
33033-void static hotkey_mask_warn_incomplete_mask(void)
33034+static void hotkey_mask_warn_incomplete_mask(void)
33035 {
33036 /* log only what the user can fix... */
33037 const u32 wantedmask = hotkey_driver_mask &
33038diff -urNp linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c
33039--- linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
33040+++ linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
33041@@ -59,7 +59,7 @@ do { \
33042 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33043 } while(0)
33044
33045-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33046+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33047 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33048
33049 /*
33050@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33051
33052 cpu = get_cpu();
33053 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33054+
33055+ pax_open_kernel();
33056 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33057+ pax_close_kernel();
33058
33059 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33060 spin_lock_irqsave(&pnp_bios_lock, flags);
33061@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33062 :"memory");
33063 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33064
33065+ pax_open_kernel();
33066 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33067+ pax_close_kernel();
33068+
33069 put_cpu();
33070
33071 /* If we get here and this is set then the PnP BIOS faulted on us. */
33072@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33073 return status;
33074 }
33075
33076-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33077+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33078 {
33079 int i;
33080
33081@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33082 pnp_bios_callpoint.offset = header->fields.pm16offset;
33083 pnp_bios_callpoint.segment = PNP_CS16;
33084
33085+ pax_open_kernel();
33086+
33087 for_each_possible_cpu(i) {
33088 struct desc_struct *gdt = get_cpu_gdt_table(i);
33089 if (!gdt)
33090@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33091 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33092 (unsigned long)__va(header->fields.pm16dseg));
33093 }
33094+
33095+ pax_close_kernel();
33096 }
33097diff -urNp linux-3.0.7/drivers/pnp/resource.c linux-3.0.7/drivers/pnp/resource.c
33098--- linux-3.0.7/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
33099+++ linux-3.0.7/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
33100@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33101 return 1;
33102
33103 /* check if the resource is valid */
33104- if (*irq < 0 || *irq > 15)
33105+ if (*irq > 15)
33106 return 0;
33107
33108 /* check if the resource is reserved */
33109@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33110 return 1;
33111
33112 /* check if the resource is valid */
33113- if (*dma < 0 || *dma == 4 || *dma > 7)
33114+ if (*dma == 4 || *dma > 7)
33115 return 0;
33116
33117 /* check if the resource is reserved */
33118diff -urNp linux-3.0.7/drivers/power/bq27x00_battery.c linux-3.0.7/drivers/power/bq27x00_battery.c
33119--- linux-3.0.7/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
33120+++ linux-3.0.7/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
33121@@ -67,7 +67,7 @@
33122 struct bq27x00_device_info;
33123 struct bq27x00_access_methods {
33124 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33125-};
33126+} __no_const;
33127
33128 enum bq27x00_chip { BQ27000, BQ27500 };
33129
33130diff -urNp linux-3.0.7/drivers/regulator/max8660.c linux-3.0.7/drivers/regulator/max8660.c
33131--- linux-3.0.7/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
33132+++ linux-3.0.7/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
33133@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33134 max8660->shadow_regs[MAX8660_OVER1] = 5;
33135 } else {
33136 /* Otherwise devices can be toggled via software */
33137- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33138- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33139+ pax_open_kernel();
33140+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33141+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33142+ pax_close_kernel();
33143 }
33144
33145 /*
33146diff -urNp linux-3.0.7/drivers/regulator/mc13892-regulator.c linux-3.0.7/drivers/regulator/mc13892-regulator.c
33147--- linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
33148+++ linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
33149@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33150 }
33151 mc13xxx_unlock(mc13892);
33152
33153- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33154+ pax_open_kernel();
33155+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33156 = mc13892_vcam_set_mode;
33157- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33158+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33159 = mc13892_vcam_get_mode;
33160+ pax_close_kernel();
33161 for (i = 0; i < pdata->num_regulators; i++) {
33162 init_data = &pdata->regulators[i];
33163 priv->regulators[i] = regulator_register(
33164diff -urNp linux-3.0.7/drivers/rtc/rtc-dev.c linux-3.0.7/drivers/rtc/rtc-dev.c
33165--- linux-3.0.7/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
33166+++ linux-3.0.7/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
33167@@ -14,6 +14,7 @@
33168 #include <linux/module.h>
33169 #include <linux/rtc.h>
33170 #include <linux/sched.h>
33171+#include <linux/grsecurity.h>
33172 #include "rtc-core.h"
33173
33174 static dev_t rtc_devt;
33175@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33176 if (copy_from_user(&tm, uarg, sizeof(tm)))
33177 return -EFAULT;
33178
33179+ gr_log_timechange();
33180+
33181 return rtc_set_time(rtc, &tm);
33182
33183 case RTC_PIE_ON:
33184diff -urNp linux-3.0.7/drivers/scsi/BusLogic.c linux-3.0.7/drivers/scsi/BusLogic.c
33185--- linux-3.0.7/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
33186+++ linux-3.0.7/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
33187@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33188 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33189 *PrototypeHostAdapter)
33190 {
33191+ pax_track_stack();
33192+
33193 /*
33194 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33195 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33196diff -urNp linux-3.0.7/drivers/scsi/aacraid/aacraid.h linux-3.0.7/drivers/scsi/aacraid/aacraid.h
33197--- linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
33198+++ linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
33199@@ -492,7 +492,7 @@ struct adapter_ops
33200 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33201 /* Administrative operations */
33202 int (*adapter_comm)(struct aac_dev * dev, int comm);
33203-};
33204+} __no_const;
33205
33206 /*
33207 * Define which interrupt handler needs to be installed
33208diff -urNp linux-3.0.7/drivers/scsi/aacraid/commctrl.c linux-3.0.7/drivers/scsi/aacraid/commctrl.c
33209--- linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
33210+++ linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
33211@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33212 u32 actual_fibsize64, actual_fibsize = 0;
33213 int i;
33214
33215+ pax_track_stack();
33216
33217 if (dev->in_reset) {
33218 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33219diff -urNp linux-3.0.7/drivers/scsi/aacraid/linit.c linux-3.0.7/drivers/scsi/aacraid/linit.c
33220--- linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-07-21 22:17:23.000000000 -0400
33221+++ linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-10-11 10:44:33.000000000 -0400
33222@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33223 #elif defined(__devinitconst)
33224 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33225 #else
33226-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33227+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33228 #endif
33229 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33230 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33231diff -urNp linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c
33232--- linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-07-21 22:17:23.000000000 -0400
33233+++ linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-10-11 10:44:33.000000000 -0400
33234@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33235 .lldd_control_phy = asd_control_phy,
33236 };
33237
33238-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33239+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33240 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33241 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33242 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33243diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa.h linux-3.0.7/drivers/scsi/bfa/bfa.h
33244--- linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
33245+++ linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
33246@@ -238,7 +238,7 @@ struct bfa_hwif_s {
33247 u32 *nvecs, u32 *maxvec);
33248 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
33249 u32 *end);
33250-};
33251+} __no_const;
33252 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33253
33254 struct bfa_iocfc_s {
33255diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c
33256--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
33257+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
33258@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33259 u16 len, count;
33260 u16 templen;
33261
33262+ pax_track_stack();
33263+
33264 /*
33265 * get hba attributes
33266 */
33267@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33268 u8 count = 0;
33269 u16 templen;
33270
33271+ pax_track_stack();
33272+
33273 /*
33274 * get port attributes
33275 */
33276diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c
33277--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
33278+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
33279@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33280 struct fc_rpsc_speed_info_s speeds;
33281 struct bfa_port_attr_s pport_attr;
33282
33283+ pax_track_stack();
33284+
33285 bfa_trc(port->fcs, rx_fchs->s_id);
33286 bfa_trc(port->fcs, rx_fchs->d_id);
33287
33288diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h
33289--- linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
33290+++ linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
33291@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
33292 bfa_ioc_disable_cbfn_t disable_cbfn;
33293 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33294 bfa_ioc_reset_cbfn_t reset_cbfn;
33295-};
33296+} __no_const;
33297
33298 /*
33299 * Heartbeat failure notification queue element.
33300@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
33301 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
33302 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33303 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33304-};
33305+} __no_const;
33306
33307 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
33308 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
33309diff -urNp linux-3.0.7/drivers/scsi/bfa/bfad.c linux-3.0.7/drivers/scsi/bfa/bfad.c
33310--- linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
33311+++ linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
33312@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33313 struct bfad_vport_s *vport, *vport_new;
33314 struct bfa_fcs_driver_info_s driver_info;
33315
33316+ pax_track_stack();
33317+
33318 /* Fill the driver_info info to fcs*/
33319 memset(&driver_info, 0, sizeof(driver_info));
33320 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
33321diff -urNp linux-3.0.7/drivers/scsi/dpt_i2o.c linux-3.0.7/drivers/scsi/dpt_i2o.c
33322--- linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
33323+++ linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
33324@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33325 dma_addr_t addr;
33326 ulong flags = 0;
33327
33328+ pax_track_stack();
33329+
33330 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33331 // get user msg size in u32s
33332 if(get_user(size, &user_msg[0])){
33333@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33334 s32 rcode;
33335 dma_addr_t addr;
33336
33337+ pax_track_stack();
33338+
33339 memset(msg, 0 , sizeof(msg));
33340 len = scsi_bufflen(cmd);
33341 direction = 0x00000000;
33342diff -urNp linux-3.0.7/drivers/scsi/eata.c linux-3.0.7/drivers/scsi/eata.c
33343--- linux-3.0.7/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
33344+++ linux-3.0.7/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
33345@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33346 struct hostdata *ha;
33347 char name[16];
33348
33349+ pax_track_stack();
33350+
33351 sprintf(name, "%s%d", driver_name, j);
33352
33353 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33354diff -urNp linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c
33355--- linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
33356+++ linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
33357@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33358 } buf;
33359 int rc;
33360
33361+ pax_track_stack();
33362+
33363 fiph = (struct fip_header *)skb->data;
33364 sub = fiph->fip_subcode;
33365
33366diff -urNp linux-3.0.7/drivers/scsi/gdth.c linux-3.0.7/drivers/scsi/gdth.c
33367--- linux-3.0.7/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
33368+++ linux-3.0.7/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
33369@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33370 unsigned long flags;
33371 gdth_ha_str *ha;
33372
33373+ pax_track_stack();
33374+
33375 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33376 return -EFAULT;
33377 ha = gdth_find_ha(ldrv.ionode);
33378@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33379 gdth_ha_str *ha;
33380 int rval;
33381
33382+ pax_track_stack();
33383+
33384 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33385 res.number >= MAX_HDRIVES)
33386 return -EFAULT;
33387@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33388 gdth_ha_str *ha;
33389 int rval;
33390
33391+ pax_track_stack();
33392+
33393 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33394 return -EFAULT;
33395 ha = gdth_find_ha(gen.ionode);
33396@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33397 int i;
33398 gdth_cmd_str gdtcmd;
33399 char cmnd[MAX_COMMAND_SIZE];
33400+
33401+ pax_track_stack();
33402+
33403 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33404
33405 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33406diff -urNp linux-3.0.7/drivers/scsi/gdth_proc.c linux-3.0.7/drivers/scsi/gdth_proc.c
33407--- linux-3.0.7/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
33408+++ linux-3.0.7/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
33409@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33410 u64 paddr;
33411
33412 char cmnd[MAX_COMMAND_SIZE];
33413+
33414+ pax_track_stack();
33415+
33416 memset(cmnd, 0xff, 12);
33417 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33418
33419@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33420 gdth_hget_str *phg;
33421 char cmnd[MAX_COMMAND_SIZE];
33422
33423+ pax_track_stack();
33424+
33425 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33426 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33427 if (!gdtcmd || !estr)
33428diff -urNp linux-3.0.7/drivers/scsi/hosts.c linux-3.0.7/drivers/scsi/hosts.c
33429--- linux-3.0.7/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
33430+++ linux-3.0.7/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
33431@@ -42,7 +42,7 @@
33432 #include "scsi_logging.h"
33433
33434
33435-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33436+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33437
33438
33439 static void scsi_host_cls_release(struct device *dev)
33440@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33441 * subtract one because we increment first then return, but we need to
33442 * know what the next host number was before increment
33443 */
33444- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33445+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33446 shost->dma_channel = 0xff;
33447
33448 /* These three are default values which can be overridden */
33449diff -urNp linux-3.0.7/drivers/scsi/hpsa.c linux-3.0.7/drivers/scsi/hpsa.c
33450--- linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:54:54.000000000 -0400
33451+++ linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:55:27.000000000 -0400
33452@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33453 u32 a;
33454
33455 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33456- return h->access.command_completed(h);
33457+ return h->access->command_completed(h);
33458
33459 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33460 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33461@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33462 while (!list_empty(&h->reqQ)) {
33463 c = list_entry(h->reqQ.next, struct CommandList, list);
33464 /* can't do anything if fifo is full */
33465- if ((h->access.fifo_full(h))) {
33466+ if ((h->access->fifo_full(h))) {
33467 dev_warn(&h->pdev->dev, "fifo full\n");
33468 break;
33469 }
33470@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33471 h->Qdepth--;
33472
33473 /* Tell the controller execute command */
33474- h->access.submit_command(h, c);
33475+ h->access->submit_command(h, c);
33476
33477 /* Put job onto the completed Q */
33478 addQ(&h->cmpQ, c);
33479@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33480
33481 static inline unsigned long get_next_completion(struct ctlr_info *h)
33482 {
33483- return h->access.command_completed(h);
33484+ return h->access->command_completed(h);
33485 }
33486
33487 static inline bool interrupt_pending(struct ctlr_info *h)
33488 {
33489- return h->access.intr_pending(h);
33490+ return h->access->intr_pending(h);
33491 }
33492
33493 static inline long interrupt_not_for_us(struct ctlr_info *h)
33494 {
33495- return (h->access.intr_pending(h) == 0) ||
33496+ return (h->access->intr_pending(h) == 0) ||
33497 (h->interrupts_enabled == 0);
33498 }
33499
33500@@ -3874,7 +3874,7 @@ static int __devinit hpsa_pci_init(struc
33501 if (prod_index < 0)
33502 return -ENODEV;
33503 h->product_name = products[prod_index].product_name;
33504- h->access = *(products[prod_index].access);
33505+ h->access = products[prod_index].access;
33506
33507 if (hpsa_board_disabled(h->pdev)) {
33508 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33509@@ -4151,7 +4151,7 @@ reinit_after_soft_reset:
33510 }
33511
33512 /* make sure the board interrupts are off */
33513- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33514+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33515
33516 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33517 goto clean2;
33518@@ -4185,7 +4185,7 @@ reinit_after_soft_reset:
33519 * fake ones to scoop up any residual completions.
33520 */
33521 spin_lock_irqsave(&h->lock, flags);
33522- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33523+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33524 spin_unlock_irqrestore(&h->lock, flags);
33525 free_irq(h->intr[h->intr_mode], h);
33526 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33527@@ -4204,9 +4204,9 @@ reinit_after_soft_reset:
33528 dev_info(&h->pdev->dev, "Board READY.\n");
33529 dev_info(&h->pdev->dev,
33530 "Waiting for stale completions to drain.\n");
33531- h->access.set_intr_mask(h, HPSA_INTR_ON);
33532+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33533 msleep(10000);
33534- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33535+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33536
33537 rc = controller_reset_failed(h->cfgtable);
33538 if (rc)
33539@@ -4227,7 +4227,7 @@ reinit_after_soft_reset:
33540 }
33541
33542 /* Turn the interrupts on so we can service requests */
33543- h->access.set_intr_mask(h, HPSA_INTR_ON);
33544+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33545
33546 hpsa_hba_inquiry(h);
33547 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33548@@ -4280,7 +4280,7 @@ static void hpsa_shutdown(struct pci_dev
33549 * To write all data in the battery backed cache to disks
33550 */
33551 hpsa_flush_cache(h);
33552- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33553+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33554 free_irq(h->intr[h->intr_mode], h);
33555 #ifdef CONFIG_PCI_MSI
33556 if (h->msix_vector)
33557@@ -4443,7 +4443,7 @@ static __devinit void hpsa_enter_perform
33558 return;
33559 }
33560 /* Change the access methods to the performant access methods */
33561- h->access = SA5_performant_access;
33562+ h->access = &SA5_performant_access;
33563 h->transMethod = CFGTBL_Trans_Performant;
33564 }
33565
33566diff -urNp linux-3.0.7/drivers/scsi/hpsa.h linux-3.0.7/drivers/scsi/hpsa.h
33567--- linux-3.0.7/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
33568+++ linux-3.0.7/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
33569@@ -73,7 +73,7 @@ struct ctlr_info {
33570 unsigned int msix_vector;
33571 unsigned int msi_vector;
33572 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33573- struct access_method access;
33574+ struct access_method *access;
33575
33576 /* queue and queue Info */
33577 struct list_head reqQ;
33578diff -urNp linux-3.0.7/drivers/scsi/ips.h linux-3.0.7/drivers/scsi/ips.h
33579--- linux-3.0.7/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
33580+++ linux-3.0.7/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
33581@@ -1027,7 +1027,7 @@ typedef struct {
33582 int (*intr)(struct ips_ha *);
33583 void (*enableint)(struct ips_ha *);
33584 uint32_t (*statupd)(struct ips_ha *);
33585-} ips_hw_func_t;
33586+} __no_const ips_hw_func_t;
33587
33588 typedef struct ips_ha {
33589 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33590diff -urNp linux-3.0.7/drivers/scsi/libfc/fc_exch.c linux-3.0.7/drivers/scsi/libfc/fc_exch.c
33591--- linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
33592+++ linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
33593@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33594 * all together if not used XXX
33595 */
33596 struct {
33597- atomic_t no_free_exch;
33598- atomic_t no_free_exch_xid;
33599- atomic_t xid_not_found;
33600- atomic_t xid_busy;
33601- atomic_t seq_not_found;
33602- atomic_t non_bls_resp;
33603+ atomic_unchecked_t no_free_exch;
33604+ atomic_unchecked_t no_free_exch_xid;
33605+ atomic_unchecked_t xid_not_found;
33606+ atomic_unchecked_t xid_busy;
33607+ atomic_unchecked_t seq_not_found;
33608+ atomic_unchecked_t non_bls_resp;
33609 } stats;
33610 };
33611
33612@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
33613 /* allocate memory for exchange */
33614 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33615 if (!ep) {
33616- atomic_inc(&mp->stats.no_free_exch);
33617+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33618 goto out;
33619 }
33620 memset(ep, 0, sizeof(*ep));
33621@@ -761,7 +761,7 @@ out:
33622 return ep;
33623 err:
33624 spin_unlock_bh(&pool->lock);
33625- atomic_inc(&mp->stats.no_free_exch_xid);
33626+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33627 mempool_free(ep, mp->ep_pool);
33628 return NULL;
33629 }
33630@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33631 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33632 ep = fc_exch_find(mp, xid);
33633 if (!ep) {
33634- atomic_inc(&mp->stats.xid_not_found);
33635+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33636 reject = FC_RJT_OX_ID;
33637 goto out;
33638 }
33639@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33640 ep = fc_exch_find(mp, xid);
33641 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33642 if (ep) {
33643- atomic_inc(&mp->stats.xid_busy);
33644+ atomic_inc_unchecked(&mp->stats.xid_busy);
33645 reject = FC_RJT_RX_ID;
33646 goto rel;
33647 }
33648@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33649 }
33650 xid = ep->xid; /* get our XID */
33651 } else if (!ep) {
33652- atomic_inc(&mp->stats.xid_not_found);
33653+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33654 reject = FC_RJT_RX_ID; /* XID not found */
33655 goto out;
33656 }
33657@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33658 } else {
33659 sp = &ep->seq;
33660 if (sp->id != fh->fh_seq_id) {
33661- atomic_inc(&mp->stats.seq_not_found);
33662+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33663 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33664 goto rel;
33665 }
33666@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
33667
33668 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33669 if (!ep) {
33670- atomic_inc(&mp->stats.xid_not_found);
33671+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33672 goto out;
33673 }
33674 if (ep->esb_stat & ESB_ST_COMPLETE) {
33675- atomic_inc(&mp->stats.xid_not_found);
33676+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33677 goto rel;
33678 }
33679 if (ep->rxid == FC_XID_UNKNOWN)
33680 ep->rxid = ntohs(fh->fh_rx_id);
33681 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33682- atomic_inc(&mp->stats.xid_not_found);
33683+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33684 goto rel;
33685 }
33686 if (ep->did != ntoh24(fh->fh_s_id) &&
33687 ep->did != FC_FID_FLOGI) {
33688- atomic_inc(&mp->stats.xid_not_found);
33689+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33690 goto rel;
33691 }
33692 sof = fr_sof(fp);
33693@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
33694 sp->ssb_stat |= SSB_ST_RESP;
33695 sp->id = fh->fh_seq_id;
33696 } else if (sp->id != fh->fh_seq_id) {
33697- atomic_inc(&mp->stats.seq_not_found);
33698+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33699 goto rel;
33700 }
33701
33702@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
33703 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33704
33705 if (!sp)
33706- atomic_inc(&mp->stats.xid_not_found);
33707+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33708 else
33709- atomic_inc(&mp->stats.non_bls_resp);
33710+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
33711
33712 fc_frame_free(fp);
33713 }
33714diff -urNp linux-3.0.7/drivers/scsi/libsas/sas_ata.c linux-3.0.7/drivers/scsi/libsas/sas_ata.c
33715--- linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
33716+++ linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
33717@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
33718 .postreset = ata_std_postreset,
33719 .error_handler = ata_std_error_handler,
33720 .post_internal_cmd = sas_ata_post_internal,
33721- .qc_defer = ata_std_qc_defer,
33722+ .qc_defer = ata_std_qc_defer,
33723 .qc_prep = ata_noop_qc_prep,
33724 .qc_issue = sas_ata_qc_issue,
33725 .qc_fill_rtf = sas_ata_qc_fill_rtf,
33726diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc.h linux-3.0.7/drivers/scsi/lpfc/lpfc.h
33727--- linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:54:54.000000000 -0400
33728+++ linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:55:27.000000000 -0400
33729@@ -425,7 +425,7 @@ struct lpfc_vport {
33730 struct dentry *debug_nodelist;
33731 struct dentry *vport_debugfs_root;
33732 struct lpfc_debugfs_trc *disc_trc;
33733- atomic_t disc_trc_cnt;
33734+ atomic_unchecked_t disc_trc_cnt;
33735 #endif
33736 uint8_t stat_data_enabled;
33737 uint8_t stat_data_blocked;
33738@@ -832,8 +832,8 @@ struct lpfc_hba {
33739 struct timer_list fabric_block_timer;
33740 unsigned long bit_flags;
33741 #define FABRIC_COMANDS_BLOCKED 0
33742- atomic_t num_rsrc_err;
33743- atomic_t num_cmd_success;
33744+ atomic_unchecked_t num_rsrc_err;
33745+ atomic_unchecked_t num_cmd_success;
33746 unsigned long last_rsrc_error_time;
33747 unsigned long last_ramp_down_time;
33748 unsigned long last_ramp_up_time;
33749@@ -847,7 +847,7 @@ struct lpfc_hba {
33750 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33751 struct dentry *debug_slow_ring_trc;
33752 struct lpfc_debugfs_trc *slow_ring_trc;
33753- atomic_t slow_ring_trc_cnt;
33754+ atomic_unchecked_t slow_ring_trc_cnt;
33755 /* iDiag debugfs sub-directory */
33756 struct dentry *idiag_root;
33757 struct dentry *idiag_pci_cfg;
33758diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c
33759--- linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
33760+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
33761@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
33762
33763 #include <linux/debugfs.h>
33764
33765-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33766+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33767 static unsigned long lpfc_debugfs_start_time = 0L;
33768
33769 /* iDiag */
33770@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33771 lpfc_debugfs_enable = 0;
33772
33773 len = 0;
33774- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33775+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33776 (lpfc_debugfs_max_disc_trc - 1);
33777 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33778 dtp = vport->disc_trc + i;
33779@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33780 lpfc_debugfs_enable = 0;
33781
33782 len = 0;
33783- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33784+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33785 (lpfc_debugfs_max_slow_ring_trc - 1);
33786 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33787 dtp = phba->slow_ring_trc + i;
33788@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33789 uint32_t *ptr;
33790 char buffer[1024];
33791
33792+ pax_track_stack();
33793+
33794 off = 0;
33795 spin_lock_irq(&phba->hbalock);
33796
33797@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33798 !vport || !vport->disc_trc)
33799 return;
33800
33801- index = atomic_inc_return(&vport->disc_trc_cnt) &
33802+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33803 (lpfc_debugfs_max_disc_trc - 1);
33804 dtp = vport->disc_trc + index;
33805 dtp->fmt = fmt;
33806 dtp->data1 = data1;
33807 dtp->data2 = data2;
33808 dtp->data3 = data3;
33809- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33810+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33811 dtp->jif = jiffies;
33812 #endif
33813 return;
33814@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33815 !phba || !phba->slow_ring_trc)
33816 return;
33817
33818- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33819+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33820 (lpfc_debugfs_max_slow_ring_trc - 1);
33821 dtp = phba->slow_ring_trc + index;
33822 dtp->fmt = fmt;
33823 dtp->data1 = data1;
33824 dtp->data2 = data2;
33825 dtp->data3 = data3;
33826- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33827+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33828 dtp->jif = jiffies;
33829 #endif
33830 return;
33831@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33832 "slow_ring buffer\n");
33833 goto debug_failed;
33834 }
33835- atomic_set(&phba->slow_ring_trc_cnt, 0);
33836+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33837 memset(phba->slow_ring_trc, 0,
33838 (sizeof(struct lpfc_debugfs_trc) *
33839 lpfc_debugfs_max_slow_ring_trc));
33840@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33841 "buffer\n");
33842 goto debug_failed;
33843 }
33844- atomic_set(&vport->disc_trc_cnt, 0);
33845+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33846
33847 snprintf(name, sizeof(name), "discovery_trace");
33848 vport->debug_disc_trc =
33849diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c
33850--- linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:54:54.000000000 -0400
33851+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:55:27.000000000 -0400
33852@@ -9971,8 +9971,10 @@ lpfc_init(void)
33853 printk(LPFC_COPYRIGHT "\n");
33854
33855 if (lpfc_enable_npiv) {
33856- lpfc_transport_functions.vport_create = lpfc_vport_create;
33857- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33858+ pax_open_kernel();
33859+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
33860+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33861+ pax_close_kernel();
33862 }
33863 lpfc_transport_template =
33864 fc_attach_transport(&lpfc_transport_functions);
33865diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c
33866--- linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:54:54.000000000 -0400
33867+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:55:27.000000000 -0400
33868@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33869 uint32_t evt_posted;
33870
33871 spin_lock_irqsave(&phba->hbalock, flags);
33872- atomic_inc(&phba->num_rsrc_err);
33873+ atomic_inc_unchecked(&phba->num_rsrc_err);
33874 phba->last_rsrc_error_time = jiffies;
33875
33876 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33877@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33878 unsigned long flags;
33879 struct lpfc_hba *phba = vport->phba;
33880 uint32_t evt_posted;
33881- atomic_inc(&phba->num_cmd_success);
33882+ atomic_inc_unchecked(&phba->num_cmd_success);
33883
33884 if (vport->cfg_lun_queue_depth <= queue_depth)
33885 return;
33886@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33887 unsigned long num_rsrc_err, num_cmd_success;
33888 int i;
33889
33890- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33891- num_cmd_success = atomic_read(&phba->num_cmd_success);
33892+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33893+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33894
33895 vports = lpfc_create_vport_work_array(phba);
33896 if (vports != NULL)
33897@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33898 }
33899 }
33900 lpfc_destroy_vport_work_array(phba, vports);
33901- atomic_set(&phba->num_rsrc_err, 0);
33902- atomic_set(&phba->num_cmd_success, 0);
33903+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33904+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33905 }
33906
33907 /**
33908@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33909 }
33910 }
33911 lpfc_destroy_vport_work_array(phba, vports);
33912- atomic_set(&phba->num_rsrc_err, 0);
33913- atomic_set(&phba->num_cmd_success, 0);
33914+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33915+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33916 }
33917
33918 /**
33919diff -urNp linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c
33920--- linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
33921+++ linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
33922@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33923 int rval;
33924 int i;
33925
33926+ pax_track_stack();
33927+
33928 // Allocate memory for the base list of scb for management module.
33929 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33930
33931diff -urNp linux-3.0.7/drivers/scsi/osd/osd_initiator.c linux-3.0.7/drivers/scsi/osd/osd_initiator.c
33932--- linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
33933+++ linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
33934@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
33935 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33936 int ret;
33937
33938+ pax_track_stack();
33939+
33940 or = osd_start_request(od, GFP_KERNEL);
33941 if (!or)
33942 return -ENOMEM;
33943diff -urNp linux-3.0.7/drivers/scsi/pmcraid.c linux-3.0.7/drivers/scsi/pmcraid.c
33944--- linux-3.0.7/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
33945+++ linux-3.0.7/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
33946@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
33947 res->scsi_dev = scsi_dev;
33948 scsi_dev->hostdata = res;
33949 res->change_detected = 0;
33950- atomic_set(&res->read_failures, 0);
33951- atomic_set(&res->write_failures, 0);
33952+ atomic_set_unchecked(&res->read_failures, 0);
33953+ atomic_set_unchecked(&res->write_failures, 0);
33954 rc = 0;
33955 }
33956 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33957@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
33958
33959 /* If this was a SCSI read/write command keep count of errors */
33960 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33961- atomic_inc(&res->read_failures);
33962+ atomic_inc_unchecked(&res->read_failures);
33963 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33964- atomic_inc(&res->write_failures);
33965+ atomic_inc_unchecked(&res->write_failures);
33966
33967 if (!RES_IS_GSCSI(res->cfg_entry) &&
33968 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33969@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
33970 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33971 * hrrq_id assigned here in queuecommand
33972 */
33973- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33974+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33975 pinstance->num_hrrq;
33976 cmd->cmd_done = pmcraid_io_done;
33977
33978@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
33979 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33980 * hrrq_id assigned here in queuecommand
33981 */
33982- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33983+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33984 pinstance->num_hrrq;
33985
33986 if (request_size) {
33987@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
33988
33989 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33990 /* add resources only after host is added into system */
33991- if (!atomic_read(&pinstance->expose_resources))
33992+ if (!atomic_read_unchecked(&pinstance->expose_resources))
33993 return;
33994
33995 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
33996@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
33997 init_waitqueue_head(&pinstance->reset_wait_q);
33998
33999 atomic_set(&pinstance->outstanding_cmds, 0);
34000- atomic_set(&pinstance->last_message_id, 0);
34001- atomic_set(&pinstance->expose_resources, 0);
34002+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34003+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34004
34005 INIT_LIST_HEAD(&pinstance->free_res_q);
34006 INIT_LIST_HEAD(&pinstance->used_res_q);
34007@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34008 /* Schedule worker thread to handle CCN and take care of adding and
34009 * removing devices to OS
34010 */
34011- atomic_set(&pinstance->expose_resources, 1);
34012+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34013 schedule_work(&pinstance->worker_q);
34014 return rc;
34015
34016diff -urNp linux-3.0.7/drivers/scsi/pmcraid.h linux-3.0.7/drivers/scsi/pmcraid.h
34017--- linux-3.0.7/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
34018+++ linux-3.0.7/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
34019@@ -749,7 +749,7 @@ struct pmcraid_instance {
34020 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34021
34022 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34023- atomic_t last_message_id;
34024+ atomic_unchecked_t last_message_id;
34025
34026 /* configuration table */
34027 struct pmcraid_config_table *cfg_table;
34028@@ -778,7 +778,7 @@ struct pmcraid_instance {
34029 atomic_t outstanding_cmds;
34030
34031 /* should add/delete resources to mid-layer now ?*/
34032- atomic_t expose_resources;
34033+ atomic_unchecked_t expose_resources;
34034
34035
34036
34037@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34038 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34039 };
34040 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34041- atomic_t read_failures; /* count of failed READ commands */
34042- atomic_t write_failures; /* count of failed WRITE commands */
34043+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34044+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34045
34046 /* To indicate add/delete/modify during CCN */
34047 u8 change_detected;
34048diff -urNp linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h
34049--- linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
34050+++ linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
34051@@ -2244,7 +2244,7 @@ struct isp_operations {
34052 int (*get_flash_version) (struct scsi_qla_host *, void *);
34053 int (*start_scsi) (srb_t *);
34054 int (*abort_isp) (struct scsi_qla_host *);
34055-};
34056+} __no_const;
34057
34058 /* MSI-X Support *************************************************************/
34059
34060diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h
34061--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
34062+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
34063@@ -256,7 +256,7 @@ struct ddb_entry {
34064 atomic_t retry_relogin_timer; /* Min Time between relogins
34065 * (4000 only) */
34066 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34067- atomic_t relogin_retry_count; /* Num of times relogin has been
34068+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34069 * retried */
34070
34071 uint16_t port;
34072diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c
34073--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
34074+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
34075@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34076 ddb_entry->fw_ddb_index = fw_ddb_index;
34077 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34078 atomic_set(&ddb_entry->relogin_timer, 0);
34079- atomic_set(&ddb_entry->relogin_retry_count, 0);
34080+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34081 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34082 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34083 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34084@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34085 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34086 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34087 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34088- atomic_set(&ddb_entry->relogin_retry_count, 0);
34089+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34090 atomic_set(&ddb_entry->relogin_timer, 0);
34091 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34092 iscsi_unblock_session(ddb_entry->sess);
34093diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c
34094--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
34095+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
34096@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34097 ddb_entry->fw_ddb_device_state ==
34098 DDB_DS_SESSION_FAILED) {
34099 /* Reset retry relogin timer */
34100- atomic_inc(&ddb_entry->relogin_retry_count);
34101+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34102 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34103 " timed out-retrying"
34104 " relogin (%d)\n",
34105 ha->host_no,
34106 ddb_entry->fw_ddb_index,
34107- atomic_read(&ddb_entry->
34108+ atomic_read_unchecked(&ddb_entry->
34109 relogin_retry_count))
34110 );
34111 start_dpc++;
34112diff -urNp linux-3.0.7/drivers/scsi/scsi.c linux-3.0.7/drivers/scsi/scsi.c
34113--- linux-3.0.7/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
34114+++ linux-3.0.7/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
34115@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34116 unsigned long timeout;
34117 int rtn = 0;
34118
34119- atomic_inc(&cmd->device->iorequest_cnt);
34120+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34121
34122 /* check if the device is still usable */
34123 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34124diff -urNp linux-3.0.7/drivers/scsi/scsi_debug.c linux-3.0.7/drivers/scsi/scsi_debug.c
34125--- linux-3.0.7/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
34126+++ linux-3.0.7/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
34127@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34128 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34129 unsigned char *cmd = (unsigned char *)scp->cmnd;
34130
34131+ pax_track_stack();
34132+
34133 if ((errsts = check_readiness(scp, 1, devip)))
34134 return errsts;
34135 memset(arr, 0, sizeof(arr));
34136@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34137 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34138 unsigned char *cmd = (unsigned char *)scp->cmnd;
34139
34140+ pax_track_stack();
34141+
34142 if ((errsts = check_readiness(scp, 1, devip)))
34143 return errsts;
34144 memset(arr, 0, sizeof(arr));
34145diff -urNp linux-3.0.7/drivers/scsi/scsi_lib.c linux-3.0.7/drivers/scsi/scsi_lib.c
34146--- linux-3.0.7/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
34147+++ linux-3.0.7/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
34148@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
34149 shost = sdev->host;
34150 scsi_init_cmd_errh(cmd);
34151 cmd->result = DID_NO_CONNECT << 16;
34152- atomic_inc(&cmd->device->iorequest_cnt);
34153+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34154
34155 /*
34156 * SCSI request completion path will do scsi_device_unbusy(),
34157@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
34158
34159 INIT_LIST_HEAD(&cmd->eh_entry);
34160
34161- atomic_inc(&cmd->device->iodone_cnt);
34162+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34163 if (cmd->result)
34164- atomic_inc(&cmd->device->ioerr_cnt);
34165+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34166
34167 disposition = scsi_decide_disposition(cmd);
34168 if (disposition != SUCCESS &&
34169diff -urNp linux-3.0.7/drivers/scsi/scsi_sysfs.c linux-3.0.7/drivers/scsi/scsi_sysfs.c
34170--- linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
34171+++ linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
34172@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34173 char *buf) \
34174 { \
34175 struct scsi_device *sdev = to_scsi_device(dev); \
34176- unsigned long long count = atomic_read(&sdev->field); \
34177+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34178 return snprintf(buf, 20, "0x%llx\n", count); \
34179 } \
34180 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34181diff -urNp linux-3.0.7/drivers/scsi/scsi_tgt_lib.c linux-3.0.7/drivers/scsi/scsi_tgt_lib.c
34182--- linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
34183+++ linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
34184@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34185 int err;
34186
34187 dprintk("%lx %u\n", uaddr, len);
34188- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34189+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34190 if (err) {
34191 /*
34192 * TODO: need to fixup sg_tablesize, max_segment_size,
34193diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_fc.c linux-3.0.7/drivers/scsi/scsi_transport_fc.c
34194--- linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
34195+++ linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
34196@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34197 * Netlink Infrastructure
34198 */
34199
34200-static atomic_t fc_event_seq;
34201+static atomic_unchecked_t fc_event_seq;
34202
34203 /**
34204 * fc_get_event_number - Obtain the next sequential FC event number
34205@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34206 u32
34207 fc_get_event_number(void)
34208 {
34209- return atomic_add_return(1, &fc_event_seq);
34210+ return atomic_add_return_unchecked(1, &fc_event_seq);
34211 }
34212 EXPORT_SYMBOL(fc_get_event_number);
34213
34214@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34215 {
34216 int error;
34217
34218- atomic_set(&fc_event_seq, 0);
34219+ atomic_set_unchecked(&fc_event_seq, 0);
34220
34221 error = transport_class_register(&fc_host_class);
34222 if (error)
34223@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34224 char *cp;
34225
34226 *val = simple_strtoul(buf, &cp, 0);
34227- if ((*cp && (*cp != '\n')) || (*val < 0))
34228+ if (*cp && (*cp != '\n'))
34229 return -EINVAL;
34230 /*
34231 * Check for overflow; dev_loss_tmo is u32
34232diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c
34233--- linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
34234+++ linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
34235@@ -83,7 +83,7 @@ struct iscsi_internal {
34236 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34237 };
34238
34239-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34240+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34241 static struct workqueue_struct *iscsi_eh_timer_workq;
34242
34243 /*
34244@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34245 int err;
34246
34247 ihost = shost->shost_data;
34248- session->sid = atomic_add_return(1, &iscsi_session_nr);
34249+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34250
34251 if (id == ISCSI_MAX_TARGET) {
34252 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34253@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34254 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34255 ISCSI_TRANSPORT_VERSION);
34256
34257- atomic_set(&iscsi_session_nr, 0);
34258+ atomic_set_unchecked(&iscsi_session_nr, 0);
34259
34260 err = class_register(&iscsi_transport_class);
34261 if (err)
34262diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_srp.c linux-3.0.7/drivers/scsi/scsi_transport_srp.c
34263--- linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
34264+++ linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
34265@@ -33,7 +33,7 @@
34266 #include "scsi_transport_srp_internal.h"
34267
34268 struct srp_host_attrs {
34269- atomic_t next_port_id;
34270+ atomic_unchecked_t next_port_id;
34271 };
34272 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34273
34274@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34275 struct Scsi_Host *shost = dev_to_shost(dev);
34276 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34277
34278- atomic_set(&srp_host->next_port_id, 0);
34279+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34280 return 0;
34281 }
34282
34283@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34284 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34285 rport->roles = ids->roles;
34286
34287- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34288+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34289 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34290
34291 transport_setup_device(&rport->dev);
34292diff -urNp linux-3.0.7/drivers/scsi/sg.c linux-3.0.7/drivers/scsi/sg.c
34293--- linux-3.0.7/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
34294+++ linux-3.0.7/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
34295@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34296 sdp->disk->disk_name,
34297 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34298 NULL,
34299- (char *)arg);
34300+ (char __user *)arg);
34301 case BLKTRACESTART:
34302 return blk_trace_startstop(sdp->device->request_queue, 1);
34303 case BLKTRACESTOP:
34304@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34305 const struct file_operations * fops;
34306 };
34307
34308-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34309+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34310 {"allow_dio", &adio_fops},
34311 {"debug", &debug_fops},
34312 {"def_reserved_size", &dressz_fops},
34313@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34314 {
34315 int k, mask;
34316 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34317- struct sg_proc_leaf * leaf;
34318+ const struct sg_proc_leaf * leaf;
34319
34320 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34321 if (!sg_proc_sgp)
34322diff -urNp linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c
34323--- linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
34324+++ linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
34325@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34326 int do_iounmap = 0;
34327 int do_disable_device = 1;
34328
34329+ pax_track_stack();
34330+
34331 memset(&sym_dev, 0, sizeof(sym_dev));
34332 memset(&nvram, 0, sizeof(nvram));
34333 sym_dev.pdev = pdev;
34334diff -urNp linux-3.0.7/drivers/scsi/vmw_pvscsi.c linux-3.0.7/drivers/scsi/vmw_pvscsi.c
34335--- linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
34336+++ linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
34337@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34338 dma_addr_t base;
34339 unsigned i;
34340
34341+ pax_track_stack();
34342+
34343 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34344 cmd.reqRingNumPages = adapter->req_pages;
34345 cmd.cmpRingNumPages = adapter->cmp_pages;
34346diff -urNp linux-3.0.7/drivers/spi/dw_spi_pci.c linux-3.0.7/drivers/spi/dw_spi_pci.c
34347--- linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-07-21 22:17:23.000000000 -0400
34348+++ linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-10-11 10:44:33.000000000 -0400
34349@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34350 #define spi_resume NULL
34351 #endif
34352
34353-static const struct pci_device_id pci_ids[] __devinitdata = {
34354+static const struct pci_device_id pci_ids[] __devinitconst = {
34355 /* Intel MID platform SPI controller 0 */
34356 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34357 {},
34358diff -urNp linux-3.0.7/drivers/spi/spi.c linux-3.0.7/drivers/spi/spi.c
34359--- linux-3.0.7/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
34360+++ linux-3.0.7/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
34361@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34362 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34363
34364 /* portable code must never pass more than 32 bytes */
34365-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34366+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34367
34368 static u8 *buf;
34369
34370diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34371--- linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
34372+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
34373@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34374 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34375
34376
34377-static struct net_device_ops ar6000_netdev_ops = {
34378+static net_device_ops_no_const ar6000_netdev_ops = {
34379 .ndo_init = NULL,
34380 .ndo_open = ar6000_open,
34381 .ndo_stop = ar6000_close,
34382diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34383--- linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
34384+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
34385@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34386 typedef struct ar6k_pal_config_s
34387 {
34388 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34389-}ar6k_pal_config_t;
34390+} __no_const ar6k_pal_config_t;
34391
34392 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34393 #endif /* _AR6K_PAL_H_ */
34394diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34395--- linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
34396+++ linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
34397@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
34398 free_netdev(ifp->net);
34399 }
34400 /* Allocate etherdev, including space for private structure */
34401- ifp->net = alloc_etherdev(sizeof(dhd));
34402+ ifp->net = alloc_etherdev(sizeof(*dhd));
34403 if (!ifp->net) {
34404 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34405 ret = -ENOMEM;
34406 }
34407 if (ret == 0) {
34408 strcpy(ifp->net->name, ifp->name);
34409- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
34410+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
34411 err = dhd_net_attach(&dhd->pub, ifp->idx);
34412 if (err != 0) {
34413 DHD_ERROR(("%s: dhd_net_attach failed, "
34414@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34415 strcpy(nv_path, nvram_path);
34416
34417 /* Allocate etherdev, including space for private structure */
34418- net = alloc_etherdev(sizeof(dhd));
34419+ net = alloc_etherdev(sizeof(*dhd));
34420 if (!net) {
34421 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34422 goto fail;
34423@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34424 /*
34425 * Save the dhd_info into the priv
34426 */
34427- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34428+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34429
34430 /* Set network interface name if it was provided as module parameter */
34431 if (iface_name[0]) {
34432@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34433 /*
34434 * Save the dhd_info into the priv
34435 */
34436- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34437+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34438
34439 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
34440 g_bus = bus;
34441diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
34442--- linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
34443+++ linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
34444@@ -593,7 +593,7 @@ struct phy_func_ptr {
34445 initfn_t carrsuppr;
34446 rxsigpwrfn_t rxsigpwr;
34447 detachfn_t detach;
34448-};
34449+} __no_const;
34450 typedef struct phy_func_ptr phy_func_ptr_t;
34451
34452 struct phy_info {
34453diff -urNp linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h
34454--- linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
34455+++ linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
34456@@ -185,7 +185,7 @@ typedef struct {
34457 u16 func, uint bustype, void *regsva, void *param);
34458 /* detach from device */
34459 void (*detach) (void *ch);
34460-} bcmsdh_driver_t;
34461+} __no_const bcmsdh_driver_t;
34462
34463 /* platform specific/high level functions */
34464 extern int bcmsdh_register(bcmsdh_driver_t *driver);
34465diff -urNp linux-3.0.7/drivers/staging/et131x/et1310_tx.c linux-3.0.7/drivers/staging/et131x/et1310_tx.c
34466--- linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
34467+++ linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
34468@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34469 struct net_device_stats *stats = &etdev->net_stats;
34470
34471 if (tcb->flags & fMP_DEST_BROAD)
34472- atomic_inc(&etdev->Stats.brdcstxmt);
34473+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34474 else if (tcb->flags & fMP_DEST_MULTI)
34475- atomic_inc(&etdev->Stats.multixmt);
34476+ atomic_inc_unchecked(&etdev->Stats.multixmt);
34477 else
34478- atomic_inc(&etdev->Stats.unixmt);
34479+ atomic_inc_unchecked(&etdev->Stats.unixmt);
34480
34481 if (tcb->skb) {
34482 stats->tx_bytes += tcb->skb->len;
34483diff -urNp linux-3.0.7/drivers/staging/et131x/et131x_adapter.h linux-3.0.7/drivers/staging/et131x/et131x_adapter.h
34484--- linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
34485+++ linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
34486@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
34487 * operations
34488 */
34489 u32 unircv; /* # multicast packets received */
34490- atomic_t unixmt; /* # multicast packets for Tx */
34491+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34492 u32 multircv; /* # multicast packets received */
34493- atomic_t multixmt; /* # multicast packets for Tx */
34494+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34495 u32 brdcstrcv; /* # broadcast packets received */
34496- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34497+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34498 u32 norcvbuf; /* # Rx packets discarded */
34499 u32 noxmtbuf; /* # Tx packets discarded */
34500
34501diff -urNp linux-3.0.7/drivers/staging/hv/channel.c linux-3.0.7/drivers/staging/hv/channel.c
34502--- linux-3.0.7/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
34503+++ linux-3.0.7/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
34504@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34505 int ret = 0;
34506 int t;
34507
34508- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34509- atomic_inc(&vmbus_connection.next_gpadl_handle);
34510+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34511+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34512
34513 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34514 if (ret)
34515diff -urNp linux-3.0.7/drivers/staging/hv/hv.c linux-3.0.7/drivers/staging/hv/hv.c
34516--- linux-3.0.7/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
34517+++ linux-3.0.7/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
34518@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34519 u64 output_address = (output) ? virt_to_phys(output) : 0;
34520 u32 output_address_hi = output_address >> 32;
34521 u32 output_address_lo = output_address & 0xFFFFFFFF;
34522- volatile void *hypercall_page = hv_context.hypercall_page;
34523+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34524
34525 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34526 "=a"(hv_status_lo) : "d" (control_hi),
34527diff -urNp linux-3.0.7/drivers/staging/hv/hv_mouse.c linux-3.0.7/drivers/staging/hv/hv_mouse.c
34528--- linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
34529+++ linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
34530@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
34531 if (hid_dev) {
34532 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34533
34534- hid_dev->ll_driver->open = mousevsc_hid_open;
34535- hid_dev->ll_driver->close = mousevsc_hid_close;
34536+ pax_open_kernel();
34537+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34538+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34539+ pax_close_kernel();
34540
34541 hid_dev->bus = BUS_VIRTUAL;
34542 hid_dev->vendor = input_device_ctx->device_info.vendor;
34543diff -urNp linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h
34544--- linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
34545+++ linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
34546@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34547 struct vmbus_connection {
34548 enum vmbus_connect_state conn_state;
34549
34550- atomic_t next_gpadl_handle;
34551+ atomic_unchecked_t next_gpadl_handle;
34552
34553 /*
34554 * Represents channel interrupts. Each bit position represents a
34555diff -urNp linux-3.0.7/drivers/staging/hv/rndis_filter.c linux-3.0.7/drivers/staging/hv/rndis_filter.c
34556--- linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
34557+++ linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
34558@@ -43,7 +43,7 @@ struct rndis_device {
34559
34560 enum rndis_device_state state;
34561 u32 link_stat;
34562- atomic_t new_req_id;
34563+ atomic_unchecked_t new_req_id;
34564
34565 spinlock_t request_lock;
34566 struct list_head req_list;
34567@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34568 * template
34569 */
34570 set = &rndis_msg->msg.set_req;
34571- set->req_id = atomic_inc_return(&dev->new_req_id);
34572+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34573
34574 /* Add to the request list */
34575 spin_lock_irqsave(&dev->request_lock, flags);
34576@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
34577
34578 /* Setup the rndis set */
34579 halt = &request->request_msg.msg.halt_req;
34580- halt->req_id = atomic_inc_return(&dev->new_req_id);
34581+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34582
34583 /* Ignore return since this msg is optional. */
34584 rndis_filter_send_request(dev, request);
34585diff -urNp linux-3.0.7/drivers/staging/hv/vmbus_drv.c linux-3.0.7/drivers/staging/hv/vmbus_drv.c
34586--- linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
34587+++ linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
34588@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
34589 {
34590 int ret = 0;
34591
34592- static atomic_t device_num = ATOMIC_INIT(0);
34593+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34594
34595 /* Set the device name. Otherwise, device_register() will fail. */
34596 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34597- atomic_inc_return(&device_num));
34598+ atomic_inc_return_unchecked(&device_num));
34599
34600 /* The new device belongs to this bus */
34601 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34602diff -urNp linux-3.0.7/drivers/staging/iio/ring_generic.h linux-3.0.7/drivers/staging/iio/ring_generic.h
34603--- linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
34604+++ linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
34605@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34606
34607 int (*is_enabled)(struct iio_ring_buffer *ring);
34608 int (*enable)(struct iio_ring_buffer *ring);
34609-};
34610+} __no_const;
34611
34612 struct iio_ring_setup_ops {
34613 int (*preenable)(struct iio_dev *);
34614diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet-rx.c linux-3.0.7/drivers/staging/octeon/ethernet-rx.c
34615--- linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
34616+++ linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
34617@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
34618 /* Increment RX stats for virtual ports */
34619 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34620 #ifdef CONFIG_64BIT
34621- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34622- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34623+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34624+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34625 #else
34626- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34627- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34628+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34629+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34630 #endif
34631 }
34632 netif_receive_skb(skb);
34633@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
34634 dev->name);
34635 */
34636 #ifdef CONFIG_64BIT
34637- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34638+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34639 #else
34640- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34641+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34642 #endif
34643 dev_kfree_skb_irq(skb);
34644 }
34645diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet.c linux-3.0.7/drivers/staging/octeon/ethernet.c
34646--- linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
34647+++ linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
34648@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34649 * since the RX tasklet also increments it.
34650 */
34651 #ifdef CONFIG_64BIT
34652- atomic64_add(rx_status.dropped_packets,
34653- (atomic64_t *)&priv->stats.rx_dropped);
34654+ atomic64_add_unchecked(rx_status.dropped_packets,
34655+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34656 #else
34657- atomic_add(rx_status.dropped_packets,
34658- (atomic_t *)&priv->stats.rx_dropped);
34659+ atomic_add_unchecked(rx_status.dropped_packets,
34660+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34661 #endif
34662 }
34663
34664diff -urNp linux-3.0.7/drivers/staging/pohmelfs/inode.c linux-3.0.7/drivers/staging/pohmelfs/inode.c
34665--- linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34666+++ linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
34667@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
34668 mutex_init(&psb->mcache_lock);
34669 psb->mcache_root = RB_ROOT;
34670 psb->mcache_timeout = msecs_to_jiffies(5000);
34671- atomic_long_set(&psb->mcache_gen, 0);
34672+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
34673
34674 psb->trans_max_pages = 100;
34675
34676@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
34677 INIT_LIST_HEAD(&psb->crypto_ready_list);
34678 INIT_LIST_HEAD(&psb->crypto_active_list);
34679
34680- atomic_set(&psb->trans_gen, 1);
34681+ atomic_set_unchecked(&psb->trans_gen, 1);
34682 atomic_long_set(&psb->total_inodes, 0);
34683
34684 mutex_init(&psb->state_lock);
34685diff -urNp linux-3.0.7/drivers/staging/pohmelfs/mcache.c linux-3.0.7/drivers/staging/pohmelfs/mcache.c
34686--- linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
34687+++ linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
34688@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34689 m->data = data;
34690 m->start = start;
34691 m->size = size;
34692- m->gen = atomic_long_inc_return(&psb->mcache_gen);
34693+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34694
34695 mutex_lock(&psb->mcache_lock);
34696 err = pohmelfs_mcache_insert(psb, m);
34697diff -urNp linux-3.0.7/drivers/staging/pohmelfs/netfs.h linux-3.0.7/drivers/staging/pohmelfs/netfs.h
34698--- linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
34699+++ linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
34700@@ -571,14 +571,14 @@ struct pohmelfs_config;
34701 struct pohmelfs_sb {
34702 struct rb_root mcache_root;
34703 struct mutex mcache_lock;
34704- atomic_long_t mcache_gen;
34705+ atomic_long_unchecked_t mcache_gen;
34706 unsigned long mcache_timeout;
34707
34708 unsigned int idx;
34709
34710 unsigned int trans_retries;
34711
34712- atomic_t trans_gen;
34713+ atomic_unchecked_t trans_gen;
34714
34715 unsigned int crypto_attached_size;
34716 unsigned int crypto_align_size;
34717diff -urNp linux-3.0.7/drivers/staging/pohmelfs/trans.c linux-3.0.7/drivers/staging/pohmelfs/trans.c
34718--- linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
34719+++ linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
34720@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34721 int err;
34722 struct netfs_cmd *cmd = t->iovec.iov_base;
34723
34724- t->gen = atomic_inc_return(&psb->trans_gen);
34725+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34726
34727 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34728 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34729diff -urNp linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h
34730--- linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
34731+++ linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
34732@@ -83,7 +83,7 @@ struct _io_ops {
34733 u8 *pmem);
34734 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
34735 u8 *pmem);
34736-};
34737+} __no_const;
34738
34739 struct io_req {
34740 struct list_head list;
34741diff -urNp linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c
34742--- linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
34743+++ linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
34744@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
34745 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
34746
34747 if (rlen)
34748- if (copy_to_user(data, &resp, rlen))
34749+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
34750 return -EFAULT;
34751
34752 return 0;
34753diff -urNp linux-3.0.7/drivers/staging/tty/stallion.c linux-3.0.7/drivers/staging/tty/stallion.c
34754--- linux-3.0.7/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
34755+++ linux-3.0.7/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
34756@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
34757 struct stlport stl_dummyport;
34758 struct stlport *portp;
34759
34760+ pax_track_stack();
34761+
34762 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
34763 return -EFAULT;
34764 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
34765diff -urNp linux-3.0.7/drivers/staging/usbip/usbip_common.h linux-3.0.7/drivers/staging/usbip/usbip_common.h
34766--- linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
34767+++ linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
34768@@ -315,7 +315,7 @@ struct usbip_device {
34769 void (*shutdown)(struct usbip_device *);
34770 void (*reset)(struct usbip_device *);
34771 void (*unusable)(struct usbip_device *);
34772- } eh_ops;
34773+ } __no_const eh_ops;
34774 };
34775
34776 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
34777diff -urNp linux-3.0.7/drivers/staging/usbip/vhci.h linux-3.0.7/drivers/staging/usbip/vhci.h
34778--- linux-3.0.7/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
34779+++ linux-3.0.7/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
34780@@ -94,7 +94,7 @@ struct vhci_hcd {
34781 unsigned resuming:1;
34782 unsigned long re_timeout;
34783
34784- atomic_t seqnum;
34785+ atomic_unchecked_t seqnum;
34786
34787 /*
34788 * NOTE:
34789diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_hcd.c linux-3.0.7/drivers/staging/usbip/vhci_hcd.c
34790--- linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
34791+++ linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
34792@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
34793 return;
34794 }
34795
34796- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34797+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34798 if (priv->seqnum == 0xffff)
34799 dev_info(&urb->dev->dev, "seqnum max\n");
34800
34801@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
34802 return -ENOMEM;
34803 }
34804
34805- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34806+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34807 if (unlink->seqnum == 0xffff)
34808 pr_info("seqnum max\n");
34809
34810@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
34811 vdev->rhport = rhport;
34812 }
34813
34814- atomic_set(&vhci->seqnum, 0);
34815+ atomic_set_unchecked(&vhci->seqnum, 0);
34816 spin_lock_init(&vhci->lock);
34817
34818 hcd->power_budget = 0; /* no limit */
34819diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_rx.c linux-3.0.7/drivers/staging/usbip/vhci_rx.c
34820--- linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
34821+++ linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
34822@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
34823 if (!urb) {
34824 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
34825 pr_info("max seqnum %d\n",
34826- atomic_read(&the_controller->seqnum));
34827+ atomic_read_unchecked(&the_controller->seqnum));
34828 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34829 return;
34830 }
34831diff -urNp linux-3.0.7/drivers/staging/vt6655/hostap.c linux-3.0.7/drivers/staging/vt6655/hostap.c
34832--- linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
34833+++ linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
34834@@ -79,14 +79,13 @@ static int msglevel
34835 *
34836 */
34837
34838+static net_device_ops_no_const apdev_netdev_ops;
34839+
34840 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34841 {
34842 PSDevice apdev_priv;
34843 struct net_device *dev = pDevice->dev;
34844 int ret;
34845- const struct net_device_ops apdev_netdev_ops = {
34846- .ndo_start_xmit = pDevice->tx_80211,
34847- };
34848
34849 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34850
34851@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
34852 *apdev_priv = *pDevice;
34853 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34854
34855+ /* only half broken now */
34856+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34857 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34858
34859 pDevice->apdev->type = ARPHRD_IEEE80211;
34860diff -urNp linux-3.0.7/drivers/staging/vt6656/hostap.c linux-3.0.7/drivers/staging/vt6656/hostap.c
34861--- linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
34862+++ linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
34863@@ -80,14 +80,13 @@ static int msglevel
34864 *
34865 */
34866
34867+static net_device_ops_no_const apdev_netdev_ops;
34868+
34869 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34870 {
34871 PSDevice apdev_priv;
34872 struct net_device *dev = pDevice->dev;
34873 int ret;
34874- const struct net_device_ops apdev_netdev_ops = {
34875- .ndo_start_xmit = pDevice->tx_80211,
34876- };
34877
34878 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34879
34880@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
34881 *apdev_priv = *pDevice;
34882 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34883
34884+ /* only half broken now */
34885+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34886 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34887
34888 pDevice->apdev->type = ARPHRD_IEEE80211;
34889diff -urNp linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c
34890--- linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
34891+++ linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
34892@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
34893
34894 struct usbctlx_completor {
34895 int (*complete) (struct usbctlx_completor *);
34896-};
34897+} __no_const;
34898
34899 static int
34900 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
34901diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.c linux-3.0.7/drivers/staging/zcache/tmem.c
34902--- linux-3.0.7/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
34903+++ linux-3.0.7/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
34904@@ -39,7 +39,7 @@
34905 * A tmem host implementation must use this function to register callbacks
34906 * for memory allocation.
34907 */
34908-static struct tmem_hostops tmem_hostops;
34909+static tmem_hostops_no_const tmem_hostops;
34910
34911 static void tmem_objnode_tree_init(void);
34912
34913@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
34914 * A tmem host implementation must use this function to register
34915 * callbacks for a page-accessible memory (PAM) implementation
34916 */
34917-static struct tmem_pamops tmem_pamops;
34918+static tmem_pamops_no_const tmem_pamops;
34919
34920 void tmem_register_pamops(struct tmem_pamops *m)
34921 {
34922diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.h linux-3.0.7/drivers/staging/zcache/tmem.h
34923--- linux-3.0.7/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
34924+++ linux-3.0.7/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
34925@@ -171,6 +171,7 @@ struct tmem_pamops {
34926 int (*get_data)(struct page *, void *, struct tmem_pool *);
34927 void (*free)(void *, struct tmem_pool *);
34928 };
34929+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
34930 extern void tmem_register_pamops(struct tmem_pamops *m);
34931
34932 /* memory allocation methods provided by the host implementation */
34933@@ -180,6 +181,7 @@ struct tmem_hostops {
34934 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
34935 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
34936 };
34937+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
34938 extern void tmem_register_hostops(struct tmem_hostops *m);
34939
34940 /* core tmem accessor functions */
34941diff -urNp linux-3.0.7/drivers/target/target_core_alua.c linux-3.0.7/drivers/target/target_core_alua.c
34942--- linux-3.0.7/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
34943+++ linux-3.0.7/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
34944@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
34945 char path[ALUA_METADATA_PATH_LEN];
34946 int len;
34947
34948+ pax_track_stack();
34949+
34950 memset(path, 0, ALUA_METADATA_PATH_LEN);
34951
34952 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
34953@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
34954 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
34955 int len;
34956
34957+ pax_track_stack();
34958+
34959 memset(path, 0, ALUA_METADATA_PATH_LEN);
34960 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
34961
34962diff -urNp linux-3.0.7/drivers/target/target_core_cdb.c linux-3.0.7/drivers/target/target_core_cdb.c
34963--- linux-3.0.7/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
34964+++ linux-3.0.7/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
34965@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
34966 int length = 0;
34967 unsigned char buf[SE_MODE_PAGE_BUF];
34968
34969+ pax_track_stack();
34970+
34971 memset(buf, 0, SE_MODE_PAGE_BUF);
34972
34973 switch (cdb[2] & 0x3f) {
34974diff -urNp linux-3.0.7/drivers/target/target_core_configfs.c linux-3.0.7/drivers/target/target_core_configfs.c
34975--- linux-3.0.7/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
34976+++ linux-3.0.7/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
34977@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
34978 ssize_t len = 0;
34979 int reg_count = 0, prf_isid;
34980
34981+ pax_track_stack();
34982+
34983 if (!(su_dev->se_dev_ptr))
34984 return -ENODEV;
34985
34986diff -urNp linux-3.0.7/drivers/target/target_core_pr.c linux-3.0.7/drivers/target/target_core_pr.c
34987--- linux-3.0.7/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
34988+++ linux-3.0.7/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
34989@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
34990 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
34991 u16 tpgt;
34992
34993+ pax_track_stack();
34994+
34995 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
34996 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
34997 /*
34998@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
34999 ssize_t len = 0;
35000 int reg_count = 0;
35001
35002+ pax_track_stack();
35003+
35004 memset(buf, 0, pr_aptpl_buf_len);
35005 /*
35006 * Called to clear metadata once APTPL has been deactivated.
35007@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
35008 char path[512];
35009 int ret;
35010
35011+ pax_track_stack();
35012+
35013 memset(iov, 0, sizeof(struct iovec));
35014 memset(path, 0, 512);
35015
35016diff -urNp linux-3.0.7/drivers/target/target_core_tmr.c linux-3.0.7/drivers/target/target_core_tmr.c
35017--- linux-3.0.7/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
35018+++ linux-3.0.7/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
35019@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
35020 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
35021 T_TASK(cmd)->t_task_cdbs,
35022 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35023- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35024+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35025 atomic_read(&T_TASK(cmd)->t_transport_active),
35026 atomic_read(&T_TASK(cmd)->t_transport_stop),
35027 atomic_read(&T_TASK(cmd)->t_transport_sent));
35028@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
35029 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
35030 " task: %p, t_fe_count: %d dev: %p\n", task,
35031 fe_count, dev);
35032- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
35033+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
35034 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
35035 flags);
35036 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35037@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
35038 }
35039 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35040 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35041- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
35042+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
35043 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
35044 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35045
35046diff -urNp linux-3.0.7/drivers/target/target_core_transport.c linux-3.0.7/drivers/target/target_core_transport.c
35047--- linux-3.0.7/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
35048+++ linux-3.0.7/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
35049@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
35050
35051 dev->queue_depth = dev_limits->queue_depth;
35052 atomic_set(&dev->depth_left, dev->queue_depth);
35053- atomic_set(&dev->dev_ordered_id, 0);
35054+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35055
35056 se_dev_set_default_attribs(dev, dev_limits);
35057
35058@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
35059 * Used to determine when ORDERED commands should go from
35060 * Dormant to Active status.
35061 */
35062- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
35063+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
35064 smp_mb__after_atomic_inc();
35065 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35066 cmd->se_ordered_id, cmd->sam_task_attr,
35067@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
35068 " t_transport_active: %d t_transport_stop: %d"
35069 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
35070 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35071- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35072+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35073 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
35074 atomic_read(&T_TASK(cmd)->t_transport_active),
35075 atomic_read(&T_TASK(cmd)->t_transport_stop),
35076@@ -2673,9 +2673,9 @@ check_depth:
35077 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
35078 atomic_set(&task->task_active, 1);
35079 atomic_set(&task->task_sent, 1);
35080- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
35081+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
35082
35083- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
35084+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
35085 T_TASK(cmd)->t_task_cdbs)
35086 atomic_set(&cmd->transport_sent, 1);
35087
35088@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
35089 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
35090 }
35091 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
35092- atomic_read(&T_TASK(cmd)->t_transport_aborted))
35093+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
35094 goto remove;
35095
35096 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
35097@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
35098 {
35099 int ret = 0;
35100
35101- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
35102+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
35103 if (!(send_status) ||
35104 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35105 return 1;
35106@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
35107 */
35108 if (cmd->data_direction == DMA_TO_DEVICE) {
35109 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
35110- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
35111+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
35112 smp_mb__after_atomic_inc();
35113 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35114 transport_new_cmd_failure(cmd);
35115@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
35116 CMD_TFO(cmd)->get_task_tag(cmd),
35117 T_TASK(cmd)->t_task_cdbs,
35118 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35119- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35120+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35121 atomic_read(&T_TASK(cmd)->t_transport_active),
35122 atomic_read(&T_TASK(cmd)->t_transport_stop),
35123 atomic_read(&T_TASK(cmd)->t_transport_sent));
35124diff -urNp linux-3.0.7/drivers/telephony/ixj.c linux-3.0.7/drivers/telephony/ixj.c
35125--- linux-3.0.7/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
35126+++ linux-3.0.7/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
35127@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35128 bool mContinue;
35129 char *pIn, *pOut;
35130
35131+ pax_track_stack();
35132+
35133 if (!SCI_Prepare(j))
35134 return 0;
35135
35136diff -urNp linux-3.0.7/drivers/tty/hvc/hvcs.c linux-3.0.7/drivers/tty/hvc/hvcs.c
35137--- linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
35138+++ linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
35139@@ -83,6 +83,7 @@
35140 #include <asm/hvcserver.h>
35141 #include <asm/uaccess.h>
35142 #include <asm/vio.h>
35143+#include <asm/local.h>
35144
35145 /*
35146 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35147@@ -270,7 +271,7 @@ struct hvcs_struct {
35148 unsigned int index;
35149
35150 struct tty_struct *tty;
35151- int open_count;
35152+ local_t open_count;
35153
35154 /*
35155 * Used to tell the driver kernel_thread what operations need to take
35156@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35157
35158 spin_lock_irqsave(&hvcsd->lock, flags);
35159
35160- if (hvcsd->open_count > 0) {
35161+ if (local_read(&hvcsd->open_count) > 0) {
35162 spin_unlock_irqrestore(&hvcsd->lock, flags);
35163 printk(KERN_INFO "HVCS: vterm state unchanged. "
35164 "The hvcs device node is still in use.\n");
35165@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35166 if ((retval = hvcs_partner_connect(hvcsd)))
35167 goto error_release;
35168
35169- hvcsd->open_count = 1;
35170+ local_set(&hvcsd->open_count, 1);
35171 hvcsd->tty = tty;
35172 tty->driver_data = hvcsd;
35173
35174@@ -1179,7 +1180,7 @@ fast_open:
35175
35176 spin_lock_irqsave(&hvcsd->lock, flags);
35177 kref_get(&hvcsd->kref);
35178- hvcsd->open_count++;
35179+ local_inc(&hvcsd->open_count);
35180 hvcsd->todo_mask |= HVCS_SCHED_READ;
35181 spin_unlock_irqrestore(&hvcsd->lock, flags);
35182
35183@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35184 hvcsd = tty->driver_data;
35185
35186 spin_lock_irqsave(&hvcsd->lock, flags);
35187- if (--hvcsd->open_count == 0) {
35188+ if (local_dec_and_test(&hvcsd->open_count)) {
35189
35190 vio_disable_interrupts(hvcsd->vdev);
35191
35192@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35193 free_irq(irq, hvcsd);
35194 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35195 return;
35196- } else if (hvcsd->open_count < 0) {
35197+ } else if (local_read(&hvcsd->open_count) < 0) {
35198 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35199 " is missmanaged.\n",
35200- hvcsd->vdev->unit_address, hvcsd->open_count);
35201+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35202 }
35203
35204 spin_unlock_irqrestore(&hvcsd->lock, flags);
35205@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35206
35207 spin_lock_irqsave(&hvcsd->lock, flags);
35208 /* Preserve this so that we know how many kref refs to put */
35209- temp_open_count = hvcsd->open_count;
35210+ temp_open_count = local_read(&hvcsd->open_count);
35211
35212 /*
35213 * Don't kref put inside the spinlock because the destruction
35214@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35215 hvcsd->tty->driver_data = NULL;
35216 hvcsd->tty = NULL;
35217
35218- hvcsd->open_count = 0;
35219+ local_set(&hvcsd->open_count, 0);
35220
35221 /* This will drop any buffered data on the floor which is OK in a hangup
35222 * scenario. */
35223@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35224 * the middle of a write operation? This is a crummy place to do this
35225 * but we want to keep it all in the spinlock.
35226 */
35227- if (hvcsd->open_count <= 0) {
35228+ if (local_read(&hvcsd->open_count) <= 0) {
35229 spin_unlock_irqrestore(&hvcsd->lock, flags);
35230 return -ENODEV;
35231 }
35232@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35233 {
35234 struct hvcs_struct *hvcsd = tty->driver_data;
35235
35236- if (!hvcsd || hvcsd->open_count <= 0)
35237+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35238 return 0;
35239
35240 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35241diff -urNp linux-3.0.7/drivers/tty/ipwireless/tty.c linux-3.0.7/drivers/tty/ipwireless/tty.c
35242--- linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
35243+++ linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
35244@@ -29,6 +29,7 @@
35245 #include <linux/tty_driver.h>
35246 #include <linux/tty_flip.h>
35247 #include <linux/uaccess.h>
35248+#include <asm/local.h>
35249
35250 #include "tty.h"
35251 #include "network.h"
35252@@ -51,7 +52,7 @@ struct ipw_tty {
35253 int tty_type;
35254 struct ipw_network *network;
35255 struct tty_struct *linux_tty;
35256- int open_count;
35257+ local_t open_count;
35258 unsigned int control_lines;
35259 struct mutex ipw_tty_mutex;
35260 int tx_bytes_queued;
35261@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35262 mutex_unlock(&tty->ipw_tty_mutex);
35263 return -ENODEV;
35264 }
35265- if (tty->open_count == 0)
35266+ if (local_read(&tty->open_count) == 0)
35267 tty->tx_bytes_queued = 0;
35268
35269- tty->open_count++;
35270+ local_inc(&tty->open_count);
35271
35272 tty->linux_tty = linux_tty;
35273 linux_tty->driver_data = tty;
35274@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35275
35276 static void do_ipw_close(struct ipw_tty *tty)
35277 {
35278- tty->open_count--;
35279-
35280- if (tty->open_count == 0) {
35281+ if (local_dec_return(&tty->open_count) == 0) {
35282 struct tty_struct *linux_tty = tty->linux_tty;
35283
35284 if (linux_tty != NULL) {
35285@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35286 return;
35287
35288 mutex_lock(&tty->ipw_tty_mutex);
35289- if (tty->open_count == 0) {
35290+ if (local_read(&tty->open_count) == 0) {
35291 mutex_unlock(&tty->ipw_tty_mutex);
35292 return;
35293 }
35294@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35295 return;
35296 }
35297
35298- if (!tty->open_count) {
35299+ if (!local_read(&tty->open_count)) {
35300 mutex_unlock(&tty->ipw_tty_mutex);
35301 return;
35302 }
35303@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35304 return -ENODEV;
35305
35306 mutex_lock(&tty->ipw_tty_mutex);
35307- if (!tty->open_count) {
35308+ if (!local_read(&tty->open_count)) {
35309 mutex_unlock(&tty->ipw_tty_mutex);
35310 return -EINVAL;
35311 }
35312@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35313 if (!tty)
35314 return -ENODEV;
35315
35316- if (!tty->open_count)
35317+ if (!local_read(&tty->open_count))
35318 return -EINVAL;
35319
35320 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35321@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35322 if (!tty)
35323 return 0;
35324
35325- if (!tty->open_count)
35326+ if (!local_read(&tty->open_count))
35327 return 0;
35328
35329 return tty->tx_bytes_queued;
35330@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35331 if (!tty)
35332 return -ENODEV;
35333
35334- if (!tty->open_count)
35335+ if (!local_read(&tty->open_count))
35336 return -EINVAL;
35337
35338 return get_control_lines(tty);
35339@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35340 if (!tty)
35341 return -ENODEV;
35342
35343- if (!tty->open_count)
35344+ if (!local_read(&tty->open_count))
35345 return -EINVAL;
35346
35347 return set_control_lines(tty, set, clear);
35348@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35349 if (!tty)
35350 return -ENODEV;
35351
35352- if (!tty->open_count)
35353+ if (!local_read(&tty->open_count))
35354 return -EINVAL;
35355
35356 /* FIXME: Exactly how is the tty object locked here .. */
35357@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35358 against a parallel ioctl etc */
35359 mutex_lock(&ttyj->ipw_tty_mutex);
35360 }
35361- while (ttyj->open_count)
35362+ while (local_read(&ttyj->open_count))
35363 do_ipw_close(ttyj);
35364 ipwireless_disassociate_network_ttys(network,
35365 ttyj->channel_idx);
35366diff -urNp linux-3.0.7/drivers/tty/n_gsm.c linux-3.0.7/drivers/tty/n_gsm.c
35367--- linux-3.0.7/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
35368+++ linux-3.0.7/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
35369@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35370 return NULL;
35371 spin_lock_init(&dlci->lock);
35372 dlci->fifo = &dlci->_fifo;
35373- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35374+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35375 kfree(dlci);
35376 return NULL;
35377 }
35378diff -urNp linux-3.0.7/drivers/tty/n_tty.c linux-3.0.7/drivers/tty/n_tty.c
35379--- linux-3.0.7/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
35380+++ linux-3.0.7/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
35381@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35382 {
35383 *ops = tty_ldisc_N_TTY;
35384 ops->owner = NULL;
35385- ops->refcount = ops->flags = 0;
35386+ atomic_set(&ops->refcount, 0);
35387+ ops->flags = 0;
35388 }
35389 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35390diff -urNp linux-3.0.7/drivers/tty/pty.c linux-3.0.7/drivers/tty/pty.c
35391--- linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:54:54.000000000 -0400
35392+++ linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:55:28.000000000 -0400
35393@@ -767,8 +767,10 @@ static void __init unix98_pty_init(void)
35394 register_sysctl_table(pty_root_table);
35395
35396 /* Now create the /dev/ptmx special device */
35397+ pax_open_kernel();
35398 tty_default_fops(&ptmx_fops);
35399- ptmx_fops.open = ptmx_open;
35400+ *(void **)&ptmx_fops.open = ptmx_open;
35401+ pax_close_kernel();
35402
35403 cdev_init(&ptmx_cdev, &ptmx_fops);
35404 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35405diff -urNp linux-3.0.7/drivers/tty/rocket.c linux-3.0.7/drivers/tty/rocket.c
35406--- linux-3.0.7/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
35407+++ linux-3.0.7/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
35408@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35409 struct rocket_ports tmp;
35410 int board;
35411
35412+ pax_track_stack();
35413+
35414 if (!retports)
35415 return -EFAULT;
35416 memset(&tmp, 0, sizeof (tmp));
35417diff -urNp linux-3.0.7/drivers/tty/serial/kgdboc.c linux-3.0.7/drivers/tty/serial/kgdboc.c
35418--- linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
35419+++ linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
35420@@ -23,8 +23,9 @@
35421 #define MAX_CONFIG_LEN 40
35422
35423 static struct kgdb_io kgdboc_io_ops;
35424+static struct kgdb_io kgdboc_io_ops_console;
35425
35426-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35427+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35428 static int configured = -1;
35429
35430 static char config[MAX_CONFIG_LEN];
35431@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35432 kgdboc_unregister_kbd();
35433 if (configured == 1)
35434 kgdb_unregister_io_module(&kgdboc_io_ops);
35435+ else if (configured == 2)
35436+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35437 }
35438
35439 static int configure_kgdboc(void)
35440@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35441 int err;
35442 char *cptr = config;
35443 struct console *cons;
35444+ int is_console = 0;
35445
35446 err = kgdboc_option_setup(config);
35447 if (err || !strlen(config) || isspace(config[0]))
35448 goto noconfig;
35449
35450 err = -ENODEV;
35451- kgdboc_io_ops.is_console = 0;
35452 kgdb_tty_driver = NULL;
35453
35454 kgdboc_use_kms = 0;
35455@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35456 int idx;
35457 if (cons->device && cons->device(cons, &idx) == p &&
35458 idx == tty_line) {
35459- kgdboc_io_ops.is_console = 1;
35460+ is_console = 1;
35461 break;
35462 }
35463 cons = cons->next;
35464@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35465 kgdb_tty_line = tty_line;
35466
35467 do_register:
35468- err = kgdb_register_io_module(&kgdboc_io_ops);
35469+ if (is_console) {
35470+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35471+ configured = 2;
35472+ } else {
35473+ err = kgdb_register_io_module(&kgdboc_io_ops);
35474+ configured = 1;
35475+ }
35476 if (err)
35477 goto noconfig;
35478
35479- configured = 1;
35480-
35481 return 0;
35482
35483 noconfig:
35484@@ -212,7 +219,7 @@ noconfig:
35485 static int __init init_kgdboc(void)
35486 {
35487 /* Already configured? */
35488- if (configured == 1)
35489+ if (configured >= 1)
35490 return 0;
35491
35492 return configure_kgdboc();
35493@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35494 if (config[len - 1] == '\n')
35495 config[len - 1] = '\0';
35496
35497- if (configured == 1)
35498+ if (configured >= 1)
35499 cleanup_kgdboc();
35500
35501 /* Go and configure with the new params. */
35502@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35503 .post_exception = kgdboc_post_exp_handler,
35504 };
35505
35506+static struct kgdb_io kgdboc_io_ops_console = {
35507+ .name = "kgdboc",
35508+ .read_char = kgdboc_get_char,
35509+ .write_char = kgdboc_put_char,
35510+ .pre_exception = kgdboc_pre_exp_handler,
35511+ .post_exception = kgdboc_post_exp_handler,
35512+ .is_console = 1
35513+};
35514+
35515 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35516 /* This is only available if kgdboc is a built in for early debugging */
35517 static int __init kgdboc_early_init(char *opt)
35518diff -urNp linux-3.0.7/drivers/tty/serial/mfd.c linux-3.0.7/drivers/tty/serial/mfd.c
35519--- linux-3.0.7/drivers/tty/serial/mfd.c 2011-07-21 22:17:23.000000000 -0400
35520+++ linux-3.0.7/drivers/tty/serial/mfd.c 2011-10-11 10:44:33.000000000 -0400
35521@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35522 }
35523
35524 /* First 3 are UART ports, and the 4th is the DMA */
35525-static const struct pci_device_id pci_ids[] __devinitdata = {
35526+static const struct pci_device_id pci_ids[] __devinitconst = {
35527 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35528 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35529 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35530diff -urNp linux-3.0.7/drivers/tty/serial/mrst_max3110.c linux-3.0.7/drivers/tty/serial/mrst_max3110.c
35531--- linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:54:54.000000000 -0400
35532+++ linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:55:28.000000000 -0400
35533@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35534 int loop = 1, num, total = 0;
35535 u8 recv_buf[512], *pbuf;
35536
35537+ pax_track_stack();
35538+
35539 pbuf = recv_buf;
35540 do {
35541 num = max3110_read_multi(max, pbuf);
35542diff -urNp linux-3.0.7/drivers/tty/tty_io.c linux-3.0.7/drivers/tty/tty_io.c
35543--- linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:54:54.000000000 -0400
35544+++ linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:55:28.000000000 -0400
35545@@ -3214,7 +3214,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35546
35547 void tty_default_fops(struct file_operations *fops)
35548 {
35549- *fops = tty_fops;
35550+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35551 }
35552
35553 /*
35554diff -urNp linux-3.0.7/drivers/tty/tty_ldisc.c linux-3.0.7/drivers/tty/tty_ldisc.c
35555--- linux-3.0.7/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
35556+++ linux-3.0.7/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
35557@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35558 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35559 struct tty_ldisc_ops *ldo = ld->ops;
35560
35561- ldo->refcount--;
35562+ atomic_dec(&ldo->refcount);
35563 module_put(ldo->owner);
35564 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35565
35566@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35567 spin_lock_irqsave(&tty_ldisc_lock, flags);
35568 tty_ldiscs[disc] = new_ldisc;
35569 new_ldisc->num = disc;
35570- new_ldisc->refcount = 0;
35571+ atomic_set(&new_ldisc->refcount, 0);
35572 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35573
35574 return ret;
35575@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35576 return -EINVAL;
35577
35578 spin_lock_irqsave(&tty_ldisc_lock, flags);
35579- if (tty_ldiscs[disc]->refcount)
35580+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35581 ret = -EBUSY;
35582 else
35583 tty_ldiscs[disc] = NULL;
35584@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35585 if (ldops) {
35586 ret = ERR_PTR(-EAGAIN);
35587 if (try_module_get(ldops->owner)) {
35588- ldops->refcount++;
35589+ atomic_inc(&ldops->refcount);
35590 ret = ldops;
35591 }
35592 }
35593@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35594 unsigned long flags;
35595
35596 spin_lock_irqsave(&tty_ldisc_lock, flags);
35597- ldops->refcount--;
35598+ atomic_dec(&ldops->refcount);
35599 module_put(ldops->owner);
35600 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35601 }
35602diff -urNp linux-3.0.7/drivers/tty/vt/keyboard.c linux-3.0.7/drivers/tty/vt/keyboard.c
35603--- linux-3.0.7/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
35604+++ linux-3.0.7/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
35605@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35606 kbd->kbdmode == VC_OFF) &&
35607 value != KVAL(K_SAK))
35608 return; /* SAK is allowed even in raw mode */
35609+
35610+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35611+ {
35612+ void *func = fn_handler[value];
35613+ if (func == fn_show_state || func == fn_show_ptregs ||
35614+ func == fn_show_mem)
35615+ return;
35616+ }
35617+#endif
35618+
35619 fn_handler[value](vc);
35620 }
35621
35622diff -urNp linux-3.0.7/drivers/tty/vt/vt.c linux-3.0.7/drivers/tty/vt/vt.c
35623--- linux-3.0.7/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
35624+++ linux-3.0.7/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
35625@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35626
35627 static void notify_write(struct vc_data *vc, unsigned int unicode)
35628 {
35629- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35630+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
35631 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35632 }
35633
35634diff -urNp linux-3.0.7/drivers/tty/vt/vt_ioctl.c linux-3.0.7/drivers/tty/vt/vt_ioctl.c
35635--- linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35636+++ linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
35637@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35638 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35639 return -EFAULT;
35640
35641- if (!capable(CAP_SYS_TTY_CONFIG))
35642- perm = 0;
35643-
35644 switch (cmd) {
35645 case KDGKBENT:
35646 key_map = key_maps[s];
35647@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35648 val = (i ? K_HOLE : K_NOSUCHMAP);
35649 return put_user(val, &user_kbe->kb_value);
35650 case KDSKBENT:
35651+ if (!capable(CAP_SYS_TTY_CONFIG))
35652+ perm = 0;
35653+
35654 if (!perm)
35655 return -EPERM;
35656 if (!i && v == K_NOSUCHMAP) {
35657@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35658 int i, j, k;
35659 int ret;
35660
35661- if (!capable(CAP_SYS_TTY_CONFIG))
35662- perm = 0;
35663-
35664 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35665 if (!kbs) {
35666 ret = -ENOMEM;
35667@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35668 kfree(kbs);
35669 return ((p && *p) ? -EOVERFLOW : 0);
35670 case KDSKBSENT:
35671+ if (!capable(CAP_SYS_TTY_CONFIG))
35672+ perm = 0;
35673+
35674 if (!perm) {
35675 ret = -EPERM;
35676 goto reterr;
35677diff -urNp linux-3.0.7/drivers/uio/uio.c linux-3.0.7/drivers/uio/uio.c
35678--- linux-3.0.7/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
35679+++ linux-3.0.7/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
35680@@ -25,6 +25,7 @@
35681 #include <linux/kobject.h>
35682 #include <linux/cdev.h>
35683 #include <linux/uio_driver.h>
35684+#include <asm/local.h>
35685
35686 #define UIO_MAX_DEVICES (1U << MINORBITS)
35687
35688@@ -32,10 +33,10 @@ struct uio_device {
35689 struct module *owner;
35690 struct device *dev;
35691 int minor;
35692- atomic_t event;
35693+ atomic_unchecked_t event;
35694 struct fasync_struct *async_queue;
35695 wait_queue_head_t wait;
35696- int vma_count;
35697+ local_t vma_count;
35698 struct uio_info *info;
35699 struct kobject *map_dir;
35700 struct kobject *portio_dir;
35701@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
35702 struct device_attribute *attr, char *buf)
35703 {
35704 struct uio_device *idev = dev_get_drvdata(dev);
35705- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35706+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35707 }
35708
35709 static struct device_attribute uio_class_attributes[] = {
35710@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
35711 {
35712 struct uio_device *idev = info->uio_dev;
35713
35714- atomic_inc(&idev->event);
35715+ atomic_inc_unchecked(&idev->event);
35716 wake_up_interruptible(&idev->wait);
35717 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35718 }
35719@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
35720 }
35721
35722 listener->dev = idev;
35723- listener->event_count = atomic_read(&idev->event);
35724+ listener->event_count = atomic_read_unchecked(&idev->event);
35725 filep->private_data = listener;
35726
35727 if (idev->info->open) {
35728@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
35729 return -EIO;
35730
35731 poll_wait(filep, &idev->wait, wait);
35732- if (listener->event_count != atomic_read(&idev->event))
35733+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35734 return POLLIN | POLLRDNORM;
35735 return 0;
35736 }
35737@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
35738 do {
35739 set_current_state(TASK_INTERRUPTIBLE);
35740
35741- event_count = atomic_read(&idev->event);
35742+ event_count = atomic_read_unchecked(&idev->event);
35743 if (event_count != listener->event_count) {
35744 if (copy_to_user(buf, &event_count, count))
35745 retval = -EFAULT;
35746@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
35747 static void uio_vma_open(struct vm_area_struct *vma)
35748 {
35749 struct uio_device *idev = vma->vm_private_data;
35750- idev->vma_count++;
35751+ local_inc(&idev->vma_count);
35752 }
35753
35754 static void uio_vma_close(struct vm_area_struct *vma)
35755 {
35756 struct uio_device *idev = vma->vm_private_data;
35757- idev->vma_count--;
35758+ local_dec(&idev->vma_count);
35759 }
35760
35761 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35762@@ -823,7 +824,7 @@ int __uio_register_device(struct module
35763 idev->owner = owner;
35764 idev->info = info;
35765 init_waitqueue_head(&idev->wait);
35766- atomic_set(&idev->event, 0);
35767+ atomic_set_unchecked(&idev->event, 0);
35768
35769 ret = uio_get_minor(idev);
35770 if (ret)
35771diff -urNp linux-3.0.7/drivers/usb/atm/cxacru.c linux-3.0.7/drivers/usb/atm/cxacru.c
35772--- linux-3.0.7/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
35773+++ linux-3.0.7/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
35774@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
35775 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35776 if (ret < 2)
35777 return -EINVAL;
35778- if (index < 0 || index > 0x7f)
35779+ if (index > 0x7f)
35780 return -EINVAL;
35781 pos += tmp;
35782
35783diff -urNp linux-3.0.7/drivers/usb/atm/usbatm.c linux-3.0.7/drivers/usb/atm/usbatm.c
35784--- linux-3.0.7/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
35785+++ linux-3.0.7/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
35786@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
35787 if (printk_ratelimit())
35788 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35789 __func__, vpi, vci);
35790- atomic_inc(&vcc->stats->rx_err);
35791+ atomic_inc_unchecked(&vcc->stats->rx_err);
35792 return;
35793 }
35794
35795@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
35796 if (length > ATM_MAX_AAL5_PDU) {
35797 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35798 __func__, length, vcc);
35799- atomic_inc(&vcc->stats->rx_err);
35800+ atomic_inc_unchecked(&vcc->stats->rx_err);
35801 goto out;
35802 }
35803
35804@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
35805 if (sarb->len < pdu_length) {
35806 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35807 __func__, pdu_length, sarb->len, vcc);
35808- atomic_inc(&vcc->stats->rx_err);
35809+ atomic_inc_unchecked(&vcc->stats->rx_err);
35810 goto out;
35811 }
35812
35813 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35814 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35815 __func__, vcc);
35816- atomic_inc(&vcc->stats->rx_err);
35817+ atomic_inc_unchecked(&vcc->stats->rx_err);
35818 goto out;
35819 }
35820
35821@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
35822 if (printk_ratelimit())
35823 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35824 __func__, length);
35825- atomic_inc(&vcc->stats->rx_drop);
35826+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35827 goto out;
35828 }
35829
35830@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
35831
35832 vcc->push(vcc, skb);
35833
35834- atomic_inc(&vcc->stats->rx);
35835+ atomic_inc_unchecked(&vcc->stats->rx);
35836 out:
35837 skb_trim(sarb, 0);
35838 }
35839@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
35840 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35841
35842 usbatm_pop(vcc, skb);
35843- atomic_inc(&vcc->stats->tx);
35844+ atomic_inc_unchecked(&vcc->stats->tx);
35845
35846 skb = skb_dequeue(&instance->sndqueue);
35847 }
35848@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
35849 if (!left--)
35850 return sprintf(page,
35851 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35852- atomic_read(&atm_dev->stats.aal5.tx),
35853- atomic_read(&atm_dev->stats.aal5.tx_err),
35854- atomic_read(&atm_dev->stats.aal5.rx),
35855- atomic_read(&atm_dev->stats.aal5.rx_err),
35856- atomic_read(&atm_dev->stats.aal5.rx_drop));
35857+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35858+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35859+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35860+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35861+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35862
35863 if (!left--) {
35864 if (instance->disconnected)
35865diff -urNp linux-3.0.7/drivers/usb/core/devices.c linux-3.0.7/drivers/usb/core/devices.c
35866--- linux-3.0.7/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
35867+++ linux-3.0.7/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
35868@@ -126,7 +126,7 @@ static const char format_endpt[] =
35869 * time it gets called.
35870 */
35871 static struct device_connect_event {
35872- atomic_t count;
35873+ atomic_unchecked_t count;
35874 wait_queue_head_t wait;
35875 } device_event = {
35876 .count = ATOMIC_INIT(1),
35877@@ -164,7 +164,7 @@ static const struct class_info clas_info
35878
35879 void usbfs_conn_disc_event(void)
35880 {
35881- atomic_add(2, &device_event.count);
35882+ atomic_add_unchecked(2, &device_event.count);
35883 wake_up(&device_event.wait);
35884 }
35885
35886@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
35887
35888 poll_wait(file, &device_event.wait, wait);
35889
35890- event_count = atomic_read(&device_event.count);
35891+ event_count = atomic_read_unchecked(&device_event.count);
35892 if (file->f_version != event_count) {
35893 file->f_version = event_count;
35894 return POLLIN | POLLRDNORM;
35895diff -urNp linux-3.0.7/drivers/usb/core/message.c linux-3.0.7/drivers/usb/core/message.c
35896--- linux-3.0.7/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
35897+++ linux-3.0.7/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
35898@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
35899 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35900 if (buf) {
35901 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35902- if (len > 0) {
35903- smallbuf = kmalloc(++len, GFP_NOIO);
35904+ if (len++ > 0) {
35905+ smallbuf = kmalloc(len, GFP_NOIO);
35906 if (!smallbuf)
35907 return buf;
35908 memcpy(smallbuf, buf, len);
35909diff -urNp linux-3.0.7/drivers/usb/early/ehci-dbgp.c linux-3.0.7/drivers/usb/early/ehci-dbgp.c
35910--- linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
35911+++ linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
35912@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
35913
35914 #ifdef CONFIG_KGDB
35915 static struct kgdb_io kgdbdbgp_io_ops;
35916-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
35917+static struct kgdb_io kgdbdbgp_io_ops_console;
35918+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
35919 #else
35920 #define dbgp_kgdb_mode (0)
35921 #endif
35922@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
35923 .write_char = kgdbdbgp_write_char,
35924 };
35925
35926+static struct kgdb_io kgdbdbgp_io_ops_console = {
35927+ .name = "kgdbdbgp",
35928+ .read_char = kgdbdbgp_read_char,
35929+ .write_char = kgdbdbgp_write_char,
35930+ .is_console = 1
35931+};
35932+
35933 static int kgdbdbgp_wait_time;
35934
35935 static int __init kgdbdbgp_parse_config(char *str)
35936@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
35937 ptr++;
35938 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
35939 }
35940- kgdb_register_io_module(&kgdbdbgp_io_ops);
35941- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
35942+ if (early_dbgp_console.index != -1)
35943+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
35944+ else
35945+ kgdb_register_io_module(&kgdbdbgp_io_ops);
35946
35947 return 0;
35948 }
35949diff -urNp linux-3.0.7/drivers/usb/host/xhci-mem.c linux-3.0.7/drivers/usb/host/xhci-mem.c
35950--- linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
35951+++ linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
35952@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
35953 unsigned int num_tests;
35954 int i, ret;
35955
35956+ pax_track_stack();
35957+
35958 num_tests = ARRAY_SIZE(simple_test_vector);
35959 for (i = 0; i < num_tests; i++) {
35960 ret = xhci_test_trb_in_td(xhci,
35961diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-hc.h linux-3.0.7/drivers/usb/wusbcore/wa-hc.h
35962--- linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
35963+++ linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
35964@@ -192,7 +192,7 @@ struct wahc {
35965 struct list_head xfer_delayed_list;
35966 spinlock_t xfer_list_lock;
35967 struct work_struct xfer_work;
35968- atomic_t xfer_id_count;
35969+ atomic_unchecked_t xfer_id_count;
35970 };
35971
35972
35973@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35974 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35975 spin_lock_init(&wa->xfer_list_lock);
35976 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35977- atomic_set(&wa->xfer_id_count, 1);
35978+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35979 }
35980
35981 /**
35982diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c
35983--- linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
35984+++ linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
35985@@ -294,7 +294,7 @@ out:
35986 */
35987 static void wa_xfer_id_init(struct wa_xfer *xfer)
35988 {
35989- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35990+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35991 }
35992
35993 /*
35994diff -urNp linux-3.0.7/drivers/vhost/vhost.c linux-3.0.7/drivers/vhost/vhost.c
35995--- linux-3.0.7/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
35996+++ linux-3.0.7/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
35997@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
35998 return get_user(vq->last_used_idx, &used->idx);
35999 }
36000
36001-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36002+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36003 {
36004 struct file *eventfp, *filep = NULL,
36005 *pollstart = NULL, *pollstop = NULL;
36006diff -urNp linux-3.0.7/drivers/video/aty/aty128fb.c linux-3.0.7/drivers/video/aty/aty128fb.c
36007--- linux-3.0.7/drivers/video/aty/aty128fb.c 2011-07-21 22:17:23.000000000 -0400
36008+++ linux-3.0.7/drivers/video/aty/aty128fb.c 2011-10-11 10:44:33.000000000 -0400
36009@@ -148,7 +148,7 @@ enum {
36010 };
36011
36012 /* Must match above enum */
36013-static const char *r128_family[] __devinitdata = {
36014+static const char *r128_family[] __devinitconst = {
36015 "AGP",
36016 "PCI",
36017 "PRO AGP",
36018diff -urNp linux-3.0.7/drivers/video/fbcmap.c linux-3.0.7/drivers/video/fbcmap.c
36019--- linux-3.0.7/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
36020+++ linux-3.0.7/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
36021@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36022 rc = -ENODEV;
36023 goto out;
36024 }
36025- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36026- !info->fbops->fb_setcmap)) {
36027+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36028 rc = -EINVAL;
36029 goto out1;
36030 }
36031diff -urNp linux-3.0.7/drivers/video/fbmem.c linux-3.0.7/drivers/video/fbmem.c
36032--- linux-3.0.7/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
36033+++ linux-3.0.7/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
36034@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36035 image->dx += image->width + 8;
36036 }
36037 } else if (rotate == FB_ROTATE_UD) {
36038- for (x = 0; x < num && image->dx >= 0; x++) {
36039+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36040 info->fbops->fb_imageblit(info, image);
36041 image->dx -= image->width + 8;
36042 }
36043@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36044 image->dy += image->height + 8;
36045 }
36046 } else if (rotate == FB_ROTATE_CCW) {
36047- for (x = 0; x < num && image->dy >= 0; x++) {
36048+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36049 info->fbops->fb_imageblit(info, image);
36050 image->dy -= image->height + 8;
36051 }
36052@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36053 int flags = info->flags;
36054 int ret = 0;
36055
36056+ pax_track_stack();
36057+
36058 if (var->activate & FB_ACTIVATE_INV_MODE) {
36059 struct fb_videomode mode1, mode2;
36060
36061@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36062 void __user *argp = (void __user *)arg;
36063 long ret = 0;
36064
36065+ pax_track_stack();
36066+
36067 switch (cmd) {
36068 case FBIOGET_VSCREENINFO:
36069 if (!lock_fb_info(info))
36070@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36071 return -EFAULT;
36072 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36073 return -EINVAL;
36074- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36075+ if (con2fb.framebuffer >= FB_MAX)
36076 return -EINVAL;
36077 if (!registered_fb[con2fb.framebuffer])
36078 request_module("fb%d", con2fb.framebuffer);
36079diff -urNp linux-3.0.7/drivers/video/geode/gx1fb_core.c linux-3.0.7/drivers/video/geode/gx1fb_core.c
36080--- linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-07-21 22:17:23.000000000 -0400
36081+++ linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-10-11 10:44:33.000000000 -0400
36082@@ -29,7 +29,7 @@ static int crt_option = 1;
36083 static char panel_option[32] = "";
36084
36085 /* Modes relevant to the GX1 (taken from modedb.c) */
36086-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36087+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36088 /* 640x480-60 VESA */
36089 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36090 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36091diff -urNp linux-3.0.7/drivers/video/gxt4500.c linux-3.0.7/drivers/video/gxt4500.c
36092--- linux-3.0.7/drivers/video/gxt4500.c 2011-07-21 22:17:23.000000000 -0400
36093+++ linux-3.0.7/drivers/video/gxt4500.c 2011-10-11 10:44:33.000000000 -0400
36094@@ -156,7 +156,7 @@ struct gxt4500_par {
36095 static char *mode_option;
36096
36097 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36098-static const struct fb_videomode defaultmode __devinitdata = {
36099+static const struct fb_videomode defaultmode __devinitconst = {
36100 .refresh = 60,
36101 .xres = 1280,
36102 .yres = 1024,
36103@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36104 return 0;
36105 }
36106
36107-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36108+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36109 .id = "IBM GXT4500P",
36110 .type = FB_TYPE_PACKED_PIXELS,
36111 .visual = FB_VISUAL_PSEUDOCOLOR,
36112diff -urNp linux-3.0.7/drivers/video/i810/i810_accel.c linux-3.0.7/drivers/video/i810/i810_accel.c
36113--- linux-3.0.7/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
36114+++ linux-3.0.7/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
36115@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36116 }
36117 }
36118 printk("ringbuffer lockup!!!\n");
36119+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36120 i810_report_error(mmio);
36121 par->dev_flags |= LOCKUP;
36122 info->pixmap.scan_align = 1;
36123diff -urNp linux-3.0.7/drivers/video/i810/i810_main.c linux-3.0.7/drivers/video/i810/i810_main.c
36124--- linux-3.0.7/drivers/video/i810/i810_main.c 2011-07-21 22:17:23.000000000 -0400
36125+++ linux-3.0.7/drivers/video/i810/i810_main.c 2011-10-11 10:44:33.000000000 -0400
36126@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36127 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36128
36129 /* PCI */
36130-static const char *i810_pci_list[] __devinitdata = {
36131+static const char *i810_pci_list[] __devinitconst = {
36132 "Intel(R) 810 Framebuffer Device" ,
36133 "Intel(R) 810-DC100 Framebuffer Device" ,
36134 "Intel(R) 810E Framebuffer Device" ,
36135diff -urNp linux-3.0.7/drivers/video/jz4740_fb.c linux-3.0.7/drivers/video/jz4740_fb.c
36136--- linux-3.0.7/drivers/video/jz4740_fb.c 2011-07-21 22:17:23.000000000 -0400
36137+++ linux-3.0.7/drivers/video/jz4740_fb.c 2011-10-11 10:44:33.000000000 -0400
36138@@ -136,7 +136,7 @@ struct jzfb {
36139 uint32_t pseudo_palette[16];
36140 };
36141
36142-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36143+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36144 .id = "JZ4740 FB",
36145 .type = FB_TYPE_PACKED_PIXELS,
36146 .visual = FB_VISUAL_TRUECOLOR,
36147diff -urNp linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm
36148--- linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
36149+++ linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
36150@@ -1,1604 +1,1123 @@
36151 P3
36152-# Standard 224-color Linux logo
36153 80 80
36154 255
36155- 0 0 0 0 0 0 0 0 0 0 0 0
36156- 0 0 0 0 0 0 0 0 0 0 0 0
36157- 0 0 0 0 0 0 0 0 0 0 0 0
36158- 0 0 0 0 0 0 0 0 0 0 0 0
36159- 0 0 0 0 0 0 0 0 0 0 0 0
36160- 0 0 0 0 0 0 0 0 0 0 0 0
36161- 0 0 0 0 0 0 0 0 0 0 0 0
36162- 0 0 0 0 0 0 0 0 0 0 0 0
36163- 0 0 0 0 0 0 0 0 0 0 0 0
36164- 6 6 6 6 6 6 10 10 10 10 10 10
36165- 10 10 10 6 6 6 6 6 6 6 6 6
36166- 0 0 0 0 0 0 0 0 0 0 0 0
36167- 0 0 0 0 0 0 0 0 0 0 0 0
36168- 0 0 0 0 0 0 0 0 0 0 0 0
36169- 0 0 0 0 0 0 0 0 0 0 0 0
36170- 0 0 0 0 0 0 0 0 0 0 0 0
36171- 0 0 0 0 0 0 0 0 0 0 0 0
36172- 0 0 0 0 0 0 0 0 0 0 0 0
36173- 0 0 0 0 0 0 0 0 0 0 0 0
36174- 0 0 0 0 0 0 0 0 0 0 0 0
36175- 0 0 0 0 0 0 0 0 0 0 0 0
36176- 0 0 0 0 0 0 0 0 0 0 0 0
36177- 0 0 0 0 0 0 0 0 0 0 0 0
36178- 0 0 0 0 0 0 0 0 0 0 0 0
36179- 0 0 0 0 0 0 0 0 0 0 0 0
36180- 0 0 0 0 0 0 0 0 0 0 0 0
36181- 0 0 0 0 0 0 0 0 0 0 0 0
36182- 0 0 0 0 0 0 0 0 0 0 0 0
36183- 0 0 0 6 6 6 10 10 10 14 14 14
36184- 22 22 22 26 26 26 30 30 30 34 34 34
36185- 30 30 30 30 30 30 26 26 26 18 18 18
36186- 14 14 14 10 10 10 6 6 6 0 0 0
36187- 0 0 0 0 0 0 0 0 0 0 0 0
36188- 0 0 0 0 0 0 0 0 0 0 0 0
36189- 0 0 0 0 0 0 0 0 0 0 0 0
36190- 0 0 0 0 0 0 0 0 0 0 0 0
36191- 0 0 0 0 0 0 0 0 0 0 0 0
36192- 0 0 0 0 0 0 0 0 0 0 0 0
36193- 0 0 0 0 0 0 0 0 0 0 0 0
36194- 0 0 0 0 0 0 0 0 0 0 0 0
36195- 0 0 0 0 0 0 0 0 0 0 0 0
36196- 0 0 0 0 0 1 0 0 1 0 0 0
36197- 0 0 0 0 0 0 0 0 0 0 0 0
36198- 0 0 0 0 0 0 0 0 0 0 0 0
36199- 0 0 0 0 0 0 0 0 0 0 0 0
36200- 0 0 0 0 0 0 0 0 0 0 0 0
36201- 0 0 0 0 0 0 0 0 0 0 0 0
36202- 0 0 0 0 0 0 0 0 0 0 0 0
36203- 6 6 6 14 14 14 26 26 26 42 42 42
36204- 54 54 54 66 66 66 78 78 78 78 78 78
36205- 78 78 78 74 74 74 66 66 66 54 54 54
36206- 42 42 42 26 26 26 18 18 18 10 10 10
36207- 6 6 6 0 0 0 0 0 0 0 0 0
36208- 0 0 0 0 0 0 0 0 0 0 0 0
36209- 0 0 0 0 0 0 0 0 0 0 0 0
36210- 0 0 0 0 0 0 0 0 0 0 0 0
36211- 0 0 0 0 0 0 0 0 0 0 0 0
36212- 0 0 0 0 0 0 0 0 0 0 0 0
36213- 0 0 0 0 0 0 0 0 0 0 0 0
36214- 0 0 0 0 0 0 0 0 0 0 0 0
36215- 0 0 0 0 0 0 0 0 0 0 0 0
36216- 0 0 1 0 0 0 0 0 0 0 0 0
36217- 0 0 0 0 0 0 0 0 0 0 0 0
36218- 0 0 0 0 0 0 0 0 0 0 0 0
36219- 0 0 0 0 0 0 0 0 0 0 0 0
36220- 0 0 0 0 0 0 0 0 0 0 0 0
36221- 0 0 0 0 0 0 0 0 0 0 0 0
36222- 0 0 0 0 0 0 0 0 0 10 10 10
36223- 22 22 22 42 42 42 66 66 66 86 86 86
36224- 66 66 66 38 38 38 38 38 38 22 22 22
36225- 26 26 26 34 34 34 54 54 54 66 66 66
36226- 86 86 86 70 70 70 46 46 46 26 26 26
36227- 14 14 14 6 6 6 0 0 0 0 0 0
36228- 0 0 0 0 0 0 0 0 0 0 0 0
36229- 0 0 0 0 0 0 0 0 0 0 0 0
36230- 0 0 0 0 0 0 0 0 0 0 0 0
36231- 0 0 0 0 0 0 0 0 0 0 0 0
36232- 0 0 0 0 0 0 0 0 0 0 0 0
36233- 0 0 0 0 0 0 0 0 0 0 0 0
36234- 0 0 0 0 0 0 0 0 0 0 0 0
36235- 0 0 0 0 0 0 0 0 0 0 0 0
36236- 0 0 1 0 0 1 0 0 1 0 0 0
36237- 0 0 0 0 0 0 0 0 0 0 0 0
36238- 0 0 0 0 0 0 0 0 0 0 0 0
36239- 0 0 0 0 0 0 0 0 0 0 0 0
36240- 0 0 0 0 0 0 0 0 0 0 0 0
36241- 0 0 0 0 0 0 0 0 0 0 0 0
36242- 0 0 0 0 0 0 10 10 10 26 26 26
36243- 50 50 50 82 82 82 58 58 58 6 6 6
36244- 2 2 6 2 2 6 2 2 6 2 2 6
36245- 2 2 6 2 2 6 2 2 6 2 2 6
36246- 6 6 6 54 54 54 86 86 86 66 66 66
36247- 38 38 38 18 18 18 6 6 6 0 0 0
36248- 0 0 0 0 0 0 0 0 0 0 0 0
36249- 0 0 0 0 0 0 0 0 0 0 0 0
36250- 0 0 0 0 0 0 0 0 0 0 0 0
36251- 0 0 0 0 0 0 0 0 0 0 0 0
36252- 0 0 0 0 0 0 0 0 0 0 0 0
36253- 0 0 0 0 0 0 0 0 0 0 0 0
36254- 0 0 0 0 0 0 0 0 0 0 0 0
36255- 0 0 0 0 0 0 0 0 0 0 0 0
36256- 0 0 0 0 0 0 0 0 0 0 0 0
36257- 0 0 0 0 0 0 0 0 0 0 0 0
36258- 0 0 0 0 0 0 0 0 0 0 0 0
36259- 0 0 0 0 0 0 0 0 0 0 0 0
36260- 0 0 0 0 0 0 0 0 0 0 0 0
36261- 0 0 0 0 0 0 0 0 0 0 0 0
36262- 0 0 0 6 6 6 22 22 22 50 50 50
36263- 78 78 78 34 34 34 2 2 6 2 2 6
36264- 2 2 6 2 2 6 2 2 6 2 2 6
36265- 2 2 6 2 2 6 2 2 6 2 2 6
36266- 2 2 6 2 2 6 6 6 6 70 70 70
36267- 78 78 78 46 46 46 22 22 22 6 6 6
36268- 0 0 0 0 0 0 0 0 0 0 0 0
36269- 0 0 0 0 0 0 0 0 0 0 0 0
36270- 0 0 0 0 0 0 0 0 0 0 0 0
36271- 0 0 0 0 0 0 0 0 0 0 0 0
36272- 0 0 0 0 0 0 0 0 0 0 0 0
36273- 0 0 0 0 0 0 0 0 0 0 0 0
36274- 0 0 0 0 0 0 0 0 0 0 0 0
36275- 0 0 0 0 0 0 0 0 0 0 0 0
36276- 0 0 1 0 0 1 0 0 1 0 0 0
36277- 0 0 0 0 0 0 0 0 0 0 0 0
36278- 0 0 0 0 0 0 0 0 0 0 0 0
36279- 0 0 0 0 0 0 0 0 0 0 0 0
36280- 0 0 0 0 0 0 0 0 0 0 0 0
36281- 0 0 0 0 0 0 0 0 0 0 0 0
36282- 6 6 6 18 18 18 42 42 42 82 82 82
36283- 26 26 26 2 2 6 2 2 6 2 2 6
36284- 2 2 6 2 2 6 2 2 6 2 2 6
36285- 2 2 6 2 2 6 2 2 6 14 14 14
36286- 46 46 46 34 34 34 6 6 6 2 2 6
36287- 42 42 42 78 78 78 42 42 42 18 18 18
36288- 6 6 6 0 0 0 0 0 0 0 0 0
36289- 0 0 0 0 0 0 0 0 0 0 0 0
36290- 0 0 0 0 0 0 0 0 0 0 0 0
36291- 0 0 0 0 0 0 0 0 0 0 0 0
36292- 0 0 0 0 0 0 0 0 0 0 0 0
36293- 0 0 0 0 0 0 0 0 0 0 0 0
36294- 0 0 0 0 0 0 0 0 0 0 0 0
36295- 0 0 0 0 0 0 0 0 0 0 0 0
36296- 0 0 1 0 0 0 0 0 1 0 0 0
36297- 0 0 0 0 0 0 0 0 0 0 0 0
36298- 0 0 0 0 0 0 0 0 0 0 0 0
36299- 0 0 0 0 0 0 0 0 0 0 0 0
36300- 0 0 0 0 0 0 0 0 0 0 0 0
36301- 0 0 0 0 0 0 0 0 0 0 0 0
36302- 10 10 10 30 30 30 66 66 66 58 58 58
36303- 2 2 6 2 2 6 2 2 6 2 2 6
36304- 2 2 6 2 2 6 2 2 6 2 2 6
36305- 2 2 6 2 2 6 2 2 6 26 26 26
36306- 86 86 86 101 101 101 46 46 46 10 10 10
36307- 2 2 6 58 58 58 70 70 70 34 34 34
36308- 10 10 10 0 0 0 0 0 0 0 0 0
36309- 0 0 0 0 0 0 0 0 0 0 0 0
36310- 0 0 0 0 0 0 0 0 0 0 0 0
36311- 0 0 0 0 0 0 0 0 0 0 0 0
36312- 0 0 0 0 0 0 0 0 0 0 0 0
36313- 0 0 0 0 0 0 0 0 0 0 0 0
36314- 0 0 0 0 0 0 0 0 0 0 0 0
36315- 0 0 0 0 0 0 0 0 0 0 0 0
36316- 0 0 1 0 0 1 0 0 1 0 0 0
36317- 0 0 0 0 0 0 0 0 0 0 0 0
36318- 0 0 0 0 0 0 0 0 0 0 0 0
36319- 0 0 0 0 0 0 0 0 0 0 0 0
36320- 0 0 0 0 0 0 0 0 0 0 0 0
36321- 0 0 0 0 0 0 0 0 0 0 0 0
36322- 14 14 14 42 42 42 86 86 86 10 10 10
36323- 2 2 6 2 2 6 2 2 6 2 2 6
36324- 2 2 6 2 2 6 2 2 6 2 2 6
36325- 2 2 6 2 2 6 2 2 6 30 30 30
36326- 94 94 94 94 94 94 58 58 58 26 26 26
36327- 2 2 6 6 6 6 78 78 78 54 54 54
36328- 22 22 22 6 6 6 0 0 0 0 0 0
36329- 0 0 0 0 0 0 0 0 0 0 0 0
36330- 0 0 0 0 0 0 0 0 0 0 0 0
36331- 0 0 0 0 0 0 0 0 0 0 0 0
36332- 0 0 0 0 0 0 0 0 0 0 0 0
36333- 0 0 0 0 0 0 0 0 0 0 0 0
36334- 0 0 0 0 0 0 0 0 0 0 0 0
36335- 0 0 0 0 0 0 0 0 0 0 0 0
36336- 0 0 0 0 0 0 0 0 0 0 0 0
36337- 0 0 0 0 0 0 0 0 0 0 0 0
36338- 0 0 0 0 0 0 0 0 0 0 0 0
36339- 0 0 0 0 0 0 0 0 0 0 0 0
36340- 0 0 0 0 0 0 0 0 0 0 0 0
36341- 0 0 0 0 0 0 0 0 0 6 6 6
36342- 22 22 22 62 62 62 62 62 62 2 2 6
36343- 2 2 6 2 2 6 2 2 6 2 2 6
36344- 2 2 6 2 2 6 2 2 6 2 2 6
36345- 2 2 6 2 2 6 2 2 6 26 26 26
36346- 54 54 54 38 38 38 18 18 18 10 10 10
36347- 2 2 6 2 2 6 34 34 34 82 82 82
36348- 38 38 38 14 14 14 0 0 0 0 0 0
36349- 0 0 0 0 0 0 0 0 0 0 0 0
36350- 0 0 0 0 0 0 0 0 0 0 0 0
36351- 0 0 0 0 0 0 0 0 0 0 0 0
36352- 0 0 0 0 0 0 0 0 0 0 0 0
36353- 0 0 0 0 0 0 0 0 0 0 0 0
36354- 0 0 0 0 0 0 0 0 0 0 0 0
36355- 0 0 0 0 0 0 0 0 0 0 0 0
36356- 0 0 0 0 0 1 0 0 1 0 0 0
36357- 0 0 0 0 0 0 0 0 0 0 0 0
36358- 0 0 0 0 0 0 0 0 0 0 0 0
36359- 0 0 0 0 0 0 0 0 0 0 0 0
36360- 0 0 0 0 0 0 0 0 0 0 0 0
36361- 0 0 0 0 0 0 0 0 0 6 6 6
36362- 30 30 30 78 78 78 30 30 30 2 2 6
36363- 2 2 6 2 2 6 2 2 6 2 2 6
36364- 2 2 6 2 2 6 2 2 6 2 2 6
36365- 2 2 6 2 2 6 2 2 6 10 10 10
36366- 10 10 10 2 2 6 2 2 6 2 2 6
36367- 2 2 6 2 2 6 2 2 6 78 78 78
36368- 50 50 50 18 18 18 6 6 6 0 0 0
36369- 0 0 0 0 0 0 0 0 0 0 0 0
36370- 0 0 0 0 0 0 0 0 0 0 0 0
36371- 0 0 0 0 0 0 0 0 0 0 0 0
36372- 0 0 0 0 0 0 0 0 0 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 0 0 1 0 0 0 0 0 0 0 0 0
36377- 0 0 0 0 0 0 0 0 0 0 0 0
36378- 0 0 0 0 0 0 0 0 0 0 0 0
36379- 0 0 0 0 0 0 0 0 0 0 0 0
36380- 0 0 0 0 0 0 0 0 0 0 0 0
36381- 0 0 0 0 0 0 0 0 0 10 10 10
36382- 38 38 38 86 86 86 14 14 14 2 2 6
36383- 2 2 6 2 2 6 2 2 6 2 2 6
36384- 2 2 6 2 2 6 2 2 6 2 2 6
36385- 2 2 6 2 2 6 2 2 6 2 2 6
36386- 2 2 6 2 2 6 2 2 6 2 2 6
36387- 2 2 6 2 2 6 2 2 6 54 54 54
36388- 66 66 66 26 26 26 6 6 6 0 0 0
36389- 0 0 0 0 0 0 0 0 0 0 0 0
36390- 0 0 0 0 0 0 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 0 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 1 0 0 1 0 0 0
36397- 0 0 0 0 0 0 0 0 0 0 0 0
36398- 0 0 0 0 0 0 0 0 0 0 0 0
36399- 0 0 0 0 0 0 0 0 0 0 0 0
36400- 0 0 0 0 0 0 0 0 0 0 0 0
36401- 0 0 0 0 0 0 0 0 0 14 14 14
36402- 42 42 42 82 82 82 2 2 6 2 2 6
36403- 2 2 6 6 6 6 10 10 10 2 2 6
36404- 2 2 6 2 2 6 2 2 6 2 2 6
36405- 2 2 6 2 2 6 2 2 6 6 6 6
36406- 14 14 14 10 10 10 2 2 6 2 2 6
36407- 2 2 6 2 2 6 2 2 6 18 18 18
36408- 82 82 82 34 34 34 10 10 10 0 0 0
36409- 0 0 0 0 0 0 0 0 0 0 0 0
36410- 0 0 0 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 1 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 0 0 0
36418- 0 0 0 0 0 0 0 0 0 0 0 0
36419- 0 0 0 0 0 0 0 0 0 0 0 0
36420- 0 0 0 0 0 0 0 0 0 0 0 0
36421- 0 0 0 0 0 0 0 0 0 14 14 14
36422- 46 46 46 86 86 86 2 2 6 2 2 6
36423- 6 6 6 6 6 6 22 22 22 34 34 34
36424- 6 6 6 2 2 6 2 2 6 2 2 6
36425- 2 2 6 2 2 6 18 18 18 34 34 34
36426- 10 10 10 50 50 50 22 22 22 2 2 6
36427- 2 2 6 2 2 6 2 2 6 10 10 10
36428- 86 86 86 42 42 42 14 14 14 0 0 0
36429- 0 0 0 0 0 0 0 0 0 0 0 0
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 1 0 0 1 0 0 1 0 0 0
36437- 0 0 0 0 0 0 0 0 0 0 0 0
36438- 0 0 0 0 0 0 0 0 0 0 0 0
36439- 0 0 0 0 0 0 0 0 0 0 0 0
36440- 0 0 0 0 0 0 0 0 0 0 0 0
36441- 0 0 0 0 0 0 0 0 0 14 14 14
36442- 46 46 46 86 86 86 2 2 6 2 2 6
36443- 38 38 38 116 116 116 94 94 94 22 22 22
36444- 22 22 22 2 2 6 2 2 6 2 2 6
36445- 14 14 14 86 86 86 138 138 138 162 162 162
36446-154 154 154 38 38 38 26 26 26 6 6 6
36447- 2 2 6 2 2 6 2 2 6 2 2 6
36448- 86 86 86 46 46 46 14 14 14 0 0 0
36449- 0 0 0 0 0 0 0 0 0 0 0 0
36450- 0 0 0 0 0 0 0 0 0 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 0 0 0 0 0 0 0 0 0 0 0 0
36459- 0 0 0 0 0 0 0 0 0 0 0 0
36460- 0 0 0 0 0 0 0 0 0 0 0 0
36461- 0 0 0 0 0 0 0 0 0 14 14 14
36462- 46 46 46 86 86 86 2 2 6 14 14 14
36463-134 134 134 198 198 198 195 195 195 116 116 116
36464- 10 10 10 2 2 6 2 2 6 6 6 6
36465-101 98 89 187 187 187 210 210 210 218 218 218
36466-214 214 214 134 134 134 14 14 14 6 6 6
36467- 2 2 6 2 2 6 2 2 6 2 2 6
36468- 86 86 86 50 50 50 18 18 18 6 6 6
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 1 0 0 0
36476- 0 0 1 0 0 1 0 0 1 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 0 0 0 0 0 0 0 0 0 0 0 0
36479- 0 0 0 0 0 0 0 0 0 0 0 0
36480- 0 0 0 0 0 0 0 0 0 0 0 0
36481- 0 0 0 0 0 0 0 0 0 14 14 14
36482- 46 46 46 86 86 86 2 2 6 54 54 54
36483-218 218 218 195 195 195 226 226 226 246 246 246
36484- 58 58 58 2 2 6 2 2 6 30 30 30
36485-210 210 210 253 253 253 174 174 174 123 123 123
36486-221 221 221 234 234 234 74 74 74 2 2 6
36487- 2 2 6 2 2 6 2 2 6 2 2 6
36488- 70 70 70 58 58 58 22 22 22 6 6 6
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 0 0 0
36498- 0 0 0 0 0 0 0 0 0 0 0 0
36499- 0 0 0 0 0 0 0 0 0 0 0 0
36500- 0 0 0 0 0 0 0 0 0 0 0 0
36501- 0 0 0 0 0 0 0 0 0 14 14 14
36502- 46 46 46 82 82 82 2 2 6 106 106 106
36503-170 170 170 26 26 26 86 86 86 226 226 226
36504-123 123 123 10 10 10 14 14 14 46 46 46
36505-231 231 231 190 190 190 6 6 6 70 70 70
36506- 90 90 90 238 238 238 158 158 158 2 2 6
36507- 2 2 6 2 2 6 2 2 6 2 2 6
36508- 70 70 70 58 58 58 22 22 22 6 6 6
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 1 0 0 0
36516- 0 0 1 0 0 1 0 0 1 0 0 0
36517- 0 0 0 0 0 0 0 0 0 0 0 0
36518- 0 0 0 0 0 0 0 0 0 0 0 0
36519- 0 0 0 0 0 0 0 0 0 0 0 0
36520- 0 0 0 0 0 0 0 0 0 0 0 0
36521- 0 0 0 0 0 0 0 0 0 14 14 14
36522- 42 42 42 86 86 86 6 6 6 116 116 116
36523-106 106 106 6 6 6 70 70 70 149 149 149
36524-128 128 128 18 18 18 38 38 38 54 54 54
36525-221 221 221 106 106 106 2 2 6 14 14 14
36526- 46 46 46 190 190 190 198 198 198 2 2 6
36527- 2 2 6 2 2 6 2 2 6 2 2 6
36528- 74 74 74 62 62 62 22 22 22 6 6 6
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 1 0 0 0
36536- 0 0 1 0 0 0 0 0 1 0 0 0
36537- 0 0 0 0 0 0 0 0 0 0 0 0
36538- 0 0 0 0 0 0 0 0 0 0 0 0
36539- 0 0 0 0 0 0 0 0 0 0 0 0
36540- 0 0 0 0 0 0 0 0 0 0 0 0
36541- 0 0 0 0 0 0 0 0 0 14 14 14
36542- 42 42 42 94 94 94 14 14 14 101 101 101
36543-128 128 128 2 2 6 18 18 18 116 116 116
36544-118 98 46 121 92 8 121 92 8 98 78 10
36545-162 162 162 106 106 106 2 2 6 2 2 6
36546- 2 2 6 195 195 195 195 195 195 6 6 6
36547- 2 2 6 2 2 6 2 2 6 2 2 6
36548- 74 74 74 62 62 62 22 22 22 6 6 6
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 1 0 0 1
36556- 0 0 1 0 0 0 0 0 1 0 0 0
36557- 0 0 0 0 0 0 0 0 0 0 0 0
36558- 0 0 0 0 0 0 0 0 0 0 0 0
36559- 0 0 0 0 0 0 0 0 0 0 0 0
36560- 0 0 0 0 0 0 0 0 0 0 0 0
36561- 0 0 0 0 0 0 0 0 0 10 10 10
36562- 38 38 38 90 90 90 14 14 14 58 58 58
36563-210 210 210 26 26 26 54 38 6 154 114 10
36564-226 170 11 236 186 11 225 175 15 184 144 12
36565-215 174 15 175 146 61 37 26 9 2 2 6
36566- 70 70 70 246 246 246 138 138 138 2 2 6
36567- 2 2 6 2 2 6 2 2 6 2 2 6
36568- 70 70 70 66 66 66 26 26 26 6 6 6
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 0 0 0 0 0 0 0 0 0
36578- 0 0 0 0 0 0 0 0 0 0 0 0
36579- 0 0 0 0 0 0 0 0 0 0 0 0
36580- 0 0 0 0 0 0 0 0 0 0 0 0
36581- 0 0 0 0 0 0 0 0 0 10 10 10
36582- 38 38 38 86 86 86 14 14 14 10 10 10
36583-195 195 195 188 164 115 192 133 9 225 175 15
36584-239 182 13 234 190 10 232 195 16 232 200 30
36585-245 207 45 241 208 19 232 195 16 184 144 12
36586-218 194 134 211 206 186 42 42 42 2 2 6
36587- 2 2 6 2 2 6 2 2 6 2 2 6
36588- 50 50 50 74 74 74 30 30 30 6 6 6
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 0 0 0 0 0 0 0 0 0 0 0 0
36598- 0 0 0 0 0 0 0 0 0 0 0 0
36599- 0 0 0 0 0 0 0 0 0 0 0 0
36600- 0 0 0 0 0 0 0 0 0 0 0 0
36601- 0 0 0 0 0 0 0 0 0 10 10 10
36602- 34 34 34 86 86 86 14 14 14 2 2 6
36603-121 87 25 192 133 9 219 162 10 239 182 13
36604-236 186 11 232 195 16 241 208 19 244 214 54
36605-246 218 60 246 218 38 246 215 20 241 208 19
36606-241 208 19 226 184 13 121 87 25 2 2 6
36607- 2 2 6 2 2 6 2 2 6 2 2 6
36608- 50 50 50 82 82 82 34 34 34 10 10 10
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 0 0 0
36617- 0 0 0 0 0 0 0 0 0 0 0 0
36618- 0 0 0 0 0 0 0 0 0 0 0 0
36619- 0 0 0 0 0 0 0 0 0 0 0 0
36620- 0 0 0 0 0 0 0 0 0 0 0 0
36621- 0 0 0 0 0 0 0 0 0 10 10 10
36622- 34 34 34 82 82 82 30 30 30 61 42 6
36623-180 123 7 206 145 10 230 174 11 239 182 13
36624-234 190 10 238 202 15 241 208 19 246 218 74
36625-246 218 38 246 215 20 246 215 20 246 215 20
36626-226 184 13 215 174 15 184 144 12 6 6 6
36627- 2 2 6 2 2 6 2 2 6 2 2 6
36628- 26 26 26 94 94 94 42 42 42 14 14 14
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 0 0 0
36637- 0 0 0 0 0 0 0 0 0 0 0 0
36638- 0 0 0 0 0 0 0 0 0 0 0 0
36639- 0 0 0 0 0 0 0 0 0 0 0 0
36640- 0 0 0 0 0 0 0 0 0 0 0 0
36641- 0 0 0 0 0 0 0 0 0 10 10 10
36642- 30 30 30 78 78 78 50 50 50 104 69 6
36643-192 133 9 216 158 10 236 178 12 236 186 11
36644-232 195 16 241 208 19 244 214 54 245 215 43
36645-246 215 20 246 215 20 241 208 19 198 155 10
36646-200 144 11 216 158 10 156 118 10 2 2 6
36647- 2 2 6 2 2 6 2 2 6 2 2 6
36648- 6 6 6 90 90 90 54 54 54 18 18 18
36649- 6 6 6 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 0 0 0
36658- 0 0 0 0 0 0 0 0 0 0 0 0
36659- 0 0 0 0 0 0 0 0 0 0 0 0
36660- 0 0 0 0 0 0 0 0 0 0 0 0
36661- 0 0 0 0 0 0 0 0 0 10 10 10
36662- 30 30 30 78 78 78 46 46 46 22 22 22
36663-137 92 6 210 162 10 239 182 13 238 190 10
36664-238 202 15 241 208 19 246 215 20 246 215 20
36665-241 208 19 203 166 17 185 133 11 210 150 10
36666-216 158 10 210 150 10 102 78 10 2 2 6
36667- 6 6 6 54 54 54 14 14 14 2 2 6
36668- 2 2 6 62 62 62 74 74 74 30 30 30
36669- 10 10 10 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 0 0 0
36678- 0 0 0 0 0 0 0 0 0 0 0 0
36679- 0 0 0 0 0 0 0 0 0 0 0 0
36680- 0 0 0 0 0 0 0 0 0 0 0 0
36681- 0 0 0 0 0 0 0 0 0 10 10 10
36682- 34 34 34 78 78 78 50 50 50 6 6 6
36683- 94 70 30 139 102 15 190 146 13 226 184 13
36684-232 200 30 232 195 16 215 174 15 190 146 13
36685-168 122 10 192 133 9 210 150 10 213 154 11
36686-202 150 34 182 157 106 101 98 89 2 2 6
36687- 2 2 6 78 78 78 116 116 116 58 58 58
36688- 2 2 6 22 22 22 90 90 90 46 46 46
36689- 18 18 18 6 6 6 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 0 0 0
36698- 0 0 0 0 0 0 0 0 0 0 0 0
36699- 0 0 0 0 0 0 0 0 0 0 0 0
36700- 0 0 0 0 0 0 0 0 0 0 0 0
36701- 0 0 0 0 0 0 0 0 0 10 10 10
36702- 38 38 38 86 86 86 50 50 50 6 6 6
36703-128 128 128 174 154 114 156 107 11 168 122 10
36704-198 155 10 184 144 12 197 138 11 200 144 11
36705-206 145 10 206 145 10 197 138 11 188 164 115
36706-195 195 195 198 198 198 174 174 174 14 14 14
36707- 2 2 6 22 22 22 116 116 116 116 116 116
36708- 22 22 22 2 2 6 74 74 74 70 70 70
36709- 30 30 30 10 10 10 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 0 0 0
36718- 0 0 0 0 0 0 0 0 0 0 0 0
36719- 0 0 0 0 0 0 0 0 0 0 0 0
36720- 0 0 0 0 0 0 0 0 0 0 0 0
36721- 0 0 0 0 0 0 6 6 6 18 18 18
36722- 50 50 50 101 101 101 26 26 26 10 10 10
36723-138 138 138 190 190 190 174 154 114 156 107 11
36724-197 138 11 200 144 11 197 138 11 192 133 9
36725-180 123 7 190 142 34 190 178 144 187 187 187
36726-202 202 202 221 221 221 214 214 214 66 66 66
36727- 2 2 6 2 2 6 50 50 50 62 62 62
36728- 6 6 6 2 2 6 10 10 10 90 90 90
36729- 50 50 50 18 18 18 6 6 6 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 0 0 0
36738- 0 0 0 0 0 0 0 0 0 0 0 0
36739- 0 0 0 0 0 0 0 0 0 0 0 0
36740- 0 0 0 0 0 0 0 0 0 0 0 0
36741- 0 0 0 0 0 0 10 10 10 34 34 34
36742- 74 74 74 74 74 74 2 2 6 6 6 6
36743-144 144 144 198 198 198 190 190 190 178 166 146
36744-154 121 60 156 107 11 156 107 11 168 124 44
36745-174 154 114 187 187 187 190 190 190 210 210 210
36746-246 246 246 253 253 253 253 253 253 182 182 182
36747- 6 6 6 2 2 6 2 2 6 2 2 6
36748- 2 2 6 2 2 6 2 2 6 62 62 62
36749- 74 74 74 34 34 34 14 14 14 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 0 0 0
36758- 0 0 0 0 0 0 0 0 0 0 0 0
36759- 0 0 0 0 0 0 0 0 0 0 0 0
36760- 0 0 0 0 0 0 0 0 0 0 0 0
36761- 0 0 0 10 10 10 22 22 22 54 54 54
36762- 94 94 94 18 18 18 2 2 6 46 46 46
36763-234 234 234 221 221 221 190 190 190 190 190 190
36764-190 190 190 187 187 187 187 187 187 190 190 190
36765-190 190 190 195 195 195 214 214 214 242 242 242
36766-253 253 253 253 253 253 253 253 253 253 253 253
36767- 82 82 82 2 2 6 2 2 6 2 2 6
36768- 2 2 6 2 2 6 2 2 6 14 14 14
36769- 86 86 86 54 54 54 22 22 22 6 6 6
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 0 0 0
36778- 0 0 0 0 0 0 0 0 0 0 0 0
36779- 0 0 0 0 0 0 0 0 0 0 0 0
36780- 0 0 0 0 0 0 0 0 0 0 0 0
36781- 6 6 6 18 18 18 46 46 46 90 90 90
36782- 46 46 46 18 18 18 6 6 6 182 182 182
36783-253 253 253 246 246 246 206 206 206 190 190 190
36784-190 190 190 190 190 190 190 190 190 190 190 190
36785-206 206 206 231 231 231 250 250 250 253 253 253
36786-253 253 253 253 253 253 253 253 253 253 253 253
36787-202 202 202 14 14 14 2 2 6 2 2 6
36788- 2 2 6 2 2 6 2 2 6 2 2 6
36789- 42 42 42 86 86 86 42 42 42 18 18 18
36790- 6 6 6 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 0 0 0
36798- 0 0 0 0 0 0 0 0 0 0 0 0
36799- 0 0 0 0 0 0 0 0 0 0 0 0
36800- 0 0 0 0 0 0 0 0 0 6 6 6
36801- 14 14 14 38 38 38 74 74 74 66 66 66
36802- 2 2 6 6 6 6 90 90 90 250 250 250
36803-253 253 253 253 253 253 238 238 238 198 198 198
36804-190 190 190 190 190 190 195 195 195 221 221 221
36805-246 246 246 253 253 253 253 253 253 253 253 253
36806-253 253 253 253 253 253 253 253 253 253 253 253
36807-253 253 253 82 82 82 2 2 6 2 2 6
36808- 2 2 6 2 2 6 2 2 6 2 2 6
36809- 2 2 6 78 78 78 70 70 70 34 34 34
36810- 14 14 14 6 6 6 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 0 0 0
36818- 0 0 0 0 0 0 0 0 0 0 0 0
36819- 0 0 0 0 0 0 0 0 0 0 0 0
36820- 0 0 0 0 0 0 0 0 0 14 14 14
36821- 34 34 34 66 66 66 78 78 78 6 6 6
36822- 2 2 6 18 18 18 218 218 218 253 253 253
36823-253 253 253 253 253 253 253 253 253 246 246 246
36824-226 226 226 231 231 231 246 246 246 253 253 253
36825-253 253 253 253 253 253 253 253 253 253 253 253
36826-253 253 253 253 253 253 253 253 253 253 253 253
36827-253 253 253 178 178 178 2 2 6 2 2 6
36828- 2 2 6 2 2 6 2 2 6 2 2 6
36829- 2 2 6 18 18 18 90 90 90 62 62 62
36830- 30 30 30 10 10 10 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 0 0 0
36838- 0 0 0 0 0 0 0 0 0 0 0 0
36839- 0 0 0 0 0 0 0 0 0 0 0 0
36840- 0 0 0 0 0 0 10 10 10 26 26 26
36841- 58 58 58 90 90 90 18 18 18 2 2 6
36842- 2 2 6 110 110 110 253 253 253 253 253 253
36843-253 253 253 253 253 253 253 253 253 253 253 253
36844-250 250 250 253 253 253 253 253 253 253 253 253
36845-253 253 253 253 253 253 253 253 253 253 253 253
36846-253 253 253 253 253 253 253 253 253 253 253 253
36847-253 253 253 231 231 231 18 18 18 2 2 6
36848- 2 2 6 2 2 6 2 2 6 2 2 6
36849- 2 2 6 2 2 6 18 18 18 94 94 94
36850- 54 54 54 26 26 26 10 10 10 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 0 0 0
36858- 0 0 0 0 0 0 0 0 0 0 0 0
36859- 0 0 0 0 0 0 0 0 0 0 0 0
36860- 0 0 0 6 6 6 22 22 22 50 50 50
36861- 90 90 90 26 26 26 2 2 6 2 2 6
36862- 14 14 14 195 195 195 250 250 250 253 253 253
36863-253 253 253 253 253 253 253 253 253 253 253 253
36864-253 253 253 253 253 253 253 253 253 253 253 253
36865-253 253 253 253 253 253 253 253 253 253 253 253
36866-253 253 253 253 253 253 253 253 253 253 253 253
36867-250 250 250 242 242 242 54 54 54 2 2 6
36868- 2 2 6 2 2 6 2 2 6 2 2 6
36869- 2 2 6 2 2 6 2 2 6 38 38 38
36870- 86 86 86 50 50 50 22 22 22 6 6 6
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 0 0 0
36878- 0 0 0 0 0 0 0 0 0 0 0 0
36879- 0 0 0 0 0 0 0 0 0 0 0 0
36880- 6 6 6 14 14 14 38 38 38 82 82 82
36881- 34 34 34 2 2 6 2 2 6 2 2 6
36882- 42 42 42 195 195 195 246 246 246 253 253 253
36883-253 253 253 253 253 253 253 253 253 250 250 250
36884-242 242 242 242 242 242 250 250 250 253 253 253
36885-253 253 253 253 253 253 253 253 253 253 253 253
36886-253 253 253 250 250 250 246 246 246 238 238 238
36887-226 226 226 231 231 231 101 101 101 6 6 6
36888- 2 2 6 2 2 6 2 2 6 2 2 6
36889- 2 2 6 2 2 6 2 2 6 2 2 6
36890- 38 38 38 82 82 82 42 42 42 14 14 14
36891- 6 6 6 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 0 0 0 0 0 0
36898- 0 0 0 0 0 0 0 0 0 0 0 0
36899- 0 0 0 0 0 0 0 0 0 0 0 0
36900- 10 10 10 26 26 26 62 62 62 66 66 66
36901- 2 2 6 2 2 6 2 2 6 6 6 6
36902- 70 70 70 170 170 170 206 206 206 234 234 234
36903-246 246 246 250 250 250 250 250 250 238 238 238
36904-226 226 226 231 231 231 238 238 238 250 250 250
36905-250 250 250 250 250 250 246 246 246 231 231 231
36906-214 214 214 206 206 206 202 202 202 202 202 202
36907-198 198 198 202 202 202 182 182 182 18 18 18
36908- 2 2 6 2 2 6 2 2 6 2 2 6
36909- 2 2 6 2 2 6 2 2 6 2 2 6
36910- 2 2 6 62 62 62 66 66 66 30 30 30
36911- 10 10 10 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 0 0 0 0 0 0
36918- 0 0 0 0 0 0 0 0 0 0 0 0
36919- 0 0 0 0 0 0 0 0 0 0 0 0
36920- 14 14 14 42 42 42 82 82 82 18 18 18
36921- 2 2 6 2 2 6 2 2 6 10 10 10
36922- 94 94 94 182 182 182 218 218 218 242 242 242
36923-250 250 250 253 253 253 253 253 253 250 250 250
36924-234 234 234 253 253 253 253 253 253 253 253 253
36925-253 253 253 253 253 253 253 253 253 246 246 246
36926-238 238 238 226 226 226 210 210 210 202 202 202
36927-195 195 195 195 195 195 210 210 210 158 158 158
36928- 6 6 6 14 14 14 50 50 50 14 14 14
36929- 2 2 6 2 2 6 2 2 6 2 2 6
36930- 2 2 6 6 6 6 86 86 86 46 46 46
36931- 18 18 18 6 6 6 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 0 0 0 0 0 0 0 0 0
36938- 0 0 0 0 0 0 0 0 0 0 0 0
36939- 0 0 0 0 0 0 0 0 0 6 6 6
36940- 22 22 22 54 54 54 70 70 70 2 2 6
36941- 2 2 6 10 10 10 2 2 6 22 22 22
36942-166 166 166 231 231 231 250 250 250 253 253 253
36943-253 253 253 253 253 253 253 253 253 250 250 250
36944-242 242 242 253 253 253 253 253 253 253 253 253
36945-253 253 253 253 253 253 253 253 253 253 253 253
36946-253 253 253 253 253 253 253 253 253 246 246 246
36947-231 231 231 206 206 206 198 198 198 226 226 226
36948- 94 94 94 2 2 6 6 6 6 38 38 38
36949- 30 30 30 2 2 6 2 2 6 2 2 6
36950- 2 2 6 2 2 6 62 62 62 66 66 66
36951- 26 26 26 10 10 10 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 0 0 0 0 0 0 0 0 0 0 0 0
36958- 0 0 0 0 0 0 0 0 0 0 0 0
36959- 0 0 0 0 0 0 0 0 0 10 10 10
36960- 30 30 30 74 74 74 50 50 50 2 2 6
36961- 26 26 26 26 26 26 2 2 6 106 106 106
36962-238 238 238 253 253 253 253 253 253 253 253 253
36963-253 253 253 253 253 253 253 253 253 253 253 253
36964-253 253 253 253 253 253 253 253 253 253 253 253
36965-253 253 253 253 253 253 253 253 253 253 253 253
36966-253 253 253 253 253 253 253 253 253 253 253 253
36967-253 253 253 246 246 246 218 218 218 202 202 202
36968-210 210 210 14 14 14 2 2 6 2 2 6
36969- 30 30 30 22 22 22 2 2 6 2 2 6
36970- 2 2 6 2 2 6 18 18 18 86 86 86
36971- 42 42 42 14 14 14 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 0 0 0
36977- 0 0 0 0 0 0 0 0 0 0 0 0
36978- 0 0 0 0 0 0 0 0 0 0 0 0
36979- 0 0 0 0 0 0 0 0 0 14 14 14
36980- 42 42 42 90 90 90 22 22 22 2 2 6
36981- 42 42 42 2 2 6 18 18 18 218 218 218
36982-253 253 253 253 253 253 253 253 253 253 253 253
36983-253 253 253 253 253 253 253 253 253 253 253 253
36984-253 253 253 253 253 253 253 253 253 253 253 253
36985-253 253 253 253 253 253 253 253 253 253 253 253
36986-253 253 253 253 253 253 253 253 253 253 253 253
36987-253 253 253 253 253 253 250 250 250 221 221 221
36988-218 218 218 101 101 101 2 2 6 14 14 14
36989- 18 18 18 38 38 38 10 10 10 2 2 6
36990- 2 2 6 2 2 6 2 2 6 78 78 78
36991- 58 58 58 22 22 22 6 6 6 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 0 0 0
36997- 0 0 0 0 0 0 0 0 0 0 0 0
36998- 0 0 0 0 0 0 0 0 0 0 0 0
36999- 0 0 0 0 0 0 6 6 6 18 18 18
37000- 54 54 54 82 82 82 2 2 6 26 26 26
37001- 22 22 22 2 2 6 123 123 123 253 253 253
37002-253 253 253 253 253 253 253 253 253 253 253 253
37003-253 253 253 253 253 253 253 253 253 253 253 253
37004-253 253 253 253 253 253 253 253 253 253 253 253
37005-253 253 253 253 253 253 253 253 253 253 253 253
37006-253 253 253 253 253 253 253 253 253 253 253 253
37007-253 253 253 253 253 253 253 253 253 250 250 250
37008-238 238 238 198 198 198 6 6 6 38 38 38
37009- 58 58 58 26 26 26 38 38 38 2 2 6
37010- 2 2 6 2 2 6 2 2 6 46 46 46
37011- 78 78 78 30 30 30 10 10 10 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 0 0 0 0 0 0
37017- 0 0 0 0 0 0 0 0 0 0 0 0
37018- 0 0 0 0 0 0 0 0 0 0 0 0
37019- 0 0 0 0 0 0 10 10 10 30 30 30
37020- 74 74 74 58 58 58 2 2 6 42 42 42
37021- 2 2 6 22 22 22 231 231 231 253 253 253
37022-253 253 253 253 253 253 253 253 253 253 253 253
37023-253 253 253 253 253 253 253 253 253 250 250 250
37024-253 253 253 253 253 253 253 253 253 253 253 253
37025-253 253 253 253 253 253 253 253 253 253 253 253
37026-253 253 253 253 253 253 253 253 253 253 253 253
37027-253 253 253 253 253 253 253 253 253 253 253 253
37028-253 253 253 246 246 246 46 46 46 38 38 38
37029- 42 42 42 14 14 14 38 38 38 14 14 14
37030- 2 2 6 2 2 6 2 2 6 6 6 6
37031- 86 86 86 46 46 46 14 14 14 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 0 0 0 0 0 0 0 0 0
37037- 0 0 0 0 0 0 0 0 0 0 0 0
37038- 0 0 0 0 0 0 0 0 0 0 0 0
37039- 0 0 0 6 6 6 14 14 14 42 42 42
37040- 90 90 90 18 18 18 18 18 18 26 26 26
37041- 2 2 6 116 116 116 253 253 253 253 253 253
37042-253 253 253 253 253 253 253 253 253 253 253 253
37043-253 253 253 253 253 253 250 250 250 238 238 238
37044-253 253 253 253 253 253 253 253 253 253 253 253
37045-253 253 253 253 253 253 253 253 253 253 253 253
37046-253 253 253 253 253 253 253 253 253 253 253 253
37047-253 253 253 253 253 253 253 253 253 253 253 253
37048-253 253 253 253 253 253 94 94 94 6 6 6
37049- 2 2 6 2 2 6 10 10 10 34 34 34
37050- 2 2 6 2 2 6 2 2 6 2 2 6
37051- 74 74 74 58 58 58 22 22 22 6 6 6
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 0 0 0 0 0 0 0 0 0 0 0 0
37057- 0 0 0 0 0 0 0 0 0 0 0 0
37058- 0 0 0 0 0 0 0 0 0 0 0 0
37059- 0 0 0 10 10 10 26 26 26 66 66 66
37060- 82 82 82 2 2 6 38 38 38 6 6 6
37061- 14 14 14 210 210 210 253 253 253 253 253 253
37062-253 253 253 253 253 253 253 253 253 253 253 253
37063-253 253 253 253 253 253 246 246 246 242 242 242
37064-253 253 253 253 253 253 253 253 253 253 253 253
37065-253 253 253 253 253 253 253 253 253 253 253 253
37066-253 253 253 253 253 253 253 253 253 253 253 253
37067-253 253 253 253 253 253 253 253 253 253 253 253
37068-253 253 253 253 253 253 144 144 144 2 2 6
37069- 2 2 6 2 2 6 2 2 6 46 46 46
37070- 2 2 6 2 2 6 2 2 6 2 2 6
37071- 42 42 42 74 74 74 30 30 30 10 10 10
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 0 0 0 0 0 0 0 0 0 0 0 0
37077- 0 0 0 0 0 0 0 0 0 0 0 0
37078- 0 0 0 0 0 0 0 0 0 0 0 0
37079- 6 6 6 14 14 14 42 42 42 90 90 90
37080- 26 26 26 6 6 6 42 42 42 2 2 6
37081- 74 74 74 250 250 250 253 253 253 253 253 253
37082-253 253 253 253 253 253 253 253 253 253 253 253
37083-253 253 253 253 253 253 242 242 242 242 242 242
37084-253 253 253 253 253 253 253 253 253 253 253 253
37085-253 253 253 253 253 253 253 253 253 253 253 253
37086-253 253 253 253 253 253 253 253 253 253 253 253
37087-253 253 253 253 253 253 253 253 253 253 253 253
37088-253 253 253 253 253 253 182 182 182 2 2 6
37089- 2 2 6 2 2 6 2 2 6 46 46 46
37090- 2 2 6 2 2 6 2 2 6 2 2 6
37091- 10 10 10 86 86 86 38 38 38 10 10 10
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 0 0 0 0 0 0 0 0 0 0 0 0
37097- 0 0 0 0 0 0 0 0 0 0 0 0
37098- 0 0 0 0 0 0 0 0 0 0 0 0
37099- 10 10 10 26 26 26 66 66 66 82 82 82
37100- 2 2 6 22 22 22 18 18 18 2 2 6
37101-149 149 149 253 253 253 253 253 253 253 253 253
37102-253 253 253 253 253 253 253 253 253 253 253 253
37103-253 253 253 253 253 253 234 234 234 242 242 242
37104-253 253 253 253 253 253 253 253 253 253 253 253
37105-253 253 253 253 253 253 253 253 253 253 253 253
37106-253 253 253 253 253 253 253 253 253 253 253 253
37107-253 253 253 253 253 253 253 253 253 253 253 253
37108-253 253 253 253 253 253 206 206 206 2 2 6
37109- 2 2 6 2 2 6 2 2 6 38 38 38
37110- 2 2 6 2 2 6 2 2 6 2 2 6
37111- 6 6 6 86 86 86 46 46 46 14 14 14
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 0 0 0
37116- 0 0 0 0 0 0 0 0 0 0 0 0
37117- 0 0 0 0 0 0 0 0 0 0 0 0
37118- 0 0 0 0 0 0 0 0 0 6 6 6
37119- 18 18 18 46 46 46 86 86 86 18 18 18
37120- 2 2 6 34 34 34 10 10 10 6 6 6
37121-210 210 210 253 253 253 253 253 253 253 253 253
37122-253 253 253 253 253 253 253 253 253 253 253 253
37123-253 253 253 253 253 253 234 234 234 242 242 242
37124-253 253 253 253 253 253 253 253 253 253 253 253
37125-253 253 253 253 253 253 253 253 253 253 253 253
37126-253 253 253 253 253 253 253 253 253 253 253 253
37127-253 253 253 253 253 253 253 253 253 253 253 253
37128-253 253 253 253 253 253 221 221 221 6 6 6
37129- 2 2 6 2 2 6 6 6 6 30 30 30
37130- 2 2 6 2 2 6 2 2 6 2 2 6
37131- 2 2 6 82 82 82 54 54 54 18 18 18
37132- 6 6 6 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 0 0 0 0 0 0 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 0 0 0
37136- 0 0 0 0 0 0 0 0 0 0 0 0
37137- 0 0 0 0 0 0 0 0 0 0 0 0
37138- 0 0 0 0 0 0 0 0 0 10 10 10
37139- 26 26 26 66 66 66 62 62 62 2 2 6
37140- 2 2 6 38 38 38 10 10 10 26 26 26
37141-238 238 238 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 253 253 253 231 231 231 238 238 238
37144-253 253 253 253 253 253 253 253 253 253 253 253
37145-253 253 253 253 253 253 253 253 253 253 253 253
37146-253 253 253 253 253 253 253 253 253 253 253 253
37147-253 253 253 253 253 253 253 253 253 253 253 253
37148-253 253 253 253 253 253 231 231 231 6 6 6
37149- 2 2 6 2 2 6 10 10 10 30 30 30
37150- 2 2 6 2 2 6 2 2 6 2 2 6
37151- 2 2 6 66 66 66 58 58 58 22 22 22
37152- 6 6 6 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 0 0 0 0 0 0 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 0 0 0
37156- 0 0 0 0 0 0 0 0 0 0 0 0
37157- 0 0 0 0 0 0 0 0 0 0 0 0
37158- 0 0 0 0 0 0 0 0 0 10 10 10
37159- 38 38 38 78 78 78 6 6 6 2 2 6
37160- 2 2 6 46 46 46 14 14 14 42 42 42
37161-246 246 246 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 253 253 253 231 231 231 242 242 242
37164-253 253 253 253 253 253 253 253 253 253 253 253
37165-253 253 253 253 253 253 253 253 253 253 253 253
37166-253 253 253 253 253 253 253 253 253 253 253 253
37167-253 253 253 253 253 253 253 253 253 253 253 253
37168-253 253 253 253 253 253 234 234 234 10 10 10
37169- 2 2 6 2 2 6 22 22 22 14 14 14
37170- 2 2 6 2 2 6 2 2 6 2 2 6
37171- 2 2 6 66 66 66 62 62 62 22 22 22
37172- 6 6 6 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 0 0 0 0 0 0 0 0 0 0 0 0
37175- 0 0 0 0 0 0 0 0 0 0 0 0
37176- 0 0 0 0 0 0 0 0 0 0 0 0
37177- 0 0 0 0 0 0 0 0 0 0 0 0
37178- 0 0 0 0 0 0 6 6 6 18 18 18
37179- 50 50 50 74 74 74 2 2 6 2 2 6
37180- 14 14 14 70 70 70 34 34 34 62 62 62
37181-250 250 250 253 253 253 253 253 253 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 253 253 253 231 231 231 246 246 246
37184-253 253 253 253 253 253 253 253 253 253 253 253
37185-253 253 253 253 253 253 253 253 253 253 253 253
37186-253 253 253 253 253 253 253 253 253 253 253 253
37187-253 253 253 253 253 253 253 253 253 253 253 253
37188-253 253 253 253 253 253 234 234 234 14 14 14
37189- 2 2 6 2 2 6 30 30 30 2 2 6
37190- 2 2 6 2 2 6 2 2 6 2 2 6
37191- 2 2 6 66 66 66 62 62 62 22 22 22
37192- 6 6 6 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 0 0 0
37194- 0 0 0 0 0 0 0 0 0 0 0 0
37195- 0 0 0 0 0 0 0 0 0 0 0 0
37196- 0 0 0 0 0 0 0 0 0 0 0 0
37197- 0 0 0 0 0 0 0 0 0 0 0 0
37198- 0 0 0 0 0 0 6 6 6 18 18 18
37199- 54 54 54 62 62 62 2 2 6 2 2 6
37200- 2 2 6 30 30 30 46 46 46 70 70 70
37201-250 250 250 253 253 253 253 253 253 253 253 253
37202-253 253 253 253 253 253 253 253 253 253 253 253
37203-253 253 253 253 253 253 231 231 231 246 246 246
37204-253 253 253 253 253 253 253 253 253 253 253 253
37205-253 253 253 253 253 253 253 253 253 253 253 253
37206-253 253 253 253 253 253 253 253 253 253 253 253
37207-253 253 253 253 253 253 253 253 253 253 253 253
37208-253 253 253 253 253 253 226 226 226 10 10 10
37209- 2 2 6 6 6 6 30 30 30 2 2 6
37210- 2 2 6 2 2 6 2 2 6 2 2 6
37211- 2 2 6 66 66 66 58 58 58 22 22 22
37212- 6 6 6 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 0 0 0
37214- 0 0 0 0 0 0 0 0 0 0 0 0
37215- 0 0 0 0 0 0 0 0 0 0 0 0
37216- 0 0 0 0 0 0 0 0 0 0 0 0
37217- 0 0 0 0 0 0 0 0 0 0 0 0
37218- 0 0 0 0 0 0 6 6 6 22 22 22
37219- 58 58 58 62 62 62 2 2 6 2 2 6
37220- 2 2 6 2 2 6 30 30 30 78 78 78
37221-250 250 250 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 253 253 253
37223-253 253 253 253 253 253 231 231 231 246 246 246
37224-253 253 253 253 253 253 253 253 253 253 253 253
37225-253 253 253 253 253 253 253 253 253 253 253 253
37226-253 253 253 253 253 253 253 253 253 253 253 253
37227-253 253 253 253 253 253 253 253 253 253 253 253
37228-253 253 253 253 253 253 206 206 206 2 2 6
37229- 22 22 22 34 34 34 18 14 6 22 22 22
37230- 26 26 26 18 18 18 6 6 6 2 2 6
37231- 2 2 6 82 82 82 54 54 54 18 18 18
37232- 6 6 6 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 0 0 0
37234- 0 0 0 0 0 0 0 0 0 0 0 0
37235- 0 0 0 0 0 0 0 0 0 0 0 0
37236- 0 0 0 0 0 0 0 0 0 0 0 0
37237- 0 0 0 0 0 0 0 0 0 0 0 0
37238- 0 0 0 0 0 0 6 6 6 26 26 26
37239- 62 62 62 106 106 106 74 54 14 185 133 11
37240-210 162 10 121 92 8 6 6 6 62 62 62
37241-238 238 238 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 253 253 253 231 231 231 246 246 246
37244-253 253 253 253 253 253 253 253 253 253 253 253
37245-253 253 253 253 253 253 253 253 253 253 253 253
37246-253 253 253 253 253 253 253 253 253 253 253 253
37247-253 253 253 253 253 253 253 253 253 253 253 253
37248-253 253 253 253 253 253 158 158 158 18 18 18
37249- 14 14 14 2 2 6 2 2 6 2 2 6
37250- 6 6 6 18 18 18 66 66 66 38 38 38
37251- 6 6 6 94 94 94 50 50 50 18 18 18
37252- 6 6 6 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 0 0 0 0 0 0
37254- 0 0 0 0 0 0 0 0 0 0 0 0
37255- 0 0 0 0 0 0 0 0 0 0 0 0
37256- 0 0 0 0 0 0 0 0 0 0 0 0
37257- 0 0 0 0 0 0 0 0 0 6 6 6
37258- 10 10 10 10 10 10 18 18 18 38 38 38
37259- 78 78 78 142 134 106 216 158 10 242 186 14
37260-246 190 14 246 190 14 156 118 10 10 10 10
37261- 90 90 90 238 238 238 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 253 253 253 231 231 231 250 250 250
37264-253 253 253 253 253 253 253 253 253 253 253 253
37265-253 253 253 253 253 253 253 253 253 253 253 253
37266-253 253 253 253 253 253 253 253 253 253 253 253
37267-253 253 253 253 253 253 253 253 253 246 230 190
37268-238 204 91 238 204 91 181 142 44 37 26 9
37269- 2 2 6 2 2 6 2 2 6 2 2 6
37270- 2 2 6 2 2 6 38 38 38 46 46 46
37271- 26 26 26 106 106 106 54 54 54 18 18 18
37272- 6 6 6 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 0 0 0 0 0 0
37274- 0 0 0 0 0 0 0 0 0 0 0 0
37275- 0 0 0 0 0 0 0 0 0 0 0 0
37276- 0 0 0 0 0 0 0 0 0 0 0 0
37277- 0 0 0 6 6 6 14 14 14 22 22 22
37278- 30 30 30 38 38 38 50 50 50 70 70 70
37279-106 106 106 190 142 34 226 170 11 242 186 14
37280-246 190 14 246 190 14 246 190 14 154 114 10
37281- 6 6 6 74 74 74 226 226 226 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-253 253 253 253 253 253 231 231 231 250 250 250
37284-253 253 253 253 253 253 253 253 253 253 253 253
37285-253 253 253 253 253 253 253 253 253 253 253 253
37286-253 253 253 253 253 253 253 253 253 253 253 253
37287-253 253 253 253 253 253 253 253 253 228 184 62
37288-241 196 14 241 208 19 232 195 16 38 30 10
37289- 2 2 6 2 2 6 2 2 6 2 2 6
37290- 2 2 6 6 6 6 30 30 30 26 26 26
37291-203 166 17 154 142 90 66 66 66 26 26 26
37292- 6 6 6 0 0 0 0 0 0 0 0 0
37293- 0 0 0 0 0 0 0 0 0 0 0 0
37294- 0 0 0 0 0 0 0 0 0 0 0 0
37295- 0 0 0 0 0 0 0 0 0 0 0 0
37296- 0 0 0 0 0 0 0 0 0 0 0 0
37297- 6 6 6 18 18 18 38 38 38 58 58 58
37298- 78 78 78 86 86 86 101 101 101 123 123 123
37299-175 146 61 210 150 10 234 174 13 246 186 14
37300-246 190 14 246 190 14 246 190 14 238 190 10
37301-102 78 10 2 2 6 46 46 46 198 198 198
37302-253 253 253 253 253 253 253 253 253 253 253 253
37303-253 253 253 253 253 253 234 234 234 242 242 242
37304-253 253 253 253 253 253 253 253 253 253 253 253
37305-253 253 253 253 253 253 253 253 253 253 253 253
37306-253 253 253 253 253 253 253 253 253 253 253 253
37307-253 253 253 253 253 253 253 253 253 224 178 62
37308-242 186 14 241 196 14 210 166 10 22 18 6
37309- 2 2 6 2 2 6 2 2 6 2 2 6
37310- 2 2 6 2 2 6 6 6 6 121 92 8
37311-238 202 15 232 195 16 82 82 82 34 34 34
37312- 10 10 10 0 0 0 0 0 0 0 0 0
37313- 0 0 0 0 0 0 0 0 0 0 0 0
37314- 0 0 0 0 0 0 0 0 0 0 0 0
37315- 0 0 0 0 0 0 0 0 0 0 0 0
37316- 0 0 0 0 0 0 0 0 0 0 0 0
37317- 14 14 14 38 38 38 70 70 70 154 122 46
37318-190 142 34 200 144 11 197 138 11 197 138 11
37319-213 154 11 226 170 11 242 186 14 246 190 14
37320-246 190 14 246 190 14 246 190 14 246 190 14
37321-225 175 15 46 32 6 2 2 6 22 22 22
37322-158 158 158 250 250 250 253 253 253 253 253 253
37323-253 253 253 253 253 253 253 253 253 253 253 253
37324-253 253 253 253 253 253 253 253 253 253 253 253
37325-253 253 253 253 253 253 253 253 253 253 253 253
37326-253 253 253 253 253 253 253 253 253 253 253 253
37327-253 253 253 250 250 250 242 242 242 224 178 62
37328-239 182 13 236 186 11 213 154 11 46 32 6
37329- 2 2 6 2 2 6 2 2 6 2 2 6
37330- 2 2 6 2 2 6 61 42 6 225 175 15
37331-238 190 10 236 186 11 112 100 78 42 42 42
37332- 14 14 14 0 0 0 0 0 0 0 0 0
37333- 0 0 0 0 0 0 0 0 0 0 0 0
37334- 0 0 0 0 0 0 0 0 0 0 0 0
37335- 0 0 0 0 0 0 0 0 0 0 0 0
37336- 0 0 0 0 0 0 0 0 0 6 6 6
37337- 22 22 22 54 54 54 154 122 46 213 154 11
37338-226 170 11 230 174 11 226 170 11 226 170 11
37339-236 178 12 242 186 14 246 190 14 246 190 14
37340-246 190 14 246 190 14 246 190 14 246 190 14
37341-241 196 14 184 144 12 10 10 10 2 2 6
37342- 6 6 6 116 116 116 242 242 242 253 253 253
37343-253 253 253 253 253 253 253 253 253 253 253 253
37344-253 253 253 253 253 253 253 253 253 253 253 253
37345-253 253 253 253 253 253 253 253 253 253 253 253
37346-253 253 253 253 253 253 253 253 253 253 253 253
37347-253 253 253 231 231 231 198 198 198 214 170 54
37348-236 178 12 236 178 12 210 150 10 137 92 6
37349- 18 14 6 2 2 6 2 2 6 2 2 6
37350- 6 6 6 70 47 6 200 144 11 236 178 12
37351-239 182 13 239 182 13 124 112 88 58 58 58
37352- 22 22 22 6 6 6 0 0 0 0 0 0
37353- 0 0 0 0 0 0 0 0 0 0 0 0
37354- 0 0 0 0 0 0 0 0 0 0 0 0
37355- 0 0 0 0 0 0 0 0 0 0 0 0
37356- 0 0 0 0 0 0 0 0 0 10 10 10
37357- 30 30 30 70 70 70 180 133 36 226 170 11
37358-239 182 13 242 186 14 242 186 14 246 186 14
37359-246 190 14 246 190 14 246 190 14 246 190 14
37360-246 190 14 246 190 14 246 190 14 246 190 14
37361-246 190 14 232 195 16 98 70 6 2 2 6
37362- 2 2 6 2 2 6 66 66 66 221 221 221
37363-253 253 253 253 253 253 253 253 253 253 253 253
37364-253 253 253 253 253 253 253 253 253 253 253 253
37365-253 253 253 253 253 253 253 253 253 253 253 253
37366-253 253 253 253 253 253 253 253 253 253 253 253
37367-253 253 253 206 206 206 198 198 198 214 166 58
37368-230 174 11 230 174 11 216 158 10 192 133 9
37369-163 110 8 116 81 8 102 78 10 116 81 8
37370-167 114 7 197 138 11 226 170 11 239 182 13
37371-242 186 14 242 186 14 162 146 94 78 78 78
37372- 34 34 34 14 14 14 6 6 6 0 0 0
37373- 0 0 0 0 0 0 0 0 0 0 0 0
37374- 0 0 0 0 0 0 0 0 0 0 0 0
37375- 0 0 0 0 0 0 0 0 0 0 0 0
37376- 0 0 0 0 0 0 0 0 0 6 6 6
37377- 30 30 30 78 78 78 190 142 34 226 170 11
37378-239 182 13 246 190 14 246 190 14 246 190 14
37379-246 190 14 246 190 14 246 190 14 246 190 14
37380-246 190 14 246 190 14 246 190 14 246 190 14
37381-246 190 14 241 196 14 203 166 17 22 18 6
37382- 2 2 6 2 2 6 2 2 6 38 38 38
37383-218 218 218 253 253 253 253 253 253 253 253 253
37384-253 253 253 253 253 253 253 253 253 253 253 253
37385-253 253 253 253 253 253 253 253 253 253 253 253
37386-253 253 253 253 253 253 253 253 253 253 253 253
37387-250 250 250 206 206 206 198 198 198 202 162 69
37388-226 170 11 236 178 12 224 166 10 210 150 10
37389-200 144 11 197 138 11 192 133 9 197 138 11
37390-210 150 10 226 170 11 242 186 14 246 190 14
37391-246 190 14 246 186 14 225 175 15 124 112 88
37392- 62 62 62 30 30 30 14 14 14 6 6 6
37393- 0 0 0 0 0 0 0 0 0 0 0 0
37394- 0 0 0 0 0 0 0 0 0 0 0 0
37395- 0 0 0 0 0 0 0 0 0 0 0 0
37396- 0 0 0 0 0 0 0 0 0 10 10 10
37397- 30 30 30 78 78 78 174 135 50 224 166 10
37398-239 182 13 246 190 14 246 190 14 246 190 14
37399-246 190 14 246 190 14 246 190 14 246 190 14
37400-246 190 14 246 190 14 246 190 14 246 190 14
37401-246 190 14 246 190 14 241 196 14 139 102 15
37402- 2 2 6 2 2 6 2 2 6 2 2 6
37403- 78 78 78 250 250 250 253 253 253 253 253 253
37404-253 253 253 253 253 253 253 253 253 253 253 253
37405-253 253 253 253 253 253 253 253 253 253 253 253
37406-253 253 253 253 253 253 253 253 253 253 253 253
37407-250 250 250 214 214 214 198 198 198 190 150 46
37408-219 162 10 236 178 12 234 174 13 224 166 10
37409-216 158 10 213 154 11 213 154 11 216 158 10
37410-226 170 11 239 182 13 246 190 14 246 190 14
37411-246 190 14 246 190 14 242 186 14 206 162 42
37412-101 101 101 58 58 58 30 30 30 14 14 14
37413- 6 6 6 0 0 0 0 0 0 0 0 0
37414- 0 0 0 0 0 0 0 0 0 0 0 0
37415- 0 0 0 0 0 0 0 0 0 0 0 0
37416- 0 0 0 0 0 0 0 0 0 10 10 10
37417- 30 30 30 74 74 74 174 135 50 216 158 10
37418-236 178 12 246 190 14 246 190 14 246 190 14
37419-246 190 14 246 190 14 246 190 14 246 190 14
37420-246 190 14 246 190 14 246 190 14 246 190 14
37421-246 190 14 246 190 14 241 196 14 226 184 13
37422- 61 42 6 2 2 6 2 2 6 2 2 6
37423- 22 22 22 238 238 238 253 253 253 253 253 253
37424-253 253 253 253 253 253 253 253 253 253 253 253
37425-253 253 253 253 253 253 253 253 253 253 253 253
37426-253 253 253 253 253 253 253 253 253 253 253 253
37427-253 253 253 226 226 226 187 187 187 180 133 36
37428-216 158 10 236 178 12 239 182 13 236 178 12
37429-230 174 11 226 170 11 226 170 11 230 174 11
37430-236 178 12 242 186 14 246 190 14 246 190 14
37431-246 190 14 246 190 14 246 186 14 239 182 13
37432-206 162 42 106 106 106 66 66 66 34 34 34
37433- 14 14 14 6 6 6 0 0 0 0 0 0
37434- 0 0 0 0 0 0 0 0 0 0 0 0
37435- 0 0 0 0 0 0 0 0 0 0 0 0
37436- 0 0 0 0 0 0 0 0 0 6 6 6
37437- 26 26 26 70 70 70 163 133 67 213 154 11
37438-236 178 12 246 190 14 246 190 14 246 190 14
37439-246 190 14 246 190 14 246 190 14 246 190 14
37440-246 190 14 246 190 14 246 190 14 246 190 14
37441-246 190 14 246 190 14 246 190 14 241 196 14
37442-190 146 13 18 14 6 2 2 6 2 2 6
37443- 46 46 46 246 246 246 253 253 253 253 253 253
37444-253 253 253 253 253 253 253 253 253 253 253 253
37445-253 253 253 253 253 253 253 253 253 253 253 253
37446-253 253 253 253 253 253 253 253 253 253 253 253
37447-253 253 253 221 221 221 86 86 86 156 107 11
37448-216 158 10 236 178 12 242 186 14 246 186 14
37449-242 186 14 239 182 13 239 182 13 242 186 14
37450-242 186 14 246 186 14 246 190 14 246 190 14
37451-246 190 14 246 190 14 246 190 14 246 190 14
37452-242 186 14 225 175 15 142 122 72 66 66 66
37453- 30 30 30 10 10 10 0 0 0 0 0 0
37454- 0 0 0 0 0 0 0 0 0 0 0 0
37455- 0 0 0 0 0 0 0 0 0 0 0 0
37456- 0 0 0 0 0 0 0 0 0 6 6 6
37457- 26 26 26 70 70 70 163 133 67 210 150 10
37458-236 178 12 246 190 14 246 190 14 246 190 14
37459-246 190 14 246 190 14 246 190 14 246 190 14
37460-246 190 14 246 190 14 246 190 14 246 190 14
37461-246 190 14 246 190 14 246 190 14 246 190 14
37462-232 195 16 121 92 8 34 34 34 106 106 106
37463-221 221 221 253 253 253 253 253 253 253 253 253
37464-253 253 253 253 253 253 253 253 253 253 253 253
37465-253 253 253 253 253 253 253 253 253 253 253 253
37466-253 253 253 253 253 253 253 253 253 253 253 253
37467-242 242 242 82 82 82 18 14 6 163 110 8
37468-216 158 10 236 178 12 242 186 14 246 190 14
37469-246 190 14 246 190 14 246 190 14 246 190 14
37470-246 190 14 246 190 14 246 190 14 246 190 14
37471-246 190 14 246 190 14 246 190 14 246 190 14
37472-246 190 14 246 190 14 242 186 14 163 133 67
37473- 46 46 46 18 18 18 6 6 6 0 0 0
37474- 0 0 0 0 0 0 0 0 0 0 0 0
37475- 0 0 0 0 0 0 0 0 0 0 0 0
37476- 0 0 0 0 0 0 0 0 0 10 10 10
37477- 30 30 30 78 78 78 163 133 67 210 150 10
37478-236 178 12 246 186 14 246 190 14 246 190 14
37479-246 190 14 246 190 14 246 190 14 246 190 14
37480-246 190 14 246 190 14 246 190 14 246 190 14
37481-246 190 14 246 190 14 246 190 14 246 190 14
37482-241 196 14 215 174 15 190 178 144 253 253 253
37483-253 253 253 253 253 253 253 253 253 253 253 253
37484-253 253 253 253 253 253 253 253 253 253 253 253
37485-253 253 253 253 253 253 253 253 253 253 253 253
37486-253 253 253 253 253 253 253 253 253 218 218 218
37487- 58 58 58 2 2 6 22 18 6 167 114 7
37488-216 158 10 236 178 12 246 186 14 246 190 14
37489-246 190 14 246 190 14 246 190 14 246 190 14
37490-246 190 14 246 190 14 246 190 14 246 190 14
37491-246 190 14 246 190 14 246 190 14 246 190 14
37492-246 190 14 246 186 14 242 186 14 190 150 46
37493- 54 54 54 22 22 22 6 6 6 0 0 0
37494- 0 0 0 0 0 0 0 0 0 0 0 0
37495- 0 0 0 0 0 0 0 0 0 0 0 0
37496- 0 0 0 0 0 0 0 0 0 14 14 14
37497- 38 38 38 86 86 86 180 133 36 213 154 11
37498-236 178 12 246 186 14 246 190 14 246 190 14
37499-246 190 14 246 190 14 246 190 14 246 190 14
37500-246 190 14 246 190 14 246 190 14 246 190 14
37501-246 190 14 246 190 14 246 190 14 246 190 14
37502-246 190 14 232 195 16 190 146 13 214 214 214
37503-253 253 253 253 253 253 253 253 253 253 253 253
37504-253 253 253 253 253 253 253 253 253 253 253 253
37505-253 253 253 253 253 253 253 253 253 253 253 253
37506-253 253 253 250 250 250 170 170 170 26 26 26
37507- 2 2 6 2 2 6 37 26 9 163 110 8
37508-219 162 10 239 182 13 246 186 14 246 190 14
37509-246 190 14 246 190 14 246 190 14 246 190 14
37510-246 190 14 246 190 14 246 190 14 246 190 14
37511-246 190 14 246 190 14 246 190 14 246 190 14
37512-246 186 14 236 178 12 224 166 10 142 122 72
37513- 46 46 46 18 18 18 6 6 6 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 0 0 0 0 0 0 0 0 0
37516- 0 0 0 0 0 0 6 6 6 18 18 18
37517- 50 50 50 109 106 95 192 133 9 224 166 10
37518-242 186 14 246 190 14 246 190 14 246 190 14
37519-246 190 14 246 190 14 246 190 14 246 190 14
37520-246 190 14 246 190 14 246 190 14 246 190 14
37521-246 190 14 246 190 14 246 190 14 246 190 14
37522-242 186 14 226 184 13 210 162 10 142 110 46
37523-226 226 226 253 253 253 253 253 253 253 253 253
37524-253 253 253 253 253 253 253 253 253 253 253 253
37525-253 253 253 253 253 253 253 253 253 253 253 253
37526-198 198 198 66 66 66 2 2 6 2 2 6
37527- 2 2 6 2 2 6 50 34 6 156 107 11
37528-219 162 10 239 182 13 246 186 14 246 190 14
37529-246 190 14 246 190 14 246 190 14 246 190 14
37530-246 190 14 246 190 14 246 190 14 246 190 14
37531-246 190 14 246 190 14 246 190 14 242 186 14
37532-234 174 13 213 154 11 154 122 46 66 66 66
37533- 30 30 30 10 10 10 0 0 0 0 0 0
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 0 0 0 0 0 0
37536- 0 0 0 0 0 0 6 6 6 22 22 22
37537- 58 58 58 154 121 60 206 145 10 234 174 13
37538-242 186 14 246 186 14 246 190 14 246 190 14
37539-246 190 14 246 190 14 246 190 14 246 190 14
37540-246 190 14 246 190 14 246 190 14 246 190 14
37541-246 190 14 246 190 14 246 190 14 246 190 14
37542-246 186 14 236 178 12 210 162 10 163 110 8
37543- 61 42 6 138 138 138 218 218 218 250 250 250
37544-253 253 253 253 253 253 253 253 253 250 250 250
37545-242 242 242 210 210 210 144 144 144 66 66 66
37546- 6 6 6 2 2 6 2 2 6 2 2 6
37547- 2 2 6 2 2 6 61 42 6 163 110 8
37548-216 158 10 236 178 12 246 190 14 246 190 14
37549-246 190 14 246 190 14 246 190 14 246 190 14
37550-246 190 14 246 190 14 246 190 14 246 190 14
37551-246 190 14 239 182 13 230 174 11 216 158 10
37552-190 142 34 124 112 88 70 70 70 38 38 38
37553- 18 18 18 6 6 6 0 0 0 0 0 0
37554- 0 0 0 0 0 0 0 0 0 0 0 0
37555- 0 0 0 0 0 0 0 0 0 0 0 0
37556- 0 0 0 0 0 0 6 6 6 22 22 22
37557- 62 62 62 168 124 44 206 145 10 224 166 10
37558-236 178 12 239 182 13 242 186 14 242 186 14
37559-246 186 14 246 190 14 246 190 14 246 190 14
37560-246 190 14 246 190 14 246 190 14 246 190 14
37561-246 190 14 246 190 14 246 190 14 246 190 14
37562-246 190 14 236 178 12 216 158 10 175 118 6
37563- 80 54 7 2 2 6 6 6 6 30 30 30
37564- 54 54 54 62 62 62 50 50 50 38 38 38
37565- 14 14 14 2 2 6 2 2 6 2 2 6
37566- 2 2 6 2 2 6 2 2 6 2 2 6
37567- 2 2 6 6 6 6 80 54 7 167 114 7
37568-213 154 11 236 178 12 246 190 14 246 190 14
37569-246 190 14 246 190 14 246 190 14 246 190 14
37570-246 190 14 242 186 14 239 182 13 239 182 13
37571-230 174 11 210 150 10 174 135 50 124 112 88
37572- 82 82 82 54 54 54 34 34 34 18 18 18
37573- 6 6 6 0 0 0 0 0 0 0 0 0
37574- 0 0 0 0 0 0 0 0 0 0 0 0
37575- 0 0 0 0 0 0 0 0 0 0 0 0
37576- 0 0 0 0 0 0 6 6 6 18 18 18
37577- 50 50 50 158 118 36 192 133 9 200 144 11
37578-216 158 10 219 162 10 224 166 10 226 170 11
37579-230 174 11 236 178 12 239 182 13 239 182 13
37580-242 186 14 246 186 14 246 190 14 246 190 14
37581-246 190 14 246 190 14 246 190 14 246 190 14
37582-246 186 14 230 174 11 210 150 10 163 110 8
37583-104 69 6 10 10 10 2 2 6 2 2 6
37584- 2 2 6 2 2 6 2 2 6 2 2 6
37585- 2 2 6 2 2 6 2 2 6 2 2 6
37586- 2 2 6 2 2 6 2 2 6 2 2 6
37587- 2 2 6 6 6 6 91 60 6 167 114 7
37588-206 145 10 230 174 11 242 186 14 246 190 14
37589-246 190 14 246 190 14 246 186 14 242 186 14
37590-239 182 13 230 174 11 224 166 10 213 154 11
37591-180 133 36 124 112 88 86 86 86 58 58 58
37592- 38 38 38 22 22 22 10 10 10 6 6 6
37593- 0 0 0 0 0 0 0 0 0 0 0 0
37594- 0 0 0 0 0 0 0 0 0 0 0 0
37595- 0 0 0 0 0 0 0 0 0 0 0 0
37596- 0 0 0 0 0 0 0 0 0 14 14 14
37597- 34 34 34 70 70 70 138 110 50 158 118 36
37598-167 114 7 180 123 7 192 133 9 197 138 11
37599-200 144 11 206 145 10 213 154 11 219 162 10
37600-224 166 10 230 174 11 239 182 13 242 186 14
37601-246 186 14 246 186 14 246 186 14 246 186 14
37602-239 182 13 216 158 10 185 133 11 152 99 6
37603-104 69 6 18 14 6 2 2 6 2 2 6
37604- 2 2 6 2 2 6 2 2 6 2 2 6
37605- 2 2 6 2 2 6 2 2 6 2 2 6
37606- 2 2 6 2 2 6 2 2 6 2 2 6
37607- 2 2 6 6 6 6 80 54 7 152 99 6
37608-192 133 9 219 162 10 236 178 12 239 182 13
37609-246 186 14 242 186 14 239 182 13 236 178 12
37610-224 166 10 206 145 10 192 133 9 154 121 60
37611- 94 94 94 62 62 62 42 42 42 22 22 22
37612- 14 14 14 6 6 6 0 0 0 0 0 0
37613- 0 0 0 0 0 0 0 0 0 0 0 0
37614- 0 0 0 0 0 0 0 0 0 0 0 0
37615- 0 0 0 0 0 0 0 0 0 0 0 0
37616- 0 0 0 0 0 0 0 0 0 6 6 6
37617- 18 18 18 34 34 34 58 58 58 78 78 78
37618-101 98 89 124 112 88 142 110 46 156 107 11
37619-163 110 8 167 114 7 175 118 6 180 123 7
37620-185 133 11 197 138 11 210 150 10 219 162 10
37621-226 170 11 236 178 12 236 178 12 234 174 13
37622-219 162 10 197 138 11 163 110 8 130 83 6
37623- 91 60 6 10 10 10 2 2 6 2 2 6
37624- 18 18 18 38 38 38 38 38 38 38 38 38
37625- 38 38 38 38 38 38 38 38 38 38 38 38
37626- 38 38 38 38 38 38 26 26 26 2 2 6
37627- 2 2 6 6 6 6 70 47 6 137 92 6
37628-175 118 6 200 144 11 219 162 10 230 174 11
37629-234 174 13 230 174 11 219 162 10 210 150 10
37630-192 133 9 163 110 8 124 112 88 82 82 82
37631- 50 50 50 30 30 30 14 14 14 6 6 6
37632- 0 0 0 0 0 0 0 0 0 0 0 0
37633- 0 0 0 0 0 0 0 0 0 0 0 0
37634- 0 0 0 0 0 0 0 0 0 0 0 0
37635- 0 0 0 0 0 0 0 0 0 0 0 0
37636- 0 0 0 0 0 0 0 0 0 0 0 0
37637- 6 6 6 14 14 14 22 22 22 34 34 34
37638- 42 42 42 58 58 58 74 74 74 86 86 86
37639-101 98 89 122 102 70 130 98 46 121 87 25
37640-137 92 6 152 99 6 163 110 8 180 123 7
37641-185 133 11 197 138 11 206 145 10 200 144 11
37642-180 123 7 156 107 11 130 83 6 104 69 6
37643- 50 34 6 54 54 54 110 110 110 101 98 89
37644- 86 86 86 82 82 82 78 78 78 78 78 78
37645- 78 78 78 78 78 78 78 78 78 78 78 78
37646- 78 78 78 82 82 82 86 86 86 94 94 94
37647-106 106 106 101 101 101 86 66 34 124 80 6
37648-156 107 11 180 123 7 192 133 9 200 144 11
37649-206 145 10 200 144 11 192 133 9 175 118 6
37650-139 102 15 109 106 95 70 70 70 42 42 42
37651- 22 22 22 10 10 10 0 0 0 0 0 0
37652- 0 0 0 0 0 0 0 0 0 0 0 0
37653- 0 0 0 0 0 0 0 0 0 0 0 0
37654- 0 0 0 0 0 0 0 0 0 0 0 0
37655- 0 0 0 0 0 0 0 0 0 0 0 0
37656- 0 0 0 0 0 0 0 0 0 0 0 0
37657- 0 0 0 0 0 0 6 6 6 10 10 10
37658- 14 14 14 22 22 22 30 30 30 38 38 38
37659- 50 50 50 62 62 62 74 74 74 90 90 90
37660-101 98 89 112 100 78 121 87 25 124 80 6
37661-137 92 6 152 99 6 152 99 6 152 99 6
37662-138 86 6 124 80 6 98 70 6 86 66 30
37663-101 98 89 82 82 82 58 58 58 46 46 46
37664- 38 38 38 34 34 34 34 34 34 34 34 34
37665- 34 34 34 34 34 34 34 34 34 34 34 34
37666- 34 34 34 34 34 34 38 38 38 42 42 42
37667- 54 54 54 82 82 82 94 86 76 91 60 6
37668-134 86 6 156 107 11 167 114 7 175 118 6
37669-175 118 6 167 114 7 152 99 6 121 87 25
37670-101 98 89 62 62 62 34 34 34 18 18 18
37671- 6 6 6 0 0 0 0 0 0 0 0 0
37672- 0 0 0 0 0 0 0 0 0 0 0 0
37673- 0 0 0 0 0 0 0 0 0 0 0 0
37674- 0 0 0 0 0 0 0 0 0 0 0 0
37675- 0 0 0 0 0 0 0 0 0 0 0 0
37676- 0 0 0 0 0 0 0 0 0 0 0 0
37677- 0 0 0 0 0 0 0 0 0 0 0 0
37678- 0 0 0 6 6 6 6 6 6 10 10 10
37679- 18 18 18 22 22 22 30 30 30 42 42 42
37680- 50 50 50 66 66 66 86 86 86 101 98 89
37681-106 86 58 98 70 6 104 69 6 104 69 6
37682-104 69 6 91 60 6 82 62 34 90 90 90
37683- 62 62 62 38 38 38 22 22 22 14 14 14
37684- 10 10 10 10 10 10 10 10 10 10 10 10
37685- 10 10 10 10 10 10 6 6 6 10 10 10
37686- 10 10 10 10 10 10 10 10 10 14 14 14
37687- 22 22 22 42 42 42 70 70 70 89 81 66
37688- 80 54 7 104 69 6 124 80 6 137 92 6
37689-134 86 6 116 81 8 100 82 52 86 86 86
37690- 58 58 58 30 30 30 14 14 14 6 6 6
37691- 0 0 0 0 0 0 0 0 0 0 0 0
37692- 0 0 0 0 0 0 0 0 0 0 0 0
37693- 0 0 0 0 0 0 0 0 0 0 0 0
37694- 0 0 0 0 0 0 0 0 0 0 0 0
37695- 0 0 0 0 0 0 0 0 0 0 0 0
37696- 0 0 0 0 0 0 0 0 0 0 0 0
37697- 0 0 0 0 0 0 0 0 0 0 0 0
37698- 0 0 0 0 0 0 0 0 0 0 0 0
37699- 0 0 0 6 6 6 10 10 10 14 14 14
37700- 18 18 18 26 26 26 38 38 38 54 54 54
37701- 70 70 70 86 86 86 94 86 76 89 81 66
37702- 89 81 66 86 86 86 74 74 74 50 50 50
37703- 30 30 30 14 14 14 6 6 6 0 0 0
37704- 0 0 0 0 0 0 0 0 0 0 0 0
37705- 0 0 0 0 0 0 0 0 0 0 0 0
37706- 0 0 0 0 0 0 0 0 0 0 0 0
37707- 6 6 6 18 18 18 34 34 34 58 58 58
37708- 82 82 82 89 81 66 89 81 66 89 81 66
37709- 94 86 66 94 86 76 74 74 74 50 50 50
37710- 26 26 26 14 14 14 6 6 6 0 0 0
37711- 0 0 0 0 0 0 0 0 0 0 0 0
37712- 0 0 0 0 0 0 0 0 0 0 0 0
37713- 0 0 0 0 0 0 0 0 0 0 0 0
37714- 0 0 0 0 0 0 0 0 0 0 0 0
37715- 0 0 0 0 0 0 0 0 0 0 0 0
37716- 0 0 0 0 0 0 0 0 0 0 0 0
37717- 0 0 0 0 0 0 0 0 0 0 0 0
37718- 0 0 0 0 0 0 0 0 0 0 0 0
37719- 0 0 0 0 0 0 0 0 0 0 0 0
37720- 6 6 6 6 6 6 14 14 14 18 18 18
37721- 30 30 30 38 38 38 46 46 46 54 54 54
37722- 50 50 50 42 42 42 30 30 30 18 18 18
37723- 10 10 10 0 0 0 0 0 0 0 0 0
37724- 0 0 0 0 0 0 0 0 0 0 0 0
37725- 0 0 0 0 0 0 0 0 0 0 0 0
37726- 0 0 0 0 0 0 0 0 0 0 0 0
37727- 0 0 0 6 6 6 14 14 14 26 26 26
37728- 38 38 38 50 50 50 58 58 58 58 58 58
37729- 54 54 54 42 42 42 30 30 30 18 18 18
37730- 10 10 10 0 0 0 0 0 0 0 0 0
37731- 0 0 0 0 0 0 0 0 0 0 0 0
37732- 0 0 0 0 0 0 0 0 0 0 0 0
37733- 0 0 0 0 0 0 0 0 0 0 0 0
37734- 0 0 0 0 0 0 0 0 0 0 0 0
37735- 0 0 0 0 0 0 0 0 0 0 0 0
37736- 0 0 0 0 0 0 0 0 0 0 0 0
37737- 0 0 0 0 0 0 0 0 0 0 0 0
37738- 0 0 0 0 0 0 0 0 0 0 0 0
37739- 0 0 0 0 0 0 0 0 0 0 0 0
37740- 0 0 0 0 0 0 0 0 0 6 6 6
37741- 6 6 6 10 10 10 14 14 14 18 18 18
37742- 18 18 18 14 14 14 10 10 10 6 6 6
37743- 0 0 0 0 0 0 0 0 0 0 0 0
37744- 0 0 0 0 0 0 0 0 0 0 0 0
37745- 0 0 0 0 0 0 0 0 0 0 0 0
37746- 0 0 0 0 0 0 0 0 0 0 0 0
37747- 0 0 0 0 0 0 0 0 0 6 6 6
37748- 14 14 14 18 18 18 22 22 22 22 22 22
37749- 18 18 18 14 14 14 10 10 10 6 6 6
37750- 0 0 0 0 0 0 0 0 0 0 0 0
37751- 0 0 0 0 0 0 0 0 0 0 0 0
37752- 0 0 0 0 0 0 0 0 0 0 0 0
37753- 0 0 0 0 0 0 0 0 0 0 0 0
37754- 0 0 0 0 0 0 0 0 0 0 0 0
37755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37768+4 4 4 4 4 4
37769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37782+4 4 4 4 4 4
37783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37796+4 4 4 4 4 4
37797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37810+4 4 4 4 4 4
37811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37824+4 4 4 4 4 4
37825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37838+4 4 4 4 4 4
37839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37843+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
37844+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
37845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37848+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
37849+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37850+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
37851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37852+4 4 4 4 4 4
37853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37857+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
37858+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
37859+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37862+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
37863+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
37864+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
37865+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37866+4 4 4 4 4 4
37867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37871+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
37872+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
37873+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37876+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
37877+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
37878+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
37879+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
37880+4 4 4 4 4 4
37881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37884+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
37885+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
37886+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
37887+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
37888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37889+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
37890+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
37891+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
37892+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
37893+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
37894+4 4 4 4 4 4
37895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37898+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
37899+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
37900+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
37901+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
37902+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37903+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
37904+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
37905+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
37906+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
37907+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
37908+4 4 4 4 4 4
37909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37912+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
37913+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
37914+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
37915+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
37916+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
37917+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
37918+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
37919+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
37920+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
37921+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
37922+4 4 4 4 4 4
37923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37925+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
37926+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
37927+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
37928+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
37929+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
37930+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
37931+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
37932+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
37933+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
37934+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
37935+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
37936+4 4 4 4 4 4
37937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37939+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
37940+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
37941+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
37942+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
37943+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
37944+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
37945+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
37946+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
37947+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
37948+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
37949+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
37950+4 4 4 4 4 4
37951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37953+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
37954+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
37955+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
37956+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
37957+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
37958+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
37959+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
37960+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
37961+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
37962+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
37963+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37964+4 4 4 4 4 4
37965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37967+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
37968+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
37969+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
37970+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
37971+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
37972+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
37973+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
37974+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
37975+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
37976+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
37977+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
37978+4 4 4 4 4 4
37979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37980+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
37981+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
37982+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
37983+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
37984+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
37985+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
37986+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
37987+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
37988+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
37989+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
37990+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
37991+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
37992+4 4 4 4 4 4
37993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37994+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
37995+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
37996+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
37997+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
37998+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
37999+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38000+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38001+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38002+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38003+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38004+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38005+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38006+0 0 0 4 4 4
38007+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38008+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38009+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38010+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38011+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38012+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38013+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38014+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38015+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38016+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38017+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38018+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38019+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38020+2 0 0 0 0 0
38021+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38022+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38023+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38024+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38025+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38026+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38027+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38028+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38029+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38030+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38031+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38032+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38033+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38034+37 38 37 0 0 0
38035+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38036+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38037+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38038+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38039+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38040+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38041+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38042+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38043+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38044+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38045+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38046+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38047+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38048+85 115 134 4 0 0
38049+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38050+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38051+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38052+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38053+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38054+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38055+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38056+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38057+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38058+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38059+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38060+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38061+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38062+60 73 81 4 0 0
38063+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38064+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38065+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38066+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38067+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38068+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38069+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38070+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38071+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38072+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38073+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38074+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38075+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38076+16 19 21 4 0 0
38077+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38078+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38079+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38080+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38081+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38082+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38083+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38084+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38085+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38086+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38087+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38088+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38089+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38090+4 0 0 4 3 3
38091+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38092+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38093+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38095+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38096+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38097+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38098+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38099+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38100+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38101+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38102+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38103+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38104+3 2 2 4 4 4
38105+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38106+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38107+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38108+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38109+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38110+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38111+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38112+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38113+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38114+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38115+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38116+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38117+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38118+4 4 4 4 4 4
38119+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38120+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38121+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38122+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38123+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38124+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38125+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38126+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38127+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38128+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38129+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38130+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38131+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38132+4 4 4 4 4 4
38133+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38134+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38135+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38136+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38137+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38138+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38139+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38140+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38141+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38142+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38143+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38144+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38145+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38146+5 5 5 5 5 5
38147+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38148+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38149+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38150+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38151+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38152+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38153+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38154+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38155+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38156+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38157+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38158+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38159+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38160+5 5 5 4 4 4
38161+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38162+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38163+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38164+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38165+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38166+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38167+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38168+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38169+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38170+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38171+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38172+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174+4 4 4 4 4 4
38175+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38176+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38177+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38178+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38179+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38180+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38181+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38182+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38183+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38184+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38185+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38186+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188+4 4 4 4 4 4
38189+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38190+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38191+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38192+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38193+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38194+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38195+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38196+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38197+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38198+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38199+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38202+4 4 4 4 4 4
38203+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38204+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38205+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38206+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38207+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38208+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38209+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38210+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38211+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38212+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38213+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38216+4 4 4 4 4 4
38217+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38218+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38219+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38220+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38221+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38222+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38223+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38224+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38225+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38226+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38227+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38230+4 4 4 4 4 4
38231+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38232+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38233+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38234+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38235+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38236+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38237+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38238+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38239+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38240+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38241+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244+4 4 4 4 4 4
38245+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38246+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38247+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38248+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38249+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38250+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38251+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38252+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38253+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38254+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38255+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258+4 4 4 4 4 4
38259+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38260+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38261+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38262+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38263+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38264+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38265+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38266+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38267+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38268+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38269+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272+4 4 4 4 4 4
38273+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38274+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38275+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38276+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38277+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38278+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38279+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38280+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38281+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38282+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38283+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286+4 4 4 4 4 4
38287+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38288+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38289+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38290+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38291+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38292+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38293+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38294+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38295+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38296+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38297+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300+4 4 4 4 4 4
38301+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38302+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38303+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38304+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38305+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38306+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38307+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38308+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38309+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38310+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38311+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314+4 4 4 4 4 4
38315+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38316+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38317+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38318+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38319+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38320+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38321+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38322+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38323+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38324+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38325+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328+4 4 4 4 4 4
38329+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38330+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38331+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38332+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38333+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38334+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38335+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38336+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38337+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38338+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38339+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342+4 4 4 4 4 4
38343+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38344+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38345+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38346+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38347+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38348+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38349+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38350+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38351+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38352+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38353+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38356+4 4 4 4 4 4
38357+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38358+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38359+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38360+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38361+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38362+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38363+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38364+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38365+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38366+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38367+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38370+4 4 4 4 4 4
38371+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38372+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38373+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38374+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38375+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38376+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38377+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38378+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38379+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38380+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38381+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38384+4 4 4 4 4 4
38385+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38386+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38387+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38388+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38389+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38390+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38391+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38392+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38393+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38394+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38395+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38398+4 4 4 4 4 4
38399+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38400+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38401+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38402+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38403+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38404+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38405+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38406+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38407+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38408+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38409+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38412+4 4 4 4 4 4
38413+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38414+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38415+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38416+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38417+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38418+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38419+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38420+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38421+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38422+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38423+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38426+4 4 4 4 4 4
38427+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38428+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38429+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38430+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38431+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38432+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38433+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38434+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38435+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38436+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38437+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38440+4 4 4 4 4 4
38441+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38442+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38443+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38444+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38445+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38446+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38447+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38448+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38449+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38450+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38451+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38454+4 4 4 4 4 4
38455+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38456+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38457+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38458+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38459+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38460+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38461+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38462+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38463+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38464+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38465+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38468+4 4 4 4 4 4
38469+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38470+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38471+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38472+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38473+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38474+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38475+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38476+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38477+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38478+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38479+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38482+4 4 4 4 4 4
38483+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38484+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38485+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38486+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38487+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38488+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38489+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38490+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38491+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38492+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38493+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38496+4 4 4 4 4 4
38497+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38498+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38499+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38500+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38501+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38502+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38503+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38504+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38505+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38506+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38507+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38510+4 4 4 4 4 4
38511+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38512+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38513+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38514+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38515+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38516+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38517+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38518+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38519+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38520+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38521+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38524+4 4 4 4 4 4
38525+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38526+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38527+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38528+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38529+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38530+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38531+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38532+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38533+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38534+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38535+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38538+4 4 4 4 4 4
38539+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38540+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38541+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38542+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38543+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38544+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38545+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38546+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38547+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38548+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38549+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38552+4 4 4 4 4 4
38553+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38554+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38555+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38556+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38557+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38558+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38559+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38560+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38561+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38562+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38563+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4
38567+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38568+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38569+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38570+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38571+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38572+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38573+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38574+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38575+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38576+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4
38581+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38582+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38583+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38584+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38585+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38586+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38587+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38588+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38589+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38590+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4
38595+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38596+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38597+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38598+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38599+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38600+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38601+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38602+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38603+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38604+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4
38609+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38610+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38611+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38612+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38613+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38614+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38615+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38616+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38617+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38618+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4
38623+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38624+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38625+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38626+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38627+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38628+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38629+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38630+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38631+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4
38637+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38638+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38639+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38640+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38641+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38642+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38643+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38644+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38645+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4
38651+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38652+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38653+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38654+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38655+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38656+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38657+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38658+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38659+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4
38665+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38666+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38667+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38668+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38669+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38670+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38671+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38672+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38680+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38681+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38682+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38683+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38684+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38685+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38686+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38692+4 4 4 4 4 4
38693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38694+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38695+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38696+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38697+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38698+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38699+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38700+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38706+4 4 4 4 4 4
38707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38709+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38710+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38711+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38712+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38713+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38714+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38720+4 4 4 4 4 4
38721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38723+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38724+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38725+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38726+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38727+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38728+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38734+4 4 4 4 4 4
38735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38738+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38739+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38740+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38741+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38742+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38748+4 4 4 4 4 4
38749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38752+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38753+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38754+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38755+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38762+4 4 4 4 4 4
38763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38766+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38767+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38768+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38769+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38776+4 4 4 4 4 4
38777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38780+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38781+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38782+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38783+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38790+4 4 4 4 4 4
38791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38794+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38795+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38796+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
38797+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
38798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38804+4 4 4 4 4 4
38805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38809+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
38810+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38811+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38818+4 4 4 4 4 4
38819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
38824+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
38825+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38832+4 4 4 4 4 4
38833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
38838+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
38839+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38846+4 4 4 4 4 4
38847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
38852+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
38853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38860+4 4 4 4 4 4
38861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38866+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
38867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38874+4 4 4 4 4 4
38875diff -urNp linux-3.0.7/drivers/video/udlfb.c linux-3.0.7/drivers/video/udlfb.c
38876--- linux-3.0.7/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
38877+++ linux-3.0.7/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
38878@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
38879 dlfb_urb_completion(urb);
38880
38881 error:
38882- atomic_add(bytes_sent, &dev->bytes_sent);
38883- atomic_add(bytes_identical, &dev->bytes_identical);
38884- atomic_add(width*height*2, &dev->bytes_rendered);
38885+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38886+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38887+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
38888 end_cycles = get_cycles();
38889- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38890+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38891 >> 10)), /* Kcycles */
38892 &dev->cpu_kcycles_used);
38893
38894@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
38895 dlfb_urb_completion(urb);
38896
38897 error:
38898- atomic_add(bytes_sent, &dev->bytes_sent);
38899- atomic_add(bytes_identical, &dev->bytes_identical);
38900- atomic_add(bytes_rendered, &dev->bytes_rendered);
38901+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38902+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38903+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
38904 end_cycles = get_cycles();
38905- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38906+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38907 >> 10)), /* Kcycles */
38908 &dev->cpu_kcycles_used);
38909 }
38910@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
38911 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38912 struct dlfb_data *dev = fb_info->par;
38913 return snprintf(buf, PAGE_SIZE, "%u\n",
38914- atomic_read(&dev->bytes_rendered));
38915+ atomic_read_unchecked(&dev->bytes_rendered));
38916 }
38917
38918 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
38919@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
38920 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38921 struct dlfb_data *dev = fb_info->par;
38922 return snprintf(buf, PAGE_SIZE, "%u\n",
38923- atomic_read(&dev->bytes_identical));
38924+ atomic_read_unchecked(&dev->bytes_identical));
38925 }
38926
38927 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
38928@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
38929 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38930 struct dlfb_data *dev = fb_info->par;
38931 return snprintf(buf, PAGE_SIZE, "%u\n",
38932- atomic_read(&dev->bytes_sent));
38933+ atomic_read_unchecked(&dev->bytes_sent));
38934 }
38935
38936 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
38937@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
38938 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38939 struct dlfb_data *dev = fb_info->par;
38940 return snprintf(buf, PAGE_SIZE, "%u\n",
38941- atomic_read(&dev->cpu_kcycles_used));
38942+ atomic_read_unchecked(&dev->cpu_kcycles_used));
38943 }
38944
38945 static ssize_t edid_show(
38946@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
38947 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38948 struct dlfb_data *dev = fb_info->par;
38949
38950- atomic_set(&dev->bytes_rendered, 0);
38951- atomic_set(&dev->bytes_identical, 0);
38952- atomic_set(&dev->bytes_sent, 0);
38953- atomic_set(&dev->cpu_kcycles_used, 0);
38954+ atomic_set_unchecked(&dev->bytes_rendered, 0);
38955+ atomic_set_unchecked(&dev->bytes_identical, 0);
38956+ atomic_set_unchecked(&dev->bytes_sent, 0);
38957+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
38958
38959 return count;
38960 }
38961diff -urNp linux-3.0.7/drivers/video/uvesafb.c linux-3.0.7/drivers/video/uvesafb.c
38962--- linux-3.0.7/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
38963+++ linux-3.0.7/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
38964@@ -19,6 +19,7 @@
38965 #include <linux/io.h>
38966 #include <linux/mutex.h>
38967 #include <linux/slab.h>
38968+#include <linux/moduleloader.h>
38969 #include <video/edid.h>
38970 #include <video/uvesafb.h>
38971 #ifdef CONFIG_X86
38972@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
38973 NULL,
38974 };
38975
38976- return call_usermodehelper(v86d_path, argv, envp, 1);
38977+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38978 }
38979
38980 /*
38981@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
38982 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38983 par->pmi_setpal = par->ypan = 0;
38984 } else {
38985+
38986+#ifdef CONFIG_PAX_KERNEXEC
38987+#ifdef CONFIG_MODULES
38988+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38989+#endif
38990+ if (!par->pmi_code) {
38991+ par->pmi_setpal = par->ypan = 0;
38992+ return 0;
38993+ }
38994+#endif
38995+
38996 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38997 + task->t.regs.edi);
38998+
38999+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39000+ pax_open_kernel();
39001+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39002+ pax_close_kernel();
39003+
39004+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39005+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39006+#else
39007 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39008 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39009+#endif
39010+
39011 printk(KERN_INFO "uvesafb: protected mode interface info at "
39012 "%04x:%04x\n",
39013 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39014@@ -1821,6 +1844,11 @@ out:
39015 if (par->vbe_modes)
39016 kfree(par->vbe_modes);
39017
39018+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39019+ if (par->pmi_code)
39020+ module_free_exec(NULL, par->pmi_code);
39021+#endif
39022+
39023 framebuffer_release(info);
39024 return err;
39025 }
39026@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39027 kfree(par->vbe_state_orig);
39028 if (par->vbe_state_saved)
39029 kfree(par->vbe_state_saved);
39030+
39031+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39032+ if (par->pmi_code)
39033+ module_free_exec(NULL, par->pmi_code);
39034+#endif
39035+
39036 }
39037
39038 framebuffer_release(info);
39039diff -urNp linux-3.0.7/drivers/video/vesafb.c linux-3.0.7/drivers/video/vesafb.c
39040--- linux-3.0.7/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
39041+++ linux-3.0.7/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
39042@@ -9,6 +9,7 @@
39043 */
39044
39045 #include <linux/module.h>
39046+#include <linux/moduleloader.h>
39047 #include <linux/kernel.h>
39048 #include <linux/errno.h>
39049 #include <linux/string.h>
39050@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39051 static int vram_total __initdata; /* Set total amount of memory */
39052 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39053 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39054-static void (*pmi_start)(void) __read_mostly;
39055-static void (*pmi_pal) (void) __read_mostly;
39056+static void (*pmi_start)(void) __read_only;
39057+static void (*pmi_pal) (void) __read_only;
39058 static int depth __read_mostly;
39059 static int vga_compat __read_mostly;
39060 /* --------------------------------------------------------------------- */
39061@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39062 unsigned int size_vmode;
39063 unsigned int size_remap;
39064 unsigned int size_total;
39065+ void *pmi_code = NULL;
39066
39067 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39068 return -ENODEV;
39069@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39070 size_remap = size_total;
39071 vesafb_fix.smem_len = size_remap;
39072
39073-#ifndef __i386__
39074- screen_info.vesapm_seg = 0;
39075-#endif
39076-
39077 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39078 printk(KERN_WARNING
39079 "vesafb: cannot reserve video memory at 0x%lx\n",
39080@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39081 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39082 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39083
39084+#ifdef __i386__
39085+
39086+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39087+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39088+ if (!pmi_code)
39089+#elif !defined(CONFIG_PAX_KERNEXEC)
39090+ if (0)
39091+#endif
39092+
39093+#endif
39094+ screen_info.vesapm_seg = 0;
39095+
39096 if (screen_info.vesapm_seg) {
39097- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39098- screen_info.vesapm_seg,screen_info.vesapm_off);
39099+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39100+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39101 }
39102
39103 if (screen_info.vesapm_seg < 0xc000)
39104@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39105
39106 if (ypan || pmi_setpal) {
39107 unsigned short *pmi_base;
39108+
39109 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39110- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39111- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39112+
39113+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39114+ pax_open_kernel();
39115+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39116+#else
39117+ pmi_code = pmi_base;
39118+#endif
39119+
39120+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39121+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39122+
39123+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39124+ pmi_start = ktva_ktla(pmi_start);
39125+ pmi_pal = ktva_ktla(pmi_pal);
39126+ pax_close_kernel();
39127+#endif
39128+
39129 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39130 if (pmi_base[3]) {
39131 printk(KERN_INFO "vesafb: pmi: ports = ");
39132@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39133 info->node, info->fix.id);
39134 return 0;
39135 err:
39136+
39137+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39138+ module_free_exec(NULL, pmi_code);
39139+#endif
39140+
39141 if (info->screen_base)
39142 iounmap(info->screen_base);
39143 framebuffer_release(info);
39144diff -urNp linux-3.0.7/drivers/video/via/via_clock.h linux-3.0.7/drivers/video/via/via_clock.h
39145--- linux-3.0.7/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
39146+++ linux-3.0.7/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
39147@@ -56,7 +56,7 @@ struct via_clock {
39148
39149 void (*set_engine_pll_state)(u8 state);
39150 void (*set_engine_pll)(struct via_pll_config config);
39151-};
39152+} __no_const;
39153
39154
39155 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39156diff -urNp linux-3.0.7/drivers/virtio/virtio_balloon.c linux-3.0.7/drivers/virtio/virtio_balloon.c
39157--- linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
39158+++ linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
39159@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39160 struct sysinfo i;
39161 int idx = 0;
39162
39163+ pax_track_stack();
39164+
39165 all_vm_events(events);
39166 si_meminfo(&i);
39167
39168diff -urNp linux-3.0.7/fs/9p/vfs_inode.c linux-3.0.7/fs/9p/vfs_inode.c
39169--- linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:54:54.000000000 -0400
39170+++ linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:55:28.000000000 -0400
39171@@ -1264,7 +1264,7 @@ static void *v9fs_vfs_follow_link(struct
39172 void
39173 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39174 {
39175- char *s = nd_get_link(nd);
39176+ const char *s = nd_get_link(nd);
39177
39178 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39179 IS_ERR(s) ? "<error>" : s);
39180diff -urNp linux-3.0.7/fs/Kconfig.binfmt linux-3.0.7/fs/Kconfig.binfmt
39181--- linux-3.0.7/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
39182+++ linux-3.0.7/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
39183@@ -86,7 +86,7 @@ config HAVE_AOUT
39184
39185 config BINFMT_AOUT
39186 tristate "Kernel support for a.out and ECOFF binaries"
39187- depends on HAVE_AOUT
39188+ depends on HAVE_AOUT && BROKEN
39189 ---help---
39190 A.out (Assembler.OUTput) is a set of formats for libraries and
39191 executables used in the earliest versions of UNIX. Linux used
39192diff -urNp linux-3.0.7/fs/aio.c linux-3.0.7/fs/aio.c
39193--- linux-3.0.7/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
39194+++ linux-3.0.7/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
39195@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39196 size += sizeof(struct io_event) * nr_events;
39197 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39198
39199- if (nr_pages < 0)
39200+ if (nr_pages <= 0)
39201 return -EINVAL;
39202
39203 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39204@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39205 struct aio_timeout to;
39206 int retry = 0;
39207
39208+ pax_track_stack();
39209+
39210 /* needed to zero any padding within an entry (there shouldn't be
39211 * any, but C is fun!
39212 */
39213@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39214 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39215 {
39216 ssize_t ret;
39217+ struct iovec iovstack;
39218
39219 #ifdef CONFIG_COMPAT
39220 if (compat)
39221 ret = compat_rw_copy_check_uvector(type,
39222 (struct compat_iovec __user *)kiocb->ki_buf,
39223- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39224+ kiocb->ki_nbytes, 1, &iovstack,
39225 &kiocb->ki_iovec);
39226 else
39227 #endif
39228 ret = rw_copy_check_uvector(type,
39229 (struct iovec __user *)kiocb->ki_buf,
39230- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39231+ kiocb->ki_nbytes, 1, &iovstack,
39232 &kiocb->ki_iovec);
39233 if (ret < 0)
39234 goto out;
39235
39236+ if (kiocb->ki_iovec == &iovstack) {
39237+ kiocb->ki_inline_vec = iovstack;
39238+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39239+ }
39240 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39241 kiocb->ki_cur_seg = 0;
39242 /* ki_nbytes/left now reflect bytes instead of segs */
39243diff -urNp linux-3.0.7/fs/attr.c linux-3.0.7/fs/attr.c
39244--- linux-3.0.7/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
39245+++ linux-3.0.7/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
39246@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39247 unsigned long limit;
39248
39249 limit = rlimit(RLIMIT_FSIZE);
39250+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39251 if (limit != RLIM_INFINITY && offset > limit)
39252 goto out_sig;
39253 if (offset > inode->i_sb->s_maxbytes)
39254diff -urNp linux-3.0.7/fs/autofs4/waitq.c linux-3.0.7/fs/autofs4/waitq.c
39255--- linux-3.0.7/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
39256+++ linux-3.0.7/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
39257@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39258 {
39259 unsigned long sigpipe, flags;
39260 mm_segment_t fs;
39261- const char *data = (const char *)addr;
39262+ const char __user *data = (const char __force_user *)addr;
39263 ssize_t wr = 0;
39264
39265 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39266diff -urNp linux-3.0.7/fs/befs/linuxvfs.c linux-3.0.7/fs/befs/linuxvfs.c
39267--- linux-3.0.7/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
39268+++ linux-3.0.7/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
39269@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39270 {
39271 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39272 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39273- char *link = nd_get_link(nd);
39274+ const char *link = nd_get_link(nd);
39275 if (!IS_ERR(link))
39276 kfree(link);
39277 }
39278diff -urNp linux-3.0.7/fs/binfmt_aout.c linux-3.0.7/fs/binfmt_aout.c
39279--- linux-3.0.7/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
39280+++ linux-3.0.7/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
39281@@ -16,6 +16,7 @@
39282 #include <linux/string.h>
39283 #include <linux/fs.h>
39284 #include <linux/file.h>
39285+#include <linux/security.h>
39286 #include <linux/stat.h>
39287 #include <linux/fcntl.h>
39288 #include <linux/ptrace.h>
39289@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39290 #endif
39291 # define START_STACK(u) ((void __user *)u.start_stack)
39292
39293+ memset(&dump, 0, sizeof(dump));
39294+
39295 fs = get_fs();
39296 set_fs(KERNEL_DS);
39297 has_dumped = 1;
39298@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39299
39300 /* If the size of the dump file exceeds the rlimit, then see what would happen
39301 if we wrote the stack, but not the data area. */
39302+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39303 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39304 dump.u_dsize = 0;
39305
39306 /* Make sure we have enough room to write the stack and data areas. */
39307+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39308 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39309 dump.u_ssize = 0;
39310
39311@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39312 rlim = rlimit(RLIMIT_DATA);
39313 if (rlim >= RLIM_INFINITY)
39314 rlim = ~0;
39315+
39316+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39317 if (ex.a_data + ex.a_bss > rlim)
39318 return -ENOMEM;
39319
39320@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39321 install_exec_creds(bprm);
39322 current->flags &= ~PF_FORKNOEXEC;
39323
39324+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39325+ current->mm->pax_flags = 0UL;
39326+#endif
39327+
39328+#ifdef CONFIG_PAX_PAGEEXEC
39329+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39330+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39331+
39332+#ifdef CONFIG_PAX_EMUTRAMP
39333+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39334+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39335+#endif
39336+
39337+#ifdef CONFIG_PAX_MPROTECT
39338+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39339+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39340+#endif
39341+
39342+ }
39343+#endif
39344+
39345 if (N_MAGIC(ex) == OMAGIC) {
39346 unsigned long text_addr, map_size;
39347 loff_t pos;
39348@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39349
39350 down_write(&current->mm->mmap_sem);
39351 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39352- PROT_READ | PROT_WRITE | PROT_EXEC,
39353+ PROT_READ | PROT_WRITE,
39354 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39355 fd_offset + ex.a_text);
39356 up_write(&current->mm->mmap_sem);
39357diff -urNp linux-3.0.7/fs/binfmt_elf.c linux-3.0.7/fs/binfmt_elf.c
39358--- linux-3.0.7/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39359+++ linux-3.0.7/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
39360@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39361 #define elf_core_dump NULL
39362 #endif
39363
39364+#ifdef CONFIG_PAX_MPROTECT
39365+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39366+#endif
39367+
39368 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39369 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39370 #else
39371@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39372 .load_binary = load_elf_binary,
39373 .load_shlib = load_elf_library,
39374 .core_dump = elf_core_dump,
39375+
39376+#ifdef CONFIG_PAX_MPROTECT
39377+ .handle_mprotect= elf_handle_mprotect,
39378+#endif
39379+
39380 .min_coredump = ELF_EXEC_PAGESIZE,
39381 };
39382
39383@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39384
39385 static int set_brk(unsigned long start, unsigned long end)
39386 {
39387+ unsigned long e = end;
39388+
39389 start = ELF_PAGEALIGN(start);
39390 end = ELF_PAGEALIGN(end);
39391 if (end > start) {
39392@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39393 if (BAD_ADDR(addr))
39394 return addr;
39395 }
39396- current->mm->start_brk = current->mm->brk = end;
39397+ current->mm->start_brk = current->mm->brk = e;
39398 return 0;
39399 }
39400
39401@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39402 elf_addr_t __user *u_rand_bytes;
39403 const char *k_platform = ELF_PLATFORM;
39404 const char *k_base_platform = ELF_BASE_PLATFORM;
39405- unsigned char k_rand_bytes[16];
39406+ u32 k_rand_bytes[4];
39407 int items;
39408 elf_addr_t *elf_info;
39409 int ei_index = 0;
39410 const struct cred *cred = current_cred();
39411 struct vm_area_struct *vma;
39412+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39413+
39414+ pax_track_stack();
39415
39416 /*
39417 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39418@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39419 * Generate 16 random bytes for userspace PRNG seeding.
39420 */
39421 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39422- u_rand_bytes = (elf_addr_t __user *)
39423- STACK_ALLOC(p, sizeof(k_rand_bytes));
39424+ srandom32(k_rand_bytes[0] ^ random32());
39425+ srandom32(k_rand_bytes[1] ^ random32());
39426+ srandom32(k_rand_bytes[2] ^ random32());
39427+ srandom32(k_rand_bytes[3] ^ random32());
39428+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39429+ u_rand_bytes = (elf_addr_t __user *) p;
39430 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39431 return -EFAULT;
39432
39433@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39434 return -EFAULT;
39435 current->mm->env_end = p;
39436
39437+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39438+
39439 /* Put the elf_info on the stack in the right place. */
39440 sp = (elf_addr_t __user *)envp + 1;
39441- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39442+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39443 return -EFAULT;
39444 return 0;
39445 }
39446@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39447 {
39448 struct elf_phdr *elf_phdata;
39449 struct elf_phdr *eppnt;
39450- unsigned long load_addr = 0;
39451+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39452 int load_addr_set = 0;
39453 unsigned long last_bss = 0, elf_bss = 0;
39454- unsigned long error = ~0UL;
39455+ unsigned long error = -EINVAL;
39456 unsigned long total_size;
39457 int retval, i, size;
39458
39459@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39460 goto out_close;
39461 }
39462
39463+#ifdef CONFIG_PAX_SEGMEXEC
39464+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39465+ pax_task_size = SEGMEXEC_TASK_SIZE;
39466+#endif
39467+
39468 eppnt = elf_phdata;
39469 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39470 if (eppnt->p_type == PT_LOAD) {
39471@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39472 k = load_addr + eppnt->p_vaddr;
39473 if (BAD_ADDR(k) ||
39474 eppnt->p_filesz > eppnt->p_memsz ||
39475- eppnt->p_memsz > TASK_SIZE ||
39476- TASK_SIZE - eppnt->p_memsz < k) {
39477+ eppnt->p_memsz > pax_task_size ||
39478+ pax_task_size - eppnt->p_memsz < k) {
39479 error = -ENOMEM;
39480 goto out_close;
39481 }
39482@@ -528,6 +553,193 @@ out:
39483 return error;
39484 }
39485
39486+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39487+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39488+{
39489+ unsigned long pax_flags = 0UL;
39490+
39491+#ifdef CONFIG_PAX_PAGEEXEC
39492+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39493+ pax_flags |= MF_PAX_PAGEEXEC;
39494+#endif
39495+
39496+#ifdef CONFIG_PAX_SEGMEXEC
39497+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39498+ pax_flags |= MF_PAX_SEGMEXEC;
39499+#endif
39500+
39501+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39502+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39503+ if ((__supported_pte_mask & _PAGE_NX))
39504+ pax_flags &= ~MF_PAX_SEGMEXEC;
39505+ else
39506+ pax_flags &= ~MF_PAX_PAGEEXEC;
39507+ }
39508+#endif
39509+
39510+#ifdef CONFIG_PAX_EMUTRAMP
39511+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39512+ pax_flags |= MF_PAX_EMUTRAMP;
39513+#endif
39514+
39515+#ifdef CONFIG_PAX_MPROTECT
39516+ if (elf_phdata->p_flags & PF_MPROTECT)
39517+ pax_flags |= MF_PAX_MPROTECT;
39518+#endif
39519+
39520+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39521+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39522+ pax_flags |= MF_PAX_RANDMMAP;
39523+#endif
39524+
39525+ return pax_flags;
39526+}
39527+#endif
39528+
39529+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39530+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39531+{
39532+ unsigned long pax_flags = 0UL;
39533+
39534+#ifdef CONFIG_PAX_PAGEEXEC
39535+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39536+ pax_flags |= MF_PAX_PAGEEXEC;
39537+#endif
39538+
39539+#ifdef CONFIG_PAX_SEGMEXEC
39540+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39541+ pax_flags |= MF_PAX_SEGMEXEC;
39542+#endif
39543+
39544+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39545+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39546+ if ((__supported_pte_mask & _PAGE_NX))
39547+ pax_flags &= ~MF_PAX_SEGMEXEC;
39548+ else
39549+ pax_flags &= ~MF_PAX_PAGEEXEC;
39550+ }
39551+#endif
39552+
39553+#ifdef CONFIG_PAX_EMUTRAMP
39554+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39555+ pax_flags |= MF_PAX_EMUTRAMP;
39556+#endif
39557+
39558+#ifdef CONFIG_PAX_MPROTECT
39559+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39560+ pax_flags |= MF_PAX_MPROTECT;
39561+#endif
39562+
39563+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39564+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39565+ pax_flags |= MF_PAX_RANDMMAP;
39566+#endif
39567+
39568+ return pax_flags;
39569+}
39570+#endif
39571+
39572+#ifdef CONFIG_PAX_EI_PAX
39573+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39574+{
39575+ unsigned long pax_flags = 0UL;
39576+
39577+#ifdef CONFIG_PAX_PAGEEXEC
39578+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39579+ pax_flags |= MF_PAX_PAGEEXEC;
39580+#endif
39581+
39582+#ifdef CONFIG_PAX_SEGMEXEC
39583+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39584+ pax_flags |= MF_PAX_SEGMEXEC;
39585+#endif
39586+
39587+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39588+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39589+ if ((__supported_pte_mask & _PAGE_NX))
39590+ pax_flags &= ~MF_PAX_SEGMEXEC;
39591+ else
39592+ pax_flags &= ~MF_PAX_PAGEEXEC;
39593+ }
39594+#endif
39595+
39596+#ifdef CONFIG_PAX_EMUTRAMP
39597+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39598+ pax_flags |= MF_PAX_EMUTRAMP;
39599+#endif
39600+
39601+#ifdef CONFIG_PAX_MPROTECT
39602+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39603+ pax_flags |= MF_PAX_MPROTECT;
39604+#endif
39605+
39606+#ifdef CONFIG_PAX_ASLR
39607+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39608+ pax_flags |= MF_PAX_RANDMMAP;
39609+#endif
39610+
39611+ return pax_flags;
39612+}
39613+#endif
39614+
39615+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39616+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39617+{
39618+ unsigned long pax_flags = 0UL;
39619+
39620+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39621+ unsigned long i;
39622+ int found_flags = 0;
39623+#endif
39624+
39625+#ifdef CONFIG_PAX_EI_PAX
39626+ pax_flags = pax_parse_ei_pax(elf_ex);
39627+#endif
39628+
39629+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39630+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39631+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39632+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39633+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39634+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39635+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39636+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39637+ return -EINVAL;
39638+
39639+#ifdef CONFIG_PAX_SOFTMODE
39640+ if (pax_softmode)
39641+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39642+ else
39643+#endif
39644+
39645+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39646+ found_flags = 1;
39647+ break;
39648+ }
39649+#endif
39650+
39651+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39652+ if (found_flags == 0) {
39653+ struct elf_phdr phdr;
39654+ memset(&phdr, 0, sizeof(phdr));
39655+ phdr.p_flags = PF_NOEMUTRAMP;
39656+#ifdef CONFIG_PAX_SOFTMODE
39657+ if (pax_softmode)
39658+ pax_flags = pax_parse_softmode(&phdr);
39659+ else
39660+#endif
39661+ pax_flags = pax_parse_hardmode(&phdr);
39662+ }
39663+#endif
39664+
39665+ if (0 > pax_check_flags(&pax_flags))
39666+ return -EINVAL;
39667+
39668+ current->mm->pax_flags = pax_flags;
39669+ return 0;
39670+}
39671+#endif
39672+
39673 /*
39674 * These are the functions used to load ELF style executables and shared
39675 * libraries. There is no binary dependent code anywhere else.
39676@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
39677 {
39678 unsigned int random_variable = 0;
39679
39680+#ifdef CONFIG_PAX_RANDUSTACK
39681+ if (randomize_va_space)
39682+ return stack_top - current->mm->delta_stack;
39683+#endif
39684+
39685 if ((current->flags & PF_RANDOMIZE) &&
39686 !(current->personality & ADDR_NO_RANDOMIZE)) {
39687 random_variable = get_random_int() & STACK_RND_MASK;
39688@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
39689 unsigned long load_addr = 0, load_bias = 0;
39690 int load_addr_set = 0;
39691 char * elf_interpreter = NULL;
39692- unsigned long error;
39693+ unsigned long error = 0;
39694 struct elf_phdr *elf_ppnt, *elf_phdata;
39695 unsigned long elf_bss, elf_brk;
39696 int retval, i;
39697@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
39698 unsigned long start_code, end_code, start_data, end_data;
39699 unsigned long reloc_func_desc __maybe_unused = 0;
39700 int executable_stack = EXSTACK_DEFAULT;
39701- unsigned long def_flags = 0;
39702 struct {
39703 struct elfhdr elf_ex;
39704 struct elfhdr interp_elf_ex;
39705 } *loc;
39706+ unsigned long pax_task_size = TASK_SIZE;
39707
39708 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39709 if (!loc) {
39710@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
39711
39712 /* OK, This is the point of no return */
39713 current->flags &= ~PF_FORKNOEXEC;
39714- current->mm->def_flags = def_flags;
39715+
39716+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39717+ current->mm->pax_flags = 0UL;
39718+#endif
39719+
39720+#ifdef CONFIG_PAX_DLRESOLVE
39721+ current->mm->call_dl_resolve = 0UL;
39722+#endif
39723+
39724+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39725+ current->mm->call_syscall = 0UL;
39726+#endif
39727+
39728+#ifdef CONFIG_PAX_ASLR
39729+ current->mm->delta_mmap = 0UL;
39730+ current->mm->delta_stack = 0UL;
39731+#endif
39732+
39733+ current->mm->def_flags = 0;
39734+
39735+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39736+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39737+ send_sig(SIGKILL, current, 0);
39738+ goto out_free_dentry;
39739+ }
39740+#endif
39741+
39742+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39743+ pax_set_initial_flags(bprm);
39744+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39745+ if (pax_set_initial_flags_func)
39746+ (pax_set_initial_flags_func)(bprm);
39747+#endif
39748+
39749+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39750+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
39751+ current->mm->context.user_cs_limit = PAGE_SIZE;
39752+ current->mm->def_flags |= VM_PAGEEXEC;
39753+ }
39754+#endif
39755+
39756+#ifdef CONFIG_PAX_SEGMEXEC
39757+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39758+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39759+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39760+ pax_task_size = SEGMEXEC_TASK_SIZE;
39761+ current->mm->def_flags |= VM_NOHUGEPAGE;
39762+ }
39763+#endif
39764+
39765+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39766+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39767+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39768+ put_cpu();
39769+ }
39770+#endif
39771
39772 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39773 may depend on the personality. */
39774 SET_PERSONALITY(loc->elf_ex);
39775+
39776+#ifdef CONFIG_PAX_ASLR
39777+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39778+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39779+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39780+ }
39781+#endif
39782+
39783+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39784+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39785+ executable_stack = EXSTACK_DISABLE_X;
39786+ current->personality &= ~READ_IMPLIES_EXEC;
39787+ } else
39788+#endif
39789+
39790 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39791 current->personality |= READ_IMPLIES_EXEC;
39792
39793@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
39794 #else
39795 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39796 #endif
39797+
39798+#ifdef CONFIG_PAX_RANDMMAP
39799+ /* PaX: randomize base address at the default exe base if requested */
39800+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39801+#ifdef CONFIG_SPARC64
39802+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39803+#else
39804+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39805+#endif
39806+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39807+ elf_flags |= MAP_FIXED;
39808+ }
39809+#endif
39810+
39811 }
39812
39813 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39814@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
39815 * allowed task size. Note that p_filesz must always be
39816 * <= p_memsz so it is only necessary to check p_memsz.
39817 */
39818- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39819- elf_ppnt->p_memsz > TASK_SIZE ||
39820- TASK_SIZE - elf_ppnt->p_memsz < k) {
39821+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39822+ elf_ppnt->p_memsz > pax_task_size ||
39823+ pax_task_size - elf_ppnt->p_memsz < k) {
39824 /* set_brk can never work. Avoid overflows. */
39825 send_sig(SIGKILL, current, 0);
39826 retval = -EINVAL;
39827@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
39828 start_data += load_bias;
39829 end_data += load_bias;
39830
39831+#ifdef CONFIG_PAX_RANDMMAP
39832+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39833+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39834+#endif
39835+
39836 /* Calling set_brk effectively mmaps the pages that we need
39837 * for the bss and break sections. We must do this before
39838 * mapping in the interpreter, to make sure it doesn't wind
39839@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
39840 goto out_free_dentry;
39841 }
39842 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39843- send_sig(SIGSEGV, current, 0);
39844- retval = -EFAULT; /* Nobody gets to see this, but.. */
39845- goto out_free_dentry;
39846+ /*
39847+ * This bss-zeroing can fail if the ELF
39848+ * file specifies odd protections. So
39849+ * we don't check the return value
39850+ */
39851 }
39852
39853 if (elf_interpreter) {
39854@@ -1090,7 +1398,7 @@ out:
39855 * Decide what to dump of a segment, part, all or none.
39856 */
39857 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39858- unsigned long mm_flags)
39859+ unsigned long mm_flags, long signr)
39860 {
39861 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39862
39863@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
39864 if (vma->vm_file == NULL)
39865 return 0;
39866
39867- if (FILTER(MAPPED_PRIVATE))
39868+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39869 goto whole;
39870
39871 /*
39872@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
39873 {
39874 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39875 int i = 0;
39876- do
39877+ do {
39878 i += 2;
39879- while (auxv[i - 2] != AT_NULL);
39880+ } while (auxv[i - 2] != AT_NULL);
39881 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39882 }
39883
39884@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
39885 }
39886
39887 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
39888- unsigned long mm_flags)
39889+ struct coredump_params *cprm)
39890 {
39891 struct vm_area_struct *vma;
39892 size_t size = 0;
39893
39894 for (vma = first_vma(current, gate_vma); vma != NULL;
39895 vma = next_vma(vma, gate_vma))
39896- size += vma_dump_size(vma, mm_flags);
39897+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39898 return size;
39899 }
39900
39901@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
39902
39903 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
39904
39905- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
39906+ offset += elf_core_vma_data_size(gate_vma, cprm);
39907 offset += elf_core_extra_data_size();
39908 e_shoff = offset;
39909
39910@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
39911 offset = dataoff;
39912
39913 size += sizeof(*elf);
39914+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39915 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
39916 goto end_coredump;
39917
39918 size += sizeof(*phdr4note);
39919+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39920 if (size > cprm->limit
39921 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
39922 goto end_coredump;
39923@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
39924 phdr.p_offset = offset;
39925 phdr.p_vaddr = vma->vm_start;
39926 phdr.p_paddr = 0;
39927- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
39928+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39929 phdr.p_memsz = vma->vm_end - vma->vm_start;
39930 offset += phdr.p_filesz;
39931 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39932@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
39933 phdr.p_align = ELF_EXEC_PAGESIZE;
39934
39935 size += sizeof(phdr);
39936+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39937 if (size > cprm->limit
39938 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
39939 goto end_coredump;
39940@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
39941 unsigned long addr;
39942 unsigned long end;
39943
39944- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
39945+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39946
39947 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39948 struct page *page;
39949@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
39950 page = get_dump_page(addr);
39951 if (page) {
39952 void *kaddr = kmap(page);
39953+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39954 stop = ((size += PAGE_SIZE) > cprm->limit) ||
39955 !dump_write(cprm->file, kaddr,
39956 PAGE_SIZE);
39957@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
39958
39959 if (e_phnum == PN_XNUM) {
39960 size += sizeof(*shdr4extnum);
39961+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39962 if (size > cprm->limit
39963 || !dump_write(cprm->file, shdr4extnum,
39964 sizeof(*shdr4extnum)))
39965@@ -2067,6 +2380,97 @@ out:
39966
39967 #endif /* CONFIG_ELF_CORE */
39968
39969+#ifdef CONFIG_PAX_MPROTECT
39970+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39971+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39972+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39973+ *
39974+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39975+ * basis because we want to allow the common case and not the special ones.
39976+ */
39977+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39978+{
39979+ struct elfhdr elf_h;
39980+ struct elf_phdr elf_p;
39981+ unsigned long i;
39982+ unsigned long oldflags;
39983+ bool is_textrel_rw, is_textrel_rx, is_relro;
39984+
39985+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39986+ return;
39987+
39988+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39989+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39990+
39991+#ifdef CONFIG_PAX_ELFRELOCS
39992+ /* possible TEXTREL */
39993+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39994+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39995+#else
39996+ is_textrel_rw = false;
39997+ is_textrel_rx = false;
39998+#endif
39999+
40000+ /* possible RELRO */
40001+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40002+
40003+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40004+ return;
40005+
40006+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40007+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40008+
40009+#ifdef CONFIG_PAX_ETEXECRELOCS
40010+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40011+#else
40012+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40013+#endif
40014+
40015+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40016+ !elf_check_arch(&elf_h) ||
40017+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40018+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40019+ return;
40020+
40021+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40022+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40023+ return;
40024+ switch (elf_p.p_type) {
40025+ case PT_DYNAMIC:
40026+ if (!is_textrel_rw && !is_textrel_rx)
40027+ continue;
40028+ i = 0UL;
40029+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40030+ elf_dyn dyn;
40031+
40032+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40033+ return;
40034+ if (dyn.d_tag == DT_NULL)
40035+ return;
40036+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40037+ gr_log_textrel(vma);
40038+ if (is_textrel_rw)
40039+ vma->vm_flags |= VM_MAYWRITE;
40040+ else
40041+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40042+ vma->vm_flags &= ~VM_MAYWRITE;
40043+ return;
40044+ }
40045+ i++;
40046+ }
40047+ return;
40048+
40049+ case PT_GNU_RELRO:
40050+ if (!is_relro)
40051+ continue;
40052+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40053+ vma->vm_flags &= ~VM_MAYWRITE;
40054+ return;
40055+ }
40056+ }
40057+}
40058+#endif
40059+
40060 static int __init init_elf_binfmt(void)
40061 {
40062 return register_binfmt(&elf_format);
40063diff -urNp linux-3.0.7/fs/binfmt_flat.c linux-3.0.7/fs/binfmt_flat.c
40064--- linux-3.0.7/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
40065+++ linux-3.0.7/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
40066@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40067 realdatastart = (unsigned long) -ENOMEM;
40068 printk("Unable to allocate RAM for process data, errno %d\n",
40069 (int)-realdatastart);
40070+ down_write(&current->mm->mmap_sem);
40071 do_munmap(current->mm, textpos, text_len);
40072+ up_write(&current->mm->mmap_sem);
40073 ret = realdatastart;
40074 goto err;
40075 }
40076@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40077 }
40078 if (IS_ERR_VALUE(result)) {
40079 printk("Unable to read data+bss, errno %d\n", (int)-result);
40080+ down_write(&current->mm->mmap_sem);
40081 do_munmap(current->mm, textpos, text_len);
40082 do_munmap(current->mm, realdatastart, len);
40083+ up_write(&current->mm->mmap_sem);
40084 ret = result;
40085 goto err;
40086 }
40087@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40088 }
40089 if (IS_ERR_VALUE(result)) {
40090 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40091+ down_write(&current->mm->mmap_sem);
40092 do_munmap(current->mm, textpos, text_len + data_len + extra +
40093 MAX_SHARED_LIBS * sizeof(unsigned long));
40094+ up_write(&current->mm->mmap_sem);
40095 ret = result;
40096 goto err;
40097 }
40098diff -urNp linux-3.0.7/fs/bio.c linux-3.0.7/fs/bio.c
40099--- linux-3.0.7/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
40100+++ linux-3.0.7/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
40101@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40102 const int read = bio_data_dir(bio) == READ;
40103 struct bio_map_data *bmd = bio->bi_private;
40104 int i;
40105- char *p = bmd->sgvecs[0].iov_base;
40106+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40107
40108 __bio_for_each_segment(bvec, bio, i, 0) {
40109 char *addr = page_address(bvec->bv_page);
40110diff -urNp linux-3.0.7/fs/block_dev.c linux-3.0.7/fs/block_dev.c
40111--- linux-3.0.7/fs/block_dev.c 2011-10-16 21:54:54.000000000 -0400
40112+++ linux-3.0.7/fs/block_dev.c 2011-10-16 21:55:28.000000000 -0400
40113@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
40114 else if (bdev->bd_contains == bdev)
40115 return true; /* is a whole device which isn't held */
40116
40117- else if (whole->bd_holder == bd_may_claim)
40118+ else if (whole->bd_holder == (void *)bd_may_claim)
40119 return true; /* is a partition of a device that is being partitioned */
40120 else if (whole->bd_holder != NULL)
40121 return false; /* is a partition of a held device */
40122diff -urNp linux-3.0.7/fs/btrfs/ctree.c linux-3.0.7/fs/btrfs/ctree.c
40123--- linux-3.0.7/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
40124+++ linux-3.0.7/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
40125@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
40126 free_extent_buffer(buf);
40127 add_root_to_dirty_list(root);
40128 } else {
40129- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40130- parent_start = parent->start;
40131- else
40132+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40133+ if (parent)
40134+ parent_start = parent->start;
40135+ else
40136+ parent_start = 0;
40137+ } else
40138 parent_start = 0;
40139
40140 WARN_ON(trans->transid != btrfs_header_generation(parent));
40141diff -urNp linux-3.0.7/fs/btrfs/inode.c linux-3.0.7/fs/btrfs/inode.c
40142--- linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:54:54.000000000 -0400
40143+++ linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:55:28.000000000 -0400
40144@@ -6896,7 +6896,7 @@ fail:
40145 return -ENOMEM;
40146 }
40147
40148-static int btrfs_getattr(struct vfsmount *mnt,
40149+int btrfs_getattr(struct vfsmount *mnt,
40150 struct dentry *dentry, struct kstat *stat)
40151 {
40152 struct inode *inode = dentry->d_inode;
40153@@ -6908,6 +6908,14 @@ static int btrfs_getattr(struct vfsmount
40154 return 0;
40155 }
40156
40157+EXPORT_SYMBOL(btrfs_getattr);
40158+
40159+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40160+{
40161+ return BTRFS_I(inode)->root->anon_super.s_dev;
40162+}
40163+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40164+
40165 /*
40166 * If a file is moved, it will inherit the cow and compression flags of the new
40167 * directory.
40168diff -urNp linux-3.0.7/fs/btrfs/ioctl.c linux-3.0.7/fs/btrfs/ioctl.c
40169--- linux-3.0.7/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40170+++ linux-3.0.7/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40171@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
40172 for (i = 0; i < num_types; i++) {
40173 struct btrfs_space_info *tmp;
40174
40175+ /* Don't copy in more than we allocated */
40176 if (!slot_count)
40177 break;
40178
40179+ slot_count--;
40180+
40181 info = NULL;
40182 rcu_read_lock();
40183 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40184@@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
40185 memcpy(dest, &space, sizeof(space));
40186 dest++;
40187 space_args.total_spaces++;
40188- slot_count--;
40189 }
40190- if (!slot_count)
40191- break;
40192 }
40193 up_read(&info->groups_sem);
40194 }
40195
40196- user_dest = (struct btrfs_ioctl_space_info *)
40197+ user_dest = (struct btrfs_ioctl_space_info __user *)
40198 (arg + sizeof(struct btrfs_ioctl_space_args));
40199
40200 if (copy_to_user(user_dest, dest_orig, alloc_size))
40201diff -urNp linux-3.0.7/fs/btrfs/relocation.c linux-3.0.7/fs/btrfs/relocation.c
40202--- linux-3.0.7/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
40203+++ linux-3.0.7/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
40204@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40205 }
40206 spin_unlock(&rc->reloc_root_tree.lock);
40207
40208- BUG_ON((struct btrfs_root *)node->data != root);
40209+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40210
40211 if (!del) {
40212 spin_lock(&rc->reloc_root_tree.lock);
40213diff -urNp linux-3.0.7/fs/cachefiles/bind.c linux-3.0.7/fs/cachefiles/bind.c
40214--- linux-3.0.7/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
40215+++ linux-3.0.7/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
40216@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40217 args);
40218
40219 /* start by checking things over */
40220- ASSERT(cache->fstop_percent >= 0 &&
40221- cache->fstop_percent < cache->fcull_percent &&
40222+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40223 cache->fcull_percent < cache->frun_percent &&
40224 cache->frun_percent < 100);
40225
40226- ASSERT(cache->bstop_percent >= 0 &&
40227- cache->bstop_percent < cache->bcull_percent &&
40228+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40229 cache->bcull_percent < cache->brun_percent &&
40230 cache->brun_percent < 100);
40231
40232diff -urNp linux-3.0.7/fs/cachefiles/daemon.c linux-3.0.7/fs/cachefiles/daemon.c
40233--- linux-3.0.7/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
40234+++ linux-3.0.7/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
40235@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40236 if (n > buflen)
40237 return -EMSGSIZE;
40238
40239- if (copy_to_user(_buffer, buffer, n) != 0)
40240+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40241 return -EFAULT;
40242
40243 return n;
40244@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40245 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40246 return -EIO;
40247
40248- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40249+ if (datalen > PAGE_SIZE - 1)
40250 return -EOPNOTSUPP;
40251
40252 /* drag the command string into the kernel so we can parse it */
40253@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40254 if (args[0] != '%' || args[1] != '\0')
40255 return -EINVAL;
40256
40257- if (fstop < 0 || fstop >= cache->fcull_percent)
40258+ if (fstop >= cache->fcull_percent)
40259 return cachefiles_daemon_range_error(cache, args);
40260
40261 cache->fstop_percent = fstop;
40262@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40263 if (args[0] != '%' || args[1] != '\0')
40264 return -EINVAL;
40265
40266- if (bstop < 0 || bstop >= cache->bcull_percent)
40267+ if (bstop >= cache->bcull_percent)
40268 return cachefiles_daemon_range_error(cache, args);
40269
40270 cache->bstop_percent = bstop;
40271diff -urNp linux-3.0.7/fs/cachefiles/internal.h linux-3.0.7/fs/cachefiles/internal.h
40272--- linux-3.0.7/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
40273+++ linux-3.0.7/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
40274@@ -57,7 +57,7 @@ struct cachefiles_cache {
40275 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40276 struct rb_root active_nodes; /* active nodes (can't be culled) */
40277 rwlock_t active_lock; /* lock for active_nodes */
40278- atomic_t gravecounter; /* graveyard uniquifier */
40279+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40280 unsigned frun_percent; /* when to stop culling (% files) */
40281 unsigned fcull_percent; /* when to start culling (% files) */
40282 unsigned fstop_percent; /* when to stop allocating (% files) */
40283@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40284 * proc.c
40285 */
40286 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40287-extern atomic_t cachefiles_lookup_histogram[HZ];
40288-extern atomic_t cachefiles_mkdir_histogram[HZ];
40289-extern atomic_t cachefiles_create_histogram[HZ];
40290+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40291+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40292+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40293
40294 extern int __init cachefiles_proc_init(void);
40295 extern void cachefiles_proc_cleanup(void);
40296 static inline
40297-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40298+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40299 {
40300 unsigned long jif = jiffies - start_jif;
40301 if (jif >= HZ)
40302 jif = HZ - 1;
40303- atomic_inc(&histogram[jif]);
40304+ atomic_inc_unchecked(&histogram[jif]);
40305 }
40306
40307 #else
40308diff -urNp linux-3.0.7/fs/cachefiles/namei.c linux-3.0.7/fs/cachefiles/namei.c
40309--- linux-3.0.7/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
40310+++ linux-3.0.7/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
40311@@ -318,7 +318,7 @@ try_again:
40312 /* first step is to make up a grave dentry in the graveyard */
40313 sprintf(nbuffer, "%08x%08x",
40314 (uint32_t) get_seconds(),
40315- (uint32_t) atomic_inc_return(&cache->gravecounter));
40316+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40317
40318 /* do the multiway lock magic */
40319 trap = lock_rename(cache->graveyard, dir);
40320diff -urNp linux-3.0.7/fs/cachefiles/proc.c linux-3.0.7/fs/cachefiles/proc.c
40321--- linux-3.0.7/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
40322+++ linux-3.0.7/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
40323@@ -14,9 +14,9 @@
40324 #include <linux/seq_file.h>
40325 #include "internal.h"
40326
40327-atomic_t cachefiles_lookup_histogram[HZ];
40328-atomic_t cachefiles_mkdir_histogram[HZ];
40329-atomic_t cachefiles_create_histogram[HZ];
40330+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40331+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40332+atomic_unchecked_t cachefiles_create_histogram[HZ];
40333
40334 /*
40335 * display the latency histogram
40336@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40337 return 0;
40338 default:
40339 index = (unsigned long) v - 3;
40340- x = atomic_read(&cachefiles_lookup_histogram[index]);
40341- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40342- z = atomic_read(&cachefiles_create_histogram[index]);
40343+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40344+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40345+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40346 if (x == 0 && y == 0 && z == 0)
40347 return 0;
40348
40349diff -urNp linux-3.0.7/fs/cachefiles/rdwr.c linux-3.0.7/fs/cachefiles/rdwr.c
40350--- linux-3.0.7/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
40351+++ linux-3.0.7/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
40352@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40353 old_fs = get_fs();
40354 set_fs(KERNEL_DS);
40355 ret = file->f_op->write(
40356- file, (const void __user *) data, len, &pos);
40357+ file, (const void __force_user *) data, len, &pos);
40358 set_fs(old_fs);
40359 kunmap(page);
40360 if (ret != len)
40361diff -urNp linux-3.0.7/fs/ceph/dir.c linux-3.0.7/fs/ceph/dir.c
40362--- linux-3.0.7/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
40363+++ linux-3.0.7/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
40364@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
40365 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40366 struct ceph_mds_client *mdsc = fsc->mdsc;
40367 unsigned frag = fpos_frag(filp->f_pos);
40368- int off = fpos_off(filp->f_pos);
40369+ unsigned int off = fpos_off(filp->f_pos);
40370 int err;
40371 u32 ftype;
40372 struct ceph_mds_reply_info_parsed *rinfo;
40373diff -urNp linux-3.0.7/fs/cifs/cifs_debug.c linux-3.0.7/fs/cifs/cifs_debug.c
40374--- linux-3.0.7/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
40375+++ linux-3.0.7/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
40376@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40377
40378 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40379 #ifdef CONFIG_CIFS_STATS2
40380- atomic_set(&totBufAllocCount, 0);
40381- atomic_set(&totSmBufAllocCount, 0);
40382+ atomic_set_unchecked(&totBufAllocCount, 0);
40383+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40384 #endif /* CONFIG_CIFS_STATS2 */
40385 spin_lock(&cifs_tcp_ses_lock);
40386 list_for_each(tmp1, &cifs_tcp_ses_list) {
40387@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40388 tcon = list_entry(tmp3,
40389 struct cifs_tcon,
40390 tcon_list);
40391- atomic_set(&tcon->num_smbs_sent, 0);
40392- atomic_set(&tcon->num_writes, 0);
40393- atomic_set(&tcon->num_reads, 0);
40394- atomic_set(&tcon->num_oplock_brks, 0);
40395- atomic_set(&tcon->num_opens, 0);
40396- atomic_set(&tcon->num_posixopens, 0);
40397- atomic_set(&tcon->num_posixmkdirs, 0);
40398- atomic_set(&tcon->num_closes, 0);
40399- atomic_set(&tcon->num_deletes, 0);
40400- atomic_set(&tcon->num_mkdirs, 0);
40401- atomic_set(&tcon->num_rmdirs, 0);
40402- atomic_set(&tcon->num_renames, 0);
40403- atomic_set(&tcon->num_t2renames, 0);
40404- atomic_set(&tcon->num_ffirst, 0);
40405- atomic_set(&tcon->num_fnext, 0);
40406- atomic_set(&tcon->num_fclose, 0);
40407- atomic_set(&tcon->num_hardlinks, 0);
40408- atomic_set(&tcon->num_symlinks, 0);
40409- atomic_set(&tcon->num_locks, 0);
40410+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40411+ atomic_set_unchecked(&tcon->num_writes, 0);
40412+ atomic_set_unchecked(&tcon->num_reads, 0);
40413+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40414+ atomic_set_unchecked(&tcon->num_opens, 0);
40415+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40416+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40417+ atomic_set_unchecked(&tcon->num_closes, 0);
40418+ atomic_set_unchecked(&tcon->num_deletes, 0);
40419+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40420+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40421+ atomic_set_unchecked(&tcon->num_renames, 0);
40422+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40423+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40424+ atomic_set_unchecked(&tcon->num_fnext, 0);
40425+ atomic_set_unchecked(&tcon->num_fclose, 0);
40426+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40427+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40428+ atomic_set_unchecked(&tcon->num_locks, 0);
40429 }
40430 }
40431 }
40432@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40433 smBufAllocCount.counter, cifs_min_small);
40434 #ifdef CONFIG_CIFS_STATS2
40435 seq_printf(m, "Total Large %d Small %d Allocations\n",
40436- atomic_read(&totBufAllocCount),
40437- atomic_read(&totSmBufAllocCount));
40438+ atomic_read_unchecked(&totBufAllocCount),
40439+ atomic_read_unchecked(&totSmBufAllocCount));
40440 #endif /* CONFIG_CIFS_STATS2 */
40441
40442 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40443@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40444 if (tcon->need_reconnect)
40445 seq_puts(m, "\tDISCONNECTED ");
40446 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40447- atomic_read(&tcon->num_smbs_sent),
40448- atomic_read(&tcon->num_oplock_brks));
40449+ atomic_read_unchecked(&tcon->num_smbs_sent),
40450+ atomic_read_unchecked(&tcon->num_oplock_brks));
40451 seq_printf(m, "\nReads: %d Bytes: %lld",
40452- atomic_read(&tcon->num_reads),
40453+ atomic_read_unchecked(&tcon->num_reads),
40454 (long long)(tcon->bytes_read));
40455 seq_printf(m, "\nWrites: %d Bytes: %lld",
40456- atomic_read(&tcon->num_writes),
40457+ atomic_read_unchecked(&tcon->num_writes),
40458 (long long)(tcon->bytes_written));
40459 seq_printf(m, "\nFlushes: %d",
40460- atomic_read(&tcon->num_flushes));
40461+ atomic_read_unchecked(&tcon->num_flushes));
40462 seq_printf(m, "\nLocks: %d HardLinks: %d "
40463 "Symlinks: %d",
40464- atomic_read(&tcon->num_locks),
40465- atomic_read(&tcon->num_hardlinks),
40466- atomic_read(&tcon->num_symlinks));
40467+ atomic_read_unchecked(&tcon->num_locks),
40468+ atomic_read_unchecked(&tcon->num_hardlinks),
40469+ atomic_read_unchecked(&tcon->num_symlinks));
40470 seq_printf(m, "\nOpens: %d Closes: %d "
40471 "Deletes: %d",
40472- atomic_read(&tcon->num_opens),
40473- atomic_read(&tcon->num_closes),
40474- atomic_read(&tcon->num_deletes));
40475+ atomic_read_unchecked(&tcon->num_opens),
40476+ atomic_read_unchecked(&tcon->num_closes),
40477+ atomic_read_unchecked(&tcon->num_deletes));
40478 seq_printf(m, "\nPosix Opens: %d "
40479 "Posix Mkdirs: %d",
40480- atomic_read(&tcon->num_posixopens),
40481- atomic_read(&tcon->num_posixmkdirs));
40482+ atomic_read_unchecked(&tcon->num_posixopens),
40483+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40484 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40485- atomic_read(&tcon->num_mkdirs),
40486- atomic_read(&tcon->num_rmdirs));
40487+ atomic_read_unchecked(&tcon->num_mkdirs),
40488+ atomic_read_unchecked(&tcon->num_rmdirs));
40489 seq_printf(m, "\nRenames: %d T2 Renames %d",
40490- atomic_read(&tcon->num_renames),
40491- atomic_read(&tcon->num_t2renames));
40492+ atomic_read_unchecked(&tcon->num_renames),
40493+ atomic_read_unchecked(&tcon->num_t2renames));
40494 seq_printf(m, "\nFindFirst: %d FNext %d "
40495 "FClose %d",
40496- atomic_read(&tcon->num_ffirst),
40497- atomic_read(&tcon->num_fnext),
40498- atomic_read(&tcon->num_fclose));
40499+ atomic_read_unchecked(&tcon->num_ffirst),
40500+ atomic_read_unchecked(&tcon->num_fnext),
40501+ atomic_read_unchecked(&tcon->num_fclose));
40502 }
40503 }
40504 }
40505diff -urNp linux-3.0.7/fs/cifs/cifsfs.c linux-3.0.7/fs/cifs/cifsfs.c
40506--- linux-3.0.7/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
40507+++ linux-3.0.7/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
40508@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
40509 cifs_req_cachep = kmem_cache_create("cifs_request",
40510 CIFSMaxBufSize +
40511 MAX_CIFS_HDR_SIZE, 0,
40512- SLAB_HWCACHE_ALIGN, NULL);
40513+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40514 if (cifs_req_cachep == NULL)
40515 return -ENOMEM;
40516
40517@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
40518 efficient to alloc 1 per page off the slab compared to 17K (5page)
40519 alloc of large cifs buffers even when page debugging is on */
40520 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40521- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40522+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40523 NULL);
40524 if (cifs_sm_req_cachep == NULL) {
40525 mempool_destroy(cifs_req_poolp);
40526@@ -1106,8 +1106,8 @@ init_cifs(void)
40527 atomic_set(&bufAllocCount, 0);
40528 atomic_set(&smBufAllocCount, 0);
40529 #ifdef CONFIG_CIFS_STATS2
40530- atomic_set(&totBufAllocCount, 0);
40531- atomic_set(&totSmBufAllocCount, 0);
40532+ atomic_set_unchecked(&totBufAllocCount, 0);
40533+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40534 #endif /* CONFIG_CIFS_STATS2 */
40535
40536 atomic_set(&midCount, 0);
40537diff -urNp linux-3.0.7/fs/cifs/cifsglob.h linux-3.0.7/fs/cifs/cifsglob.h
40538--- linux-3.0.7/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
40539+++ linux-3.0.7/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
40540@@ -381,28 +381,28 @@ struct cifs_tcon {
40541 __u16 Flags; /* optional support bits */
40542 enum statusEnum tidStatus;
40543 #ifdef CONFIG_CIFS_STATS
40544- atomic_t num_smbs_sent;
40545- atomic_t num_writes;
40546- atomic_t num_reads;
40547- atomic_t num_flushes;
40548- atomic_t num_oplock_brks;
40549- atomic_t num_opens;
40550- atomic_t num_closes;
40551- atomic_t num_deletes;
40552- atomic_t num_mkdirs;
40553- atomic_t num_posixopens;
40554- atomic_t num_posixmkdirs;
40555- atomic_t num_rmdirs;
40556- atomic_t num_renames;
40557- atomic_t num_t2renames;
40558- atomic_t num_ffirst;
40559- atomic_t num_fnext;
40560- atomic_t num_fclose;
40561- atomic_t num_hardlinks;
40562- atomic_t num_symlinks;
40563- atomic_t num_locks;
40564- atomic_t num_acl_get;
40565- atomic_t num_acl_set;
40566+ atomic_unchecked_t num_smbs_sent;
40567+ atomic_unchecked_t num_writes;
40568+ atomic_unchecked_t num_reads;
40569+ atomic_unchecked_t num_flushes;
40570+ atomic_unchecked_t num_oplock_brks;
40571+ atomic_unchecked_t num_opens;
40572+ atomic_unchecked_t num_closes;
40573+ atomic_unchecked_t num_deletes;
40574+ atomic_unchecked_t num_mkdirs;
40575+ atomic_unchecked_t num_posixopens;
40576+ atomic_unchecked_t num_posixmkdirs;
40577+ atomic_unchecked_t num_rmdirs;
40578+ atomic_unchecked_t num_renames;
40579+ atomic_unchecked_t num_t2renames;
40580+ atomic_unchecked_t num_ffirst;
40581+ atomic_unchecked_t num_fnext;
40582+ atomic_unchecked_t num_fclose;
40583+ atomic_unchecked_t num_hardlinks;
40584+ atomic_unchecked_t num_symlinks;
40585+ atomic_unchecked_t num_locks;
40586+ atomic_unchecked_t num_acl_get;
40587+ atomic_unchecked_t num_acl_set;
40588 #ifdef CONFIG_CIFS_STATS2
40589 unsigned long long time_writes;
40590 unsigned long long time_reads;
40591@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40592 }
40593
40594 #ifdef CONFIG_CIFS_STATS
40595-#define cifs_stats_inc atomic_inc
40596+#define cifs_stats_inc atomic_inc_unchecked
40597
40598 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40599 unsigned int bytes)
40600@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40601 /* Various Debug counters */
40602 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40603 #ifdef CONFIG_CIFS_STATS2
40604-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40605-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40606+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40607+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40608 #endif
40609 GLOBAL_EXTERN atomic_t smBufAllocCount;
40610 GLOBAL_EXTERN atomic_t midCount;
40611diff -urNp linux-3.0.7/fs/cifs/link.c linux-3.0.7/fs/cifs/link.c
40612--- linux-3.0.7/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
40613+++ linux-3.0.7/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
40614@@ -587,7 +587,7 @@ symlink_exit:
40615
40616 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40617 {
40618- char *p = nd_get_link(nd);
40619+ const char *p = nd_get_link(nd);
40620 if (!IS_ERR(p))
40621 kfree(p);
40622 }
40623diff -urNp linux-3.0.7/fs/cifs/misc.c linux-3.0.7/fs/cifs/misc.c
40624--- linux-3.0.7/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
40625+++ linux-3.0.7/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
40626@@ -156,7 +156,7 @@ cifs_buf_get(void)
40627 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40628 atomic_inc(&bufAllocCount);
40629 #ifdef CONFIG_CIFS_STATS2
40630- atomic_inc(&totBufAllocCount);
40631+ atomic_inc_unchecked(&totBufAllocCount);
40632 #endif /* CONFIG_CIFS_STATS2 */
40633 }
40634
40635@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40636 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40637 atomic_inc(&smBufAllocCount);
40638 #ifdef CONFIG_CIFS_STATS2
40639- atomic_inc(&totSmBufAllocCount);
40640+ atomic_inc_unchecked(&totSmBufAllocCount);
40641 #endif /* CONFIG_CIFS_STATS2 */
40642
40643 }
40644diff -urNp linux-3.0.7/fs/coda/cache.c linux-3.0.7/fs/coda/cache.c
40645--- linux-3.0.7/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
40646+++ linux-3.0.7/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
40647@@ -24,7 +24,7 @@
40648 #include "coda_linux.h"
40649 #include "coda_cache.h"
40650
40651-static atomic_t permission_epoch = ATOMIC_INIT(0);
40652+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40653
40654 /* replace or extend an acl cache hit */
40655 void coda_cache_enter(struct inode *inode, int mask)
40656@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
40657 struct coda_inode_info *cii = ITOC(inode);
40658
40659 spin_lock(&cii->c_lock);
40660- cii->c_cached_epoch = atomic_read(&permission_epoch);
40661+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40662 if (cii->c_uid != current_fsuid()) {
40663 cii->c_uid = current_fsuid();
40664 cii->c_cached_perm = mask;
40665@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
40666 {
40667 struct coda_inode_info *cii = ITOC(inode);
40668 spin_lock(&cii->c_lock);
40669- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40670+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40671 spin_unlock(&cii->c_lock);
40672 }
40673
40674 /* remove all acl caches */
40675 void coda_cache_clear_all(struct super_block *sb)
40676 {
40677- atomic_inc(&permission_epoch);
40678+ atomic_inc_unchecked(&permission_epoch);
40679 }
40680
40681
40682@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
40683 spin_lock(&cii->c_lock);
40684 hit = (mask & cii->c_cached_perm) == mask &&
40685 cii->c_uid == current_fsuid() &&
40686- cii->c_cached_epoch == atomic_read(&permission_epoch);
40687+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40688 spin_unlock(&cii->c_lock);
40689
40690 return hit;
40691diff -urNp linux-3.0.7/fs/compat.c linux-3.0.7/fs/compat.c
40692--- linux-3.0.7/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
40693+++ linux-3.0.7/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
40694@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
40695 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
40696 {
40697 compat_ino_t ino = stat->ino;
40698- typeof(ubuf->st_uid) uid = 0;
40699- typeof(ubuf->st_gid) gid = 0;
40700+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
40701+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
40702 int err;
40703
40704 SET_UID(uid, stat->uid);
40705@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
40706
40707 set_fs(KERNEL_DS);
40708 /* The __user pointer cast is valid because of the set_fs() */
40709- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
40710+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
40711 set_fs(oldfs);
40712 /* truncating is ok because it's a user address */
40713 if (!ret)
40714@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
40715 goto out;
40716
40717 ret = -EINVAL;
40718- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
40719+ if (nr_segs > UIO_MAXIOV)
40720 goto out;
40721 if (nr_segs > fast_segs) {
40722 ret = -ENOMEM;
40723@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
40724
40725 struct compat_readdir_callback {
40726 struct compat_old_linux_dirent __user *dirent;
40727+ struct file * file;
40728 int result;
40729 };
40730
40731@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
40732 buf->result = -EOVERFLOW;
40733 return -EOVERFLOW;
40734 }
40735+
40736+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40737+ return 0;
40738+
40739 buf->result++;
40740 dirent = buf->dirent;
40741 if (!access_ok(VERIFY_WRITE, dirent,
40742@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
40743
40744 buf.result = 0;
40745 buf.dirent = dirent;
40746+ buf.file = file;
40747
40748 error = vfs_readdir(file, compat_fillonedir, &buf);
40749 if (buf.result)
40750@@ -917,6 +923,7 @@ struct compat_linux_dirent {
40751 struct compat_getdents_callback {
40752 struct compat_linux_dirent __user *current_dir;
40753 struct compat_linux_dirent __user *previous;
40754+ struct file * file;
40755 int count;
40756 int error;
40757 };
40758@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
40759 buf->error = -EOVERFLOW;
40760 return -EOVERFLOW;
40761 }
40762+
40763+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40764+ return 0;
40765+
40766 dirent = buf->previous;
40767 if (dirent) {
40768 if (__put_user(offset, &dirent->d_off))
40769@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
40770 buf.previous = NULL;
40771 buf.count = count;
40772 buf.error = 0;
40773+ buf.file = file;
40774
40775 error = vfs_readdir(file, compat_filldir, &buf);
40776 if (error >= 0)
40777@@ -1006,6 +1018,7 @@ out:
40778 struct compat_getdents_callback64 {
40779 struct linux_dirent64 __user *current_dir;
40780 struct linux_dirent64 __user *previous;
40781+ struct file * file;
40782 int count;
40783 int error;
40784 };
40785@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
40786 buf->error = -EINVAL; /* only used if we fail.. */
40787 if (reclen > buf->count)
40788 return -EINVAL;
40789+
40790+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40791+ return 0;
40792+
40793 dirent = buf->previous;
40794
40795 if (dirent) {
40796@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
40797 buf.previous = NULL;
40798 buf.count = count;
40799 buf.error = 0;
40800+ buf.file = file;
40801
40802 error = vfs_readdir(file, compat_filldir64, &buf);
40803 if (error >= 0)
40804 error = buf.error;
40805 lastdirent = buf.previous;
40806 if (lastdirent) {
40807- typeof(lastdirent->d_off) d_off = file->f_pos;
40808+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
40809 if (__put_user_unaligned(d_off, &lastdirent->d_off))
40810 error = -EFAULT;
40811 else
40812@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
40813 struct fdtable *fdt;
40814 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40815
40816+ pax_track_stack();
40817+
40818 if (n < 0)
40819 goto out_nofds;
40820
40821@@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
40822 oldfs = get_fs();
40823 set_fs(KERNEL_DS);
40824 /* The __user pointer casts are valid because of the set_fs() */
40825- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
40826+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
40827 set_fs(oldfs);
40828
40829 if (err)
40830diff -urNp linux-3.0.7/fs/compat_binfmt_elf.c linux-3.0.7/fs/compat_binfmt_elf.c
40831--- linux-3.0.7/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
40832+++ linux-3.0.7/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
40833@@ -30,11 +30,13 @@
40834 #undef elf_phdr
40835 #undef elf_shdr
40836 #undef elf_note
40837+#undef elf_dyn
40838 #undef elf_addr_t
40839 #define elfhdr elf32_hdr
40840 #define elf_phdr elf32_phdr
40841 #define elf_shdr elf32_shdr
40842 #define elf_note elf32_note
40843+#define elf_dyn Elf32_Dyn
40844 #define elf_addr_t Elf32_Addr
40845
40846 /*
40847diff -urNp linux-3.0.7/fs/compat_ioctl.c linux-3.0.7/fs/compat_ioctl.c
40848--- linux-3.0.7/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
40849+++ linux-3.0.7/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
40850@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
40851
40852 err = get_user(palp, &up->palette);
40853 err |= get_user(length, &up->length);
40854+ if (err)
40855+ return -EFAULT;
40856
40857 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40858 err = put_user(compat_ptr(palp), &up_native->palette);
40859@@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
40860 return -EFAULT;
40861 if (__get_user(udata, &ss32->iomem_base))
40862 return -EFAULT;
40863- ss.iomem_base = compat_ptr(udata);
40864+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
40865 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
40866 __get_user(ss.port_high, &ss32->port_high))
40867 return -EFAULT;
40868@@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
40869 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
40870 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
40871 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
40872- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40873+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40874 return -EFAULT;
40875
40876 return ioctl_preallocate(file, p);
40877@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
40878 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
40879 {
40880 unsigned int a, b;
40881- a = *(unsigned int *)p;
40882- b = *(unsigned int *)q;
40883+ a = *(const unsigned int *)p;
40884+ b = *(const unsigned int *)q;
40885 if (a > b)
40886 return 1;
40887 if (a < b)
40888diff -urNp linux-3.0.7/fs/configfs/dir.c linux-3.0.7/fs/configfs/dir.c
40889--- linux-3.0.7/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40890+++ linux-3.0.7/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
40891@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
40892 }
40893 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40894 struct configfs_dirent *next;
40895- const char * name;
40896+ const unsigned char * name;
40897+ char d_name[sizeof(next->s_dentry->d_iname)];
40898 int len;
40899 struct inode *inode = NULL;
40900
40901@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
40902 continue;
40903
40904 name = configfs_get_name(next);
40905- len = strlen(name);
40906+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40907+ len = next->s_dentry->d_name.len;
40908+ memcpy(d_name, name, len);
40909+ name = d_name;
40910+ } else
40911+ len = strlen(name);
40912
40913 /*
40914 * We'll have a dentry and an inode for
40915diff -urNp linux-3.0.7/fs/dcache.c linux-3.0.7/fs/dcache.c
40916--- linux-3.0.7/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
40917+++ linux-3.0.7/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
40918@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
40919 mempages -= reserve;
40920
40921 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40922- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40923+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40924
40925 dcache_init();
40926 inode_init();
40927diff -urNp linux-3.0.7/fs/ecryptfs/inode.c linux-3.0.7/fs/ecryptfs/inode.c
40928--- linux-3.0.7/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
40929+++ linux-3.0.7/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
40930@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
40931 old_fs = get_fs();
40932 set_fs(get_ds());
40933 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40934- (char __user *)lower_buf,
40935+ (char __force_user *)lower_buf,
40936 lower_bufsiz);
40937 set_fs(old_fs);
40938 if (rc < 0)
40939@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
40940 }
40941 old_fs = get_fs();
40942 set_fs(get_ds());
40943- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40944+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
40945 set_fs(old_fs);
40946 if (rc < 0) {
40947 kfree(buf);
40948@@ -765,7 +765,7 @@ out:
40949 static void
40950 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
40951 {
40952- char *buf = nd_get_link(nd);
40953+ const char *buf = nd_get_link(nd);
40954 if (!IS_ERR(buf)) {
40955 /* Free the char* */
40956 kfree(buf);
40957diff -urNp linux-3.0.7/fs/ecryptfs/miscdev.c linux-3.0.7/fs/ecryptfs/miscdev.c
40958--- linux-3.0.7/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
40959+++ linux-3.0.7/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
40960@@ -328,7 +328,7 @@ check_list:
40961 goto out_unlock_msg_ctx;
40962 i = 5;
40963 if (msg_ctx->msg) {
40964- if (copy_to_user(&buf[i], packet_length, packet_length_size))
40965+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
40966 goto out_unlock_msg_ctx;
40967 i += packet_length_size;
40968 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
40969diff -urNp linux-3.0.7/fs/ecryptfs/read_write.c linux-3.0.7/fs/ecryptfs/read_write.c
40970--- linux-3.0.7/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
40971+++ linux-3.0.7/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
40972@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
40973 return -EIO;
40974 fs_save = get_fs();
40975 set_fs(get_ds());
40976- rc = vfs_write(lower_file, data, size, &offset);
40977+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
40978 set_fs(fs_save);
40979 mark_inode_dirty_sync(ecryptfs_inode);
40980 return rc;
40981@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
40982 return -EIO;
40983 fs_save = get_fs();
40984 set_fs(get_ds());
40985- rc = vfs_read(lower_file, data, size, &offset);
40986+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
40987 set_fs(fs_save);
40988 return rc;
40989 }
40990diff -urNp linux-3.0.7/fs/exec.c linux-3.0.7/fs/exec.c
40991--- linux-3.0.7/fs/exec.c 2011-10-17 23:17:09.000000000 -0400
40992+++ linux-3.0.7/fs/exec.c 2011-10-17 23:17:19.000000000 -0400
40993@@ -55,12 +55,24 @@
40994 #include <linux/pipe_fs_i.h>
40995 #include <linux/oom.h>
40996 #include <linux/compat.h>
40997+#include <linux/random.h>
40998+#include <linux/seq_file.h>
40999+
41000+#ifdef CONFIG_PAX_REFCOUNT
41001+#include <linux/kallsyms.h>
41002+#include <linux/kdebug.h>
41003+#endif
41004
41005 #include <asm/uaccess.h>
41006 #include <asm/mmu_context.h>
41007 #include <asm/tlb.h>
41008 #include "internal.h"
41009
41010+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41011+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41012+EXPORT_SYMBOL(pax_set_initial_flags_func);
41013+#endif
41014+
41015 int core_uses_pid;
41016 char core_pattern[CORENAME_MAX_SIZE] = "core";
41017 unsigned int core_pipe_limit;
41018@@ -70,7 +82,7 @@ struct core_name {
41019 char *corename;
41020 int used, size;
41021 };
41022-static atomic_t call_count = ATOMIC_INIT(1);
41023+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41024
41025 /* The maximal length of core_pattern is also specified in sysctl.c */
41026
41027@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
41028 char *tmp = getname(library);
41029 int error = PTR_ERR(tmp);
41030 static const struct open_flags uselib_flags = {
41031- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41032+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41033 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
41034 .intent = LOOKUP_OPEN
41035 };
41036@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
41037 int write)
41038 {
41039 struct page *page;
41040- int ret;
41041
41042-#ifdef CONFIG_STACK_GROWSUP
41043- if (write) {
41044- ret = expand_downwards(bprm->vma, pos);
41045- if (ret < 0)
41046- return NULL;
41047- }
41048-#endif
41049- ret = get_user_pages(current, bprm->mm, pos,
41050- 1, write, 1, &page, NULL);
41051- if (ret <= 0)
41052+ if (0 > expand_downwards(bprm->vma, pos))
41053+ return NULL;
41054+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41055 return NULL;
41056
41057 if (write) {
41058@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
41059 vma->vm_end = STACK_TOP_MAX;
41060 vma->vm_start = vma->vm_end - PAGE_SIZE;
41061 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41062+
41063+#ifdef CONFIG_PAX_SEGMEXEC
41064+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41065+#endif
41066+
41067 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41068 INIT_LIST_HEAD(&vma->anon_vma_chain);
41069
41070@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
41071 mm->stack_vm = mm->total_vm = 1;
41072 up_write(&mm->mmap_sem);
41073 bprm->p = vma->vm_end - sizeof(void *);
41074+
41075+#ifdef CONFIG_PAX_RANDUSTACK
41076+ if (randomize_va_space)
41077+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41078+#endif
41079+
41080 return 0;
41081 err:
41082 up_write(&mm->mmap_sem);
41083@@ -403,19 +418,7 @@ err:
41084 return err;
41085 }
41086
41087-struct user_arg_ptr {
41088-#ifdef CONFIG_COMPAT
41089- bool is_compat;
41090-#endif
41091- union {
41092- const char __user *const __user *native;
41093-#ifdef CONFIG_COMPAT
41094- compat_uptr_t __user *compat;
41095-#endif
41096- } ptr;
41097-};
41098-
41099-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41100+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41101 {
41102 const char __user *native;
41103
41104@@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
41105 compat_uptr_t compat;
41106
41107 if (get_user(compat, argv.ptr.compat + nr))
41108- return ERR_PTR(-EFAULT);
41109+ return (const char __force_user *)ERR_PTR(-EFAULT);
41110
41111 return compat_ptr(compat);
41112 }
41113 #endif
41114
41115 if (get_user(native, argv.ptr.native + nr))
41116- return ERR_PTR(-EFAULT);
41117+ return (const char __force_user *)ERR_PTR(-EFAULT);
41118
41119 return native;
41120 }
41121@@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
41122 if (!p)
41123 break;
41124
41125- if (IS_ERR(p))
41126+ if (IS_ERR((const char __force_kernel *)p))
41127 return -EFAULT;
41128
41129 if (i++ >= max)
41130@@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
41131
41132 ret = -EFAULT;
41133 str = get_user_arg_ptr(argv, argc);
41134- if (IS_ERR(str))
41135+ if (IS_ERR((const char __force_kernel *)str))
41136 goto out;
41137
41138 len = strnlen_user(str, MAX_ARG_STRLEN);
41139@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
41140 int r;
41141 mm_segment_t oldfs = get_fs();
41142 struct user_arg_ptr argv = {
41143- .ptr.native = (const char __user *const __user *)__argv,
41144+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41145 };
41146
41147 set_fs(KERNEL_DS);
41148@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
41149 unsigned long new_end = old_end - shift;
41150 struct mmu_gather tlb;
41151
41152- BUG_ON(new_start > new_end);
41153+ if (new_start >= new_end || new_start < mmap_min_addr)
41154+ return -ENOMEM;
41155
41156 /*
41157 * ensure there are no vmas between where we want to go
41158@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
41159 if (vma != find_vma(mm, new_start))
41160 return -EFAULT;
41161
41162+#ifdef CONFIG_PAX_SEGMEXEC
41163+ BUG_ON(pax_find_mirror_vma(vma));
41164+#endif
41165+
41166 /*
41167 * cover the whole range: [new_start, old_end)
41168 */
41169@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
41170 stack_top = arch_align_stack(stack_top);
41171 stack_top = PAGE_ALIGN(stack_top);
41172
41173- if (unlikely(stack_top < mmap_min_addr) ||
41174- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41175- return -ENOMEM;
41176-
41177 stack_shift = vma->vm_end - stack_top;
41178
41179 bprm->p -= stack_shift;
41180@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
41181 bprm->exec -= stack_shift;
41182
41183 down_write(&mm->mmap_sem);
41184+
41185+ /* Move stack pages down in memory. */
41186+ if (stack_shift) {
41187+ ret = shift_arg_pages(vma, stack_shift);
41188+ if (ret)
41189+ goto out_unlock;
41190+ }
41191+
41192 vm_flags = VM_STACK_FLAGS;
41193
41194+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41195+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41196+ vm_flags &= ~VM_EXEC;
41197+
41198+#ifdef CONFIG_PAX_MPROTECT
41199+ if (mm->pax_flags & MF_PAX_MPROTECT)
41200+ vm_flags &= ~VM_MAYEXEC;
41201+#endif
41202+
41203+ }
41204+#endif
41205+
41206 /*
41207 * Adjust stack execute permissions; explicitly enable for
41208 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41209@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
41210 goto out_unlock;
41211 BUG_ON(prev != vma);
41212
41213- /* Move stack pages down in memory. */
41214- if (stack_shift) {
41215- ret = shift_arg_pages(vma, stack_shift);
41216- if (ret)
41217- goto out_unlock;
41218- }
41219-
41220 /* mprotect_fixup is overkill to remove the temporary stack flags */
41221 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41222
41223@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
41224 struct file *file;
41225 int err;
41226 static const struct open_flags open_exec_flags = {
41227- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41228+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41229 .acc_mode = MAY_EXEC | MAY_OPEN,
41230 .intent = LOOKUP_OPEN
41231 };
41232@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
41233 old_fs = get_fs();
41234 set_fs(get_ds());
41235 /* The cast to a user pointer is valid due to the set_fs() */
41236- result = vfs_read(file, (void __user *)addr, count, &pos);
41237+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41238 set_fs(old_fs);
41239 return result;
41240 }
41241@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
41242 }
41243 rcu_read_unlock();
41244
41245- if (p->fs->users > n_fs) {
41246+ if (atomic_read(&p->fs->users) > n_fs) {
41247 bprm->unsafe |= LSM_UNSAFE_SHARE;
41248 } else {
41249 res = -EAGAIN;
41250@@ -1430,11 +1447,35 @@ static int do_execve_common(const char *
41251 struct user_arg_ptr envp,
41252 struct pt_regs *regs)
41253 {
41254+#ifdef CONFIG_GRKERNSEC
41255+ struct file *old_exec_file;
41256+ struct acl_subject_label *old_acl;
41257+ struct rlimit old_rlim[RLIM_NLIMITS];
41258+#endif
41259 struct linux_binprm *bprm;
41260 struct file *file;
41261 struct files_struct *displaced;
41262 bool clear_in_exec;
41263 int retval;
41264+ const struct cred *cred = current_cred();
41265+
41266+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41267+
41268+ /*
41269+ * We move the actual failure in case of RLIMIT_NPROC excess from
41270+ * set*uid() to execve() because too many poorly written programs
41271+ * don't check setuid() return code. Here we additionally recheck
41272+ * whether NPROC limit is still exceeded.
41273+ */
41274+ if ((current->flags & PF_NPROC_EXCEEDED) &&
41275+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
41276+ retval = -EAGAIN;
41277+ goto out_ret;
41278+ }
41279+
41280+ /* We're below the limit (still or again), so we don't want to make
41281+ * further execve() calls fail. */
41282+ current->flags &= ~PF_NPROC_EXCEEDED;
41283
41284 retval = unshare_files(&displaced);
41285 if (retval)
41286@@ -1466,6 +1507,16 @@ static int do_execve_common(const char *
41287 bprm->filename = filename;
41288 bprm->interp = filename;
41289
41290+ if (gr_process_user_ban()) {
41291+ retval = -EPERM;
41292+ goto out_file;
41293+ }
41294+
41295+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41296+ retval = -EACCES;
41297+ goto out_file;
41298+ }
41299+
41300 retval = bprm_mm_init(bprm);
41301 if (retval)
41302 goto out_file;
41303@@ -1495,9 +1546,40 @@ static int do_execve_common(const char *
41304 if (retval < 0)
41305 goto out;
41306
41307+ if (!gr_tpe_allow(file)) {
41308+ retval = -EACCES;
41309+ goto out;
41310+ }
41311+
41312+ if (gr_check_crash_exec(file)) {
41313+ retval = -EACCES;
41314+ goto out;
41315+ }
41316+
41317+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41318+
41319+ gr_handle_exec_args(bprm, argv);
41320+
41321+#ifdef CONFIG_GRKERNSEC
41322+ old_acl = current->acl;
41323+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41324+ old_exec_file = current->exec_file;
41325+ get_file(file);
41326+ current->exec_file = file;
41327+#endif
41328+
41329+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41330+ bprm->unsafe & LSM_UNSAFE_SHARE);
41331+ if (retval < 0)
41332+ goto out_fail;
41333+
41334 retval = search_binary_handler(bprm,regs);
41335 if (retval < 0)
41336- goto out;
41337+ goto out_fail;
41338+#ifdef CONFIG_GRKERNSEC
41339+ if (old_exec_file)
41340+ fput(old_exec_file);
41341+#endif
41342
41343 /* execve succeeded */
41344 current->fs->in_exec = 0;
41345@@ -1508,6 +1590,14 @@ static int do_execve_common(const char *
41346 put_files_struct(displaced);
41347 return retval;
41348
41349+out_fail:
41350+#ifdef CONFIG_GRKERNSEC
41351+ current->acl = old_acl;
41352+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41353+ fput(current->exec_file);
41354+ current->exec_file = old_exec_file;
41355+#endif
41356+
41357 out:
41358 if (bprm->mm) {
41359 acct_arg_size(bprm, 0);
41360@@ -1581,7 +1671,7 @@ static int expand_corename(struct core_n
41361 {
41362 char *old_corename = cn->corename;
41363
41364- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41365+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41366 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41367
41368 if (!cn->corename) {
41369@@ -1669,7 +1759,7 @@ static int format_corename(struct core_n
41370 int pid_in_pattern = 0;
41371 int err = 0;
41372
41373- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41374+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41375 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41376 cn->used = 0;
41377
41378@@ -1760,6 +1850,219 @@ out:
41379 return ispipe;
41380 }
41381
41382+int pax_check_flags(unsigned long *flags)
41383+{
41384+ int retval = 0;
41385+
41386+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41387+ if (*flags & MF_PAX_SEGMEXEC)
41388+ {
41389+ *flags &= ~MF_PAX_SEGMEXEC;
41390+ retval = -EINVAL;
41391+ }
41392+#endif
41393+
41394+ if ((*flags & MF_PAX_PAGEEXEC)
41395+
41396+#ifdef CONFIG_PAX_PAGEEXEC
41397+ && (*flags & MF_PAX_SEGMEXEC)
41398+#endif
41399+
41400+ )
41401+ {
41402+ *flags &= ~MF_PAX_PAGEEXEC;
41403+ retval = -EINVAL;
41404+ }
41405+
41406+ if ((*flags & MF_PAX_MPROTECT)
41407+
41408+#ifdef CONFIG_PAX_MPROTECT
41409+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41410+#endif
41411+
41412+ )
41413+ {
41414+ *flags &= ~MF_PAX_MPROTECT;
41415+ retval = -EINVAL;
41416+ }
41417+
41418+ if ((*flags & MF_PAX_EMUTRAMP)
41419+
41420+#ifdef CONFIG_PAX_EMUTRAMP
41421+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41422+#endif
41423+
41424+ )
41425+ {
41426+ *flags &= ~MF_PAX_EMUTRAMP;
41427+ retval = -EINVAL;
41428+ }
41429+
41430+ return retval;
41431+}
41432+
41433+EXPORT_SYMBOL(pax_check_flags);
41434+
41435+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41436+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41437+{
41438+ struct task_struct *tsk = current;
41439+ struct mm_struct *mm = current->mm;
41440+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41441+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41442+ char *path_exec = NULL;
41443+ char *path_fault = NULL;
41444+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41445+
41446+ if (buffer_exec && buffer_fault) {
41447+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41448+
41449+ down_read(&mm->mmap_sem);
41450+ vma = mm->mmap;
41451+ while (vma && (!vma_exec || !vma_fault)) {
41452+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41453+ vma_exec = vma;
41454+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41455+ vma_fault = vma;
41456+ vma = vma->vm_next;
41457+ }
41458+ if (vma_exec) {
41459+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41460+ if (IS_ERR(path_exec))
41461+ path_exec = "<path too long>";
41462+ else {
41463+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41464+ if (path_exec) {
41465+ *path_exec = 0;
41466+ path_exec = buffer_exec;
41467+ } else
41468+ path_exec = "<path too long>";
41469+ }
41470+ }
41471+ if (vma_fault) {
41472+ start = vma_fault->vm_start;
41473+ end = vma_fault->vm_end;
41474+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41475+ if (vma_fault->vm_file) {
41476+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41477+ if (IS_ERR(path_fault))
41478+ path_fault = "<path too long>";
41479+ else {
41480+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41481+ if (path_fault) {
41482+ *path_fault = 0;
41483+ path_fault = buffer_fault;
41484+ } else
41485+ path_fault = "<path too long>";
41486+ }
41487+ } else
41488+ path_fault = "<anonymous mapping>";
41489+ }
41490+ up_read(&mm->mmap_sem);
41491+ }
41492+ if (tsk->signal->curr_ip)
41493+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41494+ else
41495+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41496+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41497+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41498+ task_uid(tsk), task_euid(tsk), pc, sp);
41499+ free_page((unsigned long)buffer_exec);
41500+ free_page((unsigned long)buffer_fault);
41501+ pax_report_insns(pc, sp);
41502+ do_coredump(SIGKILL, SIGKILL, regs);
41503+}
41504+#endif
41505+
41506+#ifdef CONFIG_PAX_REFCOUNT
41507+void pax_report_refcount_overflow(struct pt_regs *regs)
41508+{
41509+ if (current->signal->curr_ip)
41510+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41511+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41512+ else
41513+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41514+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41515+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41516+ show_regs(regs);
41517+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41518+}
41519+#endif
41520+
41521+#ifdef CONFIG_PAX_USERCOPY
41522+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41523+int object_is_on_stack(const void *obj, unsigned long len)
41524+{
41525+ const void * const stack = task_stack_page(current);
41526+ const void * const stackend = stack + THREAD_SIZE;
41527+
41528+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41529+ const void *frame = NULL;
41530+ const void *oldframe;
41531+#endif
41532+
41533+ if (obj + len < obj)
41534+ return -1;
41535+
41536+ if (obj + len <= stack || stackend <= obj)
41537+ return 0;
41538+
41539+ if (obj < stack || stackend < obj + len)
41540+ return -1;
41541+
41542+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41543+ oldframe = __builtin_frame_address(1);
41544+ if (oldframe)
41545+ frame = __builtin_frame_address(2);
41546+ /*
41547+ low ----------------------------------------------> high
41548+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41549+ ^----------------^
41550+ allow copies only within here
41551+ */
41552+ while (stack <= frame && frame < stackend) {
41553+ /* if obj + len extends past the last frame, this
41554+ check won't pass and the next frame will be 0,
41555+ causing us to bail out and correctly report
41556+ the copy as invalid
41557+ */
41558+ if (obj + len <= frame)
41559+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41560+ oldframe = frame;
41561+ frame = *(const void * const *)frame;
41562+ }
41563+ return -1;
41564+#else
41565+ return 1;
41566+#endif
41567+}
41568+
41569+
41570+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41571+{
41572+ if (current->signal->curr_ip)
41573+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41574+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41575+ else
41576+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41577+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41578+ dump_stack();
41579+ gr_handle_kernel_exploit();
41580+ do_group_exit(SIGKILL);
41581+}
41582+#endif
41583+
41584+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41585+void pax_track_stack(void)
41586+{
41587+ unsigned long sp = (unsigned long)&sp;
41588+ if (sp < current_thread_info()->lowest_stack &&
41589+ sp > (unsigned long)task_stack_page(current))
41590+ current_thread_info()->lowest_stack = sp;
41591+}
41592+EXPORT_SYMBOL(pax_track_stack);
41593+#endif
41594+
41595 static int zap_process(struct task_struct *start, int exit_code)
41596 {
41597 struct task_struct *t;
41598@@ -1971,17 +2274,17 @@ static void wait_for_dump_helpers(struct
41599 pipe = file->f_path.dentry->d_inode->i_pipe;
41600
41601 pipe_lock(pipe);
41602- pipe->readers++;
41603- pipe->writers--;
41604+ atomic_inc(&pipe->readers);
41605+ atomic_dec(&pipe->writers);
41606
41607- while ((pipe->readers > 1) && (!signal_pending(current))) {
41608+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41609 wake_up_interruptible_sync(&pipe->wait);
41610 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41611 pipe_wait(pipe);
41612 }
41613
41614- pipe->readers--;
41615- pipe->writers++;
41616+ atomic_dec(&pipe->readers);
41617+ atomic_inc(&pipe->writers);
41618 pipe_unlock(pipe);
41619
41620 }
41621@@ -2042,7 +2345,7 @@ void do_coredump(long signr, int exit_co
41622 int retval = 0;
41623 int flag = 0;
41624 int ispipe;
41625- static atomic_t core_dump_count = ATOMIC_INIT(0);
41626+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41627 struct coredump_params cprm = {
41628 .signr = signr,
41629 .regs = regs,
41630@@ -2057,6 +2360,9 @@ void do_coredump(long signr, int exit_co
41631
41632 audit_core_dumps(signr);
41633
41634+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41635+ gr_handle_brute_attach(current, cprm.mm_flags);
41636+
41637 binfmt = mm->binfmt;
41638 if (!binfmt || !binfmt->core_dump)
41639 goto fail;
41640@@ -2097,6 +2403,8 @@ void do_coredump(long signr, int exit_co
41641 goto fail_corename;
41642 }
41643
41644+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41645+
41646 if (ispipe) {
41647 int dump_count;
41648 char **helper_argv;
41649@@ -2124,7 +2432,7 @@ void do_coredump(long signr, int exit_co
41650 }
41651 cprm.limit = RLIM_INFINITY;
41652
41653- dump_count = atomic_inc_return(&core_dump_count);
41654+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41655 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41656 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41657 task_tgid_vnr(current), current->comm);
41658@@ -2194,7 +2502,7 @@ close_fail:
41659 filp_close(cprm.file, NULL);
41660 fail_dropcount:
41661 if (ispipe)
41662- atomic_dec(&core_dump_count);
41663+ atomic_dec_unchecked(&core_dump_count);
41664 fail_unlock:
41665 kfree(cn.corename);
41666 fail_corename:
41667@@ -2213,7 +2521,7 @@ fail:
41668 */
41669 int dump_write(struct file *file, const void *addr, int nr)
41670 {
41671- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41672+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41673 }
41674 EXPORT_SYMBOL(dump_write);
41675
41676diff -urNp linux-3.0.7/fs/ext2/balloc.c linux-3.0.7/fs/ext2/balloc.c
41677--- linux-3.0.7/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
41678+++ linux-3.0.7/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
41679@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41680
41681 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41682 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41683- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41684+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41685 sbi->s_resuid != current_fsuid() &&
41686 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41687 return 0;
41688diff -urNp linux-3.0.7/fs/ext3/balloc.c linux-3.0.7/fs/ext3/balloc.c
41689--- linux-3.0.7/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
41690+++ linux-3.0.7/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
41691@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
41692
41693 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41694 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41695- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41696+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41697 sbi->s_resuid != current_fsuid() &&
41698 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41699 return 0;
41700diff -urNp linux-3.0.7/fs/ext3/ioctl.c linux-3.0.7/fs/ext3/ioctl.c
41701--- linux-3.0.7/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41702+++ linux-3.0.7/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41703@@ -285,7 +285,7 @@ group_add_out:
41704 if (!capable(CAP_SYS_ADMIN))
41705 return -EPERM;
41706
41707- if (copy_from_user(&range, (struct fstrim_range *)arg,
41708+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41709 sizeof(range)))
41710 return -EFAULT;
41711
41712@@ -293,7 +293,7 @@ group_add_out:
41713 if (ret < 0)
41714 return ret;
41715
41716- if (copy_to_user((struct fstrim_range *)arg, &range,
41717+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41718 sizeof(range)))
41719 return -EFAULT;
41720
41721diff -urNp linux-3.0.7/fs/ext4/balloc.c linux-3.0.7/fs/ext4/balloc.c
41722--- linux-3.0.7/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
41723+++ linux-3.0.7/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
41724@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
41725 /* Hm, nope. Are (enough) root reserved blocks available? */
41726 if (sbi->s_resuid == current_fsuid() ||
41727 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41728- capable(CAP_SYS_RESOURCE) ||
41729- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
41730+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
41731+ capable_nolog(CAP_SYS_RESOURCE)) {
41732
41733 if (free_blocks >= (nblocks + dirty_blocks))
41734 return 1;
41735diff -urNp linux-3.0.7/fs/ext4/ext4.h linux-3.0.7/fs/ext4/ext4.h
41736--- linux-3.0.7/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
41737+++ linux-3.0.7/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
41738@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
41739 unsigned long s_mb_last_start;
41740
41741 /* stats for buddy allocator */
41742- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41743- atomic_t s_bal_success; /* we found long enough chunks */
41744- atomic_t s_bal_allocated; /* in blocks */
41745- atomic_t s_bal_ex_scanned; /* total extents scanned */
41746- atomic_t s_bal_goals; /* goal hits */
41747- atomic_t s_bal_breaks; /* too long searches */
41748- atomic_t s_bal_2orders; /* 2^order hits */
41749+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41750+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41751+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41752+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41753+ atomic_unchecked_t s_bal_goals; /* goal hits */
41754+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41755+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41756 spinlock_t s_bal_lock;
41757 unsigned long s_mb_buddies_generated;
41758 unsigned long long s_mb_generation_time;
41759- atomic_t s_mb_lost_chunks;
41760- atomic_t s_mb_preallocated;
41761- atomic_t s_mb_discarded;
41762+ atomic_unchecked_t s_mb_lost_chunks;
41763+ atomic_unchecked_t s_mb_preallocated;
41764+ atomic_unchecked_t s_mb_discarded;
41765 atomic_t s_lock_busy;
41766
41767 /* locality groups */
41768diff -urNp linux-3.0.7/fs/ext4/file.c linux-3.0.7/fs/ext4/file.c
41769--- linux-3.0.7/fs/ext4/file.c 2011-07-21 22:17:23.000000000 -0400
41770+++ linux-3.0.7/fs/ext4/file.c 2011-10-17 02:30:30.000000000 -0400
41771@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
41772 path.dentry = mnt->mnt_root;
41773 cp = d_path(&path, buf, sizeof(buf));
41774 if (!IS_ERR(cp)) {
41775- memcpy(sbi->s_es->s_last_mounted, cp,
41776- sizeof(sbi->s_es->s_last_mounted));
41777+ strlcpy(sbi->s_es->s_last_mounted, cp,
41778+ sizeof(sbi->s_es->s_last_mounted));
41779 ext4_mark_super_dirty(sb);
41780 }
41781 }
41782diff -urNp linux-3.0.7/fs/ext4/ioctl.c linux-3.0.7/fs/ext4/ioctl.c
41783--- linux-3.0.7/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41784+++ linux-3.0.7/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41785@@ -344,7 +344,7 @@ mext_out:
41786 if (!blk_queue_discard(q))
41787 return -EOPNOTSUPP;
41788
41789- if (copy_from_user(&range, (struct fstrim_range *)arg,
41790+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41791 sizeof(range)))
41792 return -EFAULT;
41793
41794@@ -354,7 +354,7 @@ mext_out:
41795 if (ret < 0)
41796 return ret;
41797
41798- if (copy_to_user((struct fstrim_range *)arg, &range,
41799+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41800 sizeof(range)))
41801 return -EFAULT;
41802
41803diff -urNp linux-3.0.7/fs/ext4/mballoc.c linux-3.0.7/fs/ext4/mballoc.c
41804--- linux-3.0.7/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
41805+++ linux-3.0.7/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
41806@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
41807 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41808
41809 if (EXT4_SB(sb)->s_mb_stats)
41810- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41811+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41812
41813 break;
41814 }
41815@@ -2087,7 +2087,7 @@ repeat:
41816 ac->ac_status = AC_STATUS_CONTINUE;
41817 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41818 cr = 3;
41819- atomic_inc(&sbi->s_mb_lost_chunks);
41820+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41821 goto repeat;
41822 }
41823 }
41824@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
41825 ext4_grpblk_t counters[16];
41826 } sg;
41827
41828+ pax_track_stack();
41829+
41830 group--;
41831 if (group == 0)
41832 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41833@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
41834 if (sbi->s_mb_stats) {
41835 printk(KERN_INFO
41836 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41837- atomic_read(&sbi->s_bal_allocated),
41838- atomic_read(&sbi->s_bal_reqs),
41839- atomic_read(&sbi->s_bal_success));
41840+ atomic_read_unchecked(&sbi->s_bal_allocated),
41841+ atomic_read_unchecked(&sbi->s_bal_reqs),
41842+ atomic_read_unchecked(&sbi->s_bal_success));
41843 printk(KERN_INFO
41844 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41845 "%u 2^N hits, %u breaks, %u lost\n",
41846- atomic_read(&sbi->s_bal_ex_scanned),
41847- atomic_read(&sbi->s_bal_goals),
41848- atomic_read(&sbi->s_bal_2orders),
41849- atomic_read(&sbi->s_bal_breaks),
41850- atomic_read(&sbi->s_mb_lost_chunks));
41851+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41852+ atomic_read_unchecked(&sbi->s_bal_goals),
41853+ atomic_read_unchecked(&sbi->s_bal_2orders),
41854+ atomic_read_unchecked(&sbi->s_bal_breaks),
41855+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41856 printk(KERN_INFO
41857 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41858 sbi->s_mb_buddies_generated++,
41859 sbi->s_mb_generation_time);
41860 printk(KERN_INFO
41861 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41862- atomic_read(&sbi->s_mb_preallocated),
41863- atomic_read(&sbi->s_mb_discarded));
41864+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41865+ atomic_read_unchecked(&sbi->s_mb_discarded));
41866 }
41867
41868 free_percpu(sbi->s_locality_groups);
41869@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
41870 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41871
41872 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41873- atomic_inc(&sbi->s_bal_reqs);
41874- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41875+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41876+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41877 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
41878- atomic_inc(&sbi->s_bal_success);
41879- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41880+ atomic_inc_unchecked(&sbi->s_bal_success);
41881+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41882 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41883 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41884- atomic_inc(&sbi->s_bal_goals);
41885+ atomic_inc_unchecked(&sbi->s_bal_goals);
41886 if (ac->ac_found > sbi->s_mb_max_to_scan)
41887- atomic_inc(&sbi->s_bal_breaks);
41888+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41889 }
41890
41891 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41892@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41893 trace_ext4_mb_new_inode_pa(ac, pa);
41894
41895 ext4_mb_use_inode_pa(ac, pa);
41896- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41897+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41898
41899 ei = EXT4_I(ac->ac_inode);
41900 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41901@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41902 trace_ext4_mb_new_group_pa(ac, pa);
41903
41904 ext4_mb_use_group_pa(ac, pa);
41905- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41906+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41907
41908 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41909 lg = ac->ac_lg;
41910@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41911 * from the bitmap and continue.
41912 */
41913 }
41914- atomic_add(free, &sbi->s_mb_discarded);
41915+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41916
41917 return err;
41918 }
41919@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41920 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41921 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41922 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41923- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41924+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41925 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
41926
41927 return 0;
41928diff -urNp linux-3.0.7/fs/fcntl.c linux-3.0.7/fs/fcntl.c
41929--- linux-3.0.7/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
41930+++ linux-3.0.7/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
41931@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
41932 if (err)
41933 return err;
41934
41935+ if (gr_handle_chroot_fowner(pid, type))
41936+ return -ENOENT;
41937+ if (gr_check_protected_task_fowner(pid, type))
41938+ return -EACCES;
41939+
41940 f_modown(filp, pid, type, force);
41941 return 0;
41942 }
41943@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
41944
41945 static int f_setown_ex(struct file *filp, unsigned long arg)
41946 {
41947- struct f_owner_ex * __user owner_p = (void * __user)arg;
41948+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41949 struct f_owner_ex owner;
41950 struct pid *pid;
41951 int type;
41952@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
41953
41954 static int f_getown_ex(struct file *filp, unsigned long arg)
41955 {
41956- struct f_owner_ex * __user owner_p = (void * __user)arg;
41957+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41958 struct f_owner_ex owner;
41959 int ret = 0;
41960
41961@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
41962 switch (cmd) {
41963 case F_DUPFD:
41964 case F_DUPFD_CLOEXEC:
41965+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41966 if (arg >= rlimit(RLIMIT_NOFILE))
41967 break;
41968 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41969@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
41970 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
41971 * is defined as O_NONBLOCK on some platforms and not on others.
41972 */
41973- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41974+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41975 O_RDONLY | O_WRONLY | O_RDWR |
41976 O_CREAT | O_EXCL | O_NOCTTY |
41977 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
41978 __O_SYNC | O_DSYNC | FASYNC |
41979 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
41980 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
41981- __FMODE_EXEC | O_PATH
41982+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
41983 ));
41984
41985 fasync_cache = kmem_cache_create("fasync_cache",
41986diff -urNp linux-3.0.7/fs/fifo.c linux-3.0.7/fs/fifo.c
41987--- linux-3.0.7/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
41988+++ linux-3.0.7/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
41989@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
41990 */
41991 filp->f_op = &read_pipefifo_fops;
41992 pipe->r_counter++;
41993- if (pipe->readers++ == 0)
41994+ if (atomic_inc_return(&pipe->readers) == 1)
41995 wake_up_partner(inode);
41996
41997- if (!pipe->writers) {
41998+ if (!atomic_read(&pipe->writers)) {
41999 if ((filp->f_flags & O_NONBLOCK)) {
42000 /* suppress POLLHUP until we have
42001 * seen a writer */
42002@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42003 * errno=ENXIO when there is no process reading the FIFO.
42004 */
42005 ret = -ENXIO;
42006- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42007+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42008 goto err;
42009
42010 filp->f_op = &write_pipefifo_fops;
42011 pipe->w_counter++;
42012- if (!pipe->writers++)
42013+ if (atomic_inc_return(&pipe->writers) == 1)
42014 wake_up_partner(inode);
42015
42016- if (!pipe->readers) {
42017+ if (!atomic_read(&pipe->readers)) {
42018 wait_for_partner(inode, &pipe->r_counter);
42019 if (signal_pending(current))
42020 goto err_wr;
42021@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42022 */
42023 filp->f_op = &rdwr_pipefifo_fops;
42024
42025- pipe->readers++;
42026- pipe->writers++;
42027+ atomic_inc(&pipe->readers);
42028+ atomic_inc(&pipe->writers);
42029 pipe->r_counter++;
42030 pipe->w_counter++;
42031- if (pipe->readers == 1 || pipe->writers == 1)
42032+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42033 wake_up_partner(inode);
42034 break;
42035
42036@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42037 return 0;
42038
42039 err_rd:
42040- if (!--pipe->readers)
42041+ if (atomic_dec_and_test(&pipe->readers))
42042 wake_up_interruptible(&pipe->wait);
42043 ret = -ERESTARTSYS;
42044 goto err;
42045
42046 err_wr:
42047- if (!--pipe->writers)
42048+ if (atomic_dec_and_test(&pipe->writers))
42049 wake_up_interruptible(&pipe->wait);
42050 ret = -ERESTARTSYS;
42051 goto err;
42052
42053 err:
42054- if (!pipe->readers && !pipe->writers)
42055+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42056 free_pipe_info(inode);
42057
42058 err_nocleanup:
42059diff -urNp linux-3.0.7/fs/file.c linux-3.0.7/fs/file.c
42060--- linux-3.0.7/fs/file.c 2011-07-21 22:17:23.000000000 -0400
42061+++ linux-3.0.7/fs/file.c 2011-08-23 21:48:14.000000000 -0400
42062@@ -15,6 +15,7 @@
42063 #include <linux/slab.h>
42064 #include <linux/vmalloc.h>
42065 #include <linux/file.h>
42066+#include <linux/security.h>
42067 #include <linux/fdtable.h>
42068 #include <linux/bitops.h>
42069 #include <linux/interrupt.h>
42070@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42071 * N.B. For clone tasks sharing a files structure, this test
42072 * will limit the total number of files that can be opened.
42073 */
42074+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42075 if (nr >= rlimit(RLIMIT_NOFILE))
42076 return -EMFILE;
42077
42078diff -urNp linux-3.0.7/fs/filesystems.c linux-3.0.7/fs/filesystems.c
42079--- linux-3.0.7/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
42080+++ linux-3.0.7/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
42081@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42082 int len = dot ? dot - name : strlen(name);
42083
42084 fs = __get_fs_type(name, len);
42085+
42086+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42087+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42088+#else
42089 if (!fs && (request_module("%.*s", len, name) == 0))
42090+#endif
42091 fs = __get_fs_type(name, len);
42092
42093 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42094diff -urNp linux-3.0.7/fs/fs_struct.c linux-3.0.7/fs/fs_struct.c
42095--- linux-3.0.7/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
42096+++ linux-3.0.7/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
42097@@ -4,6 +4,7 @@
42098 #include <linux/path.h>
42099 #include <linux/slab.h>
42100 #include <linux/fs_struct.h>
42101+#include <linux/grsecurity.h>
42102 #include "internal.h"
42103
42104 static inline void path_get_longterm(struct path *path)
42105@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
42106 old_root = fs->root;
42107 fs->root = *path;
42108 path_get_longterm(path);
42109+ gr_set_chroot_entries(current, path);
42110 write_seqcount_end(&fs->seq);
42111 spin_unlock(&fs->lock);
42112 if (old_root.dentry)
42113@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
42114 && fs->root.mnt == old_root->mnt) {
42115 path_get_longterm(new_root);
42116 fs->root = *new_root;
42117+ gr_set_chroot_entries(p, new_root);
42118 count++;
42119 }
42120 if (fs->pwd.dentry == old_root->dentry
42121@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42122 spin_lock(&fs->lock);
42123 write_seqcount_begin(&fs->seq);
42124 tsk->fs = NULL;
42125- kill = !--fs->users;
42126+ gr_clear_chroot_entries(tsk);
42127+ kill = !atomic_dec_return(&fs->users);
42128 write_seqcount_end(&fs->seq);
42129 spin_unlock(&fs->lock);
42130 task_unlock(tsk);
42131@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
42132 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42133 /* We don't need to lock fs - think why ;-) */
42134 if (fs) {
42135- fs->users = 1;
42136+ atomic_set(&fs->users, 1);
42137 fs->in_exec = 0;
42138 spin_lock_init(&fs->lock);
42139 seqcount_init(&fs->seq);
42140@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
42141 spin_lock(&old->lock);
42142 fs->root = old->root;
42143 path_get_longterm(&fs->root);
42144+ /* instead of calling gr_set_chroot_entries here,
42145+ we call it from every caller of this function
42146+ */
42147 fs->pwd = old->pwd;
42148 path_get_longterm(&fs->pwd);
42149 spin_unlock(&old->lock);
42150@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42151
42152 task_lock(current);
42153 spin_lock(&fs->lock);
42154- kill = !--fs->users;
42155+ kill = !atomic_dec_return(&fs->users);
42156 current->fs = new_fs;
42157+ gr_set_chroot_entries(current, &new_fs->root);
42158 spin_unlock(&fs->lock);
42159 task_unlock(current);
42160
42161@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42162
42163 /* to be mentioned only in INIT_TASK */
42164 struct fs_struct init_fs = {
42165- .users = 1,
42166+ .users = ATOMIC_INIT(1),
42167 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42168 .seq = SEQCNT_ZERO,
42169 .umask = 0022,
42170@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42171 task_lock(current);
42172
42173 spin_lock(&init_fs.lock);
42174- init_fs.users++;
42175+ atomic_inc(&init_fs.users);
42176 spin_unlock(&init_fs.lock);
42177
42178 spin_lock(&fs->lock);
42179 current->fs = &init_fs;
42180- kill = !--fs->users;
42181+ gr_set_chroot_entries(current, &current->fs->root);
42182+ kill = !atomic_dec_return(&fs->users);
42183 spin_unlock(&fs->lock);
42184
42185 task_unlock(current);
42186diff -urNp linux-3.0.7/fs/fscache/cookie.c linux-3.0.7/fs/fscache/cookie.c
42187--- linux-3.0.7/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
42188+++ linux-3.0.7/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
42189@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42190 parent ? (char *) parent->def->name : "<no-parent>",
42191 def->name, netfs_data);
42192
42193- fscache_stat(&fscache_n_acquires);
42194+ fscache_stat_unchecked(&fscache_n_acquires);
42195
42196 /* if there's no parent cookie, then we don't create one here either */
42197 if (!parent) {
42198- fscache_stat(&fscache_n_acquires_null);
42199+ fscache_stat_unchecked(&fscache_n_acquires_null);
42200 _leave(" [no parent]");
42201 return NULL;
42202 }
42203@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42204 /* allocate and initialise a cookie */
42205 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42206 if (!cookie) {
42207- fscache_stat(&fscache_n_acquires_oom);
42208+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42209 _leave(" [ENOMEM]");
42210 return NULL;
42211 }
42212@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42213
42214 switch (cookie->def->type) {
42215 case FSCACHE_COOKIE_TYPE_INDEX:
42216- fscache_stat(&fscache_n_cookie_index);
42217+ fscache_stat_unchecked(&fscache_n_cookie_index);
42218 break;
42219 case FSCACHE_COOKIE_TYPE_DATAFILE:
42220- fscache_stat(&fscache_n_cookie_data);
42221+ fscache_stat_unchecked(&fscache_n_cookie_data);
42222 break;
42223 default:
42224- fscache_stat(&fscache_n_cookie_special);
42225+ fscache_stat_unchecked(&fscache_n_cookie_special);
42226 break;
42227 }
42228
42229@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42230 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42231 atomic_dec(&parent->n_children);
42232 __fscache_cookie_put(cookie);
42233- fscache_stat(&fscache_n_acquires_nobufs);
42234+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42235 _leave(" = NULL");
42236 return NULL;
42237 }
42238 }
42239
42240- fscache_stat(&fscache_n_acquires_ok);
42241+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42242 _leave(" = %p", cookie);
42243 return cookie;
42244 }
42245@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42246 cache = fscache_select_cache_for_object(cookie->parent);
42247 if (!cache) {
42248 up_read(&fscache_addremove_sem);
42249- fscache_stat(&fscache_n_acquires_no_cache);
42250+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42251 _leave(" = -ENOMEDIUM [no cache]");
42252 return -ENOMEDIUM;
42253 }
42254@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42255 object = cache->ops->alloc_object(cache, cookie);
42256 fscache_stat_d(&fscache_n_cop_alloc_object);
42257 if (IS_ERR(object)) {
42258- fscache_stat(&fscache_n_object_no_alloc);
42259+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42260 ret = PTR_ERR(object);
42261 goto error;
42262 }
42263
42264- fscache_stat(&fscache_n_object_alloc);
42265+ fscache_stat_unchecked(&fscache_n_object_alloc);
42266
42267 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42268
42269@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42270 struct fscache_object *object;
42271 struct hlist_node *_p;
42272
42273- fscache_stat(&fscache_n_updates);
42274+ fscache_stat_unchecked(&fscache_n_updates);
42275
42276 if (!cookie) {
42277- fscache_stat(&fscache_n_updates_null);
42278+ fscache_stat_unchecked(&fscache_n_updates_null);
42279 _leave(" [no cookie]");
42280 return;
42281 }
42282@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42283 struct fscache_object *object;
42284 unsigned long event;
42285
42286- fscache_stat(&fscache_n_relinquishes);
42287+ fscache_stat_unchecked(&fscache_n_relinquishes);
42288 if (retire)
42289- fscache_stat(&fscache_n_relinquishes_retire);
42290+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42291
42292 if (!cookie) {
42293- fscache_stat(&fscache_n_relinquishes_null);
42294+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42295 _leave(" [no cookie]");
42296 return;
42297 }
42298@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42299
42300 /* wait for the cookie to finish being instantiated (or to fail) */
42301 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42302- fscache_stat(&fscache_n_relinquishes_waitcrt);
42303+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42304 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42305 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42306 }
42307diff -urNp linux-3.0.7/fs/fscache/internal.h linux-3.0.7/fs/fscache/internal.h
42308--- linux-3.0.7/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
42309+++ linux-3.0.7/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
42310@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42311 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42312 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42313
42314-extern atomic_t fscache_n_op_pend;
42315-extern atomic_t fscache_n_op_run;
42316-extern atomic_t fscache_n_op_enqueue;
42317-extern atomic_t fscache_n_op_deferred_release;
42318-extern atomic_t fscache_n_op_release;
42319-extern atomic_t fscache_n_op_gc;
42320-extern atomic_t fscache_n_op_cancelled;
42321-extern atomic_t fscache_n_op_rejected;
42322-
42323-extern atomic_t fscache_n_attr_changed;
42324-extern atomic_t fscache_n_attr_changed_ok;
42325-extern atomic_t fscache_n_attr_changed_nobufs;
42326-extern atomic_t fscache_n_attr_changed_nomem;
42327-extern atomic_t fscache_n_attr_changed_calls;
42328-
42329-extern atomic_t fscache_n_allocs;
42330-extern atomic_t fscache_n_allocs_ok;
42331-extern atomic_t fscache_n_allocs_wait;
42332-extern atomic_t fscache_n_allocs_nobufs;
42333-extern atomic_t fscache_n_allocs_intr;
42334-extern atomic_t fscache_n_allocs_object_dead;
42335-extern atomic_t fscache_n_alloc_ops;
42336-extern atomic_t fscache_n_alloc_op_waits;
42337-
42338-extern atomic_t fscache_n_retrievals;
42339-extern atomic_t fscache_n_retrievals_ok;
42340-extern atomic_t fscache_n_retrievals_wait;
42341-extern atomic_t fscache_n_retrievals_nodata;
42342-extern atomic_t fscache_n_retrievals_nobufs;
42343-extern atomic_t fscache_n_retrievals_intr;
42344-extern atomic_t fscache_n_retrievals_nomem;
42345-extern atomic_t fscache_n_retrievals_object_dead;
42346-extern atomic_t fscache_n_retrieval_ops;
42347-extern atomic_t fscache_n_retrieval_op_waits;
42348-
42349-extern atomic_t fscache_n_stores;
42350-extern atomic_t fscache_n_stores_ok;
42351-extern atomic_t fscache_n_stores_again;
42352-extern atomic_t fscache_n_stores_nobufs;
42353-extern atomic_t fscache_n_stores_oom;
42354-extern atomic_t fscache_n_store_ops;
42355-extern atomic_t fscache_n_store_calls;
42356-extern atomic_t fscache_n_store_pages;
42357-extern atomic_t fscache_n_store_radix_deletes;
42358-extern atomic_t fscache_n_store_pages_over_limit;
42359-
42360-extern atomic_t fscache_n_store_vmscan_not_storing;
42361-extern atomic_t fscache_n_store_vmscan_gone;
42362-extern atomic_t fscache_n_store_vmscan_busy;
42363-extern atomic_t fscache_n_store_vmscan_cancelled;
42364-
42365-extern atomic_t fscache_n_marks;
42366-extern atomic_t fscache_n_uncaches;
42367-
42368-extern atomic_t fscache_n_acquires;
42369-extern atomic_t fscache_n_acquires_null;
42370-extern atomic_t fscache_n_acquires_no_cache;
42371-extern atomic_t fscache_n_acquires_ok;
42372-extern atomic_t fscache_n_acquires_nobufs;
42373-extern atomic_t fscache_n_acquires_oom;
42374-
42375-extern atomic_t fscache_n_updates;
42376-extern atomic_t fscache_n_updates_null;
42377-extern atomic_t fscache_n_updates_run;
42378-
42379-extern atomic_t fscache_n_relinquishes;
42380-extern atomic_t fscache_n_relinquishes_null;
42381-extern atomic_t fscache_n_relinquishes_waitcrt;
42382-extern atomic_t fscache_n_relinquishes_retire;
42383-
42384-extern atomic_t fscache_n_cookie_index;
42385-extern atomic_t fscache_n_cookie_data;
42386-extern atomic_t fscache_n_cookie_special;
42387-
42388-extern atomic_t fscache_n_object_alloc;
42389-extern atomic_t fscache_n_object_no_alloc;
42390-extern atomic_t fscache_n_object_lookups;
42391-extern atomic_t fscache_n_object_lookups_negative;
42392-extern atomic_t fscache_n_object_lookups_positive;
42393-extern atomic_t fscache_n_object_lookups_timed_out;
42394-extern atomic_t fscache_n_object_created;
42395-extern atomic_t fscache_n_object_avail;
42396-extern atomic_t fscache_n_object_dead;
42397-
42398-extern atomic_t fscache_n_checkaux_none;
42399-extern atomic_t fscache_n_checkaux_okay;
42400-extern atomic_t fscache_n_checkaux_update;
42401-extern atomic_t fscache_n_checkaux_obsolete;
42402+extern atomic_unchecked_t fscache_n_op_pend;
42403+extern atomic_unchecked_t fscache_n_op_run;
42404+extern atomic_unchecked_t fscache_n_op_enqueue;
42405+extern atomic_unchecked_t fscache_n_op_deferred_release;
42406+extern atomic_unchecked_t fscache_n_op_release;
42407+extern atomic_unchecked_t fscache_n_op_gc;
42408+extern atomic_unchecked_t fscache_n_op_cancelled;
42409+extern atomic_unchecked_t fscache_n_op_rejected;
42410+
42411+extern atomic_unchecked_t fscache_n_attr_changed;
42412+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42413+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42414+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42415+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42416+
42417+extern atomic_unchecked_t fscache_n_allocs;
42418+extern atomic_unchecked_t fscache_n_allocs_ok;
42419+extern atomic_unchecked_t fscache_n_allocs_wait;
42420+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42421+extern atomic_unchecked_t fscache_n_allocs_intr;
42422+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42423+extern atomic_unchecked_t fscache_n_alloc_ops;
42424+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42425+
42426+extern atomic_unchecked_t fscache_n_retrievals;
42427+extern atomic_unchecked_t fscache_n_retrievals_ok;
42428+extern atomic_unchecked_t fscache_n_retrievals_wait;
42429+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42430+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42431+extern atomic_unchecked_t fscache_n_retrievals_intr;
42432+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42433+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42434+extern atomic_unchecked_t fscache_n_retrieval_ops;
42435+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42436+
42437+extern atomic_unchecked_t fscache_n_stores;
42438+extern atomic_unchecked_t fscache_n_stores_ok;
42439+extern atomic_unchecked_t fscache_n_stores_again;
42440+extern atomic_unchecked_t fscache_n_stores_nobufs;
42441+extern atomic_unchecked_t fscache_n_stores_oom;
42442+extern atomic_unchecked_t fscache_n_store_ops;
42443+extern atomic_unchecked_t fscache_n_store_calls;
42444+extern atomic_unchecked_t fscache_n_store_pages;
42445+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42446+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42447+
42448+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42449+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42450+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42451+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42452+
42453+extern atomic_unchecked_t fscache_n_marks;
42454+extern atomic_unchecked_t fscache_n_uncaches;
42455+
42456+extern atomic_unchecked_t fscache_n_acquires;
42457+extern atomic_unchecked_t fscache_n_acquires_null;
42458+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42459+extern atomic_unchecked_t fscache_n_acquires_ok;
42460+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42461+extern atomic_unchecked_t fscache_n_acquires_oom;
42462+
42463+extern atomic_unchecked_t fscache_n_updates;
42464+extern atomic_unchecked_t fscache_n_updates_null;
42465+extern atomic_unchecked_t fscache_n_updates_run;
42466+
42467+extern atomic_unchecked_t fscache_n_relinquishes;
42468+extern atomic_unchecked_t fscache_n_relinquishes_null;
42469+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42470+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42471+
42472+extern atomic_unchecked_t fscache_n_cookie_index;
42473+extern atomic_unchecked_t fscache_n_cookie_data;
42474+extern atomic_unchecked_t fscache_n_cookie_special;
42475+
42476+extern atomic_unchecked_t fscache_n_object_alloc;
42477+extern atomic_unchecked_t fscache_n_object_no_alloc;
42478+extern atomic_unchecked_t fscache_n_object_lookups;
42479+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42480+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42481+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42482+extern atomic_unchecked_t fscache_n_object_created;
42483+extern atomic_unchecked_t fscache_n_object_avail;
42484+extern atomic_unchecked_t fscache_n_object_dead;
42485+
42486+extern atomic_unchecked_t fscache_n_checkaux_none;
42487+extern atomic_unchecked_t fscache_n_checkaux_okay;
42488+extern atomic_unchecked_t fscache_n_checkaux_update;
42489+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42490
42491 extern atomic_t fscache_n_cop_alloc_object;
42492 extern atomic_t fscache_n_cop_lookup_object;
42493@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42494 atomic_inc(stat);
42495 }
42496
42497+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42498+{
42499+ atomic_inc_unchecked(stat);
42500+}
42501+
42502 static inline void fscache_stat_d(atomic_t *stat)
42503 {
42504 atomic_dec(stat);
42505@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42506
42507 #define __fscache_stat(stat) (NULL)
42508 #define fscache_stat(stat) do {} while (0)
42509+#define fscache_stat_unchecked(stat) do {} while (0)
42510 #define fscache_stat_d(stat) do {} while (0)
42511 #endif
42512
42513diff -urNp linux-3.0.7/fs/fscache/object.c linux-3.0.7/fs/fscache/object.c
42514--- linux-3.0.7/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
42515+++ linux-3.0.7/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
42516@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42517 /* update the object metadata on disk */
42518 case FSCACHE_OBJECT_UPDATING:
42519 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42520- fscache_stat(&fscache_n_updates_run);
42521+ fscache_stat_unchecked(&fscache_n_updates_run);
42522 fscache_stat(&fscache_n_cop_update_object);
42523 object->cache->ops->update_object(object);
42524 fscache_stat_d(&fscache_n_cop_update_object);
42525@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42526 spin_lock(&object->lock);
42527 object->state = FSCACHE_OBJECT_DEAD;
42528 spin_unlock(&object->lock);
42529- fscache_stat(&fscache_n_object_dead);
42530+ fscache_stat_unchecked(&fscache_n_object_dead);
42531 goto terminal_transit;
42532
42533 /* handle the parent cache of this object being withdrawn from
42534@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42535 spin_lock(&object->lock);
42536 object->state = FSCACHE_OBJECT_DEAD;
42537 spin_unlock(&object->lock);
42538- fscache_stat(&fscache_n_object_dead);
42539+ fscache_stat_unchecked(&fscache_n_object_dead);
42540 goto terminal_transit;
42541
42542 /* complain about the object being woken up once it is
42543@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42544 parent->cookie->def->name, cookie->def->name,
42545 object->cache->tag->name);
42546
42547- fscache_stat(&fscache_n_object_lookups);
42548+ fscache_stat_unchecked(&fscache_n_object_lookups);
42549 fscache_stat(&fscache_n_cop_lookup_object);
42550 ret = object->cache->ops->lookup_object(object);
42551 fscache_stat_d(&fscache_n_cop_lookup_object);
42552@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42553 if (ret == -ETIMEDOUT) {
42554 /* probably stuck behind another object, so move this one to
42555 * the back of the queue */
42556- fscache_stat(&fscache_n_object_lookups_timed_out);
42557+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42558 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42559 }
42560
42561@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42562
42563 spin_lock(&object->lock);
42564 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42565- fscache_stat(&fscache_n_object_lookups_negative);
42566+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42567
42568 /* transit here to allow write requests to begin stacking up
42569 * and read requests to begin returning ENODATA */
42570@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42571 * result, in which case there may be data available */
42572 spin_lock(&object->lock);
42573 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42574- fscache_stat(&fscache_n_object_lookups_positive);
42575+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42576
42577 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42578
42579@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42580 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42581 } else {
42582 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42583- fscache_stat(&fscache_n_object_created);
42584+ fscache_stat_unchecked(&fscache_n_object_created);
42585
42586 object->state = FSCACHE_OBJECT_AVAILABLE;
42587 spin_unlock(&object->lock);
42588@@ -602,7 +602,7 @@ static void fscache_object_available(str
42589 fscache_enqueue_dependents(object);
42590
42591 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42592- fscache_stat(&fscache_n_object_avail);
42593+ fscache_stat_unchecked(&fscache_n_object_avail);
42594
42595 _leave("");
42596 }
42597@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42598 enum fscache_checkaux result;
42599
42600 if (!object->cookie->def->check_aux) {
42601- fscache_stat(&fscache_n_checkaux_none);
42602+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42603 return FSCACHE_CHECKAUX_OKAY;
42604 }
42605
42606@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42607 switch (result) {
42608 /* entry okay as is */
42609 case FSCACHE_CHECKAUX_OKAY:
42610- fscache_stat(&fscache_n_checkaux_okay);
42611+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42612 break;
42613
42614 /* entry requires update */
42615 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42616- fscache_stat(&fscache_n_checkaux_update);
42617+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42618 break;
42619
42620 /* entry requires deletion */
42621 case FSCACHE_CHECKAUX_OBSOLETE:
42622- fscache_stat(&fscache_n_checkaux_obsolete);
42623+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42624 break;
42625
42626 default:
42627diff -urNp linux-3.0.7/fs/fscache/operation.c linux-3.0.7/fs/fscache/operation.c
42628--- linux-3.0.7/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
42629+++ linux-3.0.7/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
42630@@ -17,7 +17,7 @@
42631 #include <linux/slab.h>
42632 #include "internal.h"
42633
42634-atomic_t fscache_op_debug_id;
42635+atomic_unchecked_t fscache_op_debug_id;
42636 EXPORT_SYMBOL(fscache_op_debug_id);
42637
42638 /**
42639@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42640 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42641 ASSERTCMP(atomic_read(&op->usage), >, 0);
42642
42643- fscache_stat(&fscache_n_op_enqueue);
42644+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42645 switch (op->flags & FSCACHE_OP_TYPE) {
42646 case FSCACHE_OP_ASYNC:
42647 _debug("queue async");
42648@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42649 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42650 if (op->processor)
42651 fscache_enqueue_operation(op);
42652- fscache_stat(&fscache_n_op_run);
42653+ fscache_stat_unchecked(&fscache_n_op_run);
42654 }
42655
42656 /*
42657@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42658 if (object->n_ops > 1) {
42659 atomic_inc(&op->usage);
42660 list_add_tail(&op->pend_link, &object->pending_ops);
42661- fscache_stat(&fscache_n_op_pend);
42662+ fscache_stat_unchecked(&fscache_n_op_pend);
42663 } else if (!list_empty(&object->pending_ops)) {
42664 atomic_inc(&op->usage);
42665 list_add_tail(&op->pend_link, &object->pending_ops);
42666- fscache_stat(&fscache_n_op_pend);
42667+ fscache_stat_unchecked(&fscache_n_op_pend);
42668 fscache_start_operations(object);
42669 } else {
42670 ASSERTCMP(object->n_in_progress, ==, 0);
42671@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42672 object->n_exclusive++; /* reads and writes must wait */
42673 atomic_inc(&op->usage);
42674 list_add_tail(&op->pend_link, &object->pending_ops);
42675- fscache_stat(&fscache_n_op_pend);
42676+ fscache_stat_unchecked(&fscache_n_op_pend);
42677 ret = 0;
42678 } else {
42679 /* not allowed to submit ops in any other state */
42680@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42681 if (object->n_exclusive > 0) {
42682 atomic_inc(&op->usage);
42683 list_add_tail(&op->pend_link, &object->pending_ops);
42684- fscache_stat(&fscache_n_op_pend);
42685+ fscache_stat_unchecked(&fscache_n_op_pend);
42686 } else if (!list_empty(&object->pending_ops)) {
42687 atomic_inc(&op->usage);
42688 list_add_tail(&op->pend_link, &object->pending_ops);
42689- fscache_stat(&fscache_n_op_pend);
42690+ fscache_stat_unchecked(&fscache_n_op_pend);
42691 fscache_start_operations(object);
42692 } else {
42693 ASSERTCMP(object->n_exclusive, ==, 0);
42694@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42695 object->n_ops++;
42696 atomic_inc(&op->usage);
42697 list_add_tail(&op->pend_link, &object->pending_ops);
42698- fscache_stat(&fscache_n_op_pend);
42699+ fscache_stat_unchecked(&fscache_n_op_pend);
42700 ret = 0;
42701 } else if (object->state == FSCACHE_OBJECT_DYING ||
42702 object->state == FSCACHE_OBJECT_LC_DYING ||
42703 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42704- fscache_stat(&fscache_n_op_rejected);
42705+ fscache_stat_unchecked(&fscache_n_op_rejected);
42706 ret = -ENOBUFS;
42707 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42708 fscache_report_unexpected_submission(object, op, ostate);
42709@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42710
42711 ret = -EBUSY;
42712 if (!list_empty(&op->pend_link)) {
42713- fscache_stat(&fscache_n_op_cancelled);
42714+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42715 list_del_init(&op->pend_link);
42716 object->n_ops--;
42717 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42718@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42719 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42720 BUG();
42721
42722- fscache_stat(&fscache_n_op_release);
42723+ fscache_stat_unchecked(&fscache_n_op_release);
42724
42725 if (op->release) {
42726 op->release(op);
42727@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42728 * lock, and defer it otherwise */
42729 if (!spin_trylock(&object->lock)) {
42730 _debug("defer put");
42731- fscache_stat(&fscache_n_op_deferred_release);
42732+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42733
42734 cache = object->cache;
42735 spin_lock(&cache->op_gc_list_lock);
42736@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42737
42738 _debug("GC DEFERRED REL OBJ%x OP%x",
42739 object->debug_id, op->debug_id);
42740- fscache_stat(&fscache_n_op_gc);
42741+ fscache_stat_unchecked(&fscache_n_op_gc);
42742
42743 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42744
42745diff -urNp linux-3.0.7/fs/fscache/page.c linux-3.0.7/fs/fscache/page.c
42746--- linux-3.0.7/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
42747+++ linux-3.0.7/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
42748@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42749 val = radix_tree_lookup(&cookie->stores, page->index);
42750 if (!val) {
42751 rcu_read_unlock();
42752- fscache_stat(&fscache_n_store_vmscan_not_storing);
42753+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42754 __fscache_uncache_page(cookie, page);
42755 return true;
42756 }
42757@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42758 spin_unlock(&cookie->stores_lock);
42759
42760 if (xpage) {
42761- fscache_stat(&fscache_n_store_vmscan_cancelled);
42762- fscache_stat(&fscache_n_store_radix_deletes);
42763+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42764+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42765 ASSERTCMP(xpage, ==, page);
42766 } else {
42767- fscache_stat(&fscache_n_store_vmscan_gone);
42768+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42769 }
42770
42771 wake_up_bit(&cookie->flags, 0);
42772@@ -107,7 +107,7 @@ page_busy:
42773 /* we might want to wait here, but that could deadlock the allocator as
42774 * the work threads writing to the cache may all end up sleeping
42775 * on memory allocation */
42776- fscache_stat(&fscache_n_store_vmscan_busy);
42777+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42778 return false;
42779 }
42780 EXPORT_SYMBOL(__fscache_maybe_release_page);
42781@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42782 FSCACHE_COOKIE_STORING_TAG);
42783 if (!radix_tree_tag_get(&cookie->stores, page->index,
42784 FSCACHE_COOKIE_PENDING_TAG)) {
42785- fscache_stat(&fscache_n_store_radix_deletes);
42786+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42787 xpage = radix_tree_delete(&cookie->stores, page->index);
42788 }
42789 spin_unlock(&cookie->stores_lock);
42790@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42791
42792 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42793
42794- fscache_stat(&fscache_n_attr_changed_calls);
42795+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42796
42797 if (fscache_object_is_active(object)) {
42798 fscache_stat(&fscache_n_cop_attr_changed);
42799@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42800
42801 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42802
42803- fscache_stat(&fscache_n_attr_changed);
42804+ fscache_stat_unchecked(&fscache_n_attr_changed);
42805
42806 op = kzalloc(sizeof(*op), GFP_KERNEL);
42807 if (!op) {
42808- fscache_stat(&fscache_n_attr_changed_nomem);
42809+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42810 _leave(" = -ENOMEM");
42811 return -ENOMEM;
42812 }
42813@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
42814 if (fscache_submit_exclusive_op(object, op) < 0)
42815 goto nobufs;
42816 spin_unlock(&cookie->lock);
42817- fscache_stat(&fscache_n_attr_changed_ok);
42818+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42819 fscache_put_operation(op);
42820 _leave(" = 0");
42821 return 0;
42822@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
42823 nobufs:
42824 spin_unlock(&cookie->lock);
42825 kfree(op);
42826- fscache_stat(&fscache_n_attr_changed_nobufs);
42827+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42828 _leave(" = %d", -ENOBUFS);
42829 return -ENOBUFS;
42830 }
42831@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
42832 /* allocate a retrieval operation and attempt to submit it */
42833 op = kzalloc(sizeof(*op), GFP_NOIO);
42834 if (!op) {
42835- fscache_stat(&fscache_n_retrievals_nomem);
42836+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42837 return NULL;
42838 }
42839
42840@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
42841 return 0;
42842 }
42843
42844- fscache_stat(&fscache_n_retrievals_wait);
42845+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42846
42847 jif = jiffies;
42848 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42849 fscache_wait_bit_interruptible,
42850 TASK_INTERRUPTIBLE) != 0) {
42851- fscache_stat(&fscache_n_retrievals_intr);
42852+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42853 _leave(" = -ERESTARTSYS");
42854 return -ERESTARTSYS;
42855 }
42856@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
42857 */
42858 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42859 struct fscache_retrieval *op,
42860- atomic_t *stat_op_waits,
42861- atomic_t *stat_object_dead)
42862+ atomic_unchecked_t *stat_op_waits,
42863+ atomic_unchecked_t *stat_object_dead)
42864 {
42865 int ret;
42866
42867@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
42868 goto check_if_dead;
42869
42870 _debug(">>> WT");
42871- fscache_stat(stat_op_waits);
42872+ fscache_stat_unchecked(stat_op_waits);
42873 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42874 fscache_wait_bit_interruptible,
42875 TASK_INTERRUPTIBLE) < 0) {
42876@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
42877
42878 check_if_dead:
42879 if (unlikely(fscache_object_is_dead(object))) {
42880- fscache_stat(stat_object_dead);
42881+ fscache_stat_unchecked(stat_object_dead);
42882 return -ENOBUFS;
42883 }
42884 return 0;
42885@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
42886
42887 _enter("%p,%p,,,", cookie, page);
42888
42889- fscache_stat(&fscache_n_retrievals);
42890+ fscache_stat_unchecked(&fscache_n_retrievals);
42891
42892 if (hlist_empty(&cookie->backing_objects))
42893 goto nobufs;
42894@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
42895 goto nobufs_unlock;
42896 spin_unlock(&cookie->lock);
42897
42898- fscache_stat(&fscache_n_retrieval_ops);
42899+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42900
42901 /* pin the netfs read context in case we need to do the actual netfs
42902 * read because we've encountered a cache read failure */
42903@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
42904
42905 error:
42906 if (ret == -ENOMEM)
42907- fscache_stat(&fscache_n_retrievals_nomem);
42908+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42909 else if (ret == -ERESTARTSYS)
42910- fscache_stat(&fscache_n_retrievals_intr);
42911+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42912 else if (ret == -ENODATA)
42913- fscache_stat(&fscache_n_retrievals_nodata);
42914+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42915 else if (ret < 0)
42916- fscache_stat(&fscache_n_retrievals_nobufs);
42917+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42918 else
42919- fscache_stat(&fscache_n_retrievals_ok);
42920+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42921
42922 fscache_put_retrieval(op);
42923 _leave(" = %d", ret);
42924@@ -429,7 +429,7 @@ nobufs_unlock:
42925 spin_unlock(&cookie->lock);
42926 kfree(op);
42927 nobufs:
42928- fscache_stat(&fscache_n_retrievals_nobufs);
42929+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42930 _leave(" = -ENOBUFS");
42931 return -ENOBUFS;
42932 }
42933@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
42934
42935 _enter("%p,,%d,,,", cookie, *nr_pages);
42936
42937- fscache_stat(&fscache_n_retrievals);
42938+ fscache_stat_unchecked(&fscache_n_retrievals);
42939
42940 if (hlist_empty(&cookie->backing_objects))
42941 goto nobufs;
42942@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
42943 goto nobufs_unlock;
42944 spin_unlock(&cookie->lock);
42945
42946- fscache_stat(&fscache_n_retrieval_ops);
42947+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42948
42949 /* pin the netfs read context in case we need to do the actual netfs
42950 * read because we've encountered a cache read failure */
42951@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
42952
42953 error:
42954 if (ret == -ENOMEM)
42955- fscache_stat(&fscache_n_retrievals_nomem);
42956+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42957 else if (ret == -ERESTARTSYS)
42958- fscache_stat(&fscache_n_retrievals_intr);
42959+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42960 else if (ret == -ENODATA)
42961- fscache_stat(&fscache_n_retrievals_nodata);
42962+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42963 else if (ret < 0)
42964- fscache_stat(&fscache_n_retrievals_nobufs);
42965+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42966 else
42967- fscache_stat(&fscache_n_retrievals_ok);
42968+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42969
42970 fscache_put_retrieval(op);
42971 _leave(" = %d", ret);
42972@@ -545,7 +545,7 @@ nobufs_unlock:
42973 spin_unlock(&cookie->lock);
42974 kfree(op);
42975 nobufs:
42976- fscache_stat(&fscache_n_retrievals_nobufs);
42977+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42978 _leave(" = -ENOBUFS");
42979 return -ENOBUFS;
42980 }
42981@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
42982
42983 _enter("%p,%p,,,", cookie, page);
42984
42985- fscache_stat(&fscache_n_allocs);
42986+ fscache_stat_unchecked(&fscache_n_allocs);
42987
42988 if (hlist_empty(&cookie->backing_objects))
42989 goto nobufs;
42990@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
42991 goto nobufs_unlock;
42992 spin_unlock(&cookie->lock);
42993
42994- fscache_stat(&fscache_n_alloc_ops);
42995+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42996
42997 ret = fscache_wait_for_retrieval_activation(
42998 object, op,
42999@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43000
43001 error:
43002 if (ret == -ERESTARTSYS)
43003- fscache_stat(&fscache_n_allocs_intr);
43004+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43005 else if (ret < 0)
43006- fscache_stat(&fscache_n_allocs_nobufs);
43007+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43008 else
43009- fscache_stat(&fscache_n_allocs_ok);
43010+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43011
43012 fscache_put_retrieval(op);
43013 _leave(" = %d", ret);
43014@@ -625,7 +625,7 @@ nobufs_unlock:
43015 spin_unlock(&cookie->lock);
43016 kfree(op);
43017 nobufs:
43018- fscache_stat(&fscache_n_allocs_nobufs);
43019+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43020 _leave(" = -ENOBUFS");
43021 return -ENOBUFS;
43022 }
43023@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43024
43025 spin_lock(&cookie->stores_lock);
43026
43027- fscache_stat(&fscache_n_store_calls);
43028+ fscache_stat_unchecked(&fscache_n_store_calls);
43029
43030 /* find a page to store */
43031 page = NULL;
43032@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43033 page = results[0];
43034 _debug("gang %d [%lx]", n, page->index);
43035 if (page->index > op->store_limit) {
43036- fscache_stat(&fscache_n_store_pages_over_limit);
43037+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43038 goto superseded;
43039 }
43040
43041@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43042 spin_unlock(&cookie->stores_lock);
43043 spin_unlock(&object->lock);
43044
43045- fscache_stat(&fscache_n_store_pages);
43046+ fscache_stat_unchecked(&fscache_n_store_pages);
43047 fscache_stat(&fscache_n_cop_write_page);
43048 ret = object->cache->ops->write_page(op, page);
43049 fscache_stat_d(&fscache_n_cop_write_page);
43050@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43051 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43052 ASSERT(PageFsCache(page));
43053
43054- fscache_stat(&fscache_n_stores);
43055+ fscache_stat_unchecked(&fscache_n_stores);
43056
43057 op = kzalloc(sizeof(*op), GFP_NOIO);
43058 if (!op)
43059@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43060 spin_unlock(&cookie->stores_lock);
43061 spin_unlock(&object->lock);
43062
43063- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43064+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43065 op->store_limit = object->store_limit;
43066
43067 if (fscache_submit_op(object, &op->op) < 0)
43068@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43069
43070 spin_unlock(&cookie->lock);
43071 radix_tree_preload_end();
43072- fscache_stat(&fscache_n_store_ops);
43073- fscache_stat(&fscache_n_stores_ok);
43074+ fscache_stat_unchecked(&fscache_n_store_ops);
43075+ fscache_stat_unchecked(&fscache_n_stores_ok);
43076
43077 /* the work queue now carries its own ref on the object */
43078 fscache_put_operation(&op->op);
43079@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43080 return 0;
43081
43082 already_queued:
43083- fscache_stat(&fscache_n_stores_again);
43084+ fscache_stat_unchecked(&fscache_n_stores_again);
43085 already_pending:
43086 spin_unlock(&cookie->stores_lock);
43087 spin_unlock(&object->lock);
43088 spin_unlock(&cookie->lock);
43089 radix_tree_preload_end();
43090 kfree(op);
43091- fscache_stat(&fscache_n_stores_ok);
43092+ fscache_stat_unchecked(&fscache_n_stores_ok);
43093 _leave(" = 0");
43094 return 0;
43095
43096@@ -851,14 +851,14 @@ nobufs:
43097 spin_unlock(&cookie->lock);
43098 radix_tree_preload_end();
43099 kfree(op);
43100- fscache_stat(&fscache_n_stores_nobufs);
43101+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43102 _leave(" = -ENOBUFS");
43103 return -ENOBUFS;
43104
43105 nomem_free:
43106 kfree(op);
43107 nomem:
43108- fscache_stat(&fscache_n_stores_oom);
43109+ fscache_stat_unchecked(&fscache_n_stores_oom);
43110 _leave(" = -ENOMEM");
43111 return -ENOMEM;
43112 }
43113@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43114 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43115 ASSERTCMP(page, !=, NULL);
43116
43117- fscache_stat(&fscache_n_uncaches);
43118+ fscache_stat_unchecked(&fscache_n_uncaches);
43119
43120 /* cache withdrawal may beat us to it */
43121 if (!PageFsCache(page))
43122@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43123 unsigned long loop;
43124
43125 #ifdef CONFIG_FSCACHE_STATS
43126- atomic_add(pagevec->nr, &fscache_n_marks);
43127+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43128 #endif
43129
43130 for (loop = 0; loop < pagevec->nr; loop++) {
43131diff -urNp linux-3.0.7/fs/fscache/stats.c linux-3.0.7/fs/fscache/stats.c
43132--- linux-3.0.7/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
43133+++ linux-3.0.7/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
43134@@ -18,95 +18,95 @@
43135 /*
43136 * operation counters
43137 */
43138-atomic_t fscache_n_op_pend;
43139-atomic_t fscache_n_op_run;
43140-atomic_t fscache_n_op_enqueue;
43141-atomic_t fscache_n_op_requeue;
43142-atomic_t fscache_n_op_deferred_release;
43143-atomic_t fscache_n_op_release;
43144-atomic_t fscache_n_op_gc;
43145-atomic_t fscache_n_op_cancelled;
43146-atomic_t fscache_n_op_rejected;
43147-
43148-atomic_t fscache_n_attr_changed;
43149-atomic_t fscache_n_attr_changed_ok;
43150-atomic_t fscache_n_attr_changed_nobufs;
43151-atomic_t fscache_n_attr_changed_nomem;
43152-atomic_t fscache_n_attr_changed_calls;
43153-
43154-atomic_t fscache_n_allocs;
43155-atomic_t fscache_n_allocs_ok;
43156-atomic_t fscache_n_allocs_wait;
43157-atomic_t fscache_n_allocs_nobufs;
43158-atomic_t fscache_n_allocs_intr;
43159-atomic_t fscache_n_allocs_object_dead;
43160-atomic_t fscache_n_alloc_ops;
43161-atomic_t fscache_n_alloc_op_waits;
43162-
43163-atomic_t fscache_n_retrievals;
43164-atomic_t fscache_n_retrievals_ok;
43165-atomic_t fscache_n_retrievals_wait;
43166-atomic_t fscache_n_retrievals_nodata;
43167-atomic_t fscache_n_retrievals_nobufs;
43168-atomic_t fscache_n_retrievals_intr;
43169-atomic_t fscache_n_retrievals_nomem;
43170-atomic_t fscache_n_retrievals_object_dead;
43171-atomic_t fscache_n_retrieval_ops;
43172-atomic_t fscache_n_retrieval_op_waits;
43173-
43174-atomic_t fscache_n_stores;
43175-atomic_t fscache_n_stores_ok;
43176-atomic_t fscache_n_stores_again;
43177-atomic_t fscache_n_stores_nobufs;
43178-atomic_t fscache_n_stores_oom;
43179-atomic_t fscache_n_store_ops;
43180-atomic_t fscache_n_store_calls;
43181-atomic_t fscache_n_store_pages;
43182-atomic_t fscache_n_store_radix_deletes;
43183-atomic_t fscache_n_store_pages_over_limit;
43184-
43185-atomic_t fscache_n_store_vmscan_not_storing;
43186-atomic_t fscache_n_store_vmscan_gone;
43187-atomic_t fscache_n_store_vmscan_busy;
43188-atomic_t fscache_n_store_vmscan_cancelled;
43189-
43190-atomic_t fscache_n_marks;
43191-atomic_t fscache_n_uncaches;
43192-
43193-atomic_t fscache_n_acquires;
43194-atomic_t fscache_n_acquires_null;
43195-atomic_t fscache_n_acquires_no_cache;
43196-atomic_t fscache_n_acquires_ok;
43197-atomic_t fscache_n_acquires_nobufs;
43198-atomic_t fscache_n_acquires_oom;
43199-
43200-atomic_t fscache_n_updates;
43201-atomic_t fscache_n_updates_null;
43202-atomic_t fscache_n_updates_run;
43203-
43204-atomic_t fscache_n_relinquishes;
43205-atomic_t fscache_n_relinquishes_null;
43206-atomic_t fscache_n_relinquishes_waitcrt;
43207-atomic_t fscache_n_relinquishes_retire;
43208-
43209-atomic_t fscache_n_cookie_index;
43210-atomic_t fscache_n_cookie_data;
43211-atomic_t fscache_n_cookie_special;
43212-
43213-atomic_t fscache_n_object_alloc;
43214-atomic_t fscache_n_object_no_alloc;
43215-atomic_t fscache_n_object_lookups;
43216-atomic_t fscache_n_object_lookups_negative;
43217-atomic_t fscache_n_object_lookups_positive;
43218-atomic_t fscache_n_object_lookups_timed_out;
43219-atomic_t fscache_n_object_created;
43220-atomic_t fscache_n_object_avail;
43221-atomic_t fscache_n_object_dead;
43222-
43223-atomic_t fscache_n_checkaux_none;
43224-atomic_t fscache_n_checkaux_okay;
43225-atomic_t fscache_n_checkaux_update;
43226-atomic_t fscache_n_checkaux_obsolete;
43227+atomic_unchecked_t fscache_n_op_pend;
43228+atomic_unchecked_t fscache_n_op_run;
43229+atomic_unchecked_t fscache_n_op_enqueue;
43230+atomic_unchecked_t fscache_n_op_requeue;
43231+atomic_unchecked_t fscache_n_op_deferred_release;
43232+atomic_unchecked_t fscache_n_op_release;
43233+atomic_unchecked_t fscache_n_op_gc;
43234+atomic_unchecked_t fscache_n_op_cancelled;
43235+atomic_unchecked_t fscache_n_op_rejected;
43236+
43237+atomic_unchecked_t fscache_n_attr_changed;
43238+atomic_unchecked_t fscache_n_attr_changed_ok;
43239+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43240+atomic_unchecked_t fscache_n_attr_changed_nomem;
43241+atomic_unchecked_t fscache_n_attr_changed_calls;
43242+
43243+atomic_unchecked_t fscache_n_allocs;
43244+atomic_unchecked_t fscache_n_allocs_ok;
43245+atomic_unchecked_t fscache_n_allocs_wait;
43246+atomic_unchecked_t fscache_n_allocs_nobufs;
43247+atomic_unchecked_t fscache_n_allocs_intr;
43248+atomic_unchecked_t fscache_n_allocs_object_dead;
43249+atomic_unchecked_t fscache_n_alloc_ops;
43250+atomic_unchecked_t fscache_n_alloc_op_waits;
43251+
43252+atomic_unchecked_t fscache_n_retrievals;
43253+atomic_unchecked_t fscache_n_retrievals_ok;
43254+atomic_unchecked_t fscache_n_retrievals_wait;
43255+atomic_unchecked_t fscache_n_retrievals_nodata;
43256+atomic_unchecked_t fscache_n_retrievals_nobufs;
43257+atomic_unchecked_t fscache_n_retrievals_intr;
43258+atomic_unchecked_t fscache_n_retrievals_nomem;
43259+atomic_unchecked_t fscache_n_retrievals_object_dead;
43260+atomic_unchecked_t fscache_n_retrieval_ops;
43261+atomic_unchecked_t fscache_n_retrieval_op_waits;
43262+
43263+atomic_unchecked_t fscache_n_stores;
43264+atomic_unchecked_t fscache_n_stores_ok;
43265+atomic_unchecked_t fscache_n_stores_again;
43266+atomic_unchecked_t fscache_n_stores_nobufs;
43267+atomic_unchecked_t fscache_n_stores_oom;
43268+atomic_unchecked_t fscache_n_store_ops;
43269+atomic_unchecked_t fscache_n_store_calls;
43270+atomic_unchecked_t fscache_n_store_pages;
43271+atomic_unchecked_t fscache_n_store_radix_deletes;
43272+atomic_unchecked_t fscache_n_store_pages_over_limit;
43273+
43274+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43275+atomic_unchecked_t fscache_n_store_vmscan_gone;
43276+atomic_unchecked_t fscache_n_store_vmscan_busy;
43277+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43278+
43279+atomic_unchecked_t fscache_n_marks;
43280+atomic_unchecked_t fscache_n_uncaches;
43281+
43282+atomic_unchecked_t fscache_n_acquires;
43283+atomic_unchecked_t fscache_n_acquires_null;
43284+atomic_unchecked_t fscache_n_acquires_no_cache;
43285+atomic_unchecked_t fscache_n_acquires_ok;
43286+atomic_unchecked_t fscache_n_acquires_nobufs;
43287+atomic_unchecked_t fscache_n_acquires_oom;
43288+
43289+atomic_unchecked_t fscache_n_updates;
43290+atomic_unchecked_t fscache_n_updates_null;
43291+atomic_unchecked_t fscache_n_updates_run;
43292+
43293+atomic_unchecked_t fscache_n_relinquishes;
43294+atomic_unchecked_t fscache_n_relinquishes_null;
43295+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43296+atomic_unchecked_t fscache_n_relinquishes_retire;
43297+
43298+atomic_unchecked_t fscache_n_cookie_index;
43299+atomic_unchecked_t fscache_n_cookie_data;
43300+atomic_unchecked_t fscache_n_cookie_special;
43301+
43302+atomic_unchecked_t fscache_n_object_alloc;
43303+atomic_unchecked_t fscache_n_object_no_alloc;
43304+atomic_unchecked_t fscache_n_object_lookups;
43305+atomic_unchecked_t fscache_n_object_lookups_negative;
43306+atomic_unchecked_t fscache_n_object_lookups_positive;
43307+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43308+atomic_unchecked_t fscache_n_object_created;
43309+atomic_unchecked_t fscache_n_object_avail;
43310+atomic_unchecked_t fscache_n_object_dead;
43311+
43312+atomic_unchecked_t fscache_n_checkaux_none;
43313+atomic_unchecked_t fscache_n_checkaux_okay;
43314+atomic_unchecked_t fscache_n_checkaux_update;
43315+atomic_unchecked_t fscache_n_checkaux_obsolete;
43316
43317 atomic_t fscache_n_cop_alloc_object;
43318 atomic_t fscache_n_cop_lookup_object;
43319@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43320 seq_puts(m, "FS-Cache statistics\n");
43321
43322 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43323- atomic_read(&fscache_n_cookie_index),
43324- atomic_read(&fscache_n_cookie_data),
43325- atomic_read(&fscache_n_cookie_special));
43326+ atomic_read_unchecked(&fscache_n_cookie_index),
43327+ atomic_read_unchecked(&fscache_n_cookie_data),
43328+ atomic_read_unchecked(&fscache_n_cookie_special));
43329
43330 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43331- atomic_read(&fscache_n_object_alloc),
43332- atomic_read(&fscache_n_object_no_alloc),
43333- atomic_read(&fscache_n_object_avail),
43334- atomic_read(&fscache_n_object_dead));
43335+ atomic_read_unchecked(&fscache_n_object_alloc),
43336+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43337+ atomic_read_unchecked(&fscache_n_object_avail),
43338+ atomic_read_unchecked(&fscache_n_object_dead));
43339 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43340- atomic_read(&fscache_n_checkaux_none),
43341- atomic_read(&fscache_n_checkaux_okay),
43342- atomic_read(&fscache_n_checkaux_update),
43343- atomic_read(&fscache_n_checkaux_obsolete));
43344+ atomic_read_unchecked(&fscache_n_checkaux_none),
43345+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43346+ atomic_read_unchecked(&fscache_n_checkaux_update),
43347+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43348
43349 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43350- atomic_read(&fscache_n_marks),
43351- atomic_read(&fscache_n_uncaches));
43352+ atomic_read_unchecked(&fscache_n_marks),
43353+ atomic_read_unchecked(&fscache_n_uncaches));
43354
43355 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43356 " oom=%u\n",
43357- atomic_read(&fscache_n_acquires),
43358- atomic_read(&fscache_n_acquires_null),
43359- atomic_read(&fscache_n_acquires_no_cache),
43360- atomic_read(&fscache_n_acquires_ok),
43361- atomic_read(&fscache_n_acquires_nobufs),
43362- atomic_read(&fscache_n_acquires_oom));
43363+ atomic_read_unchecked(&fscache_n_acquires),
43364+ atomic_read_unchecked(&fscache_n_acquires_null),
43365+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43366+ atomic_read_unchecked(&fscache_n_acquires_ok),
43367+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43368+ atomic_read_unchecked(&fscache_n_acquires_oom));
43369
43370 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43371- atomic_read(&fscache_n_object_lookups),
43372- atomic_read(&fscache_n_object_lookups_negative),
43373- atomic_read(&fscache_n_object_lookups_positive),
43374- atomic_read(&fscache_n_object_created),
43375- atomic_read(&fscache_n_object_lookups_timed_out));
43376+ atomic_read_unchecked(&fscache_n_object_lookups),
43377+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43378+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43379+ atomic_read_unchecked(&fscache_n_object_created),
43380+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43381
43382 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43383- atomic_read(&fscache_n_updates),
43384- atomic_read(&fscache_n_updates_null),
43385- atomic_read(&fscache_n_updates_run));
43386+ atomic_read_unchecked(&fscache_n_updates),
43387+ atomic_read_unchecked(&fscache_n_updates_null),
43388+ atomic_read_unchecked(&fscache_n_updates_run));
43389
43390 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43391- atomic_read(&fscache_n_relinquishes),
43392- atomic_read(&fscache_n_relinquishes_null),
43393- atomic_read(&fscache_n_relinquishes_waitcrt),
43394- atomic_read(&fscache_n_relinquishes_retire));
43395+ atomic_read_unchecked(&fscache_n_relinquishes),
43396+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43397+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43398+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43399
43400 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43401- atomic_read(&fscache_n_attr_changed),
43402- atomic_read(&fscache_n_attr_changed_ok),
43403- atomic_read(&fscache_n_attr_changed_nobufs),
43404- atomic_read(&fscache_n_attr_changed_nomem),
43405- atomic_read(&fscache_n_attr_changed_calls));
43406+ atomic_read_unchecked(&fscache_n_attr_changed),
43407+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43408+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43409+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43410+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43411
43412 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43413- atomic_read(&fscache_n_allocs),
43414- atomic_read(&fscache_n_allocs_ok),
43415- atomic_read(&fscache_n_allocs_wait),
43416- atomic_read(&fscache_n_allocs_nobufs),
43417- atomic_read(&fscache_n_allocs_intr));
43418+ atomic_read_unchecked(&fscache_n_allocs),
43419+ atomic_read_unchecked(&fscache_n_allocs_ok),
43420+ atomic_read_unchecked(&fscache_n_allocs_wait),
43421+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43422+ atomic_read_unchecked(&fscache_n_allocs_intr));
43423 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43424- atomic_read(&fscache_n_alloc_ops),
43425- atomic_read(&fscache_n_alloc_op_waits),
43426- atomic_read(&fscache_n_allocs_object_dead));
43427+ atomic_read_unchecked(&fscache_n_alloc_ops),
43428+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43429+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43430
43431 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43432 " int=%u oom=%u\n",
43433- atomic_read(&fscache_n_retrievals),
43434- atomic_read(&fscache_n_retrievals_ok),
43435- atomic_read(&fscache_n_retrievals_wait),
43436- atomic_read(&fscache_n_retrievals_nodata),
43437- atomic_read(&fscache_n_retrievals_nobufs),
43438- atomic_read(&fscache_n_retrievals_intr),
43439- atomic_read(&fscache_n_retrievals_nomem));
43440+ atomic_read_unchecked(&fscache_n_retrievals),
43441+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43442+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43443+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43444+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43445+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43446+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43447 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43448- atomic_read(&fscache_n_retrieval_ops),
43449- atomic_read(&fscache_n_retrieval_op_waits),
43450- atomic_read(&fscache_n_retrievals_object_dead));
43451+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43452+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43453+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43454
43455 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43456- atomic_read(&fscache_n_stores),
43457- atomic_read(&fscache_n_stores_ok),
43458- atomic_read(&fscache_n_stores_again),
43459- atomic_read(&fscache_n_stores_nobufs),
43460- atomic_read(&fscache_n_stores_oom));
43461+ atomic_read_unchecked(&fscache_n_stores),
43462+ atomic_read_unchecked(&fscache_n_stores_ok),
43463+ atomic_read_unchecked(&fscache_n_stores_again),
43464+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43465+ atomic_read_unchecked(&fscache_n_stores_oom));
43466 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43467- atomic_read(&fscache_n_store_ops),
43468- atomic_read(&fscache_n_store_calls),
43469- atomic_read(&fscache_n_store_pages),
43470- atomic_read(&fscache_n_store_radix_deletes),
43471- atomic_read(&fscache_n_store_pages_over_limit));
43472+ atomic_read_unchecked(&fscache_n_store_ops),
43473+ atomic_read_unchecked(&fscache_n_store_calls),
43474+ atomic_read_unchecked(&fscache_n_store_pages),
43475+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43476+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43477
43478 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43479- atomic_read(&fscache_n_store_vmscan_not_storing),
43480- atomic_read(&fscache_n_store_vmscan_gone),
43481- atomic_read(&fscache_n_store_vmscan_busy),
43482- atomic_read(&fscache_n_store_vmscan_cancelled));
43483+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43484+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43485+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43486+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43487
43488 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43489- atomic_read(&fscache_n_op_pend),
43490- atomic_read(&fscache_n_op_run),
43491- atomic_read(&fscache_n_op_enqueue),
43492- atomic_read(&fscache_n_op_cancelled),
43493- atomic_read(&fscache_n_op_rejected));
43494+ atomic_read_unchecked(&fscache_n_op_pend),
43495+ atomic_read_unchecked(&fscache_n_op_run),
43496+ atomic_read_unchecked(&fscache_n_op_enqueue),
43497+ atomic_read_unchecked(&fscache_n_op_cancelled),
43498+ atomic_read_unchecked(&fscache_n_op_rejected));
43499 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43500- atomic_read(&fscache_n_op_deferred_release),
43501- atomic_read(&fscache_n_op_release),
43502- atomic_read(&fscache_n_op_gc));
43503+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43504+ atomic_read_unchecked(&fscache_n_op_release),
43505+ atomic_read_unchecked(&fscache_n_op_gc));
43506
43507 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43508 atomic_read(&fscache_n_cop_alloc_object),
43509diff -urNp linux-3.0.7/fs/fuse/cuse.c linux-3.0.7/fs/fuse/cuse.c
43510--- linux-3.0.7/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
43511+++ linux-3.0.7/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
43512@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43513 INIT_LIST_HEAD(&cuse_conntbl[i]);
43514
43515 /* inherit and extend fuse_dev_operations */
43516- cuse_channel_fops = fuse_dev_operations;
43517- cuse_channel_fops.owner = THIS_MODULE;
43518- cuse_channel_fops.open = cuse_channel_open;
43519- cuse_channel_fops.release = cuse_channel_release;
43520+ pax_open_kernel();
43521+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43522+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43523+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43524+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43525+ pax_close_kernel();
43526
43527 cuse_class = class_create(THIS_MODULE, "cuse");
43528 if (IS_ERR(cuse_class))
43529diff -urNp linux-3.0.7/fs/fuse/dev.c linux-3.0.7/fs/fuse/dev.c
43530--- linux-3.0.7/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
43531+++ linux-3.0.7/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
43532@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
43533 ret = 0;
43534 pipe_lock(pipe);
43535
43536- if (!pipe->readers) {
43537+ if (!atomic_read(&pipe->readers)) {
43538 send_sig(SIGPIPE, current, 0);
43539 if (!ret)
43540 ret = -EPIPE;
43541diff -urNp linux-3.0.7/fs/fuse/dir.c linux-3.0.7/fs/fuse/dir.c
43542--- linux-3.0.7/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
43543+++ linux-3.0.7/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
43544@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
43545 return link;
43546 }
43547
43548-static void free_link(char *link)
43549+static void free_link(const char *link)
43550 {
43551 if (!IS_ERR(link))
43552 free_page((unsigned long) link);
43553diff -urNp linux-3.0.7/fs/gfs2/inode.c linux-3.0.7/fs/gfs2/inode.c
43554--- linux-3.0.7/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
43555+++ linux-3.0.7/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
43556@@ -1525,7 +1525,7 @@ out:
43557
43558 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43559 {
43560- char *s = nd_get_link(nd);
43561+ const char *s = nd_get_link(nd);
43562 if (!IS_ERR(s))
43563 kfree(s);
43564 }
43565diff -urNp linux-3.0.7/fs/hfsplus/catalog.c linux-3.0.7/fs/hfsplus/catalog.c
43566--- linux-3.0.7/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
43567+++ linux-3.0.7/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
43568@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43569 int err;
43570 u16 type;
43571
43572+ pax_track_stack();
43573+
43574 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43575 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43576 if (err)
43577@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43578 int entry_size;
43579 int err;
43580
43581+ pax_track_stack();
43582+
43583 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43584 str->name, cnid, inode->i_nlink);
43585 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43586@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
43587 int entry_size, type;
43588 int err = 0;
43589
43590+ pax_track_stack();
43591+
43592 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43593 cnid, src_dir->i_ino, src_name->name,
43594 dst_dir->i_ino, dst_name->name);
43595diff -urNp linux-3.0.7/fs/hfsplus/dir.c linux-3.0.7/fs/hfsplus/dir.c
43596--- linux-3.0.7/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
43597+++ linux-3.0.7/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
43598@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
43599 struct hfsplus_readdir_data *rd;
43600 u16 type;
43601
43602+ pax_track_stack();
43603+
43604 if (filp->f_pos >= inode->i_size)
43605 return 0;
43606
43607diff -urNp linux-3.0.7/fs/hfsplus/inode.c linux-3.0.7/fs/hfsplus/inode.c
43608--- linux-3.0.7/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
43609+++ linux-3.0.7/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
43610@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
43611 int res = 0;
43612 u16 type;
43613
43614+ pax_track_stack();
43615+
43616 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43617
43618 HFSPLUS_I(inode)->linkid = 0;
43619@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
43620 struct hfs_find_data fd;
43621 hfsplus_cat_entry entry;
43622
43623+ pax_track_stack();
43624+
43625 if (HFSPLUS_IS_RSRC(inode))
43626 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43627
43628diff -urNp linux-3.0.7/fs/hfsplus/ioctl.c linux-3.0.7/fs/hfsplus/ioctl.c
43629--- linux-3.0.7/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
43630+++ linux-3.0.7/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
43631@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43632 struct hfsplus_cat_file *file;
43633 int res;
43634
43635+ pax_track_stack();
43636+
43637 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43638 return -EOPNOTSUPP;
43639
43640@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43641 struct hfsplus_cat_file *file;
43642 ssize_t res = 0;
43643
43644+ pax_track_stack();
43645+
43646 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43647 return -EOPNOTSUPP;
43648
43649diff -urNp linux-3.0.7/fs/hfsplus/super.c linux-3.0.7/fs/hfsplus/super.c
43650--- linux-3.0.7/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
43651+++ linux-3.0.7/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
43652@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
43653 struct nls_table *nls = NULL;
43654 int err;
43655
43656+ pax_track_stack();
43657+
43658 err = -EINVAL;
43659 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43660 if (!sbi)
43661diff -urNp linux-3.0.7/fs/hugetlbfs/inode.c linux-3.0.7/fs/hugetlbfs/inode.c
43662--- linux-3.0.7/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43663+++ linux-3.0.7/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
43664@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
43665 .kill_sb = kill_litter_super,
43666 };
43667
43668-static struct vfsmount *hugetlbfs_vfsmount;
43669+struct vfsmount *hugetlbfs_vfsmount;
43670
43671 static int can_do_hugetlb_shm(void)
43672 {
43673diff -urNp linux-3.0.7/fs/inode.c linux-3.0.7/fs/inode.c
43674--- linux-3.0.7/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
43675+++ linux-3.0.7/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
43676@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
43677
43678 #ifdef CONFIG_SMP
43679 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43680- static atomic_t shared_last_ino;
43681- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43682+ static atomic_unchecked_t shared_last_ino;
43683+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43684
43685 res = next - LAST_INO_BATCH;
43686 }
43687diff -urNp linux-3.0.7/fs/jbd/checkpoint.c linux-3.0.7/fs/jbd/checkpoint.c
43688--- linux-3.0.7/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
43689+++ linux-3.0.7/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
43690@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
43691 tid_t this_tid;
43692 int result;
43693
43694+ pax_track_stack();
43695+
43696 jbd_debug(1, "Start checkpoint\n");
43697
43698 /*
43699diff -urNp linux-3.0.7/fs/jffs2/compr_rtime.c linux-3.0.7/fs/jffs2/compr_rtime.c
43700--- linux-3.0.7/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
43701+++ linux-3.0.7/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
43702@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43703 int outpos = 0;
43704 int pos=0;
43705
43706+ pax_track_stack();
43707+
43708 memset(positions,0,sizeof(positions));
43709
43710 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43711@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43712 int outpos = 0;
43713 int pos=0;
43714
43715+ pax_track_stack();
43716+
43717 memset(positions,0,sizeof(positions));
43718
43719 while (outpos<destlen) {
43720diff -urNp linux-3.0.7/fs/jffs2/compr_rubin.c linux-3.0.7/fs/jffs2/compr_rubin.c
43721--- linux-3.0.7/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
43722+++ linux-3.0.7/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
43723@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43724 int ret;
43725 uint32_t mysrclen, mydstlen;
43726
43727+ pax_track_stack();
43728+
43729 mysrclen = *sourcelen;
43730 mydstlen = *dstlen - 8;
43731
43732diff -urNp linux-3.0.7/fs/jffs2/erase.c linux-3.0.7/fs/jffs2/erase.c
43733--- linux-3.0.7/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
43734+++ linux-3.0.7/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
43735@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
43736 struct jffs2_unknown_node marker = {
43737 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43738 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43739- .totlen = cpu_to_je32(c->cleanmarker_size)
43740+ .totlen = cpu_to_je32(c->cleanmarker_size),
43741+ .hdr_crc = cpu_to_je32(0)
43742 };
43743
43744 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43745diff -urNp linux-3.0.7/fs/jffs2/wbuf.c linux-3.0.7/fs/jffs2/wbuf.c
43746--- linux-3.0.7/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
43747+++ linux-3.0.7/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
43748@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43749 {
43750 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43751 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43752- .totlen = constant_cpu_to_je32(8)
43753+ .totlen = constant_cpu_to_je32(8),
43754+ .hdr_crc = constant_cpu_to_je32(0)
43755 };
43756
43757 /*
43758diff -urNp linux-3.0.7/fs/jffs2/xattr.c linux-3.0.7/fs/jffs2/xattr.c
43759--- linux-3.0.7/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
43760+++ linux-3.0.7/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
43761@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43762
43763 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43764
43765+ pax_track_stack();
43766+
43767 /* Phase.1 : Merge same xref */
43768 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43769 xref_tmphash[i] = NULL;
43770diff -urNp linux-3.0.7/fs/jfs/super.c linux-3.0.7/fs/jfs/super.c
43771--- linux-3.0.7/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
43772+++ linux-3.0.7/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
43773@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
43774
43775 jfs_inode_cachep =
43776 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43777- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43778+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43779 init_once);
43780 if (jfs_inode_cachep == NULL)
43781 return -ENOMEM;
43782diff -urNp linux-3.0.7/fs/libfs.c linux-3.0.7/fs/libfs.c
43783--- linux-3.0.7/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
43784+++ linux-3.0.7/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
43785@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
43786
43787 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43788 struct dentry *next;
43789+ char d_name[sizeof(next->d_iname)];
43790+ const unsigned char *name;
43791+
43792 next = list_entry(p, struct dentry, d_u.d_child);
43793 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43794 if (!simple_positive(next)) {
43795@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
43796
43797 spin_unlock(&next->d_lock);
43798 spin_unlock(&dentry->d_lock);
43799- if (filldir(dirent, next->d_name.name,
43800+ name = next->d_name.name;
43801+ if (name == next->d_iname) {
43802+ memcpy(d_name, name, next->d_name.len);
43803+ name = d_name;
43804+ }
43805+ if (filldir(dirent, name,
43806 next->d_name.len, filp->f_pos,
43807 next->d_inode->i_ino,
43808 dt_type(next->d_inode)) < 0)
43809diff -urNp linux-3.0.7/fs/lockd/clntproc.c linux-3.0.7/fs/lockd/clntproc.c
43810--- linux-3.0.7/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
43811+++ linux-3.0.7/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
43812@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43813 /*
43814 * Cookie counter for NLM requests
43815 */
43816-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43817+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43818
43819 void nlmclnt_next_cookie(struct nlm_cookie *c)
43820 {
43821- u32 cookie = atomic_inc_return(&nlm_cookie);
43822+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43823
43824 memcpy(c->data, &cookie, 4);
43825 c->len=4;
43826@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43827 struct nlm_rqst reqst, *req;
43828 int status;
43829
43830+ pax_track_stack();
43831+
43832 req = &reqst;
43833 memset(req, 0, sizeof(*req));
43834 locks_init_lock(&req->a_args.lock.fl);
43835diff -urNp linux-3.0.7/fs/locks.c linux-3.0.7/fs/locks.c
43836--- linux-3.0.7/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
43837+++ linux-3.0.7/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
43838@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
43839 return;
43840
43841 if (filp->f_op && filp->f_op->flock) {
43842- struct file_lock fl = {
43843+ struct file_lock flock = {
43844 .fl_pid = current->tgid,
43845 .fl_file = filp,
43846 .fl_flags = FL_FLOCK,
43847 .fl_type = F_UNLCK,
43848 .fl_end = OFFSET_MAX,
43849 };
43850- filp->f_op->flock(filp, F_SETLKW, &fl);
43851- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43852- fl.fl_ops->fl_release_private(&fl);
43853+ filp->f_op->flock(filp, F_SETLKW, &flock);
43854+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43855+ flock.fl_ops->fl_release_private(&flock);
43856 }
43857
43858 lock_flocks();
43859diff -urNp linux-3.0.7/fs/logfs/super.c linux-3.0.7/fs/logfs/super.c
43860--- linux-3.0.7/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
43861+++ linux-3.0.7/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
43862@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
43863 struct logfs_disk_super _ds1, *ds1 = &_ds1;
43864 int err, valid0, valid1;
43865
43866+ pax_track_stack();
43867+
43868 /* read first superblock */
43869 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
43870 if (err)
43871diff -urNp linux-3.0.7/fs/namei.c linux-3.0.7/fs/namei.c
43872--- linux-3.0.7/fs/namei.c 2011-10-16 21:54:54.000000000 -0400
43873+++ linux-3.0.7/fs/namei.c 2011-10-19 10:09:26.000000000 -0400
43874@@ -237,21 +237,23 @@ int generic_permission(struct inode *ino
43875 return ret;
43876
43877 /*
43878- * Read/write DACs are always overridable.
43879- * Executable DACs are overridable for all directories and
43880- * for non-directories that have least one exec bit set.
43881+ * Searching includes executable on directories, else just read.
43882 */
43883- if (!(mask & MAY_EXEC) || execute_ok(inode))
43884- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43885+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43886+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
43887+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43888 return 0;
43889+ }
43890
43891 /*
43892- * Searching includes executable on directories, else just read.
43893+ * Read/write DACs are always overridable.
43894+ * Executable DACs are overridable for all directories and
43895+ * for non-directories that have least one exec bit set.
43896 */
43897- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43898- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
43899- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43900+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
43901+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43902 return 0;
43903+ }
43904
43905 return -EACCES;
43906 }
43907@@ -593,9 +595,12 @@ static inline int exec_permission(struct
43908 if (ret == -ECHILD)
43909 return ret;
43910
43911- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
43912- ns_capable(ns, CAP_DAC_READ_SEARCH))
43913+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
43914 goto ok;
43915+ else {
43916+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
43917+ goto ok;
43918+ }
43919
43920 return ret;
43921 ok:
43922@@ -703,11 +708,19 @@ follow_link(struct path *link, struct na
43923 return error;
43924 }
43925
43926+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
43927+ dentry->d_inode, dentry, nd->path.mnt)) {
43928+ error = -EACCES;
43929+ *p = ERR_PTR(error); /* no ->put_link(), please */
43930+ path_put(&nd->path);
43931+ return error;
43932+ }
43933+
43934 nd->last_type = LAST_BIND;
43935 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
43936 error = PTR_ERR(*p);
43937 if (!IS_ERR(*p)) {
43938- char *s = nd_get_link(nd);
43939+ const char *s = nd_get_link(nd);
43940 error = 0;
43941 if (s)
43942 error = __vfs_follow_link(nd, s);
43943@@ -1598,6 +1611,12 @@ static int path_lookupat(int dfd, const
43944 if (!err)
43945 err = complete_walk(nd);
43946
43947+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43948+ if (!err)
43949+ path_put(&nd->path);
43950+ err = -ENOENT;
43951+ }
43952+
43953 if (!err && nd->flags & LOOKUP_DIRECTORY) {
43954 if (!nd->inode->i_op->lookup) {
43955 path_put(&nd->path);
43956@@ -1625,6 +1644,9 @@ static int do_path_lookup(int dfd, const
43957 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
43958
43959 if (likely(!retval)) {
43960+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43961+ return -ENOENT;
43962+
43963 if (unlikely(!audit_dummy_context())) {
43964 if (nd->path.dentry && nd->inode)
43965 audit_inode(name, nd->path.dentry);
43966@@ -1935,6 +1957,30 @@ int vfs_create(struct inode *dir, struct
43967 return error;
43968 }
43969
43970+/*
43971+ * Note that while the flag value (low two bits) for sys_open means:
43972+ * 00 - read-only
43973+ * 01 - write-only
43974+ * 10 - read-write
43975+ * 11 - special
43976+ * it is changed into
43977+ * 00 - no permissions needed
43978+ * 01 - read-permission
43979+ * 10 - write-permission
43980+ * 11 - read-write
43981+ * for the internal routines (ie open_namei()/follow_link() etc)
43982+ * This is more logical, and also allows the 00 "no perm needed"
43983+ * to be used for symlinks (where the permissions are checked
43984+ * later).
43985+ *
43986+*/
43987+static inline int open_to_namei_flags(int flag)
43988+{
43989+ if ((flag+1) & O_ACCMODE)
43990+ flag++;
43991+ return flag;
43992+}
43993+
43994 static int may_open(struct path *path, int acc_mode, int flag)
43995 {
43996 struct dentry *dentry = path->dentry;
43997@@ -1987,7 +2033,27 @@ static int may_open(struct path *path, i
43998 /*
43999 * Ensure there are no outstanding leases on the file.
44000 */
44001- return break_lease(inode, flag);
44002+ error = break_lease(inode, flag);
44003+
44004+ if (error)
44005+ return error;
44006+
44007+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44008+ error = -EPERM;
44009+ goto exit;
44010+ }
44011+
44012+ if (gr_handle_rawio(inode)) {
44013+ error = -EPERM;
44014+ goto exit;
44015+ }
44016+
44017+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
44018+ error = -EACCES;
44019+ goto exit;
44020+ }
44021+exit:
44022+ return error;
44023 }
44024
44025 static int handle_truncate(struct file *filp)
44026@@ -2013,30 +2079,6 @@ static int handle_truncate(struct file *
44027 }
44028
44029 /*
44030- * Note that while the flag value (low two bits) for sys_open means:
44031- * 00 - read-only
44032- * 01 - write-only
44033- * 10 - read-write
44034- * 11 - special
44035- * it is changed into
44036- * 00 - no permissions needed
44037- * 01 - read-permission
44038- * 10 - write-permission
44039- * 11 - read-write
44040- * for the internal routines (ie open_namei()/follow_link() etc)
44041- * This is more logical, and also allows the 00 "no perm needed"
44042- * to be used for symlinks (where the permissions are checked
44043- * later).
44044- *
44045-*/
44046-static inline int open_to_namei_flags(int flag)
44047-{
44048- if ((flag+1) & O_ACCMODE)
44049- flag++;
44050- return flag;
44051-}
44052-
44053-/*
44054 * Handle the last step of open()
44055 */
44056 static struct file *do_last(struct nameidata *nd, struct path *path,
44057@@ -2045,6 +2087,7 @@ static struct file *do_last(struct namei
44058 struct dentry *dir = nd->path.dentry;
44059 struct dentry *dentry;
44060 int open_flag = op->open_flag;
44061+ int flag = open_to_namei_flags(open_flag);
44062 int will_truncate = open_flag & O_TRUNC;
44063 int want_write = 0;
44064 int acc_mode = op->acc_mode;
44065@@ -2065,6 +2108,10 @@ static struct file *do_last(struct namei
44066 error = complete_walk(nd);
44067 if (error)
44068 return ERR_PTR(error);
44069+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44070+ error = -ENOENT;
44071+ goto exit;
44072+ }
44073 audit_inode(pathname, nd->path.dentry);
44074 if (open_flag & O_CREAT) {
44075 error = -EISDIR;
44076@@ -2075,6 +2122,10 @@ static struct file *do_last(struct namei
44077 error = complete_walk(nd);
44078 if (error)
44079 return ERR_PTR(error);
44080+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44081+ error = -ENOENT;
44082+ goto exit;
44083+ }
44084 audit_inode(pathname, dir);
44085 goto ok;
44086 }
44087@@ -2097,6 +2148,11 @@ static struct file *do_last(struct namei
44088 if (error)
44089 return ERR_PTR(-ECHILD);
44090
44091+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44092+ error = -ENOENT;
44093+ goto exit;
44094+ }
44095+
44096 error = -ENOTDIR;
44097 if (nd->flags & LOOKUP_DIRECTORY) {
44098 if (!nd->inode->i_op->lookup)
44099@@ -2132,6 +2188,12 @@ static struct file *do_last(struct namei
44100 /* Negative dentry, just create the file */
44101 if (!dentry->d_inode) {
44102 int mode = op->mode;
44103+
44104+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
44105+ error = -EACCES;
44106+ goto exit_mutex_unlock;
44107+ }
44108+
44109 if (!IS_POSIXACL(dir->d_inode))
44110 mode &= ~current_umask();
44111 /*
44112@@ -2155,6 +2217,8 @@ static struct file *do_last(struct namei
44113 error = vfs_create(dir->d_inode, dentry, mode, nd);
44114 if (error)
44115 goto exit_mutex_unlock;
44116+ else
44117+ gr_handle_create(path->dentry, path->mnt);
44118 mutex_unlock(&dir->d_inode->i_mutex);
44119 dput(nd->path.dentry);
44120 nd->path.dentry = dentry;
44121@@ -2164,6 +2228,19 @@ static struct file *do_last(struct namei
44122 /*
44123 * It already exists.
44124 */
44125+
44126+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44127+ error = -ENOENT;
44128+ goto exit_mutex_unlock;
44129+ }
44130+
44131+ /* only check if O_CREAT is specified, all other checks need to go
44132+ into may_open */
44133+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
44134+ error = -EACCES;
44135+ goto exit_mutex_unlock;
44136+ }
44137+
44138 mutex_unlock(&dir->d_inode->i_mutex);
44139 audit_inode(pathname, path->dentry);
44140
44141@@ -2373,6 +2450,10 @@ struct dentry *lookup_create(struct name
44142 }
44143 return dentry;
44144 eexist:
44145+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44146+ dput(dentry);
44147+ return ERR_PTR(-ENOENT);
44148+ }
44149 dput(dentry);
44150 dentry = ERR_PTR(-EEXIST);
44151 fail:
44152@@ -2450,6 +2531,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44153 error = may_mknod(mode);
44154 if (error)
44155 goto out_dput;
44156+
44157+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
44158+ error = -EPERM;
44159+ goto out_dput;
44160+ }
44161+
44162+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
44163+ error = -EACCES;
44164+ goto out_dput;
44165+ }
44166+
44167 error = mnt_want_write(nd.path.mnt);
44168 if (error)
44169 goto out_dput;
44170@@ -2470,6 +2562,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44171 }
44172 out_drop_write:
44173 mnt_drop_write(nd.path.mnt);
44174+
44175+ if (!error)
44176+ gr_handle_create(dentry, nd.path.mnt);
44177 out_dput:
44178 dput(dentry);
44179 out_unlock:
44180@@ -2522,6 +2617,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44181 if (IS_ERR(dentry))
44182 goto out_unlock;
44183
44184+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44185+ error = -EACCES;
44186+ goto out_dput;
44187+ }
44188+
44189 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44190 mode &= ~current_umask();
44191 error = mnt_want_write(nd.path.mnt);
44192@@ -2533,6 +2633,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44193 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44194 out_drop_write:
44195 mnt_drop_write(nd.path.mnt);
44196+
44197+ if (!error)
44198+ gr_handle_create(dentry, nd.path.mnt);
44199+
44200 out_dput:
44201 dput(dentry);
44202 out_unlock:
44203@@ -2615,6 +2719,8 @@ static long do_rmdir(int dfd, const char
44204 char * name;
44205 struct dentry *dentry;
44206 struct nameidata nd;
44207+ ino_t saved_ino = 0;
44208+ dev_t saved_dev = 0;
44209
44210 error = user_path_parent(dfd, pathname, &nd, &name);
44211 if (error)
44212@@ -2643,6 +2749,17 @@ static long do_rmdir(int dfd, const char
44213 error = -ENOENT;
44214 goto exit3;
44215 }
44216+
44217+ if (dentry->d_inode->i_nlink <= 1) {
44218+ saved_ino = dentry->d_inode->i_ino;
44219+ saved_dev = gr_get_dev_from_dentry(dentry);
44220+ }
44221+
44222+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44223+ error = -EACCES;
44224+ goto exit3;
44225+ }
44226+
44227 error = mnt_want_write(nd.path.mnt);
44228 if (error)
44229 goto exit3;
44230@@ -2650,6 +2767,8 @@ static long do_rmdir(int dfd, const char
44231 if (error)
44232 goto exit4;
44233 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44234+ if (!error && (saved_dev || saved_ino))
44235+ gr_handle_delete(saved_ino, saved_dev);
44236 exit4:
44237 mnt_drop_write(nd.path.mnt);
44238 exit3:
44239@@ -2712,6 +2831,8 @@ static long do_unlinkat(int dfd, const c
44240 struct dentry *dentry;
44241 struct nameidata nd;
44242 struct inode *inode = NULL;
44243+ ino_t saved_ino = 0;
44244+ dev_t saved_dev = 0;
44245
44246 error = user_path_parent(dfd, pathname, &nd, &name);
44247 if (error)
44248@@ -2734,6 +2855,16 @@ static long do_unlinkat(int dfd, const c
44249 if (!inode)
44250 goto slashes;
44251 ihold(inode);
44252+
44253+ if (inode->i_nlink <= 1) {
44254+ saved_ino = inode->i_ino;
44255+ saved_dev = gr_get_dev_from_dentry(dentry);
44256+ }
44257+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44258+ error = -EACCES;
44259+ goto exit2;
44260+ }
44261+
44262 error = mnt_want_write(nd.path.mnt);
44263 if (error)
44264 goto exit2;
44265@@ -2741,6 +2872,8 @@ static long do_unlinkat(int dfd, const c
44266 if (error)
44267 goto exit3;
44268 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44269+ if (!error && (saved_ino || saved_dev))
44270+ gr_handle_delete(saved_ino, saved_dev);
44271 exit3:
44272 mnt_drop_write(nd.path.mnt);
44273 exit2:
44274@@ -2818,6 +2951,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44275 if (IS_ERR(dentry))
44276 goto out_unlock;
44277
44278+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44279+ error = -EACCES;
44280+ goto out_dput;
44281+ }
44282+
44283 error = mnt_want_write(nd.path.mnt);
44284 if (error)
44285 goto out_dput;
44286@@ -2825,6 +2963,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44287 if (error)
44288 goto out_drop_write;
44289 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44290+ if (!error)
44291+ gr_handle_create(dentry, nd.path.mnt);
44292 out_drop_write:
44293 mnt_drop_write(nd.path.mnt);
44294 out_dput:
44295@@ -2933,6 +3073,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44296 error = PTR_ERR(new_dentry);
44297 if (IS_ERR(new_dentry))
44298 goto out_unlock;
44299+
44300+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44301+ old_path.dentry->d_inode,
44302+ old_path.dentry->d_inode->i_mode, to)) {
44303+ error = -EACCES;
44304+ goto out_dput;
44305+ }
44306+
44307+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44308+ old_path.dentry, old_path.mnt, to)) {
44309+ error = -EACCES;
44310+ goto out_dput;
44311+ }
44312+
44313 error = mnt_want_write(nd.path.mnt);
44314 if (error)
44315 goto out_dput;
44316@@ -2940,6 +3094,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44317 if (error)
44318 goto out_drop_write;
44319 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44320+ if (!error)
44321+ gr_handle_create(new_dentry, nd.path.mnt);
44322 out_drop_write:
44323 mnt_drop_write(nd.path.mnt);
44324 out_dput:
44325@@ -3117,6 +3273,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44326 char *to;
44327 int error;
44328
44329+ pax_track_stack();
44330+
44331 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44332 if (error)
44333 goto exit;
44334@@ -3173,6 +3331,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44335 if (new_dentry == trap)
44336 goto exit5;
44337
44338+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44339+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44340+ to);
44341+ if (error)
44342+ goto exit5;
44343+
44344 error = mnt_want_write(oldnd.path.mnt);
44345 if (error)
44346 goto exit5;
44347@@ -3182,6 +3346,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44348 goto exit6;
44349 error = vfs_rename(old_dir->d_inode, old_dentry,
44350 new_dir->d_inode, new_dentry);
44351+ if (!error)
44352+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44353+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44354 exit6:
44355 mnt_drop_write(oldnd.path.mnt);
44356 exit5:
44357@@ -3207,6 +3374,8 @@ SYSCALL_DEFINE2(rename, const char __use
44358
44359 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44360 {
44361+ char tmpbuf[64];
44362+ const char *newlink;
44363 int len;
44364
44365 len = PTR_ERR(link);
44366@@ -3216,7 +3385,14 @@ int vfs_readlink(struct dentry *dentry,
44367 len = strlen(link);
44368 if (len > (unsigned) buflen)
44369 len = buflen;
44370- if (copy_to_user(buffer, link, len))
44371+
44372+ if (len < sizeof(tmpbuf)) {
44373+ memcpy(tmpbuf, link, len);
44374+ newlink = tmpbuf;
44375+ } else
44376+ newlink = link;
44377+
44378+ if (copy_to_user(buffer, newlink, len))
44379 len = -EFAULT;
44380 out:
44381 return len;
44382diff -urNp linux-3.0.7/fs/namespace.c linux-3.0.7/fs/namespace.c
44383--- linux-3.0.7/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
44384+++ linux-3.0.7/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
44385@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
44386 if (!(sb->s_flags & MS_RDONLY))
44387 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44388 up_write(&sb->s_umount);
44389+
44390+ gr_log_remount(mnt->mnt_devname, retval);
44391+
44392 return retval;
44393 }
44394
44395@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
44396 br_write_unlock(vfsmount_lock);
44397 up_write(&namespace_sem);
44398 release_mounts(&umount_list);
44399+
44400+ gr_log_unmount(mnt->mnt_devname, retval);
44401+
44402 return retval;
44403 }
44404
44405@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
44406 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44407 MS_STRICTATIME);
44408
44409+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44410+ retval = -EPERM;
44411+ goto dput_out;
44412+ }
44413+
44414+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44415+ retval = -EPERM;
44416+ goto dput_out;
44417+ }
44418+
44419 if (flags & MS_REMOUNT)
44420 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44421 data_page);
44422@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
44423 dev_name, data_page);
44424 dput_out:
44425 path_put(&path);
44426+
44427+ gr_log_mount(dev_name, dir_name, retval);
44428+
44429 return retval;
44430 }
44431
44432@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44433 if (error)
44434 goto out2;
44435
44436+ if (gr_handle_chroot_pivot()) {
44437+ error = -EPERM;
44438+ goto out2;
44439+ }
44440+
44441 get_fs_root(current->fs, &root);
44442 error = lock_mount(&old);
44443 if (error)
44444diff -urNp linux-3.0.7/fs/ncpfs/dir.c linux-3.0.7/fs/ncpfs/dir.c
44445--- linux-3.0.7/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44446+++ linux-3.0.7/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44447@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44448 int res, val = 0, len;
44449 __u8 __name[NCP_MAXPATHLEN + 1];
44450
44451+ pax_track_stack();
44452+
44453 if (dentry == dentry->d_sb->s_root)
44454 return 1;
44455
44456@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44457 int error, res, len;
44458 __u8 __name[NCP_MAXPATHLEN + 1];
44459
44460+ pax_track_stack();
44461+
44462 error = -EIO;
44463 if (!ncp_conn_valid(server))
44464 goto finished;
44465@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44466 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44467 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44468
44469+ pax_track_stack();
44470+
44471 ncp_age_dentry(server, dentry);
44472 len = sizeof(__name);
44473 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44474@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44475 int error, len;
44476 __u8 __name[NCP_MAXPATHLEN + 1];
44477
44478+ pax_track_stack();
44479+
44480 DPRINTK("ncp_mkdir: making %s/%s\n",
44481 dentry->d_parent->d_name.name, dentry->d_name.name);
44482
44483@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44484 int old_len, new_len;
44485 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44486
44487+ pax_track_stack();
44488+
44489 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44490 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44491 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44492diff -urNp linux-3.0.7/fs/ncpfs/inode.c linux-3.0.7/fs/ncpfs/inode.c
44493--- linux-3.0.7/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44494+++ linux-3.0.7/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44495@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44496 #endif
44497 struct ncp_entry_info finfo;
44498
44499+ pax_track_stack();
44500+
44501 memset(&data, 0, sizeof(data));
44502 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44503 if (!server)
44504diff -urNp linux-3.0.7/fs/nfs/inode.c linux-3.0.7/fs/nfs/inode.c
44505--- linux-3.0.7/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44506+++ linux-3.0.7/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
44507@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44508 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44509 nfsi->attrtimeo_timestamp = jiffies;
44510
44511- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44512+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44513 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44514 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44515 else
44516@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
44517 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44518 }
44519
44520-static atomic_long_t nfs_attr_generation_counter;
44521+static atomic_long_unchecked_t nfs_attr_generation_counter;
44522
44523 static unsigned long nfs_read_attr_generation_counter(void)
44524 {
44525- return atomic_long_read(&nfs_attr_generation_counter);
44526+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44527 }
44528
44529 unsigned long nfs_inc_attr_generation_counter(void)
44530 {
44531- return atomic_long_inc_return(&nfs_attr_generation_counter);
44532+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44533 }
44534
44535 void nfs_fattr_init(struct nfs_fattr *fattr)
44536diff -urNp linux-3.0.7/fs/nfsd/nfs4state.c linux-3.0.7/fs/nfsd/nfs4state.c
44537--- linux-3.0.7/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
44538+++ linux-3.0.7/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
44539@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44540 unsigned int strhashval;
44541 int err;
44542
44543+ pax_track_stack();
44544+
44545 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44546 (long long) lock->lk_offset,
44547 (long long) lock->lk_length);
44548diff -urNp linux-3.0.7/fs/nfsd/nfs4xdr.c linux-3.0.7/fs/nfsd/nfs4xdr.c
44549--- linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
44550+++ linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
44551@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44552 .dentry = dentry,
44553 };
44554
44555+ pax_track_stack();
44556+
44557 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44558 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44559 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44560diff -urNp linux-3.0.7/fs/nfsd/vfs.c linux-3.0.7/fs/nfsd/vfs.c
44561--- linux-3.0.7/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
44562+++ linux-3.0.7/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
44563@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44564 } else {
44565 oldfs = get_fs();
44566 set_fs(KERNEL_DS);
44567- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44568+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44569 set_fs(oldfs);
44570 }
44571
44572@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44573
44574 /* Write the data. */
44575 oldfs = get_fs(); set_fs(KERNEL_DS);
44576- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44577+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44578 set_fs(oldfs);
44579 if (host_err < 0)
44580 goto out_nfserr;
44581@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44582 */
44583
44584 oldfs = get_fs(); set_fs(KERNEL_DS);
44585- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44586+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44587 set_fs(oldfs);
44588
44589 if (host_err < 0)
44590diff -urNp linux-3.0.7/fs/notify/fanotify/fanotify_user.c linux-3.0.7/fs/notify/fanotify/fanotify_user.c
44591--- linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
44592+++ linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
44593@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44594 goto out_close_fd;
44595
44596 ret = -EFAULT;
44597- if (copy_to_user(buf, &fanotify_event_metadata,
44598+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44599+ copy_to_user(buf, &fanotify_event_metadata,
44600 fanotify_event_metadata.event_len))
44601 goto out_kill_access_response;
44602
44603diff -urNp linux-3.0.7/fs/notify/notification.c linux-3.0.7/fs/notify/notification.c
44604--- linux-3.0.7/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
44605+++ linux-3.0.7/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
44606@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44607 * get set to 0 so it will never get 'freed'
44608 */
44609 static struct fsnotify_event *q_overflow_event;
44610-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44611+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44612
44613 /**
44614 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44615@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44616 */
44617 u32 fsnotify_get_cookie(void)
44618 {
44619- return atomic_inc_return(&fsnotify_sync_cookie);
44620+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44621 }
44622 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44623
44624diff -urNp linux-3.0.7/fs/ntfs/dir.c linux-3.0.7/fs/ntfs/dir.c
44625--- linux-3.0.7/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44626+++ linux-3.0.7/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
44627@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44628 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44629 ~(s64)(ndir->itype.index.block_size - 1)));
44630 /* Bounds checks. */
44631- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44632+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44633 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44634 "inode 0x%lx or driver bug.", vdir->i_ino);
44635 goto err_out;
44636diff -urNp linux-3.0.7/fs/ntfs/file.c linux-3.0.7/fs/ntfs/file.c
44637--- linux-3.0.7/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
44638+++ linux-3.0.7/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
44639@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
44640 #endif /* NTFS_RW */
44641 };
44642
44643-const struct file_operations ntfs_empty_file_ops = {};
44644+const struct file_operations ntfs_empty_file_ops __read_only;
44645
44646-const struct inode_operations ntfs_empty_inode_ops = {};
44647+const struct inode_operations ntfs_empty_inode_ops __read_only;
44648diff -urNp linux-3.0.7/fs/ocfs2/localalloc.c linux-3.0.7/fs/ocfs2/localalloc.c
44649--- linux-3.0.7/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
44650+++ linux-3.0.7/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
44651@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44652 goto bail;
44653 }
44654
44655- atomic_inc(&osb->alloc_stats.moves);
44656+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44657
44658 bail:
44659 if (handle)
44660diff -urNp linux-3.0.7/fs/ocfs2/namei.c linux-3.0.7/fs/ocfs2/namei.c
44661--- linux-3.0.7/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
44662+++ linux-3.0.7/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
44663@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44664 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44665 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44666
44667+ pax_track_stack();
44668+
44669 /* At some point it might be nice to break this function up a
44670 * bit. */
44671
44672diff -urNp linux-3.0.7/fs/ocfs2/ocfs2.h linux-3.0.7/fs/ocfs2/ocfs2.h
44673--- linux-3.0.7/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
44674+++ linux-3.0.7/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
44675@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44676
44677 struct ocfs2_alloc_stats
44678 {
44679- atomic_t moves;
44680- atomic_t local_data;
44681- atomic_t bitmap_data;
44682- atomic_t bg_allocs;
44683- atomic_t bg_extends;
44684+ atomic_unchecked_t moves;
44685+ atomic_unchecked_t local_data;
44686+ atomic_unchecked_t bitmap_data;
44687+ atomic_unchecked_t bg_allocs;
44688+ atomic_unchecked_t bg_extends;
44689 };
44690
44691 enum ocfs2_local_alloc_state
44692diff -urNp linux-3.0.7/fs/ocfs2/suballoc.c linux-3.0.7/fs/ocfs2/suballoc.c
44693--- linux-3.0.7/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
44694+++ linux-3.0.7/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
44695@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44696 mlog_errno(status);
44697 goto bail;
44698 }
44699- atomic_inc(&osb->alloc_stats.bg_extends);
44700+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44701
44702 /* You should never ask for this much metadata */
44703 BUG_ON(bits_wanted >
44704@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44705 mlog_errno(status);
44706 goto bail;
44707 }
44708- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44709+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44710
44711 *suballoc_loc = res.sr_bg_blkno;
44712 *suballoc_bit_start = res.sr_bit_offset;
44713@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44714 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44715 res->sr_bits);
44716
44717- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44718+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44719
44720 BUG_ON(res->sr_bits != 1);
44721
44722@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44723 mlog_errno(status);
44724 goto bail;
44725 }
44726- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44727+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44728
44729 BUG_ON(res.sr_bits != 1);
44730
44731@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44732 cluster_start,
44733 num_clusters);
44734 if (!status)
44735- atomic_inc(&osb->alloc_stats.local_data);
44736+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44737 } else {
44738 if (min_clusters > (osb->bitmap_cpg - 1)) {
44739 /* The only paths asking for contiguousness
44740@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
44741 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44742 res.sr_bg_blkno,
44743 res.sr_bit_offset);
44744- atomic_inc(&osb->alloc_stats.bitmap_data);
44745+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44746 *num_clusters = res.sr_bits;
44747 }
44748 }
44749diff -urNp linux-3.0.7/fs/ocfs2/super.c linux-3.0.7/fs/ocfs2/super.c
44750--- linux-3.0.7/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
44751+++ linux-3.0.7/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
44752@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44753 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44754 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44755 "Stats",
44756- atomic_read(&osb->alloc_stats.bitmap_data),
44757- atomic_read(&osb->alloc_stats.local_data),
44758- atomic_read(&osb->alloc_stats.bg_allocs),
44759- atomic_read(&osb->alloc_stats.moves),
44760- atomic_read(&osb->alloc_stats.bg_extends));
44761+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44762+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44763+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44764+ atomic_read_unchecked(&osb->alloc_stats.moves),
44765+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44766
44767 out += snprintf(buf + out, len - out,
44768 "%10s => State: %u Descriptor: %llu Size: %u bits "
44769@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
44770 spin_lock_init(&osb->osb_xattr_lock);
44771 ocfs2_init_steal_slots(osb);
44772
44773- atomic_set(&osb->alloc_stats.moves, 0);
44774- atomic_set(&osb->alloc_stats.local_data, 0);
44775- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44776- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44777- atomic_set(&osb->alloc_stats.bg_extends, 0);
44778+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44779+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44780+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44781+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44782+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44783
44784 /* Copy the blockcheck stats from the superblock probe */
44785 osb->osb_ecc_stats = *stats;
44786diff -urNp linux-3.0.7/fs/ocfs2/symlink.c linux-3.0.7/fs/ocfs2/symlink.c
44787--- linux-3.0.7/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
44788+++ linux-3.0.7/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
44789@@ -142,7 +142,7 @@ bail:
44790
44791 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44792 {
44793- char *link = nd_get_link(nd);
44794+ const char *link = nd_get_link(nd);
44795 if (!IS_ERR(link))
44796 kfree(link);
44797 }
44798diff -urNp linux-3.0.7/fs/open.c linux-3.0.7/fs/open.c
44799--- linux-3.0.7/fs/open.c 2011-07-21 22:17:23.000000000 -0400
44800+++ linux-3.0.7/fs/open.c 2011-09-14 09:16:46.000000000 -0400
44801@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
44802 error = locks_verify_truncate(inode, NULL, length);
44803 if (!error)
44804 error = security_path_truncate(&path);
44805+
44806+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44807+ error = -EACCES;
44808+
44809 if (!error)
44810 error = do_truncate(path.dentry, length, 0, NULL);
44811
44812@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44813 if (__mnt_is_readonly(path.mnt))
44814 res = -EROFS;
44815
44816+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44817+ res = -EACCES;
44818+
44819 out_path_release:
44820 path_put(&path);
44821 out:
44822@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44823 if (error)
44824 goto dput_and_out;
44825
44826+ gr_log_chdir(path.dentry, path.mnt);
44827+
44828 set_fs_pwd(current->fs, &path);
44829
44830 dput_and_out:
44831@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44832 goto out_putf;
44833
44834 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44835+
44836+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44837+ error = -EPERM;
44838+
44839+ if (!error)
44840+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44841+
44842 if (!error)
44843 set_fs_pwd(current->fs, &file->f_path);
44844 out_putf:
44845@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
44846 if (error)
44847 goto dput_and_out;
44848
44849+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44850+ goto dput_and_out;
44851+
44852 set_fs_root(current->fs, &path);
44853+
44854+ gr_handle_chroot_chdir(&path);
44855+
44856 error = 0;
44857 dput_and_out:
44858 path_put(&path);
44859@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44860 err = mnt_want_write_file(file);
44861 if (err)
44862 goto out_putf;
44863+
44864 mutex_lock(&inode->i_mutex);
44865+
44866+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
44867+ err = -EACCES;
44868+ goto out_unlock;
44869+ }
44870+
44871 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
44872 if (err)
44873 goto out_unlock;
44874 if (mode == (mode_t) -1)
44875 mode = inode->i_mode;
44876+
44877+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
44878+ err = -EACCES;
44879+ goto out_unlock;
44880+ }
44881+
44882 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44883 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44884 err = notify_change(dentry, &newattrs);
44885@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44886 error = mnt_want_write(path.mnt);
44887 if (error)
44888 goto dput_and_out;
44889+
44890 mutex_lock(&inode->i_mutex);
44891+
44892+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44893+ error = -EACCES;
44894+ goto out_unlock;
44895+ }
44896+
44897 error = security_path_chmod(path.dentry, path.mnt, mode);
44898 if (error)
44899 goto out_unlock;
44900 if (mode == (mode_t) -1)
44901 mode = inode->i_mode;
44902+
44903+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44904+ error = -EACCES;
44905+ goto out_unlock;
44906+ }
44907+
44908 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44909 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44910 error = notify_change(path.dentry, &newattrs);
44911@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
44912 int error;
44913 struct iattr newattrs;
44914
44915+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
44916+ return -EACCES;
44917+
44918 newattrs.ia_valid = ATTR_CTIME;
44919 if (user != (uid_t) -1) {
44920 newattrs.ia_valid |= ATTR_UID;
44921@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
44922 if (!IS_ERR(tmp)) {
44923 fd = get_unused_fd_flags(flags);
44924 if (fd >= 0) {
44925- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
44926+ struct file *f;
44927+ /* don't allow to be set by userland */
44928+ flags &= ~FMODE_GREXEC;
44929+ f = do_filp_open(dfd, tmp, &op, lookup);
44930 if (IS_ERR(f)) {
44931 put_unused_fd(fd);
44932 fd = PTR_ERR(f);
44933diff -urNp linux-3.0.7/fs/partitions/ldm.c linux-3.0.7/fs/partitions/ldm.c
44934--- linux-3.0.7/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
44935+++ linux-3.0.7/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
44936@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44937 ldm_error ("A VBLK claims to have %d parts.", num);
44938 return false;
44939 }
44940+
44941 if (rec >= num) {
44942 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44943 return false;
44944@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44945 goto found;
44946 }
44947
44948- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44949+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44950 if (!f) {
44951 ldm_crit ("Out of memory.");
44952 return false;
44953diff -urNp linux-3.0.7/fs/pipe.c linux-3.0.7/fs/pipe.c
44954--- linux-3.0.7/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
44955+++ linux-3.0.7/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
44956@@ -420,9 +420,9 @@ redo:
44957 }
44958 if (bufs) /* More to do? */
44959 continue;
44960- if (!pipe->writers)
44961+ if (!atomic_read(&pipe->writers))
44962 break;
44963- if (!pipe->waiting_writers) {
44964+ if (!atomic_read(&pipe->waiting_writers)) {
44965 /* syscall merging: Usually we must not sleep
44966 * if O_NONBLOCK is set, or if we got some data.
44967 * But if a writer sleeps in kernel space, then
44968@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
44969 mutex_lock(&inode->i_mutex);
44970 pipe = inode->i_pipe;
44971
44972- if (!pipe->readers) {
44973+ if (!atomic_read(&pipe->readers)) {
44974 send_sig(SIGPIPE, current, 0);
44975 ret = -EPIPE;
44976 goto out;
44977@@ -530,7 +530,7 @@ redo1:
44978 for (;;) {
44979 int bufs;
44980
44981- if (!pipe->readers) {
44982+ if (!atomic_read(&pipe->readers)) {
44983 send_sig(SIGPIPE, current, 0);
44984 if (!ret)
44985 ret = -EPIPE;
44986@@ -616,9 +616,9 @@ redo2:
44987 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44988 do_wakeup = 0;
44989 }
44990- pipe->waiting_writers++;
44991+ atomic_inc(&pipe->waiting_writers);
44992 pipe_wait(pipe);
44993- pipe->waiting_writers--;
44994+ atomic_dec(&pipe->waiting_writers);
44995 }
44996 out:
44997 mutex_unlock(&inode->i_mutex);
44998@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
44999 mask = 0;
45000 if (filp->f_mode & FMODE_READ) {
45001 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45002- if (!pipe->writers && filp->f_version != pipe->w_counter)
45003+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45004 mask |= POLLHUP;
45005 }
45006
45007@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45008 * Most Unices do not set POLLERR for FIFOs but on Linux they
45009 * behave exactly like pipes for poll().
45010 */
45011- if (!pipe->readers)
45012+ if (!atomic_read(&pipe->readers))
45013 mask |= POLLERR;
45014 }
45015
45016@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45017
45018 mutex_lock(&inode->i_mutex);
45019 pipe = inode->i_pipe;
45020- pipe->readers -= decr;
45021- pipe->writers -= decw;
45022+ atomic_sub(decr, &pipe->readers);
45023+ atomic_sub(decw, &pipe->writers);
45024
45025- if (!pipe->readers && !pipe->writers) {
45026+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45027 free_pipe_info(inode);
45028 } else {
45029 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45030@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45031
45032 if (inode->i_pipe) {
45033 ret = 0;
45034- inode->i_pipe->readers++;
45035+ atomic_inc(&inode->i_pipe->readers);
45036 }
45037
45038 mutex_unlock(&inode->i_mutex);
45039@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45040
45041 if (inode->i_pipe) {
45042 ret = 0;
45043- inode->i_pipe->writers++;
45044+ atomic_inc(&inode->i_pipe->writers);
45045 }
45046
45047 mutex_unlock(&inode->i_mutex);
45048@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45049 if (inode->i_pipe) {
45050 ret = 0;
45051 if (filp->f_mode & FMODE_READ)
45052- inode->i_pipe->readers++;
45053+ atomic_inc(&inode->i_pipe->readers);
45054 if (filp->f_mode & FMODE_WRITE)
45055- inode->i_pipe->writers++;
45056+ atomic_inc(&inode->i_pipe->writers);
45057 }
45058
45059 mutex_unlock(&inode->i_mutex);
45060@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45061 inode->i_pipe = NULL;
45062 }
45063
45064-static struct vfsmount *pipe_mnt __read_mostly;
45065+struct vfsmount *pipe_mnt __read_mostly;
45066
45067 /*
45068 * pipefs_dname() is called from d_path().
45069@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45070 goto fail_iput;
45071 inode->i_pipe = pipe;
45072
45073- pipe->readers = pipe->writers = 1;
45074+ atomic_set(&pipe->readers, 1);
45075+ atomic_set(&pipe->writers, 1);
45076 inode->i_fop = &rdwr_pipefifo_fops;
45077
45078 /*
45079diff -urNp linux-3.0.7/fs/proc/Kconfig linux-3.0.7/fs/proc/Kconfig
45080--- linux-3.0.7/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
45081+++ linux-3.0.7/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
45082@@ -30,12 +30,12 @@ config PROC_FS
45083
45084 config PROC_KCORE
45085 bool "/proc/kcore support" if !ARM
45086- depends on PROC_FS && MMU
45087+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45088
45089 config PROC_VMCORE
45090 bool "/proc/vmcore support"
45091- depends on PROC_FS && CRASH_DUMP
45092- default y
45093+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45094+ default n
45095 help
45096 Exports the dump image of crashed kernel in ELF format.
45097
45098@@ -59,8 +59,8 @@ config PROC_SYSCTL
45099 limited in memory.
45100
45101 config PROC_PAGE_MONITOR
45102- default y
45103- depends on PROC_FS && MMU
45104+ default n
45105+ depends on PROC_FS && MMU && !GRKERNSEC
45106 bool "Enable /proc page monitoring" if EXPERT
45107 help
45108 Various /proc files exist to monitor process memory utilization:
45109diff -urNp linux-3.0.7/fs/proc/array.c linux-3.0.7/fs/proc/array.c
45110--- linux-3.0.7/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
45111+++ linux-3.0.7/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
45112@@ -60,6 +60,7 @@
45113 #include <linux/tty.h>
45114 #include <linux/string.h>
45115 #include <linux/mman.h>
45116+#include <linux/grsecurity.h>
45117 #include <linux/proc_fs.h>
45118 #include <linux/ioport.h>
45119 #include <linux/uaccess.h>
45120@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45121 seq_putc(m, '\n');
45122 }
45123
45124+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45125+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45126+{
45127+ if (p->mm)
45128+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45129+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45130+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45131+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45132+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45133+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45134+ else
45135+ seq_printf(m, "PaX:\t-----\n");
45136+}
45137+#endif
45138+
45139 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45140 struct pid *pid, struct task_struct *task)
45141 {
45142@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45143 task_cpus_allowed(m, task);
45144 cpuset_task_status_allowed(m, task);
45145 task_context_switch_counts(m, task);
45146+
45147+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45148+ task_pax(m, task);
45149+#endif
45150+
45151+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45152+ task_grsec_rbac(m, task);
45153+#endif
45154+
45155 return 0;
45156 }
45157
45158+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45159+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45160+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45161+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45162+#endif
45163+
45164 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45165 struct pid *pid, struct task_struct *task, int whole)
45166 {
45167@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
45168 cputime_t cutime, cstime, utime, stime;
45169 cputime_t cgtime, gtime;
45170 unsigned long rsslim = 0;
45171- char tcomm[sizeof(task->comm)];
45172+ char tcomm[sizeof(task->comm)] = { 0 };
45173 unsigned long flags;
45174
45175+ pax_track_stack();
45176+
45177 state = *get_task_state(task);
45178 vsize = eip = esp = 0;
45179 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45180@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45181 gtime = task->gtime;
45182 }
45183
45184+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45185+ if (PAX_RAND_FLAGS(mm)) {
45186+ eip = 0;
45187+ esp = 0;
45188+ wchan = 0;
45189+ }
45190+#endif
45191+#ifdef CONFIG_GRKERNSEC_HIDESYM
45192+ wchan = 0;
45193+ eip =0;
45194+ esp =0;
45195+#endif
45196+
45197 /* scale priority and nice values from timeslices to -20..20 */
45198 /* to make it look like a "normal" Unix priority/nice value */
45199 priority = task_prio(task);
45200@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45201 vsize,
45202 mm ? get_mm_rss(mm) : 0,
45203 rsslim,
45204+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45205+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45206+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45207+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45208+#else
45209 mm ? (permitted ? mm->start_code : 1) : 0,
45210 mm ? (permitted ? mm->end_code : 1) : 0,
45211 (permitted && mm) ? mm->start_stack : 0,
45212+#endif
45213 esp,
45214 eip,
45215 /* The signal information here is obsolete.
45216@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45217
45218 return 0;
45219 }
45220+
45221+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45222+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45223+{
45224+ u32 curr_ip = 0;
45225+ unsigned long flags;
45226+
45227+ if (lock_task_sighand(task, &flags)) {
45228+ curr_ip = task->signal->curr_ip;
45229+ unlock_task_sighand(task, &flags);
45230+ }
45231+
45232+ return sprintf(buffer, "%pI4\n", &curr_ip);
45233+}
45234+#endif
45235diff -urNp linux-3.0.7/fs/proc/base.c linux-3.0.7/fs/proc/base.c
45236--- linux-3.0.7/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
45237+++ linux-3.0.7/fs/proc/base.c 2011-10-19 03:59:32.000000000 -0400
45238@@ -107,6 +107,22 @@ struct pid_entry {
45239 union proc_op op;
45240 };
45241
45242+struct getdents_callback {
45243+ struct linux_dirent __user * current_dir;
45244+ struct linux_dirent __user * previous;
45245+ struct file * file;
45246+ int count;
45247+ int error;
45248+};
45249+
45250+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45251+ loff_t offset, u64 ino, unsigned int d_type)
45252+{
45253+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45254+ buf->error = -EINVAL;
45255+ return 0;
45256+}
45257+
45258 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45259 .name = (NAME), \
45260 .len = sizeof(NAME) - 1, \
45261@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45262 if (task == current)
45263 return mm;
45264
45265+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45266+ return ERR_PTR(-EPERM);
45267+
45268 /*
45269 * If current is actively ptrace'ing, and would also be
45270 * permitted to freshly attach with ptrace now, permit it.
45271@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45272 if (!mm->arg_end)
45273 goto out_mm; /* Shh! No looking before we're done */
45274
45275+ if (gr_acl_handle_procpidmem(task))
45276+ goto out_mm;
45277+
45278 len = mm->arg_end - mm->arg_start;
45279
45280 if (len > PAGE_SIZE)
45281@@ -309,12 +331,28 @@ out:
45282 return res;
45283 }
45284
45285+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45286+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45287+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45288+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45289+#endif
45290+
45291 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45292 {
45293 struct mm_struct *mm = mm_for_maps(task);
45294 int res = PTR_ERR(mm);
45295 if (mm && !IS_ERR(mm)) {
45296 unsigned int nwords = 0;
45297+
45298+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45299+ /* allow if we're currently ptracing this task */
45300+ if (PAX_RAND_FLAGS(mm) &&
45301+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45302+ mmput(mm);
45303+ return 0;
45304+ }
45305+#endif
45306+
45307 do {
45308 nwords += 2;
45309 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45310@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45311 }
45312
45313
45314-#ifdef CONFIG_KALLSYMS
45315+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45316 /*
45317 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45318 * Returns the resolved symbol. If that fails, simply return the address.
45319@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45320 mutex_unlock(&task->signal->cred_guard_mutex);
45321 }
45322
45323-#ifdef CONFIG_STACKTRACE
45324+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45325
45326 #define MAX_STACK_TRACE_DEPTH 64
45327
45328@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45329 return count;
45330 }
45331
45332-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45333+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45334 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45335 {
45336 long nr;
45337@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45338 /************************************************************************/
45339
45340 /* permission checks */
45341-static int proc_fd_access_allowed(struct inode *inode)
45342+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45343 {
45344 struct task_struct *task;
45345 int allowed = 0;
45346@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45347 */
45348 task = get_proc_task(inode);
45349 if (task) {
45350- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45351+ if (log)
45352+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45353+ else
45354+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45355 put_task_struct(task);
45356 }
45357 return allowed;
45358@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45359 if (!task)
45360 goto out_no_task;
45361
45362+ if (gr_acl_handle_procpidmem(task))
45363+ goto out;
45364+
45365 ret = -ENOMEM;
45366 page = (char *)__get_free_page(GFP_TEMPORARY);
45367 if (!page)
45368@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
45369 path_put(&nd->path);
45370
45371 /* Are we allowed to snoop on the tasks file descriptors? */
45372- if (!proc_fd_access_allowed(inode))
45373+ if (!proc_fd_access_allowed(inode,0))
45374 goto out;
45375
45376 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45377@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
45378 struct path path;
45379
45380 /* Are we allowed to snoop on the tasks file descriptors? */
45381- if (!proc_fd_access_allowed(inode))
45382- goto out;
45383+ /* logging this is needed for learning on chromium to work properly,
45384+ but we don't want to flood the logs from 'ps' which does a readlink
45385+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45386+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45387+ */
45388+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45389+ if (!proc_fd_access_allowed(inode,0))
45390+ goto out;
45391+ } else {
45392+ if (!proc_fd_access_allowed(inode,1))
45393+ goto out;
45394+ }
45395
45396 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45397 if (error)
45398@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
45399 rcu_read_lock();
45400 cred = __task_cred(task);
45401 inode->i_uid = cred->euid;
45402+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45403+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45404+#else
45405 inode->i_gid = cred->egid;
45406+#endif
45407 rcu_read_unlock();
45408 }
45409 security_task_to_inode(task, inode);
45410@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
45411 struct inode *inode = dentry->d_inode;
45412 struct task_struct *task;
45413 const struct cred *cred;
45414+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45415+ const struct cred *tmpcred = current_cred();
45416+#endif
45417
45418 generic_fillattr(inode, stat);
45419
45420@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
45421 stat->uid = 0;
45422 stat->gid = 0;
45423 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45424+
45425+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45426+ rcu_read_unlock();
45427+ return -ENOENT;
45428+ }
45429+
45430 if (task) {
45431+ cred = __task_cred(task);
45432+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45433+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45434+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45435+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45436+#endif
45437+ ) {
45438+#endif
45439 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45440+#ifdef CONFIG_GRKERNSEC_PROC_USER
45441+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45442+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45443+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45444+#endif
45445 task_dumpable(task)) {
45446- cred = __task_cred(task);
45447 stat->uid = cred->euid;
45448+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45449+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45450+#else
45451 stat->gid = cred->egid;
45452+#endif
45453+ }
45454+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45455+ } else {
45456+ rcu_read_unlock();
45457+ return -ENOENT;
45458 }
45459+#endif
45460 }
45461 rcu_read_unlock();
45462 return 0;
45463@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
45464
45465 if (task) {
45466 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45467+#ifdef CONFIG_GRKERNSEC_PROC_USER
45468+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45469+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45470+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45471+#endif
45472 task_dumpable(task)) {
45473 rcu_read_lock();
45474 cred = __task_cred(task);
45475 inode->i_uid = cred->euid;
45476+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45477+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45478+#else
45479 inode->i_gid = cred->egid;
45480+#endif
45481 rcu_read_unlock();
45482 } else {
45483 inode->i_uid = 0;
45484@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
45485 int fd = proc_fd(inode);
45486
45487 if (task) {
45488- files = get_files_struct(task);
45489+ if (!gr_acl_handle_procpidmem(task))
45490+ files = get_files_struct(task);
45491 put_task_struct(task);
45492 }
45493 if (files) {
45494@@ -2169,11 +2268,21 @@ static const struct file_operations proc
45495 */
45496 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
45497 {
45498+ struct task_struct *task;
45499 int rv = generic_permission(inode, mask, flags, NULL);
45500- if (rv == 0)
45501- return 0;
45502+
45503 if (task_pid(current) == proc_pid(inode))
45504 rv = 0;
45505+
45506+ task = get_proc_task(inode);
45507+ if (task == NULL)
45508+ return rv;
45509+
45510+ if (gr_acl_handle_procpidmem(task))
45511+ rv = -EACCES;
45512+
45513+ put_task_struct(task);
45514+
45515 return rv;
45516 }
45517
45518@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
45519 if (!task)
45520 goto out_no_task;
45521
45522+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45523+ goto out;
45524+
45525 /*
45526 * Yes, it does not scale. And it should not. Don't add
45527 * new entries into /proc/<tgid>/ without very good reasons.
45528@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
45529 if (!task)
45530 goto out_no_task;
45531
45532+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45533+ goto out;
45534+
45535 ret = 0;
45536 i = filp->f_pos;
45537 switch (i) {
45538@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
45539 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45540 void *cookie)
45541 {
45542- char *s = nd_get_link(nd);
45543+ const char *s = nd_get_link(nd);
45544 if (!IS_ERR(s))
45545 __putname(s);
45546 }
45547@@ -2656,6 +2771,7 @@ static struct dentry *proc_base_instanti
45548 if (p->fop)
45549 inode->i_fop = p->fop;
45550 ei->op = p->op;
45551+
45552 d_add(dentry, inode);
45553 error = NULL;
45554 out:
45555@@ -2795,7 +2911,7 @@ static const struct pid_entry tgid_base_
45556 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45557 #endif
45558 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45559-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45560+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45561 INF("syscall", S_IRUGO, proc_pid_syscall),
45562 #endif
45563 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45564@@ -2820,10 +2936,10 @@ static const struct pid_entry tgid_base_
45565 #ifdef CONFIG_SECURITY
45566 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45567 #endif
45568-#ifdef CONFIG_KALLSYMS
45569+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45570 INF("wchan", S_IRUGO, proc_pid_wchan),
45571 #endif
45572-#ifdef CONFIG_STACKTRACE
45573+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45574 ONE("stack", S_IRUGO, proc_pid_stack),
45575 #endif
45576 #ifdef CONFIG_SCHEDSTATS
45577@@ -2857,6 +2973,9 @@ static const struct pid_entry tgid_base_
45578 #ifdef CONFIG_HARDWALL
45579 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45580 #endif
45581+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45582+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45583+#endif
45584 };
45585
45586 static int proc_tgid_base_readdir(struct file * filp,
45587@@ -2982,7 +3101,14 @@ static struct dentry *proc_pid_instantia
45588 if (!inode)
45589 goto out;
45590
45591+#ifdef CONFIG_GRKERNSEC_PROC_USER
45592+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45593+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45594+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45595+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45596+#else
45597 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45598+#endif
45599 inode->i_op = &proc_tgid_base_inode_operations;
45600 inode->i_fop = &proc_tgid_base_operations;
45601 inode->i_flags|=S_IMMUTABLE;
45602@@ -3024,7 +3150,14 @@ struct dentry *proc_pid_lookup(struct in
45603 if (!task)
45604 goto out;
45605
45606+ if (!has_group_leader_pid(task))
45607+ goto out_put_task;
45608+
45609+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45610+ goto out_put_task;
45611+
45612 result = proc_pid_instantiate(dir, dentry, task, NULL);
45613+out_put_task:
45614 put_task_struct(task);
45615 out:
45616 return result;
45617@@ -3089,6 +3222,11 @@ int proc_pid_readdir(struct file * filp,
45618 {
45619 unsigned int nr;
45620 struct task_struct *reaper;
45621+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45622+ const struct cred *tmpcred = current_cred();
45623+ const struct cred *itercred;
45624+#endif
45625+ filldir_t __filldir = filldir;
45626 struct tgid_iter iter;
45627 struct pid_namespace *ns;
45628
45629@@ -3112,8 +3250,27 @@ int proc_pid_readdir(struct file * filp,
45630 for (iter = next_tgid(ns, iter);
45631 iter.task;
45632 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45633+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45634+ rcu_read_lock();
45635+ itercred = __task_cred(iter.task);
45636+#endif
45637+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45638+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45639+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45640+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45641+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45642+#endif
45643+ )
45644+#endif
45645+ )
45646+ __filldir = &gr_fake_filldir;
45647+ else
45648+ __filldir = filldir;
45649+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45650+ rcu_read_unlock();
45651+#endif
45652 filp->f_pos = iter.tgid + TGID_OFFSET;
45653- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45654+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45655 put_task_struct(iter.task);
45656 goto out;
45657 }
45658@@ -3141,7 +3298,7 @@ static const struct pid_entry tid_base_s
45659 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45660 #endif
45661 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45662-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45663+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45664 INF("syscall", S_IRUGO, proc_pid_syscall),
45665 #endif
45666 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45667@@ -3165,10 +3322,10 @@ static const struct pid_entry tid_base_s
45668 #ifdef CONFIG_SECURITY
45669 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45670 #endif
45671-#ifdef CONFIG_KALLSYMS
45672+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45673 INF("wchan", S_IRUGO, proc_pid_wchan),
45674 #endif
45675-#ifdef CONFIG_STACKTRACE
45676+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45677 ONE("stack", S_IRUGO, proc_pid_stack),
45678 #endif
45679 #ifdef CONFIG_SCHEDSTATS
45680diff -urNp linux-3.0.7/fs/proc/cmdline.c linux-3.0.7/fs/proc/cmdline.c
45681--- linux-3.0.7/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
45682+++ linux-3.0.7/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
45683@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45684
45685 static int __init proc_cmdline_init(void)
45686 {
45687+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45688+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45689+#else
45690 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45691+#endif
45692 return 0;
45693 }
45694 module_init(proc_cmdline_init);
45695diff -urNp linux-3.0.7/fs/proc/devices.c linux-3.0.7/fs/proc/devices.c
45696--- linux-3.0.7/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
45697+++ linux-3.0.7/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
45698@@ -64,7 +64,11 @@ static const struct file_operations proc
45699
45700 static int __init proc_devices_init(void)
45701 {
45702+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45703+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45704+#else
45705 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45706+#endif
45707 return 0;
45708 }
45709 module_init(proc_devices_init);
45710diff -urNp linux-3.0.7/fs/proc/inode.c linux-3.0.7/fs/proc/inode.c
45711--- linux-3.0.7/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
45712+++ linux-3.0.7/fs/proc/inode.c 2011-10-19 03:59:32.000000000 -0400
45713@@ -18,12 +18,18 @@
45714 #include <linux/module.h>
45715 #include <linux/sysctl.h>
45716 #include <linux/slab.h>
45717+#include <linux/grsecurity.h>
45718
45719 #include <asm/system.h>
45720 #include <asm/uaccess.h>
45721
45722 #include "internal.h"
45723
45724+#ifdef CONFIG_PROC_SYSCTL
45725+extern const struct inode_operations proc_sys_inode_operations;
45726+extern const struct inode_operations proc_sys_dir_operations;
45727+#endif
45728+
45729 static void proc_evict_inode(struct inode *inode)
45730 {
45731 struct proc_dir_entry *de;
45732@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45733 ns_ops = PROC_I(inode)->ns_ops;
45734 if (ns_ops && ns_ops->put)
45735 ns_ops->put(PROC_I(inode)->ns);
45736+
45737+#ifdef CONFIG_PROC_SYSCTL
45738+ if (inode->i_op == &proc_sys_inode_operations ||
45739+ inode->i_op == &proc_sys_dir_operations)
45740+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45741+#endif
45742+
45743 }
45744
45745 static struct kmem_cache * proc_inode_cachep;
45746@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
45747 if (de->mode) {
45748 inode->i_mode = de->mode;
45749 inode->i_uid = de->uid;
45750+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45751+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45752+#else
45753 inode->i_gid = de->gid;
45754+#endif
45755 }
45756 if (de->size)
45757 inode->i_size = de->size;
45758diff -urNp linux-3.0.7/fs/proc/internal.h linux-3.0.7/fs/proc/internal.h
45759--- linux-3.0.7/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
45760+++ linux-3.0.7/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
45761@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45762 struct pid *pid, struct task_struct *task);
45763 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45764 struct pid *pid, struct task_struct *task);
45765+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45766+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45767+#endif
45768 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45769
45770 extern const struct file_operations proc_maps_operations;
45771diff -urNp linux-3.0.7/fs/proc/kcore.c linux-3.0.7/fs/proc/kcore.c
45772--- linux-3.0.7/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
45773+++ linux-3.0.7/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
45774@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
45775 off_t offset = 0;
45776 struct kcore_list *m;
45777
45778+ pax_track_stack();
45779+
45780 /* setup ELF header */
45781 elf = (struct elfhdr *) bufp;
45782 bufp += sizeof(struct elfhdr);
45783@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
45784 * the addresses in the elf_phdr on our list.
45785 */
45786 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45787- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45788+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45789+ if (tsz > buflen)
45790 tsz = buflen;
45791-
45792+
45793 while (buflen) {
45794 struct kcore_list *m;
45795
45796@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
45797 kfree(elf_buf);
45798 } else {
45799 if (kern_addr_valid(start)) {
45800- unsigned long n;
45801+ char *elf_buf;
45802+ mm_segment_t oldfs;
45803
45804- n = copy_to_user(buffer, (char *)start, tsz);
45805- /*
45806- * We cannot distingush between fault on source
45807- * and fault on destination. When this happens
45808- * we clear too and hope it will trigger the
45809- * EFAULT again.
45810- */
45811- if (n) {
45812- if (clear_user(buffer + tsz - n,
45813- n))
45814+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45815+ if (!elf_buf)
45816+ return -ENOMEM;
45817+ oldfs = get_fs();
45818+ set_fs(KERNEL_DS);
45819+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45820+ set_fs(oldfs);
45821+ if (copy_to_user(buffer, elf_buf, tsz)) {
45822+ kfree(elf_buf);
45823 return -EFAULT;
45824+ }
45825 }
45826+ set_fs(oldfs);
45827+ kfree(elf_buf);
45828 } else {
45829 if (clear_user(buffer, tsz))
45830 return -EFAULT;
45831@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
45832
45833 static int open_kcore(struct inode *inode, struct file *filp)
45834 {
45835+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45836+ return -EPERM;
45837+#endif
45838 if (!capable(CAP_SYS_RAWIO))
45839 return -EPERM;
45840 if (kcore_need_update)
45841diff -urNp linux-3.0.7/fs/proc/meminfo.c linux-3.0.7/fs/proc/meminfo.c
45842--- linux-3.0.7/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
45843+++ linux-3.0.7/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
45844@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45845 unsigned long pages[NR_LRU_LISTS];
45846 int lru;
45847
45848+ pax_track_stack();
45849+
45850 /*
45851 * display in kilobytes.
45852 */
45853@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
45854 vmi.used >> 10,
45855 vmi.largest_chunk >> 10
45856 #ifdef CONFIG_MEMORY_FAILURE
45857- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45858+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45859 #endif
45860 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
45861 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
45862diff -urNp linux-3.0.7/fs/proc/nommu.c linux-3.0.7/fs/proc/nommu.c
45863--- linux-3.0.7/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
45864+++ linux-3.0.7/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
45865@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
45866 if (len < 1)
45867 len = 1;
45868 seq_printf(m, "%*c", len, ' ');
45869- seq_path(m, &file->f_path, "");
45870+ seq_path(m, &file->f_path, "\n\\");
45871 }
45872
45873 seq_putc(m, '\n');
45874diff -urNp linux-3.0.7/fs/proc/proc_net.c linux-3.0.7/fs/proc/proc_net.c
45875--- linux-3.0.7/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
45876+++ linux-3.0.7/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
45877@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
45878 struct task_struct *task;
45879 struct nsproxy *ns;
45880 struct net *net = NULL;
45881+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45882+ const struct cred *cred = current_cred();
45883+#endif
45884+
45885+#ifdef CONFIG_GRKERNSEC_PROC_USER
45886+ if (cred->fsuid)
45887+ return net;
45888+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45889+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45890+ return net;
45891+#endif
45892
45893 rcu_read_lock();
45894 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45895diff -urNp linux-3.0.7/fs/proc/proc_sysctl.c linux-3.0.7/fs/proc/proc_sysctl.c
45896--- linux-3.0.7/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
45897+++ linux-3.0.7/fs/proc/proc_sysctl.c 2011-10-19 03:59:32.000000000 -0400
45898@@ -8,11 +8,13 @@
45899 #include <linux/namei.h>
45900 #include "internal.h"
45901
45902+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45903+
45904 static const struct dentry_operations proc_sys_dentry_operations;
45905 static const struct file_operations proc_sys_file_operations;
45906-static const struct inode_operations proc_sys_inode_operations;
45907+const struct inode_operations proc_sys_inode_operations;
45908 static const struct file_operations proc_sys_dir_file_operations;
45909-static const struct inode_operations proc_sys_dir_operations;
45910+const struct inode_operations proc_sys_dir_operations;
45911
45912 static struct inode *proc_sys_make_inode(struct super_block *sb,
45913 struct ctl_table_header *head, struct ctl_table *table)
45914@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
45915
45916 err = NULL;
45917 d_set_d_op(dentry, &proc_sys_dentry_operations);
45918+
45919+ gr_handle_proc_create(dentry, inode);
45920+
45921 d_add(dentry, inode);
45922
45923+ if (gr_handle_sysctl(p, MAY_EXEC))
45924+ err = ERR_PTR(-ENOENT);
45925+
45926 out:
45927 sysctl_head_finish(head);
45928 return err;
45929@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
45930 return -ENOMEM;
45931 } else {
45932 d_set_d_op(child, &proc_sys_dentry_operations);
45933+
45934+ gr_handle_proc_create(child, inode);
45935+
45936 d_add(child, inode);
45937 }
45938 } else {
45939@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
45940 if (*pos < file->f_pos)
45941 continue;
45942
45943+ if (gr_handle_sysctl(table, 0))
45944+ continue;
45945+
45946 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45947 if (res)
45948 return res;
45949@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
45950 if (IS_ERR(head))
45951 return PTR_ERR(head);
45952
45953+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45954+ return -ENOENT;
45955+
45956 generic_fillattr(inode, stat);
45957 if (table)
45958 stat->mode = (stat->mode & S_IFMT) | table->mode;
45959@@ -374,13 +391,13 @@ static const struct file_operations proc
45960 .llseek = generic_file_llseek,
45961 };
45962
45963-static const struct inode_operations proc_sys_inode_operations = {
45964+const struct inode_operations proc_sys_inode_operations = {
45965 .permission = proc_sys_permission,
45966 .setattr = proc_sys_setattr,
45967 .getattr = proc_sys_getattr,
45968 };
45969
45970-static const struct inode_operations proc_sys_dir_operations = {
45971+const struct inode_operations proc_sys_dir_operations = {
45972 .lookup = proc_sys_lookup,
45973 .permission = proc_sys_permission,
45974 .setattr = proc_sys_setattr,
45975diff -urNp linux-3.0.7/fs/proc/root.c linux-3.0.7/fs/proc/root.c
45976--- linux-3.0.7/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
45977+++ linux-3.0.7/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
45978@@ -123,7 +123,15 @@ void __init proc_root_init(void)
45979 #ifdef CONFIG_PROC_DEVICETREE
45980 proc_device_tree_init();
45981 #endif
45982+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45983+#ifdef CONFIG_GRKERNSEC_PROC_USER
45984+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45985+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45986+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45987+#endif
45988+#else
45989 proc_mkdir("bus", NULL);
45990+#endif
45991 proc_sys_init();
45992 }
45993
45994diff -urNp linux-3.0.7/fs/proc/task_mmu.c linux-3.0.7/fs/proc/task_mmu.c
45995--- linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:54:54.000000000 -0400
45996+++ linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:55:28.000000000 -0400
45997@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
45998 "VmExe:\t%8lu kB\n"
45999 "VmLib:\t%8lu kB\n"
46000 "VmPTE:\t%8lu kB\n"
46001- "VmSwap:\t%8lu kB\n",
46002- hiwater_vm << (PAGE_SHIFT-10),
46003+ "VmSwap:\t%8lu kB\n"
46004+
46005+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46006+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46007+#endif
46008+
46009+ ,hiwater_vm << (PAGE_SHIFT-10),
46010 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46011 mm->locked_vm << (PAGE_SHIFT-10),
46012 hiwater_rss << (PAGE_SHIFT-10),
46013@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46014 data << (PAGE_SHIFT-10),
46015 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46016 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46017- swap << (PAGE_SHIFT-10));
46018+ swap << (PAGE_SHIFT-10)
46019+
46020+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46021+ , mm->context.user_cs_base, mm->context.user_cs_limit
46022+#endif
46023+
46024+ );
46025 }
46026
46027 unsigned long task_vsize(struct mm_struct *mm)
46028@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46029 return ret;
46030 }
46031
46032+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46033+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46034+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46035+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46036+#endif
46037+
46038 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46039 {
46040 struct mm_struct *mm = vma->vm_mm;
46041@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46042 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46043 }
46044
46045- /* We don't show the stack guard page in /proc/maps */
46046+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46047+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46048+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46049+#else
46050 start = vma->vm_start;
46051- if (stack_guard_page_start(vma, start))
46052- start += PAGE_SIZE;
46053 end = vma->vm_end;
46054- if (stack_guard_page_end(vma, end))
46055- end -= PAGE_SIZE;
46056+#endif
46057
46058 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46059 start,
46060@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46061 flags & VM_WRITE ? 'w' : '-',
46062 flags & VM_EXEC ? 'x' : '-',
46063 flags & VM_MAYSHARE ? 's' : 'p',
46064+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46065+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46066+#else
46067 pgoff,
46068+#endif
46069 MAJOR(dev), MINOR(dev), ino, &len);
46070
46071 /*
46072@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46073 */
46074 if (file) {
46075 pad_len_spaces(m, len);
46076- seq_path(m, &file->f_path, "\n");
46077+ seq_path(m, &file->f_path, "\n\\");
46078 } else {
46079 const char *name = arch_vma_name(vma);
46080 if (!name) {
46081@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46082 if (vma->vm_start <= mm->brk &&
46083 vma->vm_end >= mm->start_brk) {
46084 name = "[heap]";
46085- } else if (vma->vm_start <= mm->start_stack &&
46086- vma->vm_end >= mm->start_stack) {
46087+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46088+ (vma->vm_start <= mm->start_stack &&
46089+ vma->vm_end >= mm->start_stack)) {
46090 name = "[stack]";
46091 }
46092 } else {
46093@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46094 };
46095
46096 memset(&mss, 0, sizeof mss);
46097- mss.vma = vma;
46098- /* mmap_sem is held in m_start */
46099- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46100- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46101-
46102+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46103+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46104+#endif
46105+ mss.vma = vma;
46106+ /* mmap_sem is held in m_start */
46107+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46108+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46109+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46110+ }
46111+#endif
46112 show_map_vma(m, vma);
46113
46114 seq_printf(m,
46115@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46116 "KernelPageSize: %8lu kB\n"
46117 "MMUPageSize: %8lu kB\n"
46118 "Locked: %8lu kB\n",
46119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46120+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46121+#else
46122 (vma->vm_end - vma->vm_start) >> 10,
46123+#endif
46124 mss.resident >> 10,
46125 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46126 mss.shared_clean >> 10,
46127@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46128
46129 if (file) {
46130 seq_printf(m, " file=");
46131- seq_path(m, &file->f_path, "\n\t= ");
46132+ seq_path(m, &file->f_path, "\n\t\\= ");
46133 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46134 seq_printf(m, " heap");
46135 } else if (vma->vm_start <= mm->start_stack &&
46136diff -urNp linux-3.0.7/fs/proc/task_nommu.c linux-3.0.7/fs/proc/task_nommu.c
46137--- linux-3.0.7/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
46138+++ linux-3.0.7/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
46139@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46140 else
46141 bytes += kobjsize(mm);
46142
46143- if (current->fs && current->fs->users > 1)
46144+ if (current->fs && atomic_read(&current->fs->users) > 1)
46145 sbytes += kobjsize(current->fs);
46146 else
46147 bytes += kobjsize(current->fs);
46148@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46149
46150 if (file) {
46151 pad_len_spaces(m, len);
46152- seq_path(m, &file->f_path, "");
46153+ seq_path(m, &file->f_path, "\n\\");
46154 } else if (mm) {
46155 if (vma->vm_start <= mm->start_stack &&
46156 vma->vm_end >= mm->start_stack) {
46157diff -urNp linux-3.0.7/fs/quota/netlink.c linux-3.0.7/fs/quota/netlink.c
46158--- linux-3.0.7/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
46159+++ linux-3.0.7/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
46160@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46161 void quota_send_warning(short type, unsigned int id, dev_t dev,
46162 const char warntype)
46163 {
46164- static atomic_t seq;
46165+ static atomic_unchecked_t seq;
46166 struct sk_buff *skb;
46167 void *msg_head;
46168 int ret;
46169@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46170 "VFS: Not enough memory to send quota warning.\n");
46171 return;
46172 }
46173- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46174+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46175 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46176 if (!msg_head) {
46177 printk(KERN_ERR
46178diff -urNp linux-3.0.7/fs/readdir.c linux-3.0.7/fs/readdir.c
46179--- linux-3.0.7/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
46180+++ linux-3.0.7/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
46181@@ -17,6 +17,7 @@
46182 #include <linux/security.h>
46183 #include <linux/syscalls.h>
46184 #include <linux/unistd.h>
46185+#include <linux/namei.h>
46186
46187 #include <asm/uaccess.h>
46188
46189@@ -67,6 +68,7 @@ struct old_linux_dirent {
46190
46191 struct readdir_callback {
46192 struct old_linux_dirent __user * dirent;
46193+ struct file * file;
46194 int result;
46195 };
46196
46197@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46198 buf->result = -EOVERFLOW;
46199 return -EOVERFLOW;
46200 }
46201+
46202+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46203+ return 0;
46204+
46205 buf->result++;
46206 dirent = buf->dirent;
46207 if (!access_ok(VERIFY_WRITE, dirent,
46208@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46209
46210 buf.result = 0;
46211 buf.dirent = dirent;
46212+ buf.file = file;
46213
46214 error = vfs_readdir(file, fillonedir, &buf);
46215 if (buf.result)
46216@@ -142,6 +149,7 @@ struct linux_dirent {
46217 struct getdents_callback {
46218 struct linux_dirent __user * current_dir;
46219 struct linux_dirent __user * previous;
46220+ struct file * file;
46221 int count;
46222 int error;
46223 };
46224@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46225 buf->error = -EOVERFLOW;
46226 return -EOVERFLOW;
46227 }
46228+
46229+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46230+ return 0;
46231+
46232 dirent = buf->previous;
46233 if (dirent) {
46234 if (__put_user(offset, &dirent->d_off))
46235@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46236 buf.previous = NULL;
46237 buf.count = count;
46238 buf.error = 0;
46239+ buf.file = file;
46240
46241 error = vfs_readdir(file, filldir, &buf);
46242 if (error >= 0)
46243@@ -229,6 +242,7 @@ out:
46244 struct getdents_callback64 {
46245 struct linux_dirent64 __user * current_dir;
46246 struct linux_dirent64 __user * previous;
46247+ struct file *file;
46248 int count;
46249 int error;
46250 };
46251@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46252 buf->error = -EINVAL; /* only used if we fail.. */
46253 if (reclen > buf->count)
46254 return -EINVAL;
46255+
46256+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46257+ return 0;
46258+
46259 dirent = buf->previous;
46260 if (dirent) {
46261 if (__put_user(offset, &dirent->d_off))
46262@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46263
46264 buf.current_dir = dirent;
46265 buf.previous = NULL;
46266+ buf.file = file;
46267 buf.count = count;
46268 buf.error = 0;
46269
46270@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46271 error = buf.error;
46272 lastdirent = buf.previous;
46273 if (lastdirent) {
46274- typeof(lastdirent->d_off) d_off = file->f_pos;
46275+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46276 if (__put_user(d_off, &lastdirent->d_off))
46277 error = -EFAULT;
46278 else
46279diff -urNp linux-3.0.7/fs/reiserfs/dir.c linux-3.0.7/fs/reiserfs/dir.c
46280--- linux-3.0.7/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
46281+++ linux-3.0.7/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
46282@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46283 struct reiserfs_dir_entry de;
46284 int ret = 0;
46285
46286+ pax_track_stack();
46287+
46288 reiserfs_write_lock(inode->i_sb);
46289
46290 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46291diff -urNp linux-3.0.7/fs/reiserfs/do_balan.c linux-3.0.7/fs/reiserfs/do_balan.c
46292--- linux-3.0.7/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
46293+++ linux-3.0.7/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
46294@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46295 return;
46296 }
46297
46298- atomic_inc(&(fs_generation(tb->tb_sb)));
46299+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46300 do_balance_starts(tb);
46301
46302 /* balance leaf returns 0 except if combining L R and S into
46303diff -urNp linux-3.0.7/fs/reiserfs/journal.c linux-3.0.7/fs/reiserfs/journal.c
46304--- linux-3.0.7/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
46305+++ linux-3.0.7/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
46306@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
46307 struct buffer_head *bh;
46308 int i, j;
46309
46310+ pax_track_stack();
46311+
46312 bh = __getblk(dev, block, bufsize);
46313 if (buffer_uptodate(bh))
46314 return (bh);
46315diff -urNp linux-3.0.7/fs/reiserfs/namei.c linux-3.0.7/fs/reiserfs/namei.c
46316--- linux-3.0.7/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
46317+++ linux-3.0.7/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
46318@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46319 unsigned long savelink = 1;
46320 struct timespec ctime;
46321
46322+ pax_track_stack();
46323+
46324 /* three balancings: (1) old name removal, (2) new name insertion
46325 and (3) maybe "save" link insertion
46326 stat data updates: (1) old directory,
46327diff -urNp linux-3.0.7/fs/reiserfs/procfs.c linux-3.0.7/fs/reiserfs/procfs.c
46328--- linux-3.0.7/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
46329+++ linux-3.0.7/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
46330@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46331 "SMALL_TAILS " : "NO_TAILS ",
46332 replay_only(sb) ? "REPLAY_ONLY " : "",
46333 convert_reiserfs(sb) ? "CONV " : "",
46334- atomic_read(&r->s_generation_counter),
46335+ atomic_read_unchecked(&r->s_generation_counter),
46336 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46337 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46338 SF(s_good_search_by_key_reada), SF(s_bmaps),
46339@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46340 struct journal_params *jp = &rs->s_v1.s_journal;
46341 char b[BDEVNAME_SIZE];
46342
46343+ pax_track_stack();
46344+
46345 seq_printf(m, /* on-disk fields */
46346 "jp_journal_1st_block: \t%i\n"
46347 "jp_journal_dev: \t%s[%x]\n"
46348diff -urNp linux-3.0.7/fs/reiserfs/stree.c linux-3.0.7/fs/reiserfs/stree.c
46349--- linux-3.0.7/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
46350+++ linux-3.0.7/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
46351@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46352 int iter = 0;
46353 #endif
46354
46355+ pax_track_stack();
46356+
46357 BUG_ON(!th->t_trans_id);
46358
46359 init_tb_struct(th, &s_del_balance, sb, path,
46360@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46361 int retval;
46362 int quota_cut_bytes = 0;
46363
46364+ pax_track_stack();
46365+
46366 BUG_ON(!th->t_trans_id);
46367
46368 le_key2cpu_key(&cpu_key, key);
46369@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46370 int quota_cut_bytes;
46371 loff_t tail_pos = 0;
46372
46373+ pax_track_stack();
46374+
46375 BUG_ON(!th->t_trans_id);
46376
46377 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46378@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46379 int retval;
46380 int fs_gen;
46381
46382+ pax_track_stack();
46383+
46384 BUG_ON(!th->t_trans_id);
46385
46386 fs_gen = get_generation(inode->i_sb);
46387@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46388 int fs_gen = 0;
46389 int quota_bytes = 0;
46390
46391+ pax_track_stack();
46392+
46393 BUG_ON(!th->t_trans_id);
46394
46395 if (inode) { /* Do we count quotas for item? */
46396diff -urNp linux-3.0.7/fs/reiserfs/super.c linux-3.0.7/fs/reiserfs/super.c
46397--- linux-3.0.7/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
46398+++ linux-3.0.7/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
46399@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46400 {.option_name = NULL}
46401 };
46402
46403+ pax_track_stack();
46404+
46405 *blocks = 0;
46406 if (!options || !*options)
46407 /* use default configuration: create tails, journaling on, no
46408diff -urNp linux-3.0.7/fs/select.c linux-3.0.7/fs/select.c
46409--- linux-3.0.7/fs/select.c 2011-07-21 22:17:23.000000000 -0400
46410+++ linux-3.0.7/fs/select.c 2011-08-23 21:48:14.000000000 -0400
46411@@ -20,6 +20,7 @@
46412 #include <linux/module.h>
46413 #include <linux/slab.h>
46414 #include <linux/poll.h>
46415+#include <linux/security.h>
46416 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46417 #include <linux/file.h>
46418 #include <linux/fdtable.h>
46419@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46420 int retval, i, timed_out = 0;
46421 unsigned long slack = 0;
46422
46423+ pax_track_stack();
46424+
46425 rcu_read_lock();
46426 retval = max_select_fd(n, fds);
46427 rcu_read_unlock();
46428@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46429 /* Allocate small arguments on the stack to save memory and be faster */
46430 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46431
46432+ pax_track_stack();
46433+
46434 ret = -EINVAL;
46435 if (n < 0)
46436 goto out_nofds;
46437@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46438 struct poll_list *walk = head;
46439 unsigned long todo = nfds;
46440
46441+ pax_track_stack();
46442+
46443+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46444 if (nfds > rlimit(RLIMIT_NOFILE))
46445 return -EINVAL;
46446
46447diff -urNp linux-3.0.7/fs/seq_file.c linux-3.0.7/fs/seq_file.c
46448--- linux-3.0.7/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
46449+++ linux-3.0.7/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
46450@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46451 return 0;
46452 }
46453 if (!m->buf) {
46454- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46455+ m->size = PAGE_SIZE;
46456+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46457 if (!m->buf)
46458 return -ENOMEM;
46459 }
46460@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46461 Eoverflow:
46462 m->op->stop(m, p);
46463 kfree(m->buf);
46464- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46465+ m->size <<= 1;
46466+ m->buf = kmalloc(m->size, GFP_KERNEL);
46467 return !m->buf ? -ENOMEM : -EAGAIN;
46468 }
46469
46470@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46471 m->version = file->f_version;
46472 /* grab buffer if we didn't have one */
46473 if (!m->buf) {
46474- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46475+ m->size = PAGE_SIZE;
46476+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46477 if (!m->buf)
46478 goto Enomem;
46479 }
46480@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46481 goto Fill;
46482 m->op->stop(m, p);
46483 kfree(m->buf);
46484- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46485+ m->size <<= 1;
46486+ m->buf = kmalloc(m->size, GFP_KERNEL);
46487 if (!m->buf)
46488 goto Enomem;
46489 m->count = 0;
46490@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46491 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46492 void *data)
46493 {
46494- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46495+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46496 int res = -ENOMEM;
46497
46498 if (op) {
46499diff -urNp linux-3.0.7/fs/splice.c linux-3.0.7/fs/splice.c
46500--- linux-3.0.7/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
46501+++ linux-3.0.7/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
46502@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46503 pipe_lock(pipe);
46504
46505 for (;;) {
46506- if (!pipe->readers) {
46507+ if (!atomic_read(&pipe->readers)) {
46508 send_sig(SIGPIPE, current, 0);
46509 if (!ret)
46510 ret = -EPIPE;
46511@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46512 do_wakeup = 0;
46513 }
46514
46515- pipe->waiting_writers++;
46516+ atomic_inc(&pipe->waiting_writers);
46517 pipe_wait(pipe);
46518- pipe->waiting_writers--;
46519+ atomic_dec(&pipe->waiting_writers);
46520 }
46521
46522 pipe_unlock(pipe);
46523@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46524 .spd_release = spd_release_page,
46525 };
46526
46527+ pax_track_stack();
46528+
46529 if (splice_grow_spd(pipe, &spd))
46530 return -ENOMEM;
46531
46532@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46533 old_fs = get_fs();
46534 set_fs(get_ds());
46535 /* The cast to a user pointer is valid due to the set_fs() */
46536- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46537+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46538 set_fs(old_fs);
46539
46540 return res;
46541@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46542 old_fs = get_fs();
46543 set_fs(get_ds());
46544 /* The cast to a user pointer is valid due to the set_fs() */
46545- res = vfs_write(file, (const char __user *)buf, count, &pos);
46546+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46547 set_fs(old_fs);
46548
46549 return res;
46550@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46551 .spd_release = spd_release_page,
46552 };
46553
46554+ pax_track_stack();
46555+
46556 if (splice_grow_spd(pipe, &spd))
46557 return -ENOMEM;
46558
46559@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46560 goto err;
46561
46562 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46563- vec[i].iov_base = (void __user *) page_address(page);
46564+ vec[i].iov_base = (void __force_user *) page_address(page);
46565 vec[i].iov_len = this_len;
46566 spd.pages[i] = page;
46567 spd.nr_pages++;
46568@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46569 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46570 {
46571 while (!pipe->nrbufs) {
46572- if (!pipe->writers)
46573+ if (!atomic_read(&pipe->writers))
46574 return 0;
46575
46576- if (!pipe->waiting_writers && sd->num_spliced)
46577+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46578 return 0;
46579
46580 if (sd->flags & SPLICE_F_NONBLOCK)
46581@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46582 * out of the pipe right after the splice_to_pipe(). So set
46583 * PIPE_READERS appropriately.
46584 */
46585- pipe->readers = 1;
46586+ atomic_set(&pipe->readers, 1);
46587
46588 current->splice_pipe = pipe;
46589 }
46590@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46591 };
46592 long ret;
46593
46594+ pax_track_stack();
46595+
46596 pipe = get_pipe_info(file);
46597 if (!pipe)
46598 return -EBADF;
46599@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46600 ret = -ERESTARTSYS;
46601 break;
46602 }
46603- if (!pipe->writers)
46604+ if (!atomic_read(&pipe->writers))
46605 break;
46606- if (!pipe->waiting_writers) {
46607+ if (!atomic_read(&pipe->waiting_writers)) {
46608 if (flags & SPLICE_F_NONBLOCK) {
46609 ret = -EAGAIN;
46610 break;
46611@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46612 pipe_lock(pipe);
46613
46614 while (pipe->nrbufs >= pipe->buffers) {
46615- if (!pipe->readers) {
46616+ if (!atomic_read(&pipe->readers)) {
46617 send_sig(SIGPIPE, current, 0);
46618 ret = -EPIPE;
46619 break;
46620@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46621 ret = -ERESTARTSYS;
46622 break;
46623 }
46624- pipe->waiting_writers++;
46625+ atomic_inc(&pipe->waiting_writers);
46626 pipe_wait(pipe);
46627- pipe->waiting_writers--;
46628+ atomic_dec(&pipe->waiting_writers);
46629 }
46630
46631 pipe_unlock(pipe);
46632@@ -1819,14 +1825,14 @@ retry:
46633 pipe_double_lock(ipipe, opipe);
46634
46635 do {
46636- if (!opipe->readers) {
46637+ if (!atomic_read(&opipe->readers)) {
46638 send_sig(SIGPIPE, current, 0);
46639 if (!ret)
46640 ret = -EPIPE;
46641 break;
46642 }
46643
46644- if (!ipipe->nrbufs && !ipipe->writers)
46645+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46646 break;
46647
46648 /*
46649@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46650 pipe_double_lock(ipipe, opipe);
46651
46652 do {
46653- if (!opipe->readers) {
46654+ if (!atomic_read(&opipe->readers)) {
46655 send_sig(SIGPIPE, current, 0);
46656 if (!ret)
46657 ret = -EPIPE;
46658@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46659 * return EAGAIN if we have the potential of some data in the
46660 * future, otherwise just return 0
46661 */
46662- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46663+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46664 ret = -EAGAIN;
46665
46666 pipe_unlock(ipipe);
46667diff -urNp linux-3.0.7/fs/sysfs/file.c linux-3.0.7/fs/sysfs/file.c
46668--- linux-3.0.7/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
46669+++ linux-3.0.7/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
46670@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46671
46672 struct sysfs_open_dirent {
46673 atomic_t refcnt;
46674- atomic_t event;
46675+ atomic_unchecked_t event;
46676 wait_queue_head_t poll;
46677 struct list_head buffers; /* goes through sysfs_buffer.list */
46678 };
46679@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46680 if (!sysfs_get_active(attr_sd))
46681 return -ENODEV;
46682
46683- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46684+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46685 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46686
46687 sysfs_put_active(attr_sd);
46688@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46689 return -ENOMEM;
46690
46691 atomic_set(&new_od->refcnt, 0);
46692- atomic_set(&new_od->event, 1);
46693+ atomic_set_unchecked(&new_od->event, 1);
46694 init_waitqueue_head(&new_od->poll);
46695 INIT_LIST_HEAD(&new_od->buffers);
46696 goto retry;
46697@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46698
46699 sysfs_put_active(attr_sd);
46700
46701- if (buffer->event != atomic_read(&od->event))
46702+ if (buffer->event != atomic_read_unchecked(&od->event))
46703 goto trigger;
46704
46705 return DEFAULT_POLLMASK;
46706@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46707
46708 od = sd->s_attr.open;
46709 if (od) {
46710- atomic_inc(&od->event);
46711+ atomic_inc_unchecked(&od->event);
46712 wake_up_interruptible(&od->poll);
46713 }
46714
46715diff -urNp linux-3.0.7/fs/sysfs/mount.c linux-3.0.7/fs/sysfs/mount.c
46716--- linux-3.0.7/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
46717+++ linux-3.0.7/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
46718@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46719 .s_name = "",
46720 .s_count = ATOMIC_INIT(1),
46721 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46722+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46723+ .s_mode = S_IFDIR | S_IRWXU,
46724+#else
46725 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46726+#endif
46727 .s_ino = 1,
46728 };
46729
46730diff -urNp linux-3.0.7/fs/sysfs/symlink.c linux-3.0.7/fs/sysfs/symlink.c
46731--- linux-3.0.7/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
46732+++ linux-3.0.7/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
46733@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46734
46735 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46736 {
46737- char *page = nd_get_link(nd);
46738+ const char *page = nd_get_link(nd);
46739 if (!IS_ERR(page))
46740 free_page((unsigned long)page);
46741 }
46742diff -urNp linux-3.0.7/fs/udf/inode.c linux-3.0.7/fs/udf/inode.c
46743--- linux-3.0.7/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
46744+++ linux-3.0.7/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
46745@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46746 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46747 int lastblock = 0;
46748
46749+ pax_track_stack();
46750+
46751 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46752 prev_epos.block = iinfo->i_location;
46753 prev_epos.bh = NULL;
46754diff -urNp linux-3.0.7/fs/udf/misc.c linux-3.0.7/fs/udf/misc.c
46755--- linux-3.0.7/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
46756+++ linux-3.0.7/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
46757@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46758
46759 u8 udf_tag_checksum(const struct tag *t)
46760 {
46761- u8 *data = (u8 *)t;
46762+ const u8 *data = (const u8 *)t;
46763 u8 checksum = 0;
46764 int i;
46765 for (i = 0; i < sizeof(struct tag); ++i)
46766diff -urNp linux-3.0.7/fs/utimes.c linux-3.0.7/fs/utimes.c
46767--- linux-3.0.7/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
46768+++ linux-3.0.7/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
46769@@ -1,6 +1,7 @@
46770 #include <linux/compiler.h>
46771 #include <linux/file.h>
46772 #include <linux/fs.h>
46773+#include <linux/security.h>
46774 #include <linux/linkage.h>
46775 #include <linux/mount.h>
46776 #include <linux/namei.h>
46777@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46778 goto mnt_drop_write_and_out;
46779 }
46780 }
46781+
46782+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46783+ error = -EACCES;
46784+ goto mnt_drop_write_and_out;
46785+ }
46786+
46787 mutex_lock(&inode->i_mutex);
46788 error = notify_change(path->dentry, &newattrs);
46789 mutex_unlock(&inode->i_mutex);
46790diff -urNp linux-3.0.7/fs/xattr.c linux-3.0.7/fs/xattr.c
46791--- linux-3.0.7/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
46792+++ linux-3.0.7/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
46793@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46794 * Extended attribute SET operations
46795 */
46796 static long
46797-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46798+setxattr(struct path *path, const char __user *name, const void __user *value,
46799 size_t size, int flags)
46800 {
46801 int error;
46802@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
46803 return PTR_ERR(kvalue);
46804 }
46805
46806- error = vfs_setxattr(d, kname, kvalue, size, flags);
46807+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46808+ error = -EACCES;
46809+ goto out;
46810+ }
46811+
46812+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46813+out:
46814 kfree(kvalue);
46815 return error;
46816 }
46817@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46818 return error;
46819 error = mnt_want_write(path.mnt);
46820 if (!error) {
46821- error = setxattr(path.dentry, name, value, size, flags);
46822+ error = setxattr(&path, name, value, size, flags);
46823 mnt_drop_write(path.mnt);
46824 }
46825 path_put(&path);
46826@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46827 return error;
46828 error = mnt_want_write(path.mnt);
46829 if (!error) {
46830- error = setxattr(path.dentry, name, value, size, flags);
46831+ error = setxattr(&path, name, value, size, flags);
46832 mnt_drop_write(path.mnt);
46833 }
46834 path_put(&path);
46835@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46836 const void __user *,value, size_t, size, int, flags)
46837 {
46838 struct file *f;
46839- struct dentry *dentry;
46840 int error = -EBADF;
46841
46842 f = fget(fd);
46843 if (!f)
46844 return error;
46845- dentry = f->f_path.dentry;
46846- audit_inode(NULL, dentry);
46847+ audit_inode(NULL, f->f_path.dentry);
46848 error = mnt_want_write_file(f);
46849 if (!error) {
46850- error = setxattr(dentry, name, value, size, flags);
46851+ error = setxattr(&f->f_path, name, value, size, flags);
46852 mnt_drop_write(f->f_path.mnt);
46853 }
46854 fput(f);
46855diff -urNp linux-3.0.7/fs/xattr_acl.c linux-3.0.7/fs/xattr_acl.c
46856--- linux-3.0.7/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
46857+++ linux-3.0.7/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
46858@@ -17,8 +17,8 @@
46859 struct posix_acl *
46860 posix_acl_from_xattr(const void *value, size_t size)
46861 {
46862- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46863- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46864+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46865+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46866 int count;
46867 struct posix_acl *acl;
46868 struct posix_acl_entry *acl_e;
46869diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c
46870--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
46871+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
46872@@ -128,7 +128,7 @@ xfs_find_handle(
46873 }
46874
46875 error = -EFAULT;
46876- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46877+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46878 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46879 goto out_put;
46880
46881diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c
46882--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
46883+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
46884@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
46885 xfs_fsop_geom_t fsgeo;
46886 int error;
46887
46888+ memset(&fsgeo, 0, sizeof(fsgeo));
46889 error = xfs_fs_geometry(mp, &fsgeo, 3);
46890 if (error)
46891 return -error;
46892diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c
46893--- linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
46894+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
46895@@ -437,7 +437,7 @@ xfs_vn_put_link(
46896 struct nameidata *nd,
46897 void *p)
46898 {
46899- char *s = nd_get_link(nd);
46900+ const char *s = nd_get_link(nd);
46901
46902 if (!IS_ERR(s))
46903 kfree(s);
46904diff -urNp linux-3.0.7/fs/xfs/xfs_bmap.c linux-3.0.7/fs/xfs/xfs_bmap.c
46905--- linux-3.0.7/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
46906+++ linux-3.0.7/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
46907@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
46908 int nmap,
46909 int ret_nmap);
46910 #else
46911-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46912+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46913 #endif /* DEBUG */
46914
46915 STATIC int
46916diff -urNp linux-3.0.7/fs/xfs/xfs_dir2_sf.c linux-3.0.7/fs/xfs/xfs_dir2_sf.c
46917--- linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
46918+++ linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
46919@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
46920 }
46921
46922 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46923- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46924+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46925+ char name[sfep->namelen];
46926+ memcpy(name, sfep->name, sfep->namelen);
46927+ if (filldir(dirent, name, sfep->namelen,
46928+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46929+ *offset = off & 0x7fffffff;
46930+ return 0;
46931+ }
46932+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46933 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46934 *offset = off & 0x7fffffff;
46935 return 0;
46936diff -urNp linux-3.0.7/grsecurity/Kconfig linux-3.0.7/grsecurity/Kconfig
46937--- linux-3.0.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
46938+++ linux-3.0.7/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
46939@@ -0,0 +1,1038 @@
46940+#
46941+# grecurity configuration
46942+#
46943+
46944+menu "Grsecurity"
46945+
46946+config GRKERNSEC
46947+ bool "Grsecurity"
46948+ select CRYPTO
46949+ select CRYPTO_SHA256
46950+ help
46951+ If you say Y here, you will be able to configure many features
46952+ that will enhance the security of your system. It is highly
46953+ recommended that you say Y here and read through the help
46954+ for each option so that you fully understand the features and
46955+ can evaluate their usefulness for your machine.
46956+
46957+choice
46958+ prompt "Security Level"
46959+ depends on GRKERNSEC
46960+ default GRKERNSEC_CUSTOM
46961+
46962+config GRKERNSEC_LOW
46963+ bool "Low"
46964+ select GRKERNSEC_LINK
46965+ select GRKERNSEC_FIFO
46966+ select GRKERNSEC_RANDNET
46967+ select GRKERNSEC_DMESG
46968+ select GRKERNSEC_CHROOT
46969+ select GRKERNSEC_CHROOT_CHDIR
46970+
46971+ help
46972+ If you choose this option, several of the grsecurity options will
46973+ be enabled that will give you greater protection against a number
46974+ of attacks, while assuring that none of your software will have any
46975+ conflicts with the additional security measures. If you run a lot
46976+ of unusual software, or you are having problems with the higher
46977+ security levels, you should say Y here. With this option, the
46978+ following features are enabled:
46979+
46980+ - Linking restrictions
46981+ - FIFO restrictions
46982+ - Restricted dmesg
46983+ - Enforced chdir("/") on chroot
46984+ - Runtime module disabling
46985+
46986+config GRKERNSEC_MEDIUM
46987+ bool "Medium"
46988+ select PAX
46989+ select PAX_EI_PAX
46990+ select PAX_PT_PAX_FLAGS
46991+ select PAX_HAVE_ACL_FLAGS
46992+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
46993+ select GRKERNSEC_CHROOT
46994+ select GRKERNSEC_CHROOT_SYSCTL
46995+ select GRKERNSEC_LINK
46996+ select GRKERNSEC_FIFO
46997+ select GRKERNSEC_DMESG
46998+ select GRKERNSEC_RANDNET
46999+ select GRKERNSEC_FORKFAIL
47000+ select GRKERNSEC_TIME
47001+ select GRKERNSEC_SIGNAL
47002+ select GRKERNSEC_CHROOT
47003+ select GRKERNSEC_CHROOT_UNIX
47004+ select GRKERNSEC_CHROOT_MOUNT
47005+ select GRKERNSEC_CHROOT_PIVOT
47006+ select GRKERNSEC_CHROOT_DOUBLE
47007+ select GRKERNSEC_CHROOT_CHDIR
47008+ select GRKERNSEC_CHROOT_MKNOD
47009+ select GRKERNSEC_PROC
47010+ select GRKERNSEC_PROC_USERGROUP
47011+ select PAX_RANDUSTACK
47012+ select PAX_ASLR
47013+ select PAX_RANDMMAP
47014+ select PAX_REFCOUNT if (X86 || SPARC64)
47015+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47016+
47017+ help
47018+ If you say Y here, several features in addition to those included
47019+ in the low additional security level will be enabled. These
47020+ features provide even more security to your system, though in rare
47021+ cases they may be incompatible with very old or poorly written
47022+ software. If you enable this option, make sure that your auth
47023+ service (identd) is running as gid 1001. With this option,
47024+ the following features (in addition to those provided in the
47025+ low additional security level) will be enabled:
47026+
47027+ - Failed fork logging
47028+ - Time change logging
47029+ - Signal logging
47030+ - Deny mounts in chroot
47031+ - Deny double chrooting
47032+ - Deny sysctl writes in chroot
47033+ - Deny mknod in chroot
47034+ - Deny access to abstract AF_UNIX sockets out of chroot
47035+ - Deny pivot_root in chroot
47036+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
47037+ - /proc restrictions with special GID set to 10 (usually wheel)
47038+ - Address Space Layout Randomization (ASLR)
47039+ - Prevent exploitation of most refcount overflows
47040+ - Bounds checking of copying between the kernel and userland
47041+
47042+config GRKERNSEC_HIGH
47043+ bool "High"
47044+ select GRKERNSEC_LINK
47045+ select GRKERNSEC_FIFO
47046+ select GRKERNSEC_DMESG
47047+ select GRKERNSEC_FORKFAIL
47048+ select GRKERNSEC_TIME
47049+ select GRKERNSEC_SIGNAL
47050+ select GRKERNSEC_CHROOT
47051+ select GRKERNSEC_CHROOT_SHMAT
47052+ select GRKERNSEC_CHROOT_UNIX
47053+ select GRKERNSEC_CHROOT_MOUNT
47054+ select GRKERNSEC_CHROOT_FCHDIR
47055+ select GRKERNSEC_CHROOT_PIVOT
47056+ select GRKERNSEC_CHROOT_DOUBLE
47057+ select GRKERNSEC_CHROOT_CHDIR
47058+ select GRKERNSEC_CHROOT_MKNOD
47059+ select GRKERNSEC_CHROOT_CAPS
47060+ select GRKERNSEC_CHROOT_SYSCTL
47061+ select GRKERNSEC_CHROOT_FINDTASK
47062+ select GRKERNSEC_SYSFS_RESTRICT
47063+ select GRKERNSEC_PROC
47064+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47065+ select GRKERNSEC_HIDESYM
47066+ select GRKERNSEC_BRUTE
47067+ select GRKERNSEC_PROC_USERGROUP
47068+ select GRKERNSEC_KMEM
47069+ select GRKERNSEC_RESLOG
47070+ select GRKERNSEC_RANDNET
47071+ select GRKERNSEC_PROC_ADD
47072+ select GRKERNSEC_CHROOT_CHMOD
47073+ select GRKERNSEC_CHROOT_NICE
47074+ select GRKERNSEC_AUDIT_MOUNT
47075+ select GRKERNSEC_MODHARDEN if (MODULES)
47076+ select GRKERNSEC_HARDEN_PTRACE
47077+ select GRKERNSEC_VM86 if (X86_32)
47078+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47079+ select PAX
47080+ select PAX_RANDUSTACK
47081+ select PAX_ASLR
47082+ select PAX_RANDMMAP
47083+ select PAX_NOEXEC
47084+ select PAX_MPROTECT
47085+ select PAX_EI_PAX
47086+ select PAX_PT_PAX_FLAGS
47087+ select PAX_HAVE_ACL_FLAGS
47088+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47089+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47090+ select PAX_RANDKSTACK if (X86_TSC && X86)
47091+ select PAX_SEGMEXEC if (X86_32)
47092+ select PAX_PAGEEXEC
47093+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47094+ select PAX_EMUTRAMP if (PARISC)
47095+ select PAX_EMUSIGRT if (PARISC)
47096+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47097+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47098+ select PAX_REFCOUNT if (X86 || SPARC64)
47099+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47100+ help
47101+ If you say Y here, many of the features of grsecurity will be
47102+ enabled, which will protect you against many kinds of attacks
47103+ against your system. The heightened security comes at a cost
47104+ of an increased chance of incompatibilities with rare software
47105+ on your machine. Since this security level enables PaX, you should
47106+ view <http://pax.grsecurity.net> and read about the PaX
47107+ project. While you are there, download chpax and run it on
47108+ binaries that cause problems with PaX. Also remember that
47109+ since the /proc restrictions are enabled, you must run your
47110+ identd as gid 1001. This security level enables the following
47111+ features in addition to those listed in the low and medium
47112+ security levels:
47113+
47114+ - Additional /proc restrictions
47115+ - Chmod restrictions in chroot
47116+ - No signals, ptrace, or viewing of processes outside of chroot
47117+ - Capability restrictions in chroot
47118+ - Deny fchdir out of chroot
47119+ - Priority restrictions in chroot
47120+ - Segmentation-based implementation of PaX
47121+ - Mprotect restrictions
47122+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47123+ - Kernel stack randomization
47124+ - Mount/unmount/remount logging
47125+ - Kernel symbol hiding
47126+ - Prevention of memory exhaustion-based exploits
47127+ - Hardening of module auto-loading
47128+ - Ptrace restrictions
47129+ - Restricted vm86 mode
47130+ - Restricted sysfs/debugfs
47131+ - Active kernel exploit response
47132+
47133+config GRKERNSEC_CUSTOM
47134+ bool "Custom"
47135+ help
47136+ If you say Y here, you will be able to configure every grsecurity
47137+ option, which allows you to enable many more features that aren't
47138+ covered in the basic security levels. These additional features
47139+ include TPE, socket restrictions, and the sysctl system for
47140+ grsecurity. It is advised that you read through the help for
47141+ each option to determine its usefulness in your situation.
47142+
47143+endchoice
47144+
47145+menu "Address Space Protection"
47146+depends on GRKERNSEC
47147+
47148+config GRKERNSEC_KMEM
47149+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
47150+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47151+ help
47152+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47153+ be written to via mmap or otherwise to modify the running kernel.
47154+ /dev/port will also not be allowed to be opened. If you have module
47155+ support disabled, enabling this will close up four ways that are
47156+ currently used to insert malicious code into the running kernel.
47157+ Even with all these features enabled, we still highly recommend that
47158+ you use the RBAC system, as it is still possible for an attacker to
47159+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47160+ If you are not using XFree86, you may be able to stop this additional
47161+ case by enabling the 'Disable privileged I/O' option. Though nothing
47162+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47163+ but only to video memory, which is the only writing we allow in this
47164+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47165+ not be allowed to mprotect it with PROT_WRITE later.
47166+ It is highly recommended that you say Y here if you meet all the
47167+ conditions above.
47168+
47169+config GRKERNSEC_VM86
47170+ bool "Restrict VM86 mode"
47171+ depends on X86_32
47172+
47173+ help
47174+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47175+ make use of a special execution mode on 32bit x86 processors called
47176+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47177+ video cards and will still work with this option enabled. The purpose
47178+ of the option is to prevent exploitation of emulation errors in
47179+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47180+ Nearly all users should be able to enable this option.
47181+
47182+config GRKERNSEC_IO
47183+ bool "Disable privileged I/O"
47184+ depends on X86
47185+ select RTC_CLASS
47186+ select RTC_INTF_DEV
47187+ select RTC_DRV_CMOS
47188+
47189+ help
47190+ If you say Y here, all ioperm and iopl calls will return an error.
47191+ Ioperm and iopl can be used to modify the running kernel.
47192+ Unfortunately, some programs need this access to operate properly,
47193+ the most notable of which are XFree86 and hwclock. hwclock can be
47194+ remedied by having RTC support in the kernel, so real-time
47195+ clock support is enabled if this option is enabled, to ensure
47196+ that hwclock operates correctly. XFree86 still will not
47197+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47198+ IF YOU USE XFree86. If you use XFree86 and you still want to
47199+ protect your kernel against modification, use the RBAC system.
47200+
47201+config GRKERNSEC_PROC_MEMMAP
47202+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47203+ default y if (PAX_NOEXEC || PAX_ASLR)
47204+ depends on PAX_NOEXEC || PAX_ASLR
47205+ help
47206+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47207+ give no information about the addresses of its mappings if
47208+ PaX features that rely on random addresses are enabled on the task.
47209+ If you use PaX it is greatly recommended that you say Y here as it
47210+ closes up a hole that makes the full ASLR useless for suid
47211+ binaries.
47212+
47213+config GRKERNSEC_BRUTE
47214+ bool "Deter exploit bruteforcing"
47215+ help
47216+ If you say Y here, attempts to bruteforce exploits against forking
47217+ daemons such as apache or sshd, as well as against suid/sgid binaries
47218+ will be deterred. When a child of a forking daemon is killed by PaX
47219+ or crashes due to an illegal instruction or other suspicious signal,
47220+ the parent process will be delayed 30 seconds upon every subsequent
47221+ fork until the administrator is able to assess the situation and
47222+ restart the daemon.
47223+ In the suid/sgid case, the attempt is logged, the user has all their
47224+ processes terminated, and they are prevented from executing any further
47225+ processes for 15 minutes.
47226+ It is recommended that you also enable signal logging in the auditing
47227+ section so that logs are generated when a process triggers a suspicious
47228+ signal.
47229+ If the sysctl option is enabled, a sysctl option with name
47230+ "deter_bruteforce" is created.
47231+
47232+
47233+config GRKERNSEC_MODHARDEN
47234+ bool "Harden module auto-loading"
47235+ depends on MODULES
47236+ help
47237+ If you say Y here, module auto-loading in response to use of some
47238+ feature implemented by an unloaded module will be restricted to
47239+ root users. Enabling this option helps defend against attacks
47240+ by unprivileged users who abuse the auto-loading behavior to
47241+ cause a vulnerable module to load that is then exploited.
47242+
47243+ If this option prevents a legitimate use of auto-loading for a
47244+ non-root user, the administrator can execute modprobe manually
47245+ with the exact name of the module mentioned in the alert log.
47246+ Alternatively, the administrator can add the module to the list
47247+ of modules loaded at boot by modifying init scripts.
47248+
47249+ Modification of init scripts will most likely be needed on
47250+ Ubuntu servers with encrypted home directory support enabled,
47251+ as the first non-root user logging in will cause the ecb(aes),
47252+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47253+
47254+config GRKERNSEC_HIDESYM
47255+ bool "Hide kernel symbols"
47256+ help
47257+ If you say Y here, getting information on loaded modules, and
47258+ displaying all kernel symbols through a syscall will be restricted
47259+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47260+ /proc/kallsyms will be restricted to the root user. The RBAC
47261+ system can hide that entry even from root.
47262+
47263+ This option also prevents leaking of kernel addresses through
47264+ several /proc entries.
47265+
47266+ Note that this option is only effective provided the following
47267+ conditions are met:
47268+ 1) The kernel using grsecurity is not precompiled by some distribution
47269+ 2) You have also enabled GRKERNSEC_DMESG
47270+ 3) You are using the RBAC system and hiding other files such as your
47271+ kernel image and System.map. Alternatively, enabling this option
47272+ causes the permissions on /boot, /lib/modules, and the kernel
47273+ source directory to change at compile time to prevent
47274+ reading by non-root users.
47275+ If the above conditions are met, this option will aid in providing a
47276+ useful protection against local kernel exploitation of overflows
47277+ and arbitrary read/write vulnerabilities.
47278+
47279+config GRKERNSEC_KERN_LOCKOUT
47280+ bool "Active kernel exploit response"
47281+ depends on X86 || ARM || PPC || SPARC
47282+ help
47283+ If you say Y here, when a PaX alert is triggered due to suspicious
47284+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47285+ or an OOPs occurs due to bad memory accesses, instead of just
47286+ terminating the offending process (and potentially allowing
47287+ a subsequent exploit from the same user), we will take one of two
47288+ actions:
47289+ If the user was root, we will panic the system
47290+ If the user was non-root, we will log the attempt, terminate
47291+ all processes owned by the user, then prevent them from creating
47292+ any new processes until the system is restarted
47293+ This deters repeated kernel exploitation/bruteforcing attempts
47294+ and is useful for later forensics.
47295+
47296+endmenu
47297+menu "Role Based Access Control Options"
47298+depends on GRKERNSEC
47299+
47300+config GRKERNSEC_RBAC_DEBUG
47301+ bool
47302+
47303+config GRKERNSEC_NO_RBAC
47304+ bool "Disable RBAC system"
47305+ help
47306+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47307+ preventing the RBAC system from being enabled. You should only say Y
47308+ here if you have no intention of using the RBAC system, so as to prevent
47309+ an attacker with root access from misusing the RBAC system to hide files
47310+ and processes when loadable module support and /dev/[k]mem have been
47311+ locked down.
47312+
47313+config GRKERNSEC_ACL_HIDEKERN
47314+ bool "Hide kernel processes"
47315+ help
47316+ If you say Y here, all kernel threads will be hidden to all
47317+ processes but those whose subject has the "view hidden processes"
47318+ flag.
47319+
47320+config GRKERNSEC_ACL_MAXTRIES
47321+ int "Maximum tries before password lockout"
47322+ default 3
47323+ help
47324+ This option enforces the maximum number of times a user can attempt
47325+ to authorize themselves with the grsecurity RBAC system before being
47326+ denied the ability to attempt authorization again for a specified time.
47327+ The lower the number, the harder it will be to brute-force a password.
47328+
47329+config GRKERNSEC_ACL_TIMEOUT
47330+ int "Time to wait after max password tries, in seconds"
47331+ default 30
47332+ help
47333+ This option specifies the time the user must wait after attempting to
47334+ authorize to the RBAC system with the maximum number of invalid
47335+ passwords. The higher the number, the harder it will be to brute-force
47336+ a password.
47337+
47338+endmenu
47339+menu "Filesystem Protections"
47340+depends on GRKERNSEC
47341+
47342+config GRKERNSEC_PROC
47343+ bool "Proc restrictions"
47344+ help
47345+ If you say Y here, the permissions of the /proc filesystem
47346+ will be altered to enhance system security and privacy. You MUST
47347+ choose either a user only restriction or a user and group restriction.
47348+ Depending upon the option you choose, you can either restrict users to
47349+ see only the processes they themselves run, or choose a group that can
47350+ view all processes and files normally restricted to root if you choose
47351+ the "restrict to user only" option. NOTE: If you're running identd as
47352+ a non-root user, you will have to run it as the group you specify here.
47353+
47354+config GRKERNSEC_PROC_USER
47355+ bool "Restrict /proc to user only"
47356+ depends on GRKERNSEC_PROC
47357+ help
47358+ If you say Y here, non-root users will only be able to view their own
47359+ processes, and restricts them from viewing network-related information,
47360+ and viewing kernel symbol and module information.
47361+
47362+config GRKERNSEC_PROC_USERGROUP
47363+ bool "Allow special group"
47364+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47365+ help
47366+ If you say Y here, you will be able to select a group that will be
47367+ able to view all processes and network-related information. If you've
47368+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47369+ remain hidden. This option is useful if you want to run identd as
47370+ a non-root user.
47371+
47372+config GRKERNSEC_PROC_GID
47373+ int "GID for special group"
47374+ depends on GRKERNSEC_PROC_USERGROUP
47375+ default 1001
47376+
47377+config GRKERNSEC_PROC_ADD
47378+ bool "Additional restrictions"
47379+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47380+ help
47381+ If you say Y here, additional restrictions will be placed on
47382+ /proc that keep normal users from viewing device information and
47383+ slabinfo information that could be useful for exploits.
47384+
47385+config GRKERNSEC_LINK
47386+ bool "Linking restrictions"
47387+ help
47388+ If you say Y here, /tmp race exploits will be prevented, since users
47389+ will no longer be able to follow symlinks owned by other users in
47390+ world-writable +t directories (e.g. /tmp), unless the owner of the
47391+ symlink is the owner of the directory. users will also not be
47392+ able to hardlink to files they do not own. If the sysctl option is
47393+ enabled, a sysctl option with name "linking_restrictions" is created.
47394+
47395+config GRKERNSEC_FIFO
47396+ bool "FIFO restrictions"
47397+ help
47398+ If you say Y here, users will not be able to write to FIFOs they don't
47399+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47400+ the FIFO is the same owner of the directory it's held in. If the sysctl
47401+ option is enabled, a sysctl option with name "fifo_restrictions" is
47402+ created.
47403+
47404+config GRKERNSEC_SYSFS_RESTRICT
47405+ bool "Sysfs/debugfs restriction"
47406+ depends on SYSFS
47407+ help
47408+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47409+ any filesystem normally mounted under it (e.g. debugfs) will only
47410+ be accessible by root. These filesystems generally provide access
47411+ to hardware and debug information that isn't appropriate for unprivileged
47412+ users of the system. Sysfs and debugfs have also become a large source
47413+ of new vulnerabilities, ranging from infoleaks to local compromise.
47414+ There has been very little oversight with an eye toward security involved
47415+ in adding new exporters of information to these filesystems, so their
47416+ use is discouraged.
47417+ This option is equivalent to a chmod 0700 of the mount paths.
47418+
47419+config GRKERNSEC_ROFS
47420+ bool "Runtime read-only mount protection"
47421+ help
47422+ If you say Y here, a sysctl option with name "romount_protect" will
47423+ be created. By setting this option to 1 at runtime, filesystems
47424+ will be protected in the following ways:
47425+ * No new writable mounts will be allowed
47426+ * Existing read-only mounts won't be able to be remounted read/write
47427+ * Write operations will be denied on all block devices
47428+ This option acts independently of grsec_lock: once it is set to 1,
47429+ it cannot be turned off. Therefore, please be mindful of the resulting
47430+ behavior if this option is enabled in an init script on a read-only
47431+ filesystem. This feature is mainly intended for secure embedded systems.
47432+
47433+config GRKERNSEC_CHROOT
47434+ bool "Chroot jail restrictions"
47435+ help
47436+ If you say Y here, you will be able to choose several options that will
47437+ make breaking out of a chrooted jail much more difficult. If you
47438+ encounter no software incompatibilities with the following options, it
47439+ is recommended that you enable each one.
47440+
47441+config GRKERNSEC_CHROOT_MOUNT
47442+ bool "Deny mounts"
47443+ depends on GRKERNSEC_CHROOT
47444+ help
47445+ If you say Y here, processes inside a chroot will not be able to
47446+ mount or remount filesystems. If the sysctl option is enabled, a
47447+ sysctl option with name "chroot_deny_mount" is created.
47448+
47449+config GRKERNSEC_CHROOT_DOUBLE
47450+ bool "Deny double-chroots"
47451+ depends on GRKERNSEC_CHROOT
47452+ help
47453+ If you say Y here, processes inside a chroot will not be able to chroot
47454+ again outside the chroot. This is a widely used method of breaking
47455+ out of a chroot jail and should not be allowed. If the sysctl
47456+ option is enabled, a sysctl option with name
47457+ "chroot_deny_chroot" is created.
47458+
47459+config GRKERNSEC_CHROOT_PIVOT
47460+ bool "Deny pivot_root in chroot"
47461+ depends on GRKERNSEC_CHROOT
47462+ help
47463+ If you say Y here, processes inside a chroot will not be able to use
47464+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47465+ works similar to chroot in that it changes the root filesystem. This
47466+ function could be misused in a chrooted process to attempt to break out
47467+ of the chroot, and therefore should not be allowed. If the sysctl
47468+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47469+ created.
47470+
47471+config GRKERNSEC_CHROOT_CHDIR
47472+ bool "Enforce chdir(\"/\") on all chroots"
47473+ depends on GRKERNSEC_CHROOT
47474+ help
47475+ If you say Y here, the current working directory of all newly-chrooted
47476+ applications will be set to the the root directory of the chroot.
47477+ The man page on chroot(2) states:
47478+ Note that this call does not change the current working
47479+ directory, so that `.' can be outside the tree rooted at
47480+ `/'. In particular, the super-user can escape from a
47481+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47482+
47483+ It is recommended that you say Y here, since it's not known to break
47484+ any software. If the sysctl option is enabled, a sysctl option with
47485+ name "chroot_enforce_chdir" is created.
47486+
47487+config GRKERNSEC_CHROOT_CHMOD
47488+ bool "Deny (f)chmod +s"
47489+ depends on GRKERNSEC_CHROOT
47490+ help
47491+ If you say Y here, processes inside a chroot will not be able to chmod
47492+ or fchmod files to make them have suid or sgid bits. This protects
47493+ against another published method of breaking a chroot. If the sysctl
47494+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47495+ created.
47496+
47497+config GRKERNSEC_CHROOT_FCHDIR
47498+ bool "Deny fchdir out of chroot"
47499+ depends on GRKERNSEC_CHROOT
47500+ help
47501+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47502+ to a file descriptor of the chrooting process that points to a directory
47503+ outside the filesystem will be stopped. If the sysctl option
47504+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47505+
47506+config GRKERNSEC_CHROOT_MKNOD
47507+ bool "Deny mknod"
47508+ depends on GRKERNSEC_CHROOT
47509+ help
47510+ If you say Y here, processes inside a chroot will not be allowed to
47511+ mknod. The problem with using mknod inside a chroot is that it
47512+ would allow an attacker to create a device entry that is the same
47513+ as one on the physical root of your system, which could range from
47514+ anything from the console device to a device for your harddrive (which
47515+ they could then use to wipe the drive or steal data). It is recommended
47516+ that you say Y here, unless you run into software incompatibilities.
47517+ If the sysctl option is enabled, a sysctl option with name
47518+ "chroot_deny_mknod" is created.
47519+
47520+config GRKERNSEC_CHROOT_SHMAT
47521+ bool "Deny shmat() out of chroot"
47522+ depends on GRKERNSEC_CHROOT
47523+ help
47524+ If you say Y here, processes inside a chroot will not be able to attach
47525+ to shared memory segments that were created outside of the chroot jail.
47526+ It is recommended that you say Y here. If the sysctl option is enabled,
47527+ a sysctl option with name "chroot_deny_shmat" is created.
47528+
47529+config GRKERNSEC_CHROOT_UNIX
47530+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47531+ depends on GRKERNSEC_CHROOT
47532+ help
47533+ If you say Y here, processes inside a chroot will not be able to
47534+ connect to abstract (meaning not belonging to a filesystem) Unix
47535+ domain sockets that were bound outside of a chroot. It is recommended
47536+ that you say Y here. If the sysctl option is enabled, a sysctl option
47537+ with name "chroot_deny_unix" is created.
47538+
47539+config GRKERNSEC_CHROOT_FINDTASK
47540+ bool "Protect outside processes"
47541+ depends on GRKERNSEC_CHROOT
47542+ help
47543+ If you say Y here, processes inside a chroot will not be able to
47544+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47545+ getsid, or view any process outside of the chroot. If the sysctl
47546+ option is enabled, a sysctl option with name "chroot_findtask" is
47547+ created.
47548+
47549+config GRKERNSEC_CHROOT_NICE
47550+ bool "Restrict priority changes"
47551+ depends on GRKERNSEC_CHROOT
47552+ help
47553+ If you say Y here, processes inside a chroot will not be able to raise
47554+ the priority of processes in the chroot, or alter the priority of
47555+ processes outside the chroot. This provides more security than simply
47556+ removing CAP_SYS_NICE from the process' capability set. If the
47557+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47558+ is created.
47559+
47560+config GRKERNSEC_CHROOT_SYSCTL
47561+ bool "Deny sysctl writes"
47562+ depends on GRKERNSEC_CHROOT
47563+ help
47564+ If you say Y here, an attacker in a chroot will not be able to
47565+ write to sysctl entries, either by sysctl(2) or through a /proc
47566+ interface. It is strongly recommended that you say Y here. If the
47567+ sysctl option is enabled, a sysctl option with name
47568+ "chroot_deny_sysctl" is created.
47569+
47570+config GRKERNSEC_CHROOT_CAPS
47571+ bool "Capability restrictions"
47572+ depends on GRKERNSEC_CHROOT
47573+ help
47574+ If you say Y here, the capabilities on all processes within a
47575+ chroot jail will be lowered to stop module insertion, raw i/o,
47576+ system and net admin tasks, rebooting the system, modifying immutable
47577+ files, modifying IPC owned by another, and changing the system time.
47578+ This is left an option because it can break some apps. Disable this
47579+ if your chrooted apps are having problems performing those kinds of
47580+ tasks. If the sysctl option is enabled, a sysctl option with
47581+ name "chroot_caps" is created.
47582+
47583+endmenu
47584+menu "Kernel Auditing"
47585+depends on GRKERNSEC
47586+
47587+config GRKERNSEC_AUDIT_GROUP
47588+ bool "Single group for auditing"
47589+ help
47590+ If you say Y here, the exec, chdir, and (un)mount logging features
47591+ will only operate on a group you specify. This option is recommended
47592+ if you only want to watch certain users instead of having a large
47593+ amount of logs from the entire system. If the sysctl option is enabled,
47594+ a sysctl option with name "audit_group" is created.
47595+
47596+config GRKERNSEC_AUDIT_GID
47597+ int "GID for auditing"
47598+ depends on GRKERNSEC_AUDIT_GROUP
47599+ default 1007
47600+
47601+config GRKERNSEC_EXECLOG
47602+ bool "Exec logging"
47603+ help
47604+ If you say Y here, all execve() calls will be logged (since the
47605+ other exec*() calls are frontends to execve(), all execution
47606+ will be logged). Useful for shell-servers that like to keep track
47607+ of their users. If the sysctl option is enabled, a sysctl option with
47608+ name "exec_logging" is created.
47609+ WARNING: This option when enabled will produce a LOT of logs, especially
47610+ on an active system.
47611+
47612+config GRKERNSEC_RESLOG
47613+ bool "Resource logging"
47614+ help
47615+ If you say Y here, all attempts to overstep resource limits will
47616+ be logged with the resource name, the requested size, and the current
47617+ limit. It is highly recommended that you say Y here. If the sysctl
47618+ option is enabled, a sysctl option with name "resource_logging" is
47619+ created. If the RBAC system is enabled, the sysctl value is ignored.
47620+
47621+config GRKERNSEC_CHROOT_EXECLOG
47622+ bool "Log execs within chroot"
47623+ help
47624+ If you say Y here, all executions inside a chroot jail will be logged
47625+ to syslog. This can cause a large amount of logs if certain
47626+ applications (eg. djb's daemontools) are installed on the system, and
47627+ is therefore left as an option. If the sysctl option is enabled, a
47628+ sysctl option with name "chroot_execlog" is created.
47629+
47630+config GRKERNSEC_AUDIT_PTRACE
47631+ bool "Ptrace logging"
47632+ help
47633+ If you say Y here, all attempts to attach to a process via ptrace
47634+ will be logged. If the sysctl option is enabled, a sysctl option
47635+ with name "audit_ptrace" is created.
47636+
47637+config GRKERNSEC_AUDIT_CHDIR
47638+ bool "Chdir logging"
47639+ help
47640+ If you say Y here, all chdir() calls will be logged. If the sysctl
47641+ option is enabled, a sysctl option with name "audit_chdir" is created.
47642+
47643+config GRKERNSEC_AUDIT_MOUNT
47644+ bool "(Un)Mount logging"
47645+ help
47646+ If you say Y here, all mounts and unmounts will be logged. If the
47647+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47648+ created.
47649+
47650+config GRKERNSEC_SIGNAL
47651+ bool "Signal logging"
47652+ help
47653+ If you say Y here, certain important signals will be logged, such as
47654+ SIGSEGV, which will as a result inform you of when a error in a program
47655+ occurred, which in some cases could mean a possible exploit attempt.
47656+ If the sysctl option is enabled, a sysctl option with name
47657+ "signal_logging" is created.
47658+
47659+config GRKERNSEC_FORKFAIL
47660+ bool "Fork failure logging"
47661+ help
47662+ If you say Y here, all failed fork() attempts will be logged.
47663+ This could suggest a fork bomb, or someone attempting to overstep
47664+ their process limit. If the sysctl option is enabled, a sysctl option
47665+ with name "forkfail_logging" is created.
47666+
47667+config GRKERNSEC_TIME
47668+ bool "Time change logging"
47669+ help
47670+ If you say Y here, any changes of the system clock will be logged.
47671+ If the sysctl option is enabled, a sysctl option with name
47672+ "timechange_logging" is created.
47673+
47674+config GRKERNSEC_PROC_IPADDR
47675+ bool "/proc/<pid>/ipaddr support"
47676+ help
47677+ If you say Y here, a new entry will be added to each /proc/<pid>
47678+ directory that contains the IP address of the person using the task.
47679+ The IP is carried across local TCP and AF_UNIX stream sockets.
47680+ This information can be useful for IDS/IPSes to perform remote response
47681+ to a local attack. The entry is readable by only the owner of the
47682+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47683+ the RBAC system), and thus does not create privacy concerns.
47684+
47685+config GRKERNSEC_RWXMAP_LOG
47686+ bool 'Denied RWX mmap/mprotect logging'
47687+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47688+ help
47689+ If you say Y here, calls to mmap() and mprotect() with explicit
47690+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47691+ denied by the PAX_MPROTECT feature. If the sysctl option is
47692+ enabled, a sysctl option with name "rwxmap_logging" is created.
47693+
47694+config GRKERNSEC_AUDIT_TEXTREL
47695+ bool 'ELF text relocations logging (READ HELP)'
47696+ depends on PAX_MPROTECT
47697+ help
47698+ If you say Y here, text relocations will be logged with the filename
47699+ of the offending library or binary. The purpose of the feature is
47700+ to help Linux distribution developers get rid of libraries and
47701+ binaries that need text relocations which hinder the future progress
47702+ of PaX. Only Linux distribution developers should say Y here, and
47703+ never on a production machine, as this option creates an information
47704+ leak that could aid an attacker in defeating the randomization of
47705+ a single memory region. If the sysctl option is enabled, a sysctl
47706+ option with name "audit_textrel" is created.
47707+
47708+endmenu
47709+
47710+menu "Executable Protections"
47711+depends on GRKERNSEC
47712+
47713+config GRKERNSEC_DMESG
47714+ bool "Dmesg(8) restriction"
47715+ help
47716+ If you say Y here, non-root users will not be able to use dmesg(8)
47717+ to view up to the last 4kb of messages in the kernel's log buffer.
47718+ The kernel's log buffer often contains kernel addresses and other
47719+ identifying information useful to an attacker in fingerprinting a
47720+ system for a targeted exploit.
47721+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47722+ created.
47723+
47724+config GRKERNSEC_HARDEN_PTRACE
47725+ bool "Deter ptrace-based process snooping"
47726+ help
47727+ If you say Y here, TTY sniffers and other malicious monitoring
47728+ programs implemented through ptrace will be defeated. If you
47729+ have been using the RBAC system, this option has already been
47730+ enabled for several years for all users, with the ability to make
47731+ fine-grained exceptions.
47732+
47733+ This option only affects the ability of non-root users to ptrace
47734+ processes that are not a descendent of the ptracing process.
47735+ This means that strace ./binary and gdb ./binary will still work,
47736+ but attaching to arbitrary processes will not. If the sysctl
47737+ option is enabled, a sysctl option with name "harden_ptrace" is
47738+ created.
47739+
47740+config GRKERNSEC_TPE
47741+ bool "Trusted Path Execution (TPE)"
47742+ help
47743+ If you say Y here, you will be able to choose a gid to add to the
47744+ supplementary groups of users you want to mark as "untrusted."
47745+ These users will not be able to execute any files that are not in
47746+ root-owned directories writable only by root. If the sysctl option
47747+ is enabled, a sysctl option with name "tpe" is created.
47748+
47749+config GRKERNSEC_TPE_ALL
47750+ bool "Partially restrict all non-root users"
47751+ depends on GRKERNSEC_TPE
47752+ help
47753+ If you say Y here, all non-root users will be covered under
47754+ a weaker TPE restriction. This is separate from, and in addition to,
47755+ the main TPE options that you have selected elsewhere. Thus, if a
47756+ "trusted" GID is chosen, this restriction applies to even that GID.
47757+ Under this restriction, all non-root users will only be allowed to
47758+ execute files in directories they own that are not group or
47759+ world-writable, or in directories owned by root and writable only by
47760+ root. If the sysctl option is enabled, a sysctl option with name
47761+ "tpe_restrict_all" is created.
47762+
47763+config GRKERNSEC_TPE_INVERT
47764+ bool "Invert GID option"
47765+ depends on GRKERNSEC_TPE
47766+ help
47767+ If you say Y here, the group you specify in the TPE configuration will
47768+ decide what group TPE restrictions will be *disabled* for. This
47769+ option is useful if you want TPE restrictions to be applied to most
47770+ users on the system. If the sysctl option is enabled, a sysctl option
47771+ with name "tpe_invert" is created. Unlike other sysctl options, this
47772+ entry will default to on for backward-compatibility.
47773+
47774+config GRKERNSEC_TPE_GID
47775+ int "GID for untrusted users"
47776+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47777+ default 1005
47778+ help
47779+ Setting this GID determines what group TPE restrictions will be
47780+ *enabled* for. If the sysctl option is enabled, a sysctl option
47781+ with name "tpe_gid" is created.
47782+
47783+config GRKERNSEC_TPE_GID
47784+ int "GID for trusted users"
47785+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47786+ default 1005
47787+ help
47788+ Setting this GID determines what group TPE restrictions will be
47789+ *disabled* for. If the sysctl option is enabled, a sysctl option
47790+ with name "tpe_gid" is created.
47791+
47792+endmenu
47793+menu "Network Protections"
47794+depends on GRKERNSEC
47795+
47796+config GRKERNSEC_RANDNET
47797+ bool "Larger entropy pools"
47798+ help
47799+ If you say Y here, the entropy pools used for many features of Linux
47800+ and grsecurity will be doubled in size. Since several grsecurity
47801+ features use additional randomness, it is recommended that you say Y
47802+ here. Saying Y here has a similar effect as modifying
47803+ /proc/sys/kernel/random/poolsize.
47804+
47805+config GRKERNSEC_BLACKHOLE
47806+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47807+ depends on NET
47808+ help
47809+ If you say Y here, neither TCP resets nor ICMP
47810+ destination-unreachable packets will be sent in response to packets
47811+ sent to ports for which no associated listening process exists.
47812+ This feature supports both IPV4 and IPV6 and exempts the
47813+ loopback interface from blackholing. Enabling this feature
47814+ makes a host more resilient to DoS attacks and reduces network
47815+ visibility against scanners.
47816+
47817+ The blackhole feature as-implemented is equivalent to the FreeBSD
47818+ blackhole feature, as it prevents RST responses to all packets, not
47819+ just SYNs. Under most application behavior this causes no
47820+ problems, but applications (like haproxy) may not close certain
47821+ connections in a way that cleanly terminates them on the remote
47822+ end, leaving the remote host in LAST_ACK state. Because of this
47823+ side-effect and to prevent intentional LAST_ACK DoSes, this
47824+ feature also adds automatic mitigation against such attacks.
47825+ The mitigation drastically reduces the amount of time a socket
47826+ can spend in LAST_ACK state. If you're using haproxy and not
47827+ all servers it connects to have this option enabled, consider
47828+ disabling this feature on the haproxy host.
47829+
47830+ If the sysctl option is enabled, two sysctl options with names
47831+ "ip_blackhole" and "lastack_retries" will be created.
47832+ While "ip_blackhole" takes the standard zero/non-zero on/off
47833+ toggle, "lastack_retries" uses the same kinds of values as
47834+ "tcp_retries1" and "tcp_retries2". The default value of 4
47835+ prevents a socket from lasting more than 45 seconds in LAST_ACK
47836+ state.
47837+
47838+config GRKERNSEC_SOCKET
47839+ bool "Socket restrictions"
47840+ depends on NET
47841+ help
47842+ If you say Y here, you will be able to choose from several options.
47843+ If you assign a GID on your system and add it to the supplementary
47844+ groups of users you want to restrict socket access to, this patch
47845+ will perform up to three things, based on the option(s) you choose.
47846+
47847+config GRKERNSEC_SOCKET_ALL
47848+ bool "Deny any sockets to group"
47849+ depends on GRKERNSEC_SOCKET
47850+ help
47851+ If you say Y here, you will be able to choose a GID of whose users will
47852+ be unable to connect to other hosts from your machine or run server
47853+ applications from your machine. If the sysctl option is enabled, a
47854+ sysctl option with name "socket_all" is created.
47855+
47856+config GRKERNSEC_SOCKET_ALL_GID
47857+ int "GID to deny all sockets for"
47858+ depends on GRKERNSEC_SOCKET_ALL
47859+ default 1004
47860+ help
47861+ Here you can choose the GID to disable socket access for. Remember to
47862+ add the users you want socket access disabled for to the GID
47863+ specified here. If the sysctl option is enabled, a sysctl option
47864+ with name "socket_all_gid" is created.
47865+
47866+config GRKERNSEC_SOCKET_CLIENT
47867+ bool "Deny client sockets to group"
47868+ depends on GRKERNSEC_SOCKET
47869+ help
47870+ If you say Y here, you will be able to choose a GID of whose users will
47871+ be unable to connect to other hosts from your machine, but will be
47872+ able to run servers. If this option is enabled, all users in the group
47873+ you specify will have to use passive mode when initiating ftp transfers
47874+ from the shell on your machine. If the sysctl option is enabled, a
47875+ sysctl option with name "socket_client" is created.
47876+
47877+config GRKERNSEC_SOCKET_CLIENT_GID
47878+ int "GID to deny client sockets for"
47879+ depends on GRKERNSEC_SOCKET_CLIENT
47880+ default 1003
47881+ help
47882+ Here you can choose the GID to disable client socket access for.
47883+ Remember to add the users you want client socket access disabled for to
47884+ the GID specified here. If the sysctl option is enabled, a sysctl
47885+ option with name "socket_client_gid" is created.
47886+
47887+config GRKERNSEC_SOCKET_SERVER
47888+ bool "Deny server sockets to group"
47889+ depends on GRKERNSEC_SOCKET
47890+ help
47891+ If you say Y here, you will be able to choose a GID of whose users will
47892+ be unable to run server applications from your machine. If the sysctl
47893+ option is enabled, a sysctl option with name "socket_server" is created.
47894+
47895+config GRKERNSEC_SOCKET_SERVER_GID
47896+ int "GID to deny server sockets for"
47897+ depends on GRKERNSEC_SOCKET_SERVER
47898+ default 1002
47899+ help
47900+ Here you can choose the GID to disable server socket access for.
47901+ Remember to add the users you want server socket access disabled for to
47902+ the GID specified here. If the sysctl option is enabled, a sysctl
47903+ option with name "socket_server_gid" is created.
47904+
47905+endmenu
47906+menu "Sysctl support"
47907+depends on GRKERNSEC && SYSCTL
47908+
47909+config GRKERNSEC_SYSCTL
47910+ bool "Sysctl support"
47911+ help
47912+ If you say Y here, you will be able to change the options that
47913+ grsecurity runs with at bootup, without having to recompile your
47914+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
47915+ to enable (1) or disable (0) various features. All the sysctl entries
47916+ are mutable until the "grsec_lock" entry is set to a non-zero value.
47917+ All features enabled in the kernel configuration are disabled at boot
47918+ if you do not say Y to the "Turn on features by default" option.
47919+ All options should be set at startup, and the grsec_lock entry should
47920+ be set to a non-zero value after all the options are set.
47921+ *THIS IS EXTREMELY IMPORTANT*
47922+
47923+config GRKERNSEC_SYSCTL_DISTRO
47924+ bool "Extra sysctl support for distro makers (READ HELP)"
47925+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
47926+ help
47927+ If you say Y here, additional sysctl options will be created
47928+ for features that affect processes running as root. Therefore,
47929+ it is critical when using this option that the grsec_lock entry be
47930+ enabled after boot. Only distros with prebuilt kernel packages
47931+ with this option enabled that can ensure grsec_lock is enabled
47932+ after boot should use this option.
47933+ *Failure to set grsec_lock after boot makes all grsec features
47934+ this option covers useless*
47935+
47936+ Currently this option creates the following sysctl entries:
47937+ "Disable Privileged I/O": "disable_priv_io"
47938+
47939+config GRKERNSEC_SYSCTL_ON
47940+ bool "Turn on features by default"
47941+ depends on GRKERNSEC_SYSCTL
47942+ help
47943+ If you say Y here, instead of having all features enabled in the
47944+ kernel configuration disabled at boot time, the features will be
47945+ enabled at boot time. It is recommended you say Y here unless
47946+ there is some reason you would want all sysctl-tunable features to
47947+ be disabled by default. As mentioned elsewhere, it is important
47948+ to enable the grsec_lock entry once you have finished modifying
47949+ the sysctl entries.
47950+
47951+endmenu
47952+menu "Logging Options"
47953+depends on GRKERNSEC
47954+
47955+config GRKERNSEC_FLOODTIME
47956+ int "Seconds in between log messages (minimum)"
47957+ default 10
47958+ help
47959+ This option allows you to enforce the number of seconds between
47960+ grsecurity log messages. The default should be suitable for most
47961+ people, however, if you choose to change it, choose a value small enough
47962+ to allow informative logs to be produced, but large enough to
47963+ prevent flooding.
47964+
47965+config GRKERNSEC_FLOODBURST
47966+ int "Number of messages in a burst (maximum)"
47967+ default 6
47968+ help
47969+ This option allows you to choose the maximum number of messages allowed
47970+ within the flood time interval you chose in a separate option. The
47971+ default should be suitable for most people, however if you find that
47972+ many of your logs are being interpreted as flooding, you may want to
47973+ raise this value.
47974+
47975+endmenu
47976+
47977+endmenu
47978diff -urNp linux-3.0.7/grsecurity/Makefile linux-3.0.7/grsecurity/Makefile
47979--- linux-3.0.7/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
47980+++ linux-3.0.7/grsecurity/Makefile 2011-10-17 06:45:43.000000000 -0400
47981@@ -0,0 +1,36 @@
47982+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
47983+# during 2001-2009 it has been completely redesigned by Brad Spengler
47984+# into an RBAC system
47985+#
47986+# All code in this directory and various hooks inserted throughout the kernel
47987+# are copyright Brad Spengler - Open Source Security, Inc., and released
47988+# under the GPL v2 or higher
47989+
47990+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
47991+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
47992+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
47993+
47994+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
47995+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
47996+ gracl_learn.o grsec_log.o
47997+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
47998+
47999+ifdef CONFIG_NET
48000+obj-y += grsec_sock.o
48001+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48002+endif
48003+
48004+ifndef CONFIG_GRKERNSEC
48005+obj-y += grsec_disabled.o
48006+endif
48007+
48008+ifdef CONFIG_GRKERNSEC_HIDESYM
48009+extra-y := grsec_hidesym.o
48010+$(obj)/grsec_hidesym.o:
48011+ @-chmod -f 500 /boot
48012+ @-chmod -f 500 /lib/modules
48013+ @-chmod -f 500 /lib64/modules
48014+ @-chmod -f 500 /lib32/modules
48015+ @-chmod -f 700 .
48016+ @echo ' grsec: protected kernel image paths'
48017+endif
48018diff -urNp linux-3.0.7/grsecurity/gracl.c linux-3.0.7/grsecurity/gracl.c
48019--- linux-3.0.7/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
48020+++ linux-3.0.7/grsecurity/gracl.c 2011-10-17 06:42:59.000000000 -0400
48021@@ -0,0 +1,4154 @@
48022+#include <linux/kernel.h>
48023+#include <linux/module.h>
48024+#include <linux/sched.h>
48025+#include <linux/mm.h>
48026+#include <linux/file.h>
48027+#include <linux/fs.h>
48028+#include <linux/namei.h>
48029+#include <linux/mount.h>
48030+#include <linux/tty.h>
48031+#include <linux/proc_fs.h>
48032+#include <linux/lglock.h>
48033+#include <linux/slab.h>
48034+#include <linux/vmalloc.h>
48035+#include <linux/types.h>
48036+#include <linux/sysctl.h>
48037+#include <linux/netdevice.h>
48038+#include <linux/ptrace.h>
48039+#include <linux/gracl.h>
48040+#include <linux/gralloc.h>
48041+#include <linux/grsecurity.h>
48042+#include <linux/grinternal.h>
48043+#include <linux/pid_namespace.h>
48044+#include <linux/fdtable.h>
48045+#include <linux/percpu.h>
48046+
48047+#include <asm/uaccess.h>
48048+#include <asm/errno.h>
48049+#include <asm/mman.h>
48050+
48051+static struct acl_role_db acl_role_set;
48052+static struct name_db name_set;
48053+static struct inodev_db inodev_set;
48054+
48055+/* for keeping track of userspace pointers used for subjects, so we
48056+ can share references in the kernel as well
48057+*/
48058+
48059+static struct path real_root;
48060+
48061+static struct acl_subj_map_db subj_map_set;
48062+
48063+static struct acl_role_label *default_role;
48064+
48065+static struct acl_role_label *role_list;
48066+
48067+static u16 acl_sp_role_value;
48068+
48069+extern char *gr_shared_page[4];
48070+static DEFINE_MUTEX(gr_dev_mutex);
48071+DEFINE_RWLOCK(gr_inode_lock);
48072+
48073+struct gr_arg *gr_usermode;
48074+
48075+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48076+
48077+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48078+extern void gr_clear_learn_entries(void);
48079+
48080+#ifdef CONFIG_GRKERNSEC_RESLOG
48081+extern void gr_log_resource(const struct task_struct *task,
48082+ const int res, const unsigned long wanted, const int gt);
48083+#endif
48084+
48085+unsigned char *gr_system_salt;
48086+unsigned char *gr_system_sum;
48087+
48088+static struct sprole_pw **acl_special_roles = NULL;
48089+static __u16 num_sprole_pws = 0;
48090+
48091+static struct acl_role_label *kernel_role = NULL;
48092+
48093+static unsigned int gr_auth_attempts = 0;
48094+static unsigned long gr_auth_expires = 0UL;
48095+
48096+#ifdef CONFIG_NET
48097+extern struct vfsmount *sock_mnt;
48098+#endif
48099+
48100+extern struct vfsmount *pipe_mnt;
48101+extern struct vfsmount *shm_mnt;
48102+#ifdef CONFIG_HUGETLBFS
48103+extern struct vfsmount *hugetlbfs_vfsmount;
48104+#endif
48105+
48106+static struct acl_object_label *fakefs_obj_rw;
48107+static struct acl_object_label *fakefs_obj_rwx;
48108+
48109+extern int gr_init_uidset(void);
48110+extern void gr_free_uidset(void);
48111+extern void gr_remove_uid(uid_t uid);
48112+extern int gr_find_uid(uid_t uid);
48113+
48114+DECLARE_BRLOCK(vfsmount_lock);
48115+
48116+__inline__ int
48117+gr_acl_is_enabled(void)
48118+{
48119+ return (gr_status & GR_READY);
48120+}
48121+
48122+#ifdef CONFIG_BTRFS_FS
48123+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48124+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48125+#endif
48126+
48127+static inline dev_t __get_dev(const struct dentry *dentry)
48128+{
48129+#ifdef CONFIG_BTRFS_FS
48130+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48131+ return get_btrfs_dev_from_inode(dentry->d_inode);
48132+ else
48133+#endif
48134+ return dentry->d_inode->i_sb->s_dev;
48135+}
48136+
48137+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48138+{
48139+ return __get_dev(dentry);
48140+}
48141+
48142+static char gr_task_roletype_to_char(struct task_struct *task)
48143+{
48144+ switch (task->role->roletype &
48145+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48146+ GR_ROLE_SPECIAL)) {
48147+ case GR_ROLE_DEFAULT:
48148+ return 'D';
48149+ case GR_ROLE_USER:
48150+ return 'U';
48151+ case GR_ROLE_GROUP:
48152+ return 'G';
48153+ case GR_ROLE_SPECIAL:
48154+ return 'S';
48155+ }
48156+
48157+ return 'X';
48158+}
48159+
48160+char gr_roletype_to_char(void)
48161+{
48162+ return gr_task_roletype_to_char(current);
48163+}
48164+
48165+__inline__ int
48166+gr_acl_tpe_check(void)
48167+{
48168+ if (unlikely(!(gr_status & GR_READY)))
48169+ return 0;
48170+ if (current->role->roletype & GR_ROLE_TPE)
48171+ return 1;
48172+ else
48173+ return 0;
48174+}
48175+
48176+int
48177+gr_handle_rawio(const struct inode *inode)
48178+{
48179+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48180+ if (inode && S_ISBLK(inode->i_mode) &&
48181+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48182+ !capable(CAP_SYS_RAWIO))
48183+ return 1;
48184+#endif
48185+ return 0;
48186+}
48187+
48188+static int
48189+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48190+{
48191+ if (likely(lena != lenb))
48192+ return 0;
48193+
48194+ return !memcmp(a, b, lena);
48195+}
48196+
48197+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48198+{
48199+ *buflen -= namelen;
48200+ if (*buflen < 0)
48201+ return -ENAMETOOLONG;
48202+ *buffer -= namelen;
48203+ memcpy(*buffer, str, namelen);
48204+ return 0;
48205+}
48206+
48207+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48208+{
48209+ return prepend(buffer, buflen, name->name, name->len);
48210+}
48211+
48212+static int prepend_path(const struct path *path, struct path *root,
48213+ char **buffer, int *buflen)
48214+{
48215+ struct dentry *dentry = path->dentry;
48216+ struct vfsmount *vfsmnt = path->mnt;
48217+ bool slash = false;
48218+ int error = 0;
48219+
48220+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48221+ struct dentry * parent;
48222+
48223+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48224+ /* Global root? */
48225+ if (vfsmnt->mnt_parent == vfsmnt) {
48226+ goto out;
48227+ }
48228+ dentry = vfsmnt->mnt_mountpoint;
48229+ vfsmnt = vfsmnt->mnt_parent;
48230+ continue;
48231+ }
48232+ parent = dentry->d_parent;
48233+ prefetch(parent);
48234+ spin_lock(&dentry->d_lock);
48235+ error = prepend_name(buffer, buflen, &dentry->d_name);
48236+ spin_unlock(&dentry->d_lock);
48237+ if (!error)
48238+ error = prepend(buffer, buflen, "/", 1);
48239+ if (error)
48240+ break;
48241+
48242+ slash = true;
48243+ dentry = parent;
48244+ }
48245+
48246+out:
48247+ if (!error && !slash)
48248+ error = prepend(buffer, buflen, "/", 1);
48249+
48250+ return error;
48251+}
48252+
48253+/* this must be called with vfsmount_lock and rename_lock held */
48254+
48255+static char *__our_d_path(const struct path *path, struct path *root,
48256+ char *buf, int buflen)
48257+{
48258+ char *res = buf + buflen;
48259+ int error;
48260+
48261+ prepend(&res, &buflen, "\0", 1);
48262+ error = prepend_path(path, root, &res, &buflen);
48263+ if (error)
48264+ return ERR_PTR(error);
48265+
48266+ return res;
48267+}
48268+
48269+static char *
48270+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48271+{
48272+ char *retval;
48273+
48274+ retval = __our_d_path(path, root, buf, buflen);
48275+ if (unlikely(IS_ERR(retval)))
48276+ retval = strcpy(buf, "<path too long>");
48277+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48278+ retval[1] = '\0';
48279+
48280+ return retval;
48281+}
48282+
48283+static char *
48284+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48285+ char *buf, int buflen)
48286+{
48287+ struct path path;
48288+ char *res;
48289+
48290+ path.dentry = (struct dentry *)dentry;
48291+ path.mnt = (struct vfsmount *)vfsmnt;
48292+
48293+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48294+ by the RBAC system */
48295+ res = gen_full_path(&path, &real_root, buf, buflen);
48296+
48297+ return res;
48298+}
48299+
48300+static char *
48301+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48302+ char *buf, int buflen)
48303+{
48304+ char *res;
48305+ struct path path;
48306+ struct path root;
48307+ struct task_struct *reaper = &init_task;
48308+
48309+ path.dentry = (struct dentry *)dentry;
48310+ path.mnt = (struct vfsmount *)vfsmnt;
48311+
48312+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48313+ get_fs_root(reaper->fs, &root);
48314+
48315+ write_seqlock(&rename_lock);
48316+ br_read_lock(vfsmount_lock);
48317+ res = gen_full_path(&path, &root, buf, buflen);
48318+ br_read_unlock(vfsmount_lock);
48319+ write_sequnlock(&rename_lock);
48320+
48321+ path_put(&root);
48322+ return res;
48323+}
48324+
48325+static char *
48326+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48327+{
48328+ char *ret;
48329+ write_seqlock(&rename_lock);
48330+ br_read_lock(vfsmount_lock);
48331+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48332+ PAGE_SIZE);
48333+ br_read_unlock(vfsmount_lock);
48334+ write_sequnlock(&rename_lock);
48335+ return ret;
48336+}
48337+
48338+static char *
48339+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48340+{
48341+ char *ret;
48342+ char *buf;
48343+ int buflen;
48344+
48345+ write_seqlock(&rename_lock);
48346+ br_read_lock(vfsmount_lock);
48347+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48348+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48349+ buflen = (int)(ret - buf);
48350+ if (buflen >= 5)
48351+ prepend(&ret, &buflen, "/proc", 5);
48352+ else
48353+ ret = strcpy(buf, "<path too long>");
48354+ br_read_unlock(vfsmount_lock);
48355+ write_sequnlock(&rename_lock);
48356+ return ret;
48357+}
48358+
48359+char *
48360+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48361+{
48362+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48363+ PAGE_SIZE);
48364+}
48365+
48366+char *
48367+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48368+{
48369+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48370+ PAGE_SIZE);
48371+}
48372+
48373+char *
48374+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48375+{
48376+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48377+ PAGE_SIZE);
48378+}
48379+
48380+char *
48381+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48382+{
48383+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48384+ PAGE_SIZE);
48385+}
48386+
48387+char *
48388+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48389+{
48390+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48391+ PAGE_SIZE);
48392+}
48393+
48394+__inline__ __u32
48395+to_gr_audit(const __u32 reqmode)
48396+{
48397+ /* masks off auditable permission flags, then shifts them to create
48398+ auditing flags, and adds the special case of append auditing if
48399+ we're requesting write */
48400+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48401+}
48402+
48403+struct acl_subject_label *
48404+lookup_subject_map(const struct acl_subject_label *userp)
48405+{
48406+ unsigned int index = shash(userp, subj_map_set.s_size);
48407+ struct subject_map *match;
48408+
48409+ match = subj_map_set.s_hash[index];
48410+
48411+ while (match && match->user != userp)
48412+ match = match->next;
48413+
48414+ if (match != NULL)
48415+ return match->kernel;
48416+ else
48417+ return NULL;
48418+}
48419+
48420+static void
48421+insert_subj_map_entry(struct subject_map *subjmap)
48422+{
48423+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48424+ struct subject_map **curr;
48425+
48426+ subjmap->prev = NULL;
48427+
48428+ curr = &subj_map_set.s_hash[index];
48429+ if (*curr != NULL)
48430+ (*curr)->prev = subjmap;
48431+
48432+ subjmap->next = *curr;
48433+ *curr = subjmap;
48434+
48435+ return;
48436+}
48437+
/* Pick the role for a task given its uid/gid and source IP.  Matching
 * order: user role (including user domains) first, then group role
 * (including group domains), then default_role.  A role carrying an
 * allowed_ips list only matches if the task's IP passes one of its
 * netmask checks; a user role that fails the IP check falls through to
 * the group lookup, a group role that fails falls back to default_role.
 * Also records the task's current IP in ->saved_ip as a side effect. */
static struct acl_role_label *
lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
		      const gid_t gid)
{
	unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
	struct acl_role_label *match;
	struct role_allowed_ip *ipp;
	unsigned int x;
	u32 curr_ip = task->signal->curr_ip;

	task->signal->saved_ip = curr_ip;

	match = acl_role_set.r_hash[index];

	/* pass 1: user roles — a domain role matches if uid is one of its
	   domain children, an ordinary user role if uidgid equals uid */
	while (match) {
		if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
			for (x = 0; x < match->domain_child_num; x++) {
				if (match->domain_children[x] == uid)
					goto found;
			}
		} else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
			break;
		match = match->next;
	}
found:
	if (match == NULL) {
	      try_group:
		/* pass 2: group roles, same domain-vs-plain logic on gid */
		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
		match = acl_role_set.r_hash[index];

		while (match) {
			if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
				for (x = 0; x < match->domain_child_num; x++) {
					if (match->domain_children[x] == gid)
						goto found2;
				}
			} else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
				break;
			match = match->next;
		}
found2:
		if (match == NULL)
			match = default_role;
		if (match->allowed_ips == NULL)
			return match;
		else {
			/* group role is IP-restricted: accept only if the
			   task's IP is inside one of the allowed subnets */
			for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
				if (likely
				    ((ntohl(curr_ip) & ipp->netmask) ==
				     (ntohl(ipp->addr) & ipp->netmask)))
					return match;
			}
			match = default_role;
		}
	} else if (match->allowed_ips == NULL) {
		return match;
	} else {
		/* user role is IP-restricted; on failure retry as group */
		for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
			if (likely
			    ((ntohl(curr_ip) & ipp->netmask) ==
			     (ntohl(ipp->addr) & ipp->netmask)))
				return match;
		}
		goto try_group;
	}

	return match;
}
48506+
48507+struct acl_subject_label *
48508+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48509+ const struct acl_role_label *role)
48510+{
48511+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48512+ struct acl_subject_label *match;
48513+
48514+ match = role->subj_hash[index];
48515+
48516+ while (match && (match->inode != ino || match->device != dev ||
48517+ (match->mode & GR_DELETED))) {
48518+ match = match->next;
48519+ }
48520+
48521+ if (match && !(match->mode & GR_DELETED))
48522+ return match;
48523+ else
48524+ return NULL;
48525+}
48526+
48527+struct acl_subject_label *
48528+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48529+ const struct acl_role_label *role)
48530+{
48531+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48532+ struct acl_subject_label *match;
48533+
48534+ match = role->subj_hash[index];
48535+
48536+ while (match && (match->inode != ino || match->device != dev ||
48537+ !(match->mode & GR_DELETED))) {
48538+ match = match->next;
48539+ }
48540+
48541+ if (match && (match->mode & GR_DELETED))
48542+ return match;
48543+ else
48544+ return NULL;
48545+}
48546+
48547+static struct acl_object_label *
48548+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48549+ const struct acl_subject_label *subj)
48550+{
48551+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48552+ struct acl_object_label *match;
48553+
48554+ match = subj->obj_hash[index];
48555+
48556+ while (match && (match->inode != ino || match->device != dev ||
48557+ (match->mode & GR_DELETED))) {
48558+ match = match->next;
48559+ }
48560+
48561+ if (match && !(match->mode & GR_DELETED))
48562+ return match;
48563+ else
48564+ return NULL;
48565+}
48566+
/* Object lookup used on the create path: first look for a matching
 * entry flagged GR_DELETED (the file is being recreated, so its old
 * label should be reused), and only fall back to a live entry if no
 * deleted one exists. */
static struct acl_object_label *
lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
			    const struct acl_subject_label *subj)
{
	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
	struct acl_object_label *match;

	match = subj->obj_hash[index];

	/* pass 1: deleted entry preferred */
	while (match && (match->inode != ino || match->device != dev ||
			 !(match->mode & GR_DELETED))) {
		match = match->next;
	}

	if (match && (match->mode & GR_DELETED))
		return match;

	match = subj->obj_hash[index];

	/* pass 2: fall back to a live entry */
	while (match && (match->inode != ino || match->device != dev ||
			 (match->mode & GR_DELETED))) {
		match = match->next;
	}

	if (match && !(match->mode & GR_DELETED))
		return match;
	else
		return NULL;
}
48596+
48597+static struct name_entry *
48598+lookup_name_entry(const char *name)
48599+{
48600+ unsigned int len = strlen(name);
48601+ unsigned int key = full_name_hash(name, len);
48602+ unsigned int index = key % name_set.n_size;
48603+ struct name_entry *match;
48604+
48605+ match = name_set.n_hash[index];
48606+
48607+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48608+ match = match->next;
48609+
48610+ return match;
48611+}
48612+
/* Name lookup used on the create path: prefer an entry marked deleted
 * (the name is being recreated), otherwise fall back to a live entry,
 * otherwise NULL. */
static struct name_entry *
lookup_name_entry_create(const char *name)
{
	unsigned int len = strlen(name);
	unsigned int key = full_name_hash(name, len);
	unsigned int index = key % name_set.n_size;
	struct name_entry *match;

	match = name_set.n_hash[index];

	/* pass 1: deleted entry preferred */
	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
			 !match->deleted))
		match = match->next;

	if (match && match->deleted)
		return match;

	match = name_set.n_hash[index];

	/* pass 2: live entry */
	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
			 match->deleted))
		match = match->next;

	if (match && !match->deleted)
		return match;
	else
		return NULL;
}
48641+
48642+static struct inodev_entry *
48643+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48644+{
48645+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48646+ struct inodev_entry *match;
48647+
48648+ match = inodev_set.i_hash[index];
48649+
48650+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48651+ match = match->next;
48652+
48653+ return match;
48654+}
48655+
48656+static void
48657+insert_inodev_entry(struct inodev_entry *entry)
48658+{
48659+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48660+ inodev_set.i_size);
48661+ struct inodev_entry **curr;
48662+
48663+ entry->prev = NULL;
48664+
48665+ curr = &inodev_set.i_hash[index];
48666+ if (*curr != NULL)
48667+ (*curr)->prev = entry;
48668+
48669+ entry->next = *curr;
48670+ *curr = entry;
48671+
48672+ return;
48673+}
48674+
/* Hash 'role' into the role table under the key 'uidgid'.  A domain
 * role is inserted once per domain member, so by the time we get here it
 * may already carry a hash chain on ->next from a previous bucket; that
 * chain is spliced in front of this bucket's chain instead of being
 * overwritten (and lost). */
static void
__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
{
	unsigned int index =
	    rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
	struct acl_role_label **curr;
	struct acl_role_label *tmp;

	curr = &acl_role_set.r_hash[index];

	/* if role was already inserted due to domains and already has
	   a role in the same bucket as it attached, then we need to
	   combine these two buckets
	 */
	if (role->next) {
		/* append current bucket chain to the end of role's
		   existing chain */
		tmp = role->next;
		while (tmp->next)
			tmp = tmp->next;
		tmp->next = *curr;
	} else
		role->next = *curr;
	*curr = role;

	return;
}
48700+
/* Register a role: link it into the global role_list (a stack linked
 * through ->prev) and hash it into the role table — once per domain
 * child for a domain role, once under its own uid/gid otherwise. */
static void
insert_acl_role_label(struct acl_role_label *role)
{
	int i;

	if (role_list == NULL) {
		role_list = role;
		role->prev = NULL;
	} else {
		role->prev = role_list;
		role_list = role;
	}

	/* used for hash chains */
	role->next = NULL;

	if (role->roletype & GR_ROLE_DOMAIN) {
		for (i = 0; i < role->domain_child_num; i++)
			__insert_acl_role_label(role, role->domain_children[i]);
	} else
		__insert_acl_role_label(role, role->uidgid);
}
48723+
/* Intern 'name' (with its inode/device and deleted flag) into both the
 * name hash and the inode/device hash.  Returns 1 on success — including
 * the case where an entry for the same name already exists (dedup) —
 * and 0 on allocation failure.  The 'name' string is referenced, not
 * copied, so it must outlive the entry.  NOTE(review): entries come from
 * acl_alloc(), presumably released wholesale by acl_free_all(); confirm. */
static int
insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
{
	struct name_entry **curr, *nentry;
	struct inodev_entry *ientry;
	unsigned int len = strlen(name);
	unsigned int key = full_name_hash(name, len);
	unsigned int index = key % name_set.n_size;

	curr = &name_set.n_hash[index];

	/* already interned? treat as success */
	while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
		curr = &((*curr)->next);

	if (*curr != NULL)
		return 1;

	nentry = acl_alloc(sizeof (struct name_entry));
	if (nentry == NULL)
		return 0;
	ientry = acl_alloc(sizeof (struct inodev_entry));
	if (ientry == NULL)
		return 0;
	ientry->nentry = nentry;

	nentry->key = key;
	nentry->name = name;
	nentry->inode = inode;
	nentry->device = device;
	nentry->len = len;
	nentry->deleted = deleted;

	/* push onto the front of the name-hash bucket */
	nentry->prev = NULL;
	curr = &name_set.n_hash[index];
	if (*curr != NULL)
		(*curr)->prev = nentry;
	nentry->next = *curr;
	*curr = nentry;

	/* insert us into the table searchable by inode/dev */
	insert_inodev_entry(ientry);

	return 1;
}
48768+
48769+static void
48770+insert_acl_obj_label(struct acl_object_label *obj,
48771+ struct acl_subject_label *subj)
48772+{
48773+ unsigned int index =
48774+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48775+ struct acl_object_label **curr;
48776+
48777+
48778+ obj->prev = NULL;
48779+
48780+ curr = &subj->obj_hash[index];
48781+ if (*curr != NULL)
48782+ (*curr)->prev = obj;
48783+
48784+ obj->next = *curr;
48785+ *curr = obj;
48786+
48787+ return;
48788+}
48789+
48790+static void
48791+insert_acl_subj_label(struct acl_subject_label *obj,
48792+ struct acl_role_label *role)
48793+{
48794+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48795+ struct acl_subject_label **curr;
48796+
48797+ obj->prev = NULL;
48798+
48799+ curr = &role->subj_hash[index];
48800+ if (*curr != NULL)
48801+ (*curr)->prev = obj;
48802+
48803+ obj->next = *curr;
48804+ *curr = obj;
48805+
48806+ return;
48807+}
48808+
48809+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48810+
48811+static void *
48812+create_table(__u32 * len, int elementsize)
48813+{
48814+ unsigned int table_sizes[] = {
48815+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48816+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48817+ 4194301, 8388593, 16777213, 33554393, 67108859
48818+ };
48819+ void *newtable = NULL;
48820+ unsigned int pwr = 0;
48821+
48822+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48823+ table_sizes[pwr] <= *len)
48824+ pwr++;
48825+
48826+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48827+ return newtable;
48828+
48829+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48830+ newtable =
48831+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48832+ else
48833+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48834+
48835+ *len = table_sizes[pwr];
48836+
48837+ return newtable;
48838+}
48839+
/* Size and allocate all global policy data structures from the counts
 * the userspace policy loader supplied in 'arg': the subject map, role,
 * name, and inode/device hash tables, the uid set, and the allocation
 * stack.  Also takes a reference on the real root path and creates the
 * fake-filesystem object labels.  Returns 0 on success, 1 on failure.
 * NOTE(review): on failure, anything already allocated is left for the
 * caller's cleanup path to release — confirm against the caller. */
static int
init_variables(const struct gr_arg *arg)
{
	struct task_struct *reaper = &init_task;
	unsigned int stacksize;

	subj_map_set.s_size = arg->role_db.num_subjects;
	acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
	name_set.n_size = arg->role_db.num_objects;
	inodev_set.i_size = arg->role_db.num_objects;

	/* a policy with no subjects/roles/objects is malformed */
	if (!subj_map_set.s_size || !acl_role_set.r_size ||
	    !name_set.n_size || !inodev_set.i_size)
		return 1;

	if (!gr_init_uidset())
		return 1;

	/* set up the stack that holds allocation info */

	/* +5 slack beyond the loader-reported pointer count */
	stacksize = arg->role_db.num_pointers + 5;

	if (!acl_alloc_stack_init(stacksize))
		return 1;

	/* grab reference for the real root dentry and vfsmount */
	get_fs_root(reaper->fs, &real_root);

#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
	printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
#endif

	/* synthetic object labels used for paths with no explicit policy
	   object: one read/write, one read/write/execute */
	fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
	if (fakefs_obj_rw == NULL)
		return 1;
	fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;

	fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
	if (fakefs_obj_rwx == NULL)
		return 1;
	fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;

	/* create_table() rounds each size up to a prime and writes it back */
	subj_map_set.s_hash =
	    (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
	acl_role_set.r_hash =
	    (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
	name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
	inodev_set.i_hash =
	    (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));

	if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
	    !name_set.n_hash || !inodev_set.i_hash)
		return 1;

	memset(subj_map_set.s_hash, 0,
	       sizeof(struct subject_map *) * subj_map_set.s_size);
	memset(acl_role_set.r_hash, 0,
	       sizeof (struct acl_role_label *) * acl_role_set.r_size);
	memset(name_set.n_hash, 0,
	       sizeof (struct name_entry *) * name_set.n_size);
	memset(inodev_set.i_hash, 0,
	       sizeof (struct inodev_entry *) * inodev_set.i_size);

	return 0;
}
48905+
48906+/* free information not needed after startup
48907+ currently contains user->kernel pointer mappings for subjects
48908+*/
48909+
48910+static void
48911+free_init_variables(void)
48912+{
48913+ __u32 i;
48914+
48915+ if (subj_map_set.s_hash) {
48916+ for (i = 0; i < subj_map_set.s_size; i++) {
48917+ if (subj_map_set.s_hash[i]) {
48918+ kfree(subj_map_set.s_hash[i]);
48919+ subj_map_set.s_hash[i] = NULL;
48920+ }
48921+ }
48922+
48923+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48924+ PAGE_SIZE)
48925+ kfree(subj_map_set.s_hash);
48926+ else
48927+ vfree(subj_map_set.s_hash);
48928+ }
48929+
48930+ return;
48931+}
48932+
/* Tear down the entire in-kernel RBAC policy: detach every task from
 * its role/subject, drop the real-root reference, free all object and
 * subject hash tables (kfree vs vfree chosen by the same fits-in-a-page
 * test create_table() used to allocate them), release the ACL allocator
 * and remaining global tables, and reset the globals. */
static void
free_variables(void)
{
	struct acl_subject_label *s;
	struct acl_role_label *r;
	struct task_struct *task, *task2;
	unsigned int x;

	gr_clear_learn_entries();

	/* strip RBAC state off every thread in the system */
	read_lock(&tasklist_lock);
	do_each_thread(task2, task) {
		task->acl_sp_role = 0;
		task->acl_role_id = 0;
		task->acl = NULL;
		task->role = NULL;
	} while_each_thread(task2, task);
	read_unlock(&tasklist_lock);

	/* release the reference to the real root dentry and vfsmount */
	path_put(&real_root);

	/* free all object hash tables */

	FOR_EACH_ROLE_START(r)
		if (r->subj_hash == NULL)
			goto next_role;
		/* hashed subjects of this role */
		FOR_EACH_SUBJECT_START(r, s, x)
			if (s->obj_hash == NULL)
				break;
			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
				kfree(s->obj_hash);
			else
				vfree(s->obj_hash);
		FOR_EACH_SUBJECT_END(s, x)
		/* nested subjects, reachable only through objects */
		FOR_EACH_NESTED_SUBJECT_START(r, s)
			if (s->obj_hash == NULL)
				break;
			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
				kfree(s->obj_hash);
			else
				vfree(s->obj_hash);
		FOR_EACH_NESTED_SUBJECT_END(s)
		if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
			kfree(r->subj_hash);
		else
			vfree(r->subj_hash);
		r->subj_hash = NULL;
next_role:
	FOR_EACH_ROLE_END(r)

	acl_free_all();

	if (acl_role_set.r_hash) {
		if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
		    PAGE_SIZE)
			kfree(acl_role_set.r_hash);
		else
			vfree(acl_role_set.r_hash);
	}
	if (name_set.n_hash) {
		if ((name_set.n_size * sizeof (struct name_entry *)) <=
		    PAGE_SIZE)
			kfree(name_set.n_hash);
		else
			vfree(name_set.n_hash);
	}

	if (inodev_set.i_hash) {
		if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
		    PAGE_SIZE)
			kfree(inodev_set.i_hash);
		else
			vfree(inodev_set.i_hash);
	}

	gr_free_uidset();

	memset(&name_set, 0, sizeof (struct name_db));
	memset(&inodev_set, 0, sizeof (struct inodev_db));
	memset(&acl_role_set, 0, sizeof (struct acl_role_db));
	memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));

	default_role = NULL;
	role_list = NULL;

	return;
}
49021+
49022+static __u32
49023+count_user_objs(struct acl_object_label *userp)
49024+{
49025+ struct acl_object_label o_tmp;
49026+ __u32 num = 0;
49027+
49028+ while (userp) {
49029+ if (copy_from_user(&o_tmp, userp,
49030+ sizeof (struct acl_object_label)))
49031+ break;
49032+
49033+ userp = o_tmp.prev;
49034+ num++;
49035+ }
49036+
49037+ return num;
49038+}
49039+
49040+static struct acl_subject_label *
49041+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49042+
/* Copy the chain of glob-pattern objects hanging off obj->globbed from
 * userspace into ACL-allocator memory, replacing each list link and
 * filename pointer with its kernel copy as we go.  Returns 0 or a
 * negative errno. */
static int
copy_user_glob(struct acl_object_label *obj)
{
	struct acl_object_label *g_tmp, **guser;
	unsigned int len;
	char *tmp;

	if (obj->globbed == NULL)
		return 0;

	/* guser walks the link fields: initially the userspace head
	   pointer stored in obj, then each copied node's ->next */
	guser = &obj->globbed;
	while (*guser) {
		g_tmp = (struct acl_object_label *)
			acl_alloc(sizeof (struct acl_object_label));
		if (g_tmp == NULL)
			return -ENOMEM;

		if (copy_from_user(g_tmp, *guser,
				   sizeof (struct acl_object_label)))
			return -EFAULT;

		/* strnlen_user() includes the NUL in its count */
		len = strnlen_user(g_tmp->filename, PATH_MAX);

		if (!len || len >= PATH_MAX)
			return -EINVAL;

		if ((tmp = (char *) acl_alloc(len)) == NULL)
			return -ENOMEM;

		if (copy_from_user(tmp, g_tmp->filename, len))
			return -EFAULT;
		/* force termination regardless of userspace contents */
		tmp[len-1] = '\0';
		g_tmp->filename = tmp;

		*guser = g_tmp;
		guser = &(g_tmp->next);
	}

	return 0;
}
49083+
/* Copy a userland object list (linked through ->prev) into 'subj':
 * each object is copied in, its filename string duplicated and interned
 * in the name/inodev tables, its glob chain copied, and any nested
 * subject recursively copied and pushed onto the role's nested-subject
 * list.  Returns 0 or a negative errno. */
static int
copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
	       struct acl_role_label *role)
{
	struct acl_object_label *o_tmp;
	unsigned int len;
	int ret;
	char *tmp;

	while (userp) {
		if ((o_tmp = (struct acl_object_label *)
		     acl_alloc(sizeof (struct acl_object_label))) == NULL)
			return -ENOMEM;

		if (copy_from_user(o_tmp, userp,
				   sizeof (struct acl_object_label)))
			return -EFAULT;

		/* advance before we overwrite fields of the kernel copy */
		userp = o_tmp->prev;

		len = strnlen_user(o_tmp->filename, PATH_MAX);

		if (!len || len >= PATH_MAX)
			return -EINVAL;

		if ((tmp = (char *) acl_alloc(len)) == NULL)
			return -ENOMEM;

		if (copy_from_user(tmp, o_tmp->filename, len))
			return -EFAULT;
		/* force termination regardless of userspace contents */
		tmp[len-1] = '\0';
		o_tmp->filename = tmp;

		insert_acl_obj_label(o_tmp, subj);
		if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
				       o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
			return -ENOMEM;

		ret = copy_user_glob(o_tmp);
		if (ret)
			return ret;

		if (o_tmp->nested) {
			o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
			if (IS_ERR(o_tmp->nested))
				return PTR_ERR(o_tmp->nested);

			/* insert into nested subject list */
			o_tmp->nested->next = role->hash->first;
			role->hash->first = o_tmp->nested;
		}
	}

	return 0;
}
49139+
49140+static __u32
49141+count_user_subjs(struct acl_subject_label *userp)
49142+{
49143+ struct acl_subject_label s_tmp;
49144+ __u32 num = 0;
49145+
49146+ while (userp) {
49147+ if (copy_from_user(&s_tmp, userp,
49148+ sizeof (struct acl_subject_label)))
49149+ break;
49150+
49151+ userp = s_tmp.prev;
49152+ /* do not count nested subjects against this count, since
49153+ they are not included in the hash table, but are
49154+ attached to objects. We have already counted
49155+ the subjects in userspace for the allocation
49156+ stack
49157+ */
49158+ if (!(s_tmp.mode & GR_NESTED))
49159+ num++;
49160+ }
49161+
49162+ return num;
49163+}
49164+
/* Replace rolep->allowed_ips — a userland list linked through ->prev —
 * with an in-kernel copy, rebuilding both prev and next links as nodes
 * are copied.  Returns 0 or a negative errno. */
static int
copy_user_allowedips(struct acl_role_label *rolep)
{
	struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;

	ruserip = rolep->allowed_ips;

	while (ruserip) {
		rlast = rtmp;

		if ((rtmp = (struct role_allowed_ip *)
		     acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
			return -ENOMEM;

		if (copy_from_user(rtmp, ruserip,
				   sizeof (struct role_allowed_ip)))
			return -EFAULT;

		/* advance using the userspace link we just copied */
		ruserip = rtmp->prev;

		if (!rlast) {
			/* first node becomes the new kernel-side head */
			rtmp->prev = NULL;
			rolep->allowed_ips = rtmp;
		} else {
			rlast->next = rtmp;
			rtmp->prev = rlast;
		}

		/* terminate the list on the final node */
		if (!ruserip)
			rtmp->next = NULL;
	}

	return 0;
}
49199+
/* Replace rolep->transitions — a userland list linked through ->prev —
 * with an in-kernel copy, duplicating each rolename string and
 * rebuilding the prev/next links.  Returns 0 or a negative errno. */
static int
copy_user_transitions(struct acl_role_label *rolep)
{
	struct role_transition *rusertp, *rtmp = NULL, *rlast;

	unsigned int len;
	char *tmp;

	rusertp = rolep->transitions;

	while (rusertp) {
		rlast = rtmp;

		if ((rtmp = (struct role_transition *)
		     acl_alloc(sizeof (struct role_transition))) == NULL)
			return -ENOMEM;

		if (copy_from_user(rtmp, rusertp,
				   sizeof (struct role_transition)))
			return -EFAULT;

		/* advance using the userspace link we just copied */
		rusertp = rtmp->prev;

		/* strnlen_user() includes the NUL in its count */
		len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);

		if (!len || len >= GR_SPROLE_LEN)
			return -EINVAL;

		if ((tmp = (char *) acl_alloc(len)) == NULL)
			return -ENOMEM;

		if (copy_from_user(tmp, rtmp->rolename, len))
			return -EFAULT;
		/* force termination regardless of userspace contents */
		tmp[len-1] = '\0';
		rtmp->rolename = tmp;

		if (!rlast) {
			/* first node becomes the new kernel-side head */
			rtmp->prev = NULL;
			rolep->transitions = rtmp;
		} else {
			rlast->next = rtmp;
			rtmp->prev = rlast;
		}

		/* terminate the list on the final node */
		if (!rusertp)
			rtmp->next = NULL;
	}

	return 0;
}
49250+
/* Deep-copy one subject from userspace into kernel memory.  Memoized
 * through the subject map so that a userspace subject referenced from
 * several places (parent chains, nested objects) gets exactly one
 * kernel identity — and so the recursion on parent_subject terminates.
 * Copies the subject struct, its filename, uid/gid transition tables,
 * object hash (recursing into nested subjects), parent subject, and ip
 * ACLs.  Returns the kernel subject or an ERR_PTR.  NOTE(review): on
 * error the half-built subject stays in the map; presumably the caller
 * tears everything down via the ACL allocator — confirm. */
static struct acl_subject_label *
do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
{
	struct acl_subject_label *s_tmp = NULL, *s_tmp2;
	unsigned int len;
	char *tmp;
	__u32 num_objs;
	struct acl_ip_label **i_tmp, *i_utmp2;
	struct gr_hash_struct ghash;
	struct subject_map *subjmap;
	unsigned int i_num;
	int err;

	s_tmp = lookup_subject_map(userp);

	/* we've already copied this subject into the kernel, just return
	   the reference to it, and don't copy it over again
	 */
	if (s_tmp)
		return(s_tmp);

	if ((s_tmp = (struct acl_subject_label *)
	    acl_alloc(sizeof (struct acl_subject_label))) == NULL)
		return ERR_PTR(-ENOMEM);

	subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
	if (subjmap == NULL)
		return ERR_PTR(-ENOMEM);

	/* map the userspace pointer BEFORE filling in the copy, so
	   recursive references back to this subject resolve to it */
	subjmap->user = userp;
	subjmap->kernel = s_tmp;
	insert_subj_map_entry(subjmap);

	if (copy_from_user(s_tmp, userp,
			   sizeof (struct acl_subject_label)))
		return ERR_PTR(-EFAULT);

	/* strnlen_user() includes the NUL in its count */
	len = strnlen_user(s_tmp->filename, PATH_MAX);

	if (!len || len >= PATH_MAX)
		return ERR_PTR(-EINVAL);

	if ((tmp = (char *) acl_alloc(len)) == NULL)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(tmp, s_tmp->filename, len))
		return ERR_PTR(-EFAULT);
	/* force termination regardless of userspace contents */
	tmp[len-1] = '\0';
	s_tmp->filename = tmp;

	if (!strcmp(s_tmp->filename, "/"))
		role->root_label = s_tmp;

	if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
		return ERR_PTR(-EFAULT);

	/* copy user and group transition tables */

	if (s_tmp->user_trans_num) {
		uid_t *uidlist;

		uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
		if (uidlist == NULL)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
			return ERR_PTR(-EFAULT);

		s_tmp->user_transitions = uidlist;
	}

	if (s_tmp->group_trans_num) {
		gid_t *gidlist;

		gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
		if (gidlist == NULL)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
			return ERR_PTR(-EFAULT);

		s_tmp->group_transitions = gidlist;
	}

	/* set up object hash table */
	num_objs = count_user_objs(ghash.first);

	s_tmp->obj_hash_size = num_objs;
	s_tmp->obj_hash =
	    (struct acl_object_label **)
	    create_table(&(s_tmp->obj_hash_size), sizeof(void *));

	if (!s_tmp->obj_hash)
		return ERR_PTR(-ENOMEM);

	memset(s_tmp->obj_hash, 0,
	       s_tmp->obj_hash_size *
	       sizeof (struct acl_object_label *));

	/* add in objects */
	err = copy_user_objs(ghash.first, s_tmp, role);

	if (err)
		return ERR_PTR(err);

	/* set pointer for parent subject */
	if (s_tmp->parent_subject) {
		s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);

		if (IS_ERR(s_tmp2))
			return s_tmp2;

		s_tmp->parent_subject = s_tmp2;
	}

	/* add in ip acls */

	if (!s_tmp->ip_num) {
		s_tmp->ips = NULL;
		goto insert;
	}

	i_tmp =
	    (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
						   sizeof (struct acl_ip_label *));

	if (!i_tmp)
		return ERR_PTR(-ENOMEM);

	for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
		*(i_tmp + i_num) =
		    (struct acl_ip_label *)
		    acl_alloc(sizeof (struct acl_ip_label));
		if (!*(i_tmp + i_num))
			return ERR_PTR(-ENOMEM);

		/* first fetch the userspace pointer from the pointer
		   array, then the structure it points at */
		if (copy_from_user
		    (&i_utmp2, s_tmp->ips + i_num,
		     sizeof (struct acl_ip_label *)))
			return ERR_PTR(-EFAULT);

		if (copy_from_user
		    (*(i_tmp + i_num), i_utmp2,
		     sizeof (struct acl_ip_label)))
			return ERR_PTR(-EFAULT);

		if ((*(i_tmp + i_num))->iface == NULL)
			continue;

		len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
		if (!len || len >= IFNAMSIZ)
			return ERR_PTR(-EINVAL);
		tmp = acl_alloc(len);
		if (tmp == NULL)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
			return ERR_PTR(-EFAULT);
		(*(i_tmp + i_num))->iface = tmp;
	}

	s_tmp->ips = i_tmp;

insert:
	if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
			       s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
		return ERR_PTR(-ENOMEM);

	return s_tmp;
}
49418+
/* Copy a role's userland subject list (linked through ->prev) into the
 * kernel and hash each subject into the role.  Nested subjects are
 * skipped here: they are copied while their owning objects are parsed
 * (see copy_user_objs()).  Returns 0 or a negative errno. */
static int
copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
{
	struct acl_subject_label s_pre;
	struct acl_subject_label * ret;
	int err;

	while (userp) {
		/* peek at the subject to read its mode and prev link */
		if (copy_from_user(&s_pre, userp,
				   sizeof (struct acl_subject_label)))
			return -EFAULT;

		/* do not add nested subjects here, add
		   while parsing objects
		 */

		if (s_pre.mode & GR_NESTED) {
			userp = s_pre.prev;
			continue;
		}

		ret = do_copy_user_subj(userp, role);

		err = PTR_ERR(ret);
		if (IS_ERR(ret))
			return err;

		insert_acl_subj_label(ret, role);

		userp = s_pre.prev;
	}

	return 0;
}
49453+
49454+static int
49455+copy_user_acl(struct gr_arg *arg)
49456+{
49457+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49458+ struct sprole_pw *sptmp;
49459+ struct gr_hash_struct *ghash;
49460+ uid_t *domainlist;
49461+ unsigned int r_num;
49462+ unsigned int len;
49463+ char *tmp;
49464+ int err = 0;
49465+ __u16 i;
49466+ __u32 num_subjs;
49467+
49468+ /* we need a default and kernel role */
49469+ if (arg->role_db.num_roles < 2)
49470+ return -EINVAL;
49471+
49472+ /* copy special role authentication info from userspace */
49473+
49474+ num_sprole_pws = arg->num_sprole_pws;
49475+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49476+
49477+ if (!acl_special_roles) {
49478+ err = -ENOMEM;
49479+ goto cleanup;
49480+ }
49481+
49482+ for (i = 0; i < num_sprole_pws; i++) {
49483+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49484+ if (!sptmp) {
49485+ err = -ENOMEM;
49486+ goto cleanup;
49487+ }
49488+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49489+ sizeof (struct sprole_pw))) {
49490+ err = -EFAULT;
49491+ goto cleanup;
49492+ }
49493+
49494+ len =
49495+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49496+
49497+ if (!len || len >= GR_SPROLE_LEN) {
49498+ err = -EINVAL;
49499+ goto cleanup;
49500+ }
49501+
49502+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49503+ err = -ENOMEM;
49504+ goto cleanup;
49505+ }
49506+
49507+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49508+ err = -EFAULT;
49509+ goto cleanup;
49510+ }
49511+ tmp[len-1] = '\0';
49512+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49513+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49514+#endif
49515+ sptmp->rolename = tmp;
49516+ acl_special_roles[i] = sptmp;
49517+ }
49518+
49519+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49520+
49521+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49522+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49523+
49524+ if (!r_tmp) {
49525+ err = -ENOMEM;
49526+ goto cleanup;
49527+ }
49528+
49529+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49530+ sizeof (struct acl_role_label *))) {
49531+ err = -EFAULT;
49532+ goto cleanup;
49533+ }
49534+
49535+ if (copy_from_user(r_tmp, r_utmp2,
49536+ sizeof (struct acl_role_label))) {
49537+ err = -EFAULT;
49538+ goto cleanup;
49539+ }
49540+
49541+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49542+
49543+ if (!len || len >= PATH_MAX) {
49544+ err = -EINVAL;
49545+ goto cleanup;
49546+ }
49547+
49548+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49549+ err = -ENOMEM;
49550+ goto cleanup;
49551+ }
49552+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49553+ err = -EFAULT;
49554+ goto cleanup;
49555+ }
49556+ tmp[len-1] = '\0';
49557+ r_tmp->rolename = tmp;
49558+
49559+ if (!strcmp(r_tmp->rolename, "default")
49560+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49561+ default_role = r_tmp;
49562+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49563+ kernel_role = r_tmp;
49564+ }
49565+
49566+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49567+ err = -ENOMEM;
49568+ goto cleanup;
49569+ }
49570+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49571+ err = -EFAULT;
49572+ goto cleanup;
49573+ }
49574+
49575+ r_tmp->hash = ghash;
49576+
49577+ num_subjs = count_user_subjs(r_tmp->hash->first);
49578+
49579+ r_tmp->subj_hash_size = num_subjs;
49580+ r_tmp->subj_hash =
49581+ (struct acl_subject_label **)
49582+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49583+
49584+ if (!r_tmp->subj_hash) {
49585+ err = -ENOMEM;
49586+ goto cleanup;
49587+ }
49588+
49589+ err = copy_user_allowedips(r_tmp);
49590+ if (err)
49591+ goto cleanup;
49592+
49593+ /* copy domain info */
49594+ if (r_tmp->domain_children != NULL) {
49595+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49596+ if (domainlist == NULL) {
49597+ err = -ENOMEM;
49598+ goto cleanup;
49599+ }
49600+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49601+ err = -EFAULT;
49602+ goto cleanup;
49603+ }
49604+ r_tmp->domain_children = domainlist;
49605+ }
49606+
49607+ err = copy_user_transitions(r_tmp);
49608+ if (err)
49609+ goto cleanup;
49610+
49611+ memset(r_tmp->subj_hash, 0,
49612+ r_tmp->subj_hash_size *
49613+ sizeof (struct acl_subject_label *));
49614+
49615+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49616+
49617+ if (err)
49618+ goto cleanup;
49619+
49620+ /* set nested subject list to null */
49621+ r_tmp->hash->first = NULL;
49622+
49623+ insert_acl_role_label(r_tmp);
49624+ }
49625+
49626+ goto return_err;
49627+ cleanup:
49628+ free_variables();
49629+ return_err:
49630+ return err;
49631+
49632+}
49633+
49634+static int
49635+gracl_init(struct gr_arg *args)
49636+{
49637+ int error = 0;
49638+
49639+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49640+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49641+
49642+ if (init_variables(args)) {
49643+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49644+ error = -ENOMEM;
49645+ free_variables();
49646+ goto out;
49647+ }
49648+
49649+ error = copy_user_acl(args);
49650+ free_init_variables();
49651+ if (error) {
49652+ free_variables();
49653+ goto out;
49654+ }
49655+
49656+ if ((error = gr_set_acls(0))) {
49657+ free_variables();
49658+ goto out;
49659+ }
49660+
49661+ pax_open_kernel();
49662+ gr_status |= GR_READY;
49663+ pax_close_kernel();
49664+
49665+ out:
49666+ return error;
49667+}
49668+
49669+/* derived from glibc fnmatch() 0: match, 1: no match*/
49670+
49671+static int
49672+glob_match(const char *p, const char *n)
49673+{
49674+ char c;
49675+
49676+ while ((c = *p++) != '\0') {
49677+ switch (c) {
49678+ case '?':
49679+ if (*n == '\0')
49680+ return 1;
49681+ else if (*n == '/')
49682+ return 1;
49683+ break;
49684+ case '\\':
49685+ if (*n != c)
49686+ return 1;
49687+ break;
49688+ case '*':
49689+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49690+ if (*n == '/')
49691+ return 1;
49692+ else if (c == '?') {
49693+ if (*n == '\0')
49694+ return 1;
49695+ else
49696+ ++n;
49697+ }
49698+ }
49699+ if (c == '\0') {
49700+ return 0;
49701+ } else {
49702+ const char *endp;
49703+
49704+ if ((endp = strchr(n, '/')) == NULL)
49705+ endp = n + strlen(n);
49706+
49707+ if (c == '[') {
49708+ for (--p; n < endp; ++n)
49709+ if (!glob_match(p, n))
49710+ return 0;
49711+ } else if (c == '/') {
49712+ while (*n != '\0' && *n != '/')
49713+ ++n;
49714+ if (*n == '/' && !glob_match(p, n + 1))
49715+ return 0;
49716+ } else {
49717+ for (--p; n < endp; ++n)
49718+ if (*n == c && !glob_match(p, n))
49719+ return 0;
49720+ }
49721+
49722+ return 1;
49723+ }
49724+ case '[':
49725+ {
49726+ int not;
49727+ char cold;
49728+
49729+ if (*n == '\0' || *n == '/')
49730+ return 1;
49731+
49732+ not = (*p == '!' || *p == '^');
49733+ if (not)
49734+ ++p;
49735+
49736+ c = *p++;
49737+ for (;;) {
49738+ unsigned char fn = (unsigned char)*n;
49739+
49740+ if (c == '\0')
49741+ return 1;
49742+ else {
49743+ if (c == fn)
49744+ goto matched;
49745+ cold = c;
49746+ c = *p++;
49747+
49748+ if (c == '-' && *p != ']') {
49749+ unsigned char cend = *p++;
49750+
49751+ if (cend == '\0')
49752+ return 1;
49753+
49754+ if (cold <= fn && fn <= cend)
49755+ goto matched;
49756+
49757+ c = *p++;
49758+ }
49759+ }
49760+
49761+ if (c == ']')
49762+ break;
49763+ }
49764+ if (!not)
49765+ return 1;
49766+ break;
49767+ matched:
49768+ while (c != ']') {
49769+ if (c == '\0')
49770+ return 1;
49771+
49772+ c = *p++;
49773+ }
49774+ if (not)
49775+ return 1;
49776+ }
49777+ break;
49778+ default:
49779+ if (c != *n)
49780+ return 1;
49781+ }
49782+
49783+ ++n;
49784+ }
49785+
49786+ if (*n == '\0')
49787+ return 0;
49788+
49789+ if (*n == '/')
49790+ return 0;
49791+
49792+ return 1;
49793+}
49794+
49795+static struct acl_object_label *
49796+chk_glob_label(struct acl_object_label *globbed,
49797+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49798+{
49799+ struct acl_object_label *tmp;
49800+
49801+ if (*path == NULL)
49802+ *path = gr_to_filename_nolock(dentry, mnt);
49803+
49804+ tmp = globbed;
49805+
49806+ while (tmp) {
49807+ if (!glob_match(tmp->filename, *path))
49808+ return tmp;
49809+ tmp = tmp->next;
49810+ }
49811+
49812+ return NULL;
49813+}
49814+
49815+static struct acl_object_label *
49816+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49817+ const ino_t curr_ino, const dev_t curr_dev,
49818+ const struct acl_subject_label *subj, char **path, const int checkglob)
49819+{
49820+ struct acl_subject_label *tmpsubj;
49821+ struct acl_object_label *retval;
49822+ struct acl_object_label *retval2;
49823+
49824+ tmpsubj = (struct acl_subject_label *) subj;
49825+ read_lock(&gr_inode_lock);
49826+ do {
49827+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49828+ if (retval) {
49829+ if (checkglob && retval->globbed) {
49830+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49831+ (struct vfsmount *)orig_mnt, path);
49832+ if (retval2)
49833+ retval = retval2;
49834+ }
49835+ break;
49836+ }
49837+ } while ((tmpsubj = tmpsubj->parent_subject));
49838+ read_unlock(&gr_inode_lock);
49839+
49840+ return retval;
49841+}
49842+
49843+static __inline__ struct acl_object_label *
49844+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49845+ struct dentry *curr_dentry,
49846+ const struct acl_subject_label *subj, char **path, const int checkglob)
49847+{
49848+ int newglob = checkglob;
49849+ ino_t inode;
49850+ dev_t device;
49851+
49852+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49853+ as we don't want a / * rule to match instead of the / object
49854+ don't do this for create lookups that call this function though, since they're looking up
49855+ on the parent and thus need globbing checks on all paths
49856+ */
49857+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49858+ newglob = GR_NO_GLOB;
49859+
49860+ spin_lock(&curr_dentry->d_lock);
49861+ inode = curr_dentry->d_inode->i_ino;
49862+ device = __get_dev(curr_dentry);
49863+ spin_unlock(&curr_dentry->d_lock);
49864+
49865+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49866+}
49867+
49868+static struct acl_object_label *
49869+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49870+ const struct acl_subject_label *subj, char *path, const int checkglob)
49871+{
49872+ struct dentry *dentry = (struct dentry *) l_dentry;
49873+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49874+ struct acl_object_label *retval;
49875+ struct dentry *parent;
49876+
49877+ write_seqlock(&rename_lock);
49878+ br_read_lock(vfsmount_lock);
49879+
49880+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49881+#ifdef CONFIG_NET
49882+ mnt == sock_mnt ||
49883+#endif
49884+#ifdef CONFIG_HUGETLBFS
49885+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49886+#endif
49887+ /* ignore Eric Biederman */
49888+ IS_PRIVATE(l_dentry->d_inode))) {
49889+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49890+ goto out;
49891+ }
49892+
49893+ for (;;) {
49894+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49895+ break;
49896+
49897+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49898+ if (mnt->mnt_parent == mnt)
49899+ break;
49900+
49901+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49902+ if (retval != NULL)
49903+ goto out;
49904+
49905+ dentry = mnt->mnt_mountpoint;
49906+ mnt = mnt->mnt_parent;
49907+ continue;
49908+ }
49909+
49910+ parent = dentry->d_parent;
49911+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49912+ if (retval != NULL)
49913+ goto out;
49914+
49915+ dentry = parent;
49916+ }
49917+
49918+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49919+
49920+ /* real_root is pinned so we don't have to hold a reference */
49921+ if (retval == NULL)
49922+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49923+out:
49924+ br_read_unlock(vfsmount_lock);
49925+ write_sequnlock(&rename_lock);
49926+
49927+ BUG_ON(retval == NULL);
49928+
49929+ return retval;
49930+}
49931+
49932+static __inline__ struct acl_object_label *
49933+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49934+ const struct acl_subject_label *subj)
49935+{
49936+ char *path = NULL;
49937+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49938+}
49939+
49940+static __inline__ struct acl_object_label *
49941+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49942+ const struct acl_subject_label *subj)
49943+{
49944+ char *path = NULL;
49945+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49946+}
49947+
49948+static __inline__ struct acl_object_label *
49949+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49950+ const struct acl_subject_label *subj, char *path)
49951+{
49952+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49953+}
49954+
49955+static struct acl_subject_label *
49956+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49957+ const struct acl_role_label *role)
49958+{
49959+ struct dentry *dentry = (struct dentry *) l_dentry;
49960+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49961+ struct acl_subject_label *retval;
49962+ struct dentry *parent;
49963+
49964+ write_seqlock(&rename_lock);
49965+ br_read_lock(vfsmount_lock);
49966+
49967+ for (;;) {
49968+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49969+ break;
49970+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49971+ if (mnt->mnt_parent == mnt)
49972+ break;
49973+
49974+ spin_lock(&dentry->d_lock);
49975+ read_lock(&gr_inode_lock);
49976+ retval =
49977+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49978+ __get_dev(dentry), role);
49979+ read_unlock(&gr_inode_lock);
49980+ spin_unlock(&dentry->d_lock);
49981+ if (retval != NULL)
49982+ goto out;
49983+
49984+ dentry = mnt->mnt_mountpoint;
49985+ mnt = mnt->mnt_parent;
49986+ continue;
49987+ }
49988+
49989+ spin_lock(&dentry->d_lock);
49990+ read_lock(&gr_inode_lock);
49991+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49992+ __get_dev(dentry), role);
49993+ read_unlock(&gr_inode_lock);
49994+ parent = dentry->d_parent;
49995+ spin_unlock(&dentry->d_lock);
49996+
49997+ if (retval != NULL)
49998+ goto out;
49999+
50000+ dentry = parent;
50001+ }
50002+
50003+ spin_lock(&dentry->d_lock);
50004+ read_lock(&gr_inode_lock);
50005+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50006+ __get_dev(dentry), role);
50007+ read_unlock(&gr_inode_lock);
50008+ spin_unlock(&dentry->d_lock);
50009+
50010+ if (unlikely(retval == NULL)) {
50011+ /* real_root is pinned, we don't need to hold a reference */
50012+ read_lock(&gr_inode_lock);
50013+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50014+ __get_dev(real_root.dentry), role);
50015+ read_unlock(&gr_inode_lock);
50016+ }
50017+out:
50018+ br_read_unlock(vfsmount_lock);
50019+ write_sequnlock(&rename_lock);
50020+
50021+ BUG_ON(retval == NULL);
50022+
50023+ return retval;
50024+}
50025+
50026+static void
50027+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50028+{
50029+ struct task_struct *task = current;
50030+ const struct cred *cred = current_cred();
50031+
50032+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50033+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50034+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50035+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50036+
50037+ return;
50038+}
50039+
50040+static void
50041+gr_log_learn_sysctl(const char *path, const __u32 mode)
50042+{
50043+ struct task_struct *task = current;
50044+ const struct cred *cred = current_cred();
50045+
50046+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50047+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50048+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50049+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50050+
50051+ return;
50052+}
50053+
50054+static void
50055+gr_log_learn_id_change(const char type, const unsigned int real,
50056+ const unsigned int effective, const unsigned int fs)
50057+{
50058+ struct task_struct *task = current;
50059+ const struct cred *cred = current_cred();
50060+
50061+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50062+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50063+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50064+ type, real, effective, fs, &task->signal->saved_ip);
50065+
50066+ return;
50067+}
50068+
50069+__u32
50070+gr_search_file(const struct dentry * dentry, const __u32 mode,
50071+ const struct vfsmount * mnt)
50072+{
50073+ __u32 retval = mode;
50074+ struct acl_subject_label *curracl;
50075+ struct acl_object_label *currobj;
50076+
50077+ if (unlikely(!(gr_status & GR_READY)))
50078+ return (mode & ~GR_AUDITS);
50079+
50080+ curracl = current->acl;
50081+
50082+ currobj = chk_obj_label(dentry, mnt, curracl);
50083+ retval = currobj->mode & mode;
50084+
50085+ /* if we're opening a specified transfer file for writing
50086+ (e.g. /dev/initctl), then transfer our role to init
50087+ */
50088+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50089+ current->role->roletype & GR_ROLE_PERSIST)) {
50090+ struct task_struct *task = init_pid_ns.child_reaper;
50091+
50092+ if (task->role != current->role) {
50093+ task->acl_sp_role = 0;
50094+ task->acl_role_id = current->acl_role_id;
50095+ task->role = current->role;
50096+ rcu_read_lock();
50097+ read_lock(&grsec_exec_file_lock);
50098+ gr_apply_subject_to_task(task);
50099+ read_unlock(&grsec_exec_file_lock);
50100+ rcu_read_unlock();
50101+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50102+ }
50103+ }
50104+
50105+ if (unlikely
50106+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50107+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50108+ __u32 new_mode = mode;
50109+
50110+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50111+
50112+ retval = new_mode;
50113+
50114+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50115+ new_mode |= GR_INHERIT;
50116+
50117+ if (!(mode & GR_NOLEARN))
50118+ gr_log_learn(dentry, mnt, new_mode);
50119+ }
50120+
50121+ return retval;
50122+}
50123+
50124+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50125+ const struct dentry *parent,
50126+ const struct vfsmount *mnt)
50127+{
50128+ struct name_entry *match;
50129+ struct acl_object_label *matchpo;
50130+ struct acl_subject_label *curracl;
50131+ char *path;
50132+
50133+ if (unlikely(!(gr_status & GR_READY)))
50134+ return NULL;
50135+
50136+ preempt_disable();
50137+ path = gr_to_filename_rbac(new_dentry, mnt);
50138+ match = lookup_name_entry_create(path);
50139+
50140+ curracl = current->acl;
50141+
50142+ if (match) {
50143+ read_lock(&gr_inode_lock);
50144+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50145+ read_unlock(&gr_inode_lock);
50146+
50147+ if (matchpo) {
50148+ preempt_enable();
50149+ return matchpo;
50150+ }
50151+ }
50152+
50153+ // lookup parent
50154+
50155+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50156+
50157+ preempt_enable();
50158+ return matchpo;
50159+}
50160+
50161+__u32
50162+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50163+ const struct vfsmount * mnt, const __u32 mode)
50164+{
50165+ struct acl_object_label *matchpo;
50166+ __u32 retval;
50167+
50168+ if (unlikely(!(gr_status & GR_READY)))
50169+ return (mode & ~GR_AUDITS);
50170+
50171+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50172+
50173+ retval = matchpo->mode & mode;
50174+
50175+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50176+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50177+ __u32 new_mode = mode;
50178+
50179+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50180+
50181+ gr_log_learn(new_dentry, mnt, new_mode);
50182+ return new_mode;
50183+ }
50184+
50185+ return retval;
50186+}
50187+
50188+__u32
50189+gr_check_link(const struct dentry * new_dentry,
50190+ const struct dentry * parent_dentry,
50191+ const struct vfsmount * parent_mnt,
50192+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50193+{
50194+ struct acl_object_label *obj;
50195+ __u32 oldmode, newmode;
50196+ __u32 needmode;
50197+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50198+ GR_DELETE | GR_INHERIT;
50199+
50200+ if (unlikely(!(gr_status & GR_READY)))
50201+ return (GR_CREATE | GR_LINK);
50202+
50203+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50204+ oldmode = obj->mode;
50205+
50206+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50207+ newmode = obj->mode;
50208+
50209+ needmode = newmode & checkmodes;
50210+
50211+ // old name for hardlink must have at least the permissions of the new name
50212+ if ((oldmode & needmode) != needmode)
50213+ goto bad;
50214+
50215+ // if old name had restrictions/auditing, make sure the new name does as well
50216+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50217+
50218+ // don't allow hardlinking of suid/sgid files without permission
50219+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50220+ needmode |= GR_SETID;
50221+
50222+ if ((newmode & needmode) != needmode)
50223+ goto bad;
50224+
50225+ // enforce minimum permissions
50226+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50227+ return newmode;
50228+bad:
50229+ needmode = oldmode;
50230+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50231+ needmode |= GR_SETID;
50232+
50233+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50234+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50235+ return (GR_CREATE | GR_LINK);
50236+ } else if (newmode & GR_SUPPRESS)
50237+ return GR_SUPPRESS;
50238+ else
50239+ return 0;
50240+}
50241+
50242+int
50243+gr_check_hidden_task(const struct task_struct *task)
50244+{
50245+ if (unlikely(!(gr_status & GR_READY)))
50246+ return 0;
50247+
50248+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50249+ return 1;
50250+
50251+ return 0;
50252+}
50253+
50254+int
50255+gr_check_protected_task(const struct task_struct *task)
50256+{
50257+ if (unlikely(!(gr_status & GR_READY) || !task))
50258+ return 0;
50259+
50260+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50261+ task->acl != current->acl)
50262+ return 1;
50263+
50264+ return 0;
50265+}
50266+
50267+int
50268+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50269+{
50270+ struct task_struct *p;
50271+ int ret = 0;
50272+
50273+ if (unlikely(!(gr_status & GR_READY) || !pid))
50274+ return ret;
50275+
50276+ read_lock(&tasklist_lock);
50277+ do_each_pid_task(pid, type, p) {
50278+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50279+ p->acl != current->acl) {
50280+ ret = 1;
50281+ goto out;
50282+ }
50283+ } while_each_pid_task(pid, type, p);
50284+out:
50285+ read_unlock(&tasklist_lock);
50286+
50287+ return ret;
50288+}
50289+
50290+void
50291+gr_copy_label(struct task_struct *tsk)
50292+{
50293+ tsk->signal->used_accept = 0;
50294+ tsk->acl_sp_role = 0;
50295+ tsk->acl_role_id = current->acl_role_id;
50296+ tsk->acl = current->acl;
50297+ tsk->role = current->role;
50298+ tsk->signal->curr_ip = current->signal->curr_ip;
50299+ tsk->signal->saved_ip = current->signal->saved_ip;
50300+ if (current->exec_file)
50301+ get_file(current->exec_file);
50302+ tsk->exec_file = current->exec_file;
50303+ tsk->is_writable = current->is_writable;
50304+ if (unlikely(current->signal->used_accept)) {
50305+ current->signal->curr_ip = 0;
50306+ current->signal->saved_ip = 0;
50307+ }
50308+
50309+ return;
50310+}
50311+
50312+static void
50313+gr_set_proc_res(struct task_struct *task)
50314+{
50315+ struct acl_subject_label *proc;
50316+ unsigned short i;
50317+
50318+ proc = task->acl;
50319+
50320+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50321+ return;
50322+
50323+ for (i = 0; i < RLIM_NLIMITS; i++) {
50324+ if (!(proc->resmask & (1 << i)))
50325+ continue;
50326+
50327+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50328+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50329+ }
50330+
50331+ return;
50332+}
50333+
50334+extern int __gr_process_user_ban(struct user_struct *user);
50335+
50336+int
50337+gr_check_user_change(int real, int effective, int fs)
50338+{
50339+ unsigned int i;
50340+ __u16 num;
50341+ uid_t *uidlist;
50342+ int curuid;
50343+ int realok = 0;
50344+ int effectiveok = 0;
50345+ int fsok = 0;
50346+
50347+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50348+ struct user_struct *user;
50349+
50350+ if (real == -1)
50351+ goto skipit;
50352+
50353+ user = find_user(real);
50354+ if (user == NULL)
50355+ goto skipit;
50356+
50357+ if (__gr_process_user_ban(user)) {
50358+ /* for find_user */
50359+ free_uid(user);
50360+ return 1;
50361+ }
50362+
50363+ /* for find_user */
50364+ free_uid(user);
50365+
50366+skipit:
50367+#endif
50368+
50369+ if (unlikely(!(gr_status & GR_READY)))
50370+ return 0;
50371+
50372+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50373+ gr_log_learn_id_change('u', real, effective, fs);
50374+
50375+ num = current->acl->user_trans_num;
50376+ uidlist = current->acl->user_transitions;
50377+
50378+ if (uidlist == NULL)
50379+ return 0;
50380+
50381+ if (real == -1)
50382+ realok = 1;
50383+ if (effective == -1)
50384+ effectiveok = 1;
50385+ if (fs == -1)
50386+ fsok = 1;
50387+
50388+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50389+ for (i = 0; i < num; i++) {
50390+ curuid = (int)uidlist[i];
50391+ if (real == curuid)
50392+ realok = 1;
50393+ if (effective == curuid)
50394+ effectiveok = 1;
50395+ if (fs == curuid)
50396+ fsok = 1;
50397+ }
50398+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50399+ for (i = 0; i < num; i++) {
50400+ curuid = (int)uidlist[i];
50401+ if (real == curuid)
50402+ break;
50403+ if (effective == curuid)
50404+ break;
50405+ if (fs == curuid)
50406+ break;
50407+ }
50408+ /* not in deny list */
50409+ if (i == num) {
50410+ realok = 1;
50411+ effectiveok = 1;
50412+ fsok = 1;
50413+ }
50414+ }
50415+
50416+ if (realok && effectiveok && fsok)
50417+ return 0;
50418+ else {
50419+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50420+ return 1;
50421+ }
50422+}
50423+
50424+int
50425+gr_check_group_change(int real, int effective, int fs)
50426+{
50427+ unsigned int i;
50428+ __u16 num;
50429+ gid_t *gidlist;
50430+ int curgid;
50431+ int realok = 0;
50432+ int effectiveok = 0;
50433+ int fsok = 0;
50434+
50435+ if (unlikely(!(gr_status & GR_READY)))
50436+ return 0;
50437+
50438+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50439+ gr_log_learn_id_change('g', real, effective, fs);
50440+
50441+ num = current->acl->group_trans_num;
50442+ gidlist = current->acl->group_transitions;
50443+
50444+ if (gidlist == NULL)
50445+ return 0;
50446+
50447+ if (real == -1)
50448+ realok = 1;
50449+ if (effective == -1)
50450+ effectiveok = 1;
50451+ if (fs == -1)
50452+ fsok = 1;
50453+
50454+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50455+ for (i = 0; i < num; i++) {
50456+ curgid = (int)gidlist[i];
50457+ if (real == curgid)
50458+ realok = 1;
50459+ if (effective == curgid)
50460+ effectiveok = 1;
50461+ if (fs == curgid)
50462+ fsok = 1;
50463+ }
50464+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50465+ for (i = 0; i < num; i++) {
50466+ curgid = (int)gidlist[i];
50467+ if (real == curgid)
50468+ break;
50469+ if (effective == curgid)
50470+ break;
50471+ if (fs == curgid)
50472+ break;
50473+ }
50474+ /* not in deny list */
50475+ if (i == num) {
50476+ realok = 1;
50477+ effectiveok = 1;
50478+ fsok = 1;
50479+ }
50480+ }
50481+
50482+ if (realok && effectiveok && fsok)
50483+ return 0;
50484+ else {
50485+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50486+ return 1;
50487+ }
50488+}
50489+
50490+void
50491+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50492+{
50493+ struct acl_role_label *role = task->role;
50494+ struct acl_subject_label *subj = NULL;
50495+ struct acl_object_label *obj;
50496+ struct file *filp;
50497+
50498+ if (unlikely(!(gr_status & GR_READY)))
50499+ return;
50500+
50501+ filp = task->exec_file;
50502+
50503+ /* kernel process, we'll give them the kernel role */
50504+ if (unlikely(!filp)) {
50505+ task->role = kernel_role;
50506+ task->acl = kernel_role->root_label;
50507+ return;
50508+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50509+ role = lookup_acl_role_label(task, uid, gid);
50510+
50511+ /* perform subject lookup in possibly new role
50512+ we can use this result below in the case where role == task->role
50513+ */
50514+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50515+
50516+ /* if we changed uid/gid, but result in the same role
50517+ and are using inheritance, don't lose the inherited subject
50518+ if current subject is other than what normal lookup
50519+ would result in, we arrived via inheritance, don't
50520+ lose subject
50521+ */
50522+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50523+ (subj == task->acl)))
50524+ task->acl = subj;
50525+
50526+ task->role = role;
50527+
50528+ task->is_writable = 0;
50529+
50530+ /* ignore additional mmap checks for processes that are writable
50531+ by the default ACL */
50532+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50533+ if (unlikely(obj->mode & GR_WRITE))
50534+ task->is_writable = 1;
50535+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50536+ if (unlikely(obj->mode & GR_WRITE))
50537+ task->is_writable = 1;
50538+
50539+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50540+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50541+#endif
50542+
50543+ gr_set_proc_res(task);
50544+
50545+ return;
50546+}
50547+
50548+int
50549+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50550+ const int unsafe_share)
50551+{
50552+ struct task_struct *task = current;
50553+ struct acl_subject_label *newacl;
50554+ struct acl_object_label *obj;
50555+ __u32 retmode;
50556+
50557+ if (unlikely(!(gr_status & GR_READY)))
50558+ return 0;
50559+
50560+ newacl = chk_subj_label(dentry, mnt, task->role);
50561+
50562+ task_lock(task);
50563+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
50564+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50565+ !(task->role->roletype & GR_ROLE_GOD) &&
50566+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50567+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
50568+ task_unlock(task);
50569+ if (unsafe_share)
50570+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50571+ else
50572+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50573+ return -EACCES;
50574+ }
50575+ task_unlock(task);
50576+
50577+ obj = chk_obj_label(dentry, mnt, task->acl);
50578+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50579+
50580+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50581+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50582+ if (obj->nested)
50583+ task->acl = obj->nested;
50584+ else
50585+ task->acl = newacl;
50586+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50587+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50588+
50589+ task->is_writable = 0;
50590+
50591+ /* ignore additional mmap checks for processes that are writable
50592+ by the default ACL */
50593+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50594+ if (unlikely(obj->mode & GR_WRITE))
50595+ task->is_writable = 1;
50596+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50597+ if (unlikely(obj->mode & GR_WRITE))
50598+ task->is_writable = 1;
50599+
50600+ gr_set_proc_res(task);
50601+
50602+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50603+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50604+#endif
50605+ return 0;
50606+}
50607+
50608+/* always called with valid inodev ptr */
50609+static void
50610+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50611+{
50612+ struct acl_object_label *matchpo;
50613+ struct acl_subject_label *matchps;
50614+ struct acl_subject_label *subj;
50615+ struct acl_role_label *role;
50616+ unsigned int x;
50617+
50618+ FOR_EACH_ROLE_START(role)
50619+ FOR_EACH_SUBJECT_START(role, subj, x)
50620+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50621+ matchpo->mode |= GR_DELETED;
50622+ FOR_EACH_SUBJECT_END(subj,x)
50623+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50624+ if (subj->inode == ino && subj->device == dev)
50625+ subj->mode |= GR_DELETED;
50626+ FOR_EACH_NESTED_SUBJECT_END(subj)
50627+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50628+ matchps->mode |= GR_DELETED;
50629+ FOR_EACH_ROLE_END(role)
50630+
50631+ inodev->nentry->deleted = 1;
50632+
50633+ return;
50634+}
50635+
50636+void
50637+gr_handle_delete(const ino_t ino, const dev_t dev)
50638+{
50639+ struct inodev_entry *inodev;
50640+
50641+ if (unlikely(!(gr_status & GR_READY)))
50642+ return;
50643+
50644+ write_lock(&gr_inode_lock);
50645+ inodev = lookup_inodev_entry(ino, dev);
50646+ if (inodev != NULL)
50647+ do_handle_delete(inodev, ino, dev);
50648+ write_unlock(&gr_inode_lock);
50649+
50650+ return;
50651+}
50652+
50653+static void
50654+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50655+ const ino_t newinode, const dev_t newdevice,
50656+ struct acl_subject_label *subj)
50657+{
50658+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50659+ struct acl_object_label *match;
50660+
50661+ match = subj->obj_hash[index];
50662+
50663+ while (match && (match->inode != oldinode ||
50664+ match->device != olddevice ||
50665+ !(match->mode & GR_DELETED)))
50666+ match = match->next;
50667+
50668+ if (match && (match->inode == oldinode)
50669+ && (match->device == olddevice)
50670+ && (match->mode & GR_DELETED)) {
50671+ if (match->prev == NULL) {
50672+ subj->obj_hash[index] = match->next;
50673+ if (match->next != NULL)
50674+ match->next->prev = NULL;
50675+ } else {
50676+ match->prev->next = match->next;
50677+ if (match->next != NULL)
50678+ match->next->prev = match->prev;
50679+ }
50680+ match->prev = NULL;
50681+ match->next = NULL;
50682+ match->inode = newinode;
50683+ match->device = newdevice;
50684+ match->mode &= ~GR_DELETED;
50685+
50686+ insert_acl_obj_label(match, subj);
50687+ }
50688+
50689+ return;
50690+}
50691+
50692+static void
50693+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50694+ const ino_t newinode, const dev_t newdevice,
50695+ struct acl_role_label *role)
50696+{
50697+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50698+ struct acl_subject_label *match;
50699+
50700+ match = role->subj_hash[index];
50701+
50702+ while (match && (match->inode != oldinode ||
50703+ match->device != olddevice ||
50704+ !(match->mode & GR_DELETED)))
50705+ match = match->next;
50706+
50707+ if (match && (match->inode == oldinode)
50708+ && (match->device == olddevice)
50709+ && (match->mode & GR_DELETED)) {
50710+ if (match->prev == NULL) {
50711+ role->subj_hash[index] = match->next;
50712+ if (match->next != NULL)
50713+ match->next->prev = NULL;
50714+ } else {
50715+ match->prev->next = match->next;
50716+ if (match->next != NULL)
50717+ match->next->prev = match->prev;
50718+ }
50719+ match->prev = NULL;
50720+ match->next = NULL;
50721+ match->inode = newinode;
50722+ match->device = newdevice;
50723+ match->mode &= ~GR_DELETED;
50724+
50725+ insert_acl_subj_label(match, role);
50726+ }
50727+
50728+ return;
50729+}
50730+
50731+static void
50732+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50733+ const ino_t newinode, const dev_t newdevice)
50734+{
50735+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50736+ struct inodev_entry *match;
50737+
50738+ match = inodev_set.i_hash[index];
50739+
50740+ while (match && (match->nentry->inode != oldinode ||
50741+ match->nentry->device != olddevice || !match->nentry->deleted))
50742+ match = match->next;
50743+
50744+ if (match && (match->nentry->inode == oldinode)
50745+ && (match->nentry->device == olddevice) &&
50746+ match->nentry->deleted) {
50747+ if (match->prev == NULL) {
50748+ inodev_set.i_hash[index] = match->next;
50749+ if (match->next != NULL)
50750+ match->next->prev = NULL;
50751+ } else {
50752+ match->prev->next = match->next;
50753+ if (match->next != NULL)
50754+ match->next->prev = match->prev;
50755+ }
50756+ match->prev = NULL;
50757+ match->next = NULL;
50758+ match->nentry->inode = newinode;
50759+ match->nentry->device = newdevice;
50760+ match->nentry->deleted = 0;
50761+
50762+ insert_inodev_entry(match);
50763+ }
50764+
50765+ return;
50766+}
50767+
50768+static void
50769+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50770+{
50771+ struct acl_subject_label *subj;
50772+ struct acl_role_label *role;
50773+ unsigned int x;
50774+
50775+ FOR_EACH_ROLE_START(role)
50776+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50777+
50778+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50779+ if ((subj->inode == ino) && (subj->device == dev)) {
50780+ subj->inode = ino;
50781+ subj->device = dev;
50782+ }
50783+ FOR_EACH_NESTED_SUBJECT_END(subj)
50784+ FOR_EACH_SUBJECT_START(role, subj, x)
50785+ update_acl_obj_label(matchn->inode, matchn->device,
50786+ ino, dev, subj);
50787+ FOR_EACH_SUBJECT_END(subj,x)
50788+ FOR_EACH_ROLE_END(role)
50789+
50790+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50791+
50792+ return;
50793+}
50794+
50795+static void
50796+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50797+ const struct vfsmount *mnt)
50798+{
50799+ ino_t ino = dentry->d_inode->i_ino;
50800+ dev_t dev = __get_dev(dentry);
50801+
50802+ __do_handle_create(matchn, ino, dev);
50803+
50804+ return;
50805+}
50806+
50807+void
50808+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50809+{
50810+ struct name_entry *matchn;
50811+
50812+ if (unlikely(!(gr_status & GR_READY)))
50813+ return;
50814+
50815+ preempt_disable();
50816+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50817+
50818+ if (unlikely((unsigned long)matchn)) {
50819+ write_lock(&gr_inode_lock);
50820+ do_handle_create(matchn, dentry, mnt);
50821+ write_unlock(&gr_inode_lock);
50822+ }
50823+ preempt_enable();
50824+
50825+ return;
50826+}
50827+
50828+void
50829+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50830+{
50831+ struct name_entry *matchn;
50832+
50833+ if (unlikely(!(gr_status & GR_READY)))
50834+ return;
50835+
50836+ preempt_disable();
50837+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50838+
50839+ if (unlikely((unsigned long)matchn)) {
50840+ write_lock(&gr_inode_lock);
50841+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50842+ write_unlock(&gr_inode_lock);
50843+ }
50844+ preempt_enable();
50845+
50846+ return;
50847+}
50848+
50849+void
50850+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50851+ struct dentry *old_dentry,
50852+ struct dentry *new_dentry,
50853+ struct vfsmount *mnt, const __u8 replace)
50854+{
50855+ struct name_entry *matchn;
50856+ struct inodev_entry *inodev;
50857+ ino_t old_ino = old_dentry->d_inode->i_ino;
50858+ dev_t old_dev = __get_dev(old_dentry);
50859+
50860+ /* vfs_rename swaps the name and parent link for old_dentry and
50861+ new_dentry
50862+ at this point, old_dentry has the new name, parent link, and inode
50863+ for the renamed file
50864+ if a file is being replaced by a rename, new_dentry has the inode
50865+ and name for the replaced file
50866+ */
50867+
50868+ if (unlikely(!(gr_status & GR_READY)))
50869+ return;
50870+
50871+ preempt_disable();
50872+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50873+
50874+ /* we wouldn't have to check d_inode if it weren't for
50875+ NFS silly-renaming
50876+ */
50877+
50878+ write_lock(&gr_inode_lock);
50879+ if (unlikely(replace && new_dentry->d_inode)) {
50880+ ino_t new_ino = new_dentry->d_inode->i_ino;
50881+ dev_t new_dev = __get_dev(new_dentry);
50882+
50883+ inodev = lookup_inodev_entry(new_ino, new_dev);
50884+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
50885+ do_handle_delete(inodev, new_ino, new_dev);
50886+ }
50887+
50888+ inodev = lookup_inodev_entry(old_ino, old_dev);
50889+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
50890+ do_handle_delete(inodev, old_ino, old_dev);
50891+
50892+ if (unlikely((unsigned long)matchn))
50893+ do_handle_create(matchn, old_dentry, mnt);
50894+
50895+ write_unlock(&gr_inode_lock);
50896+ preempt_enable();
50897+
50898+ return;
50899+}
50900+
50901+static int
50902+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50903+ unsigned char **sum)
50904+{
50905+ struct acl_role_label *r;
50906+ struct role_allowed_ip *ipp;
50907+ struct role_transition *trans;
50908+ unsigned int i;
50909+ int found = 0;
50910+ u32 curr_ip = current->signal->curr_ip;
50911+
50912+ current->signal->saved_ip = curr_ip;
50913+
50914+ /* check transition table */
50915+
50916+ for (trans = current->role->transitions; trans; trans = trans->next) {
50917+ if (!strcmp(rolename, trans->rolename)) {
50918+ found = 1;
50919+ break;
50920+ }
50921+ }
50922+
50923+ if (!found)
50924+ return 0;
50925+
50926+ /* handle special roles that do not require authentication
50927+ and check ip */
50928+
50929+ FOR_EACH_ROLE_START(r)
50930+ if (!strcmp(rolename, r->rolename) &&
50931+ (r->roletype & GR_ROLE_SPECIAL)) {
50932+ found = 0;
50933+ if (r->allowed_ips != NULL) {
50934+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50935+ if ((ntohl(curr_ip) & ipp->netmask) ==
50936+ (ntohl(ipp->addr) & ipp->netmask))
50937+ found = 1;
50938+ }
50939+ } else
50940+ found = 2;
50941+ if (!found)
50942+ return 0;
50943+
50944+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50945+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50946+ *salt = NULL;
50947+ *sum = NULL;
50948+ return 1;
50949+ }
50950+ }
50951+ FOR_EACH_ROLE_END(r)
50952+
50953+ for (i = 0; i < num_sprole_pws; i++) {
50954+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50955+ *salt = acl_special_roles[i]->salt;
50956+ *sum = acl_special_roles[i]->sum;
50957+ return 1;
50958+ }
50959+ }
50960+
50961+ return 0;
50962+}
50963+
50964+static void
50965+assign_special_role(char *rolename)
50966+{
50967+ struct acl_object_label *obj;
50968+ struct acl_role_label *r;
50969+ struct acl_role_label *assigned = NULL;
50970+ struct task_struct *tsk;
50971+ struct file *filp;
50972+
50973+ FOR_EACH_ROLE_START(r)
50974+ if (!strcmp(rolename, r->rolename) &&
50975+ (r->roletype & GR_ROLE_SPECIAL)) {
50976+ assigned = r;
50977+ break;
50978+ }
50979+ FOR_EACH_ROLE_END(r)
50980+
50981+ if (!assigned)
50982+ return;
50983+
50984+ read_lock(&tasklist_lock);
50985+ read_lock(&grsec_exec_file_lock);
50986+
50987+ tsk = current->real_parent;
50988+ if (tsk == NULL)
50989+ goto out_unlock;
50990+
50991+ filp = tsk->exec_file;
50992+ if (filp == NULL)
50993+ goto out_unlock;
50994+
50995+ tsk->is_writable = 0;
50996+
50997+ tsk->acl_sp_role = 1;
50998+ tsk->acl_role_id = ++acl_sp_role_value;
50999+ tsk->role = assigned;
51000+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51001+
51002+ /* ignore additional mmap checks for processes that are writable
51003+ by the default ACL */
51004+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51005+ if (unlikely(obj->mode & GR_WRITE))
51006+ tsk->is_writable = 1;
51007+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51008+ if (unlikely(obj->mode & GR_WRITE))
51009+ tsk->is_writable = 1;
51010+
51011+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51012+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51013+#endif
51014+
51015+out_unlock:
51016+ read_unlock(&grsec_exec_file_lock);
51017+ read_unlock(&tasklist_lock);
51018+ return;
51019+}
51020+
51021+int gr_check_secure_terminal(struct task_struct *task)
51022+{
51023+ struct task_struct *p, *p2, *p3;
51024+ struct files_struct *files;
51025+ struct fdtable *fdt;
51026+ struct file *our_file = NULL, *file;
51027+ int i;
51028+
51029+ if (task->signal->tty == NULL)
51030+ return 1;
51031+
51032+ files = get_files_struct(task);
51033+ if (files != NULL) {
51034+ rcu_read_lock();
51035+ fdt = files_fdtable(files);
51036+ for (i=0; i < fdt->max_fds; i++) {
51037+ file = fcheck_files(files, i);
51038+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51039+ get_file(file);
51040+ our_file = file;
51041+ }
51042+ }
51043+ rcu_read_unlock();
51044+ put_files_struct(files);
51045+ }
51046+
51047+ if (our_file == NULL)
51048+ return 1;
51049+
51050+ read_lock(&tasklist_lock);
51051+ do_each_thread(p2, p) {
51052+ files = get_files_struct(p);
51053+ if (files == NULL ||
51054+ (p->signal && p->signal->tty == task->signal->tty)) {
51055+ if (files != NULL)
51056+ put_files_struct(files);
51057+ continue;
51058+ }
51059+ rcu_read_lock();
51060+ fdt = files_fdtable(files);
51061+ for (i=0; i < fdt->max_fds; i++) {
51062+ file = fcheck_files(files, i);
51063+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51064+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51065+ p3 = task;
51066+ while (p3->pid > 0) {
51067+ if (p3 == p)
51068+ break;
51069+ p3 = p3->real_parent;
51070+ }
51071+ if (p3 == p)
51072+ break;
51073+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51074+ gr_handle_alertkill(p);
51075+ rcu_read_unlock();
51076+ put_files_struct(files);
51077+ read_unlock(&tasklist_lock);
51078+ fput(our_file);
51079+ return 0;
51080+ }
51081+ }
51082+ rcu_read_unlock();
51083+ put_files_struct(files);
51084+ } while_each_thread(p2, p);
51085+ read_unlock(&tasklist_lock);
51086+
51087+ fput(our_file);
51088+ return 1;
51089+}
51090+
51091+ssize_t
51092+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51093+{
51094+ struct gr_arg_wrapper uwrap;
51095+ unsigned char *sprole_salt = NULL;
51096+ unsigned char *sprole_sum = NULL;
51097+ int error = sizeof (struct gr_arg_wrapper);
51098+ int error2 = 0;
51099+
51100+ mutex_lock(&gr_dev_mutex);
51101+
51102+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51103+ error = -EPERM;
51104+ goto out;
51105+ }
51106+
51107+ if (count != sizeof (struct gr_arg_wrapper)) {
51108+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51109+ error = -EINVAL;
51110+ goto out;
51111+ }
51112+
51113+
51114+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51115+ gr_auth_expires = 0;
51116+ gr_auth_attempts = 0;
51117+ }
51118+
51119+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51120+ error = -EFAULT;
51121+ goto out;
51122+ }
51123+
51124+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51125+ error = -EINVAL;
51126+ goto out;
51127+ }
51128+
51129+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51130+ error = -EFAULT;
51131+ goto out;
51132+ }
51133+
51134+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51135+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51136+ time_after(gr_auth_expires, get_seconds())) {
51137+ error = -EBUSY;
51138+ goto out;
51139+ }
51140+
51141+ /* if non-root trying to do anything other than use a special role,
51142+ do not attempt authentication, do not count towards authentication
51143+ locking
51144+ */
51145+
51146+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51147+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51148+ current_uid()) {
51149+ error = -EPERM;
51150+ goto out;
51151+ }
51152+
51153+ /* ensure pw and special role name are null terminated */
51154+
51155+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51156+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51157+
51158+ /* Okay.
51159+ * We have our enough of the argument structure..(we have yet
51160+ * to copy_from_user the tables themselves) . Copy the tables
51161+ * only if we need them, i.e. for loading operations. */
51162+
51163+ switch (gr_usermode->mode) {
51164+ case GR_STATUS:
51165+ if (gr_status & GR_READY) {
51166+ error = 1;
51167+ if (!gr_check_secure_terminal(current))
51168+ error = 3;
51169+ } else
51170+ error = 2;
51171+ goto out;
51172+ case GR_SHUTDOWN:
51173+ if ((gr_status & GR_READY)
51174+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51175+ pax_open_kernel();
51176+ gr_status &= ~GR_READY;
51177+ pax_close_kernel();
51178+
51179+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51180+ free_variables();
51181+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51182+ memset(gr_system_salt, 0, GR_SALT_LEN);
51183+ memset(gr_system_sum, 0, GR_SHA_LEN);
51184+ } else if (gr_status & GR_READY) {
51185+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51186+ error = -EPERM;
51187+ } else {
51188+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51189+ error = -EAGAIN;
51190+ }
51191+ break;
51192+ case GR_ENABLE:
51193+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51194+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51195+ else {
51196+ if (gr_status & GR_READY)
51197+ error = -EAGAIN;
51198+ else
51199+ error = error2;
51200+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51201+ }
51202+ break;
51203+ case GR_RELOAD:
51204+ if (!(gr_status & GR_READY)) {
51205+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51206+ error = -EAGAIN;
51207+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51208+ preempt_disable();
51209+
51210+ pax_open_kernel();
51211+ gr_status &= ~GR_READY;
51212+ pax_close_kernel();
51213+
51214+ free_variables();
51215+ if (!(error2 = gracl_init(gr_usermode))) {
51216+ preempt_enable();
51217+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51218+ } else {
51219+ preempt_enable();
51220+ error = error2;
51221+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51222+ }
51223+ } else {
51224+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51225+ error = -EPERM;
51226+ }
51227+ break;
51228+ case GR_SEGVMOD:
51229+ if (unlikely(!(gr_status & GR_READY))) {
51230+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51231+ error = -EAGAIN;
51232+ break;
51233+ }
51234+
51235+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51236+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51237+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51238+ struct acl_subject_label *segvacl;
51239+ segvacl =
51240+ lookup_acl_subj_label(gr_usermode->segv_inode,
51241+ gr_usermode->segv_device,
51242+ current->role);
51243+ if (segvacl) {
51244+ segvacl->crashes = 0;
51245+ segvacl->expires = 0;
51246+ }
51247+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51248+ gr_remove_uid(gr_usermode->segv_uid);
51249+ }
51250+ } else {
51251+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51252+ error = -EPERM;
51253+ }
51254+ break;
51255+ case GR_SPROLE:
51256+ case GR_SPROLEPAM:
51257+ if (unlikely(!(gr_status & GR_READY))) {
51258+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51259+ error = -EAGAIN;
51260+ break;
51261+ }
51262+
51263+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51264+ current->role->expires = 0;
51265+ current->role->auth_attempts = 0;
51266+ }
51267+
51268+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51269+ time_after(current->role->expires, get_seconds())) {
51270+ error = -EBUSY;
51271+ goto out;
51272+ }
51273+
51274+ if (lookup_special_role_auth
51275+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51276+ && ((!sprole_salt && !sprole_sum)
51277+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51278+ char *p = "";
51279+ assign_special_role(gr_usermode->sp_role);
51280+ read_lock(&tasklist_lock);
51281+ if (current->real_parent)
51282+ p = current->real_parent->role->rolename;
51283+ read_unlock(&tasklist_lock);
51284+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51285+ p, acl_sp_role_value);
51286+ } else {
51287+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51288+ error = -EPERM;
51289+ if(!(current->role->auth_attempts++))
51290+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51291+
51292+ goto out;
51293+ }
51294+ break;
51295+ case GR_UNSPROLE:
51296+ if (unlikely(!(gr_status & GR_READY))) {
51297+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51298+ error = -EAGAIN;
51299+ break;
51300+ }
51301+
51302+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51303+ char *p = "";
51304+ int i = 0;
51305+
51306+ read_lock(&tasklist_lock);
51307+ if (current->real_parent) {
51308+ p = current->real_parent->role->rolename;
51309+ i = current->real_parent->acl_role_id;
51310+ }
51311+ read_unlock(&tasklist_lock);
51312+
51313+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51314+ gr_set_acls(1);
51315+ } else {
51316+ error = -EPERM;
51317+ goto out;
51318+ }
51319+ break;
51320+ default:
51321+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51322+ error = -EINVAL;
51323+ break;
51324+ }
51325+
51326+ if (error != -EPERM)
51327+ goto out;
51328+
51329+ if(!(gr_auth_attempts++))
51330+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51331+
51332+ out:
51333+ mutex_unlock(&gr_dev_mutex);
51334+ return error;
51335+}
51336+
51337+/* must be called with
51338+ rcu_read_lock();
51339+ read_lock(&tasklist_lock);
51340+ read_lock(&grsec_exec_file_lock);
51341+*/
51342+int gr_apply_subject_to_task(struct task_struct *task)
51343+{
51344+ struct acl_object_label *obj;
51345+ char *tmpname;
51346+ struct acl_subject_label *tmpsubj;
51347+ struct file *filp;
51348+ struct name_entry *nmatch;
51349+
51350+ filp = task->exec_file;
51351+ if (filp == NULL)
51352+ return 0;
51353+
51354+ /* the following is to apply the correct subject
51355+ on binaries running when the RBAC system
51356+ is enabled, when the binaries have been
51357+ replaced or deleted since their execution
51358+ -----
51359+ when the RBAC system starts, the inode/dev
51360+ from exec_file will be one the RBAC system
51361+ is unaware of. It only knows the inode/dev
51362+ of the present file on disk, or the absence
51363+ of it.
51364+ */
51365+ preempt_disable();
51366+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51367+
51368+ nmatch = lookup_name_entry(tmpname);
51369+ preempt_enable();
51370+ tmpsubj = NULL;
51371+ if (nmatch) {
51372+ if (nmatch->deleted)
51373+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51374+ else
51375+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51376+ if (tmpsubj != NULL)
51377+ task->acl = tmpsubj;
51378+ }
51379+ if (tmpsubj == NULL)
51380+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51381+ task->role);
51382+ if (task->acl) {
51383+ task->is_writable = 0;
51384+ /* ignore additional mmap checks for processes that are writable
51385+ by the default ACL */
51386+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51387+ if (unlikely(obj->mode & GR_WRITE))
51388+ task->is_writable = 1;
51389+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51390+ if (unlikely(obj->mode & GR_WRITE))
51391+ task->is_writable = 1;
51392+
51393+ gr_set_proc_res(task);
51394+
51395+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51396+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51397+#endif
51398+ } else {
51399+ return 1;
51400+ }
51401+
51402+ return 0;
51403+}
51404+
51405+int
51406+gr_set_acls(const int type)
51407+{
51408+ struct task_struct *task, *task2;
51409+ struct acl_role_label *role = current->role;
51410+ __u16 acl_role_id = current->acl_role_id;
51411+ const struct cred *cred;
51412+ int ret;
51413+
51414+ rcu_read_lock();
51415+ read_lock(&tasklist_lock);
51416+ read_lock(&grsec_exec_file_lock);
51417+ do_each_thread(task2, task) {
51418+ /* check to see if we're called from the exit handler,
51419+ if so, only replace ACLs that have inherited the admin
51420+ ACL */
51421+
51422+ if (type && (task->role != role ||
51423+ task->acl_role_id != acl_role_id))
51424+ continue;
51425+
51426+ task->acl_role_id = 0;
51427+ task->acl_sp_role = 0;
51428+
51429+ if (task->exec_file) {
51430+ cred = __task_cred(task);
51431+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51432+ ret = gr_apply_subject_to_task(task);
51433+ if (ret) {
51434+ read_unlock(&grsec_exec_file_lock);
51435+ read_unlock(&tasklist_lock);
51436+ rcu_read_unlock();
51437+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51438+ return ret;
51439+ }
51440+ } else {
51441+ // it's a kernel process
51442+ task->role = kernel_role;
51443+ task->acl = kernel_role->root_label;
51444+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51445+ task->acl->mode &= ~GR_PROCFIND;
51446+#endif
51447+ }
51448+ } while_each_thread(task2, task);
51449+ read_unlock(&grsec_exec_file_lock);
51450+ read_unlock(&tasklist_lock);
51451+ rcu_read_unlock();
51452+
51453+ return 0;
51454+}
51455+
51456+void
51457+gr_learn_resource(const struct task_struct *task,
51458+ const int res, const unsigned long wanted, const int gt)
51459+{
51460+ struct acl_subject_label *acl;
51461+ const struct cred *cred;
51462+
51463+ if (unlikely((gr_status & GR_READY) &&
51464+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51465+ goto skip_reslog;
51466+
51467+#ifdef CONFIG_GRKERNSEC_RESLOG
51468+ gr_log_resource(task, res, wanted, gt);
51469+#endif
51470+ skip_reslog:
51471+
51472+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51473+ return;
51474+
51475+ acl = task->acl;
51476+
51477+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51478+ !(acl->resmask & (1 << (unsigned short) res))))
51479+ return;
51480+
51481+ if (wanted >= acl->res[res].rlim_cur) {
51482+ unsigned long res_add;
51483+
51484+ res_add = wanted;
51485+ switch (res) {
51486+ case RLIMIT_CPU:
51487+ res_add += GR_RLIM_CPU_BUMP;
51488+ break;
51489+ case RLIMIT_FSIZE:
51490+ res_add += GR_RLIM_FSIZE_BUMP;
51491+ break;
51492+ case RLIMIT_DATA:
51493+ res_add += GR_RLIM_DATA_BUMP;
51494+ break;
51495+ case RLIMIT_STACK:
51496+ res_add += GR_RLIM_STACK_BUMP;
51497+ break;
51498+ case RLIMIT_CORE:
51499+ res_add += GR_RLIM_CORE_BUMP;
51500+ break;
51501+ case RLIMIT_RSS:
51502+ res_add += GR_RLIM_RSS_BUMP;
51503+ break;
51504+ case RLIMIT_NPROC:
51505+ res_add += GR_RLIM_NPROC_BUMP;
51506+ break;
51507+ case RLIMIT_NOFILE:
51508+ res_add += GR_RLIM_NOFILE_BUMP;
51509+ break;
51510+ case RLIMIT_MEMLOCK:
51511+ res_add += GR_RLIM_MEMLOCK_BUMP;
51512+ break;
51513+ case RLIMIT_AS:
51514+ res_add += GR_RLIM_AS_BUMP;
51515+ break;
51516+ case RLIMIT_LOCKS:
51517+ res_add += GR_RLIM_LOCKS_BUMP;
51518+ break;
51519+ case RLIMIT_SIGPENDING:
51520+ res_add += GR_RLIM_SIGPENDING_BUMP;
51521+ break;
51522+ case RLIMIT_MSGQUEUE:
51523+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51524+ break;
51525+ case RLIMIT_NICE:
51526+ res_add += GR_RLIM_NICE_BUMP;
51527+ break;
51528+ case RLIMIT_RTPRIO:
51529+ res_add += GR_RLIM_RTPRIO_BUMP;
51530+ break;
51531+ case RLIMIT_RTTIME:
51532+ res_add += GR_RLIM_RTTIME_BUMP;
51533+ break;
51534+ }
51535+
51536+ acl->res[res].rlim_cur = res_add;
51537+
51538+ if (wanted > acl->res[res].rlim_max)
51539+ acl->res[res].rlim_max = res_add;
51540+
51541+ /* only log the subject filename, since resource logging is supported for
51542+ single-subject learning only */
51543+ rcu_read_lock();
51544+ cred = __task_cred(task);
51545+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51546+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51547+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51548+ "", (unsigned long) res, &task->signal->saved_ip);
51549+ rcu_read_unlock();
51550+ }
51551+
51552+ return;
51553+}
51554+
51555+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51556+void
51557+pax_set_initial_flags(struct linux_binprm *bprm)
51558+{
51559+ struct task_struct *task = current;
51560+ struct acl_subject_label *proc;
51561+ unsigned long flags;
51562+
51563+ if (unlikely(!(gr_status & GR_READY)))
51564+ return;
51565+
51566+ flags = pax_get_flags(task);
51567+
51568+ proc = task->acl;
51569+
51570+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51571+ flags &= ~MF_PAX_PAGEEXEC;
51572+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51573+ flags &= ~MF_PAX_SEGMEXEC;
51574+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51575+ flags &= ~MF_PAX_RANDMMAP;
51576+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51577+ flags &= ~MF_PAX_EMUTRAMP;
51578+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51579+ flags &= ~MF_PAX_MPROTECT;
51580+
51581+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51582+ flags |= MF_PAX_PAGEEXEC;
51583+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51584+ flags |= MF_PAX_SEGMEXEC;
51585+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51586+ flags |= MF_PAX_RANDMMAP;
51587+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51588+ flags |= MF_PAX_EMUTRAMP;
51589+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51590+ flags |= MF_PAX_MPROTECT;
51591+
51592+ pax_set_flags(task, flags);
51593+
51594+ return;
51595+}
51596+#endif
51597+
51598+#ifdef CONFIG_SYSCTL
51599+/* Eric Biederman likes breaking userland ABI and every inode-based security
51600+ system to save 35kb of memory */
51601+
51602+/* we modify the passed in filename, but adjust it back before returning */
51603+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51604+{
51605+ struct name_entry *nmatch;
51606+ char *p, *lastp = NULL;
51607+ struct acl_object_label *obj = NULL, *tmp;
51608+ struct acl_subject_label *tmpsubj;
51609+ char c = '\0';
51610+
51611+ read_lock(&gr_inode_lock);
51612+
51613+ p = name + len - 1;
51614+ do {
51615+ nmatch = lookup_name_entry(name);
51616+ if (lastp != NULL)
51617+ *lastp = c;
51618+
51619+ if (nmatch == NULL)
51620+ goto next_component;
51621+ tmpsubj = current->acl;
51622+ do {
51623+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51624+ if (obj != NULL) {
51625+ tmp = obj->globbed;
51626+ while (tmp) {
51627+ if (!glob_match(tmp->filename, name)) {
51628+ obj = tmp;
51629+ goto found_obj;
51630+ }
51631+ tmp = tmp->next;
51632+ }
51633+ goto found_obj;
51634+ }
51635+ } while ((tmpsubj = tmpsubj->parent_subject));
51636+next_component:
51637+ /* end case */
51638+ if (p == name)
51639+ break;
51640+
51641+ while (*p != '/')
51642+ p--;
51643+ if (p == name)
51644+ lastp = p + 1;
51645+ else {
51646+ lastp = p;
51647+ p--;
51648+ }
51649+ c = *lastp;
51650+ *lastp = '\0';
51651+ } while (1);
51652+found_obj:
51653+ read_unlock(&gr_inode_lock);
51654+ /* obj returned will always be non-null */
51655+ return obj;
51656+}
51657+
51658+/* returns 0 when allowing, non-zero on error
51659+ op of 0 is used for readdir, so we don't log the names of hidden files
51660+*/
51661+__u32
51662+gr_handle_sysctl(const struct ctl_table *table, const int op)
51663+{
51664+ struct ctl_table *tmp;
51665+ const char *proc_sys = "/proc/sys";
51666+ char *path;
51667+ struct acl_object_label *obj;
51668+ unsigned short len = 0, pos = 0, depth = 0, i;
51669+ __u32 err = 0;
51670+ __u32 mode = 0;
51671+
51672+ if (unlikely(!(gr_status & GR_READY)))
51673+ return 0;
51674+
51675+ /* for now, ignore operations on non-sysctl entries if it's not a
51676+ readdir*/
51677+ if (table->child != NULL && op != 0)
51678+ return 0;
51679+
51680+ mode |= GR_FIND;
51681+ /* it's only a read if it's an entry, read on dirs is for readdir */
51682+ if (op & MAY_READ)
51683+ mode |= GR_READ;
51684+ if (op & MAY_WRITE)
51685+ mode |= GR_WRITE;
51686+
51687+ preempt_disable();
51688+
51689+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51690+
51691+ /* it's only a read/write if it's an actual entry, not a dir
51692+ (which are opened for readdir)
51693+ */
51694+
51695+ /* convert the requested sysctl entry into a pathname */
51696+
51697+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51698+ len += strlen(tmp->procname);
51699+ len++;
51700+ depth++;
51701+ }
51702+
51703+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51704+ /* deny */
51705+ goto out;
51706+ }
51707+
51708+ memset(path, 0, PAGE_SIZE);
51709+
51710+ memcpy(path, proc_sys, strlen(proc_sys));
51711+
51712+ pos += strlen(proc_sys);
51713+
51714+ for (; depth > 0; depth--) {
51715+ path[pos] = '/';
51716+ pos++;
51717+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51718+ if (depth == i) {
51719+ memcpy(path + pos, tmp->procname,
51720+ strlen(tmp->procname));
51721+ pos += strlen(tmp->procname);
51722+ }
51723+ i++;
51724+ }
51725+ }
51726+
51727+ obj = gr_lookup_by_name(path, pos);
51728+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51729+
51730+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51731+ ((err & mode) != mode))) {
51732+ __u32 new_mode = mode;
51733+
51734+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51735+
51736+ err = 0;
51737+ gr_log_learn_sysctl(path, new_mode);
51738+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51739+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51740+ err = -ENOENT;
51741+ } else if (!(err & GR_FIND)) {
51742+ err = -ENOENT;
51743+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51744+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51745+ path, (mode & GR_READ) ? " reading" : "",
51746+ (mode & GR_WRITE) ? " writing" : "");
51747+ err = -EACCES;
51748+ } else if ((err & mode) != mode) {
51749+ err = -EACCES;
51750+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51751+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51752+ path, (mode & GR_READ) ? " reading" : "",
51753+ (mode & GR_WRITE) ? " writing" : "");
51754+ err = 0;
51755+ } else
51756+ err = 0;
51757+
51758+ out:
51759+ preempt_enable();
51760+
51761+ return err;
51762+}
51763+#endif
51764+
51765+int
51766+gr_handle_proc_ptrace(struct task_struct *task)
51767+{
51768+ struct file *filp;
51769+ struct task_struct *tmp = task;
51770+ struct task_struct *curtemp = current;
51771+ __u32 retmode;
51772+
51773+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51774+ if (unlikely(!(gr_status & GR_READY)))
51775+ return 0;
51776+#endif
51777+
51778+ read_lock(&tasklist_lock);
51779+ read_lock(&grsec_exec_file_lock);
51780+ filp = task->exec_file;
51781+
51782+ while (tmp->pid > 0) {
51783+ if (tmp == curtemp)
51784+ break;
51785+ tmp = tmp->real_parent;
51786+ }
51787+
51788+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51789+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51790+ read_unlock(&grsec_exec_file_lock);
51791+ read_unlock(&tasklist_lock);
51792+ return 1;
51793+ }
51794+
51795+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51796+ if (!(gr_status & GR_READY)) {
51797+ read_unlock(&grsec_exec_file_lock);
51798+ read_unlock(&tasklist_lock);
51799+ return 0;
51800+ }
51801+#endif
51802+
51803+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51804+ read_unlock(&grsec_exec_file_lock);
51805+ read_unlock(&tasklist_lock);
51806+
51807+ if (retmode & GR_NOPTRACE)
51808+ return 1;
51809+
51810+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51811+ && (current->acl != task->acl || (current->acl != current->role->root_label
51812+ && current->pid != task->pid)))
51813+ return 1;
51814+
51815+ return 0;
51816+}
51817+
51818+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51819+{
51820+ if (unlikely(!(gr_status & GR_READY)))
51821+ return;
51822+
51823+ if (!(current->role->roletype & GR_ROLE_GOD))
51824+ return;
51825+
51826+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51827+ p->role->rolename, gr_task_roletype_to_char(p),
51828+ p->acl->filename);
51829+}
51830+
51831+int
51832+gr_handle_ptrace(struct task_struct *task, const long request)
51833+{
51834+ struct task_struct *tmp = task;
51835+ struct task_struct *curtemp = current;
51836+ __u32 retmode;
51837+
51838+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51839+ if (unlikely(!(gr_status & GR_READY)))
51840+ return 0;
51841+#endif
51842+
51843+ read_lock(&tasklist_lock);
51844+ while (tmp->pid > 0) {
51845+ if (tmp == curtemp)
51846+ break;
51847+ tmp = tmp->real_parent;
51848+ }
51849+
51850+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51851+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51852+ read_unlock(&tasklist_lock);
51853+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51854+ return 1;
51855+ }
51856+ read_unlock(&tasklist_lock);
51857+
51858+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51859+ if (!(gr_status & GR_READY))
51860+ return 0;
51861+#endif
51862+
51863+ read_lock(&grsec_exec_file_lock);
51864+ if (unlikely(!task->exec_file)) {
51865+ read_unlock(&grsec_exec_file_lock);
51866+ return 0;
51867+ }
51868+
51869+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51870+ read_unlock(&grsec_exec_file_lock);
51871+
51872+ if (retmode & GR_NOPTRACE) {
51873+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51874+ return 1;
51875+ }
51876+
51877+ if (retmode & GR_PTRACERD) {
51878+ switch (request) {
51879+ case PTRACE_POKETEXT:
51880+ case PTRACE_POKEDATA:
51881+ case PTRACE_POKEUSR:
51882+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51883+ case PTRACE_SETREGS:
51884+ case PTRACE_SETFPREGS:
51885+#endif
51886+#ifdef CONFIG_X86
51887+ case PTRACE_SETFPXREGS:
51888+#endif
51889+#ifdef CONFIG_ALTIVEC
51890+ case PTRACE_SETVRREGS:
51891+#endif
51892+ return 1;
51893+ default:
51894+ return 0;
51895+ }
51896+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51897+ !(current->role->roletype & GR_ROLE_GOD) &&
51898+ (current->acl != task->acl)) {
51899+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51900+ return 1;
51901+ }
51902+
51903+ return 0;
51904+}
51905+
51906+static int is_writable_mmap(const struct file *filp)
51907+{
51908+ struct task_struct *task = current;
51909+ struct acl_object_label *obj, *obj2;
51910+
51911+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51912+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51913+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51914+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51915+ task->role->root_label);
51916+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51917+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51918+ return 1;
51919+ }
51920+ }
51921+ return 0;
51922+}
51923+
51924+int
51925+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51926+{
51927+ __u32 mode;
51928+
51929+ if (unlikely(!file || !(prot & PROT_EXEC)))
51930+ return 1;
51931+
51932+ if (is_writable_mmap(file))
51933+ return 0;
51934+
51935+ mode =
51936+ gr_search_file(file->f_path.dentry,
51937+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51938+ file->f_path.mnt);
51939+
51940+ if (!gr_tpe_allow(file))
51941+ return 0;
51942+
51943+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51944+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51945+ return 0;
51946+ } else if (unlikely(!(mode & GR_EXEC))) {
51947+ return 0;
51948+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51949+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51950+ return 1;
51951+ }
51952+
51953+ return 1;
51954+}
51955+
51956+int
51957+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51958+{
51959+ __u32 mode;
51960+
51961+ if (unlikely(!file || !(prot & PROT_EXEC)))
51962+ return 1;
51963+
51964+ if (is_writable_mmap(file))
51965+ return 0;
51966+
51967+ mode =
51968+ gr_search_file(file->f_path.dentry,
51969+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51970+ file->f_path.mnt);
51971+
51972+ if (!gr_tpe_allow(file))
51973+ return 0;
51974+
51975+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51976+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51977+ return 0;
51978+ } else if (unlikely(!(mode & GR_EXEC))) {
51979+ return 0;
51980+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51981+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51982+ return 1;
51983+ }
51984+
51985+ return 1;
51986+}
51987+
51988+void
51989+gr_acl_handle_psacct(struct task_struct *task, const long code)
51990+{
51991+ unsigned long runtime;
51992+ unsigned long cputime;
51993+ unsigned int wday, cday;
51994+ __u8 whr, chr;
51995+ __u8 wmin, cmin;
51996+ __u8 wsec, csec;
51997+ struct timespec timeval;
51998+
51999+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52000+ !(task->acl->mode & GR_PROCACCT)))
52001+ return;
52002+
52003+ do_posix_clock_monotonic_gettime(&timeval);
52004+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52005+ wday = runtime / (3600 * 24);
52006+ runtime -= wday * (3600 * 24);
52007+ whr = runtime / 3600;
52008+ runtime -= whr * 3600;
52009+ wmin = runtime / 60;
52010+ runtime -= wmin * 60;
52011+ wsec = runtime;
52012+
52013+ cputime = (task->utime + task->stime) / HZ;
52014+ cday = cputime / (3600 * 24);
52015+ cputime -= cday * (3600 * 24);
52016+ chr = cputime / 3600;
52017+ cputime -= chr * 3600;
52018+ cmin = cputime / 60;
52019+ cputime -= cmin * 60;
52020+ csec = cputime;
52021+
52022+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52023+
52024+ return;
52025+}
52026+
52027+void gr_set_kernel_label(struct task_struct *task)
52028+{
52029+ if (gr_status & GR_READY) {
52030+ task->role = kernel_role;
52031+ task->acl = kernel_role->root_label;
52032+ }
52033+ return;
52034+}
52035+
52036+#ifdef CONFIG_TASKSTATS
52037+int gr_is_taskstats_denied(int pid)
52038+{
52039+ struct task_struct *task;
52040+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52041+ const struct cred *cred;
52042+#endif
52043+ int ret = 0;
52044+
52045+ /* restrict taskstats viewing to un-chrooted root users
52046+ who have the 'view' subject flag if the RBAC system is enabled
52047+ */
52048+
52049+ rcu_read_lock();
52050+ read_lock(&tasklist_lock);
52051+ task = find_task_by_vpid(pid);
52052+ if (task) {
52053+#ifdef CONFIG_GRKERNSEC_CHROOT
52054+ if (proc_is_chrooted(task))
52055+ ret = -EACCES;
52056+#endif
52057+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52058+ cred = __task_cred(task);
52059+#ifdef CONFIG_GRKERNSEC_PROC_USER
52060+ if (cred->uid != 0)
52061+ ret = -EACCES;
52062+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52063+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52064+ ret = -EACCES;
52065+#endif
52066+#endif
52067+ if (gr_status & GR_READY) {
52068+ if (!(task->acl->mode & GR_VIEW))
52069+ ret = -EACCES;
52070+ }
52071+ } else
52072+ ret = -ENOENT;
52073+
52074+ read_unlock(&tasklist_lock);
52075+ rcu_read_unlock();
52076+
52077+ return ret;
52078+}
52079+#endif
52080+
52081+/* AUXV entries are filled via a descendant of search_binary_handler
52082+ after we've already applied the subject for the target
52083+*/
52084+int gr_acl_enable_at_secure(void)
52085+{
52086+ if (unlikely(!(gr_status & GR_READY)))
52087+ return 0;
52088+
52089+ if (current->acl->mode & GR_ATSECURE)
52090+ return 1;
52091+
52092+ return 0;
52093+}
52094+
52095+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52096+{
52097+ struct task_struct *task = current;
52098+ struct dentry *dentry = file->f_path.dentry;
52099+ struct vfsmount *mnt = file->f_path.mnt;
52100+ struct acl_object_label *obj, *tmp;
52101+ struct acl_subject_label *subj;
52102+ unsigned int bufsize;
52103+ int is_not_root;
52104+ char *path;
52105+ dev_t dev = __get_dev(dentry);
52106+
52107+ if (unlikely(!(gr_status & GR_READY)))
52108+ return 1;
52109+
52110+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52111+ return 1;
52112+
52113+ /* ignore Eric Biederman */
52114+ if (IS_PRIVATE(dentry->d_inode))
52115+ return 1;
52116+
52117+ subj = task->acl;
52118+ do {
52119+ obj = lookup_acl_obj_label(ino, dev, subj);
52120+ if (obj != NULL)
52121+ return (obj->mode & GR_FIND) ? 1 : 0;
52122+ } while ((subj = subj->parent_subject));
52123+
52124+ /* this is purely an optimization since we're looking for an object
52125+ for the directory we're doing a readdir on
52126+ if it's possible for any globbed object to match the entry we're
52127+ filling into the directory, then the object we find here will be
52128+ an anchor point with attached globbed objects
52129+ */
52130+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52131+ if (obj->globbed == NULL)
52132+ return (obj->mode & GR_FIND) ? 1 : 0;
52133+
52134+ is_not_root = ((obj->filename[0] == '/') &&
52135+ (obj->filename[1] == '\0')) ? 0 : 1;
52136+ bufsize = PAGE_SIZE - namelen - is_not_root;
52137+
52138+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52139+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52140+ return 1;
52141+
52142+ preempt_disable();
52143+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52144+ bufsize);
52145+
52146+ bufsize = strlen(path);
52147+
52148+ /* if base is "/", don't append an additional slash */
52149+ if (is_not_root)
52150+ *(path + bufsize) = '/';
52151+ memcpy(path + bufsize + is_not_root, name, namelen);
52152+ *(path + bufsize + namelen + is_not_root) = '\0';
52153+
52154+ tmp = obj->globbed;
52155+ while (tmp) {
52156+ if (!glob_match(tmp->filename, path)) {
52157+ preempt_enable();
52158+ return (tmp->mode & GR_FIND) ? 1 : 0;
52159+ }
52160+ tmp = tmp->next;
52161+ }
52162+ preempt_enable();
52163+ return (obj->mode & GR_FIND) ? 1 : 0;
52164+}
52165+
52166+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52167+EXPORT_SYMBOL(gr_acl_is_enabled);
52168+#endif
52169+EXPORT_SYMBOL(gr_learn_resource);
52170+EXPORT_SYMBOL(gr_set_kernel_label);
52171+#ifdef CONFIG_SECURITY
52172+EXPORT_SYMBOL(gr_check_user_change);
52173+EXPORT_SYMBOL(gr_check_group_change);
52174+#endif
52175+
52176diff -urNp linux-3.0.7/grsecurity/gracl_alloc.c linux-3.0.7/grsecurity/gracl_alloc.c
52177--- linux-3.0.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
52178+++ linux-3.0.7/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
52179@@ -0,0 +1,105 @@
52180+#include <linux/kernel.h>
52181+#include <linux/mm.h>
52182+#include <linux/slab.h>
52183+#include <linux/vmalloc.h>
52184+#include <linux/gracl.h>
52185+#include <linux/grsecurity.h>
52186+
52187+static unsigned long alloc_stack_next = 1;
52188+static unsigned long alloc_stack_size = 1;
52189+static void **alloc_stack;
52190+
52191+static __inline__ int
52192+alloc_pop(void)
52193+{
52194+ if (alloc_stack_next == 1)
52195+ return 0;
52196+
52197+ kfree(alloc_stack[alloc_stack_next - 2]);
52198+
52199+ alloc_stack_next--;
52200+
52201+ return 1;
52202+}
52203+
52204+static __inline__ int
52205+alloc_push(void *buf)
52206+{
52207+ if (alloc_stack_next >= alloc_stack_size)
52208+ return 1;
52209+
52210+ alloc_stack[alloc_stack_next - 1] = buf;
52211+
52212+ alloc_stack_next++;
52213+
52214+ return 0;
52215+}
52216+
52217+void *
52218+acl_alloc(unsigned long len)
52219+{
52220+ void *ret = NULL;
52221+
52222+ if (!len || len > PAGE_SIZE)
52223+ goto out;
52224+
52225+ ret = kmalloc(len, GFP_KERNEL);
52226+
52227+ if (ret) {
52228+ if (alloc_push(ret)) {
52229+ kfree(ret);
52230+ ret = NULL;
52231+ }
52232+ }
52233+
52234+out:
52235+ return ret;
52236+}
52237+
52238+void *
52239+acl_alloc_num(unsigned long num, unsigned long len)
52240+{
52241+ if (!len || (num > (PAGE_SIZE / len)))
52242+ return NULL;
52243+
52244+ return acl_alloc(num * len);
52245+}
52246+
52247+void
52248+acl_free_all(void)
52249+{
52250+ if (gr_acl_is_enabled() || !alloc_stack)
52251+ return;
52252+
52253+ while (alloc_pop()) ;
52254+
52255+ if (alloc_stack) {
52256+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52257+ kfree(alloc_stack);
52258+ else
52259+ vfree(alloc_stack);
52260+ }
52261+
52262+ alloc_stack = NULL;
52263+ alloc_stack_size = 1;
52264+ alloc_stack_next = 1;
52265+
52266+ return;
52267+}
52268+
52269+int
52270+acl_alloc_stack_init(unsigned long size)
52271+{
52272+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52273+ alloc_stack =
52274+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52275+ else
52276+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52277+
52278+ alloc_stack_size = size;
52279+
52280+ if (!alloc_stack)
52281+ return 0;
52282+ else
52283+ return 1;
52284+}
52285diff -urNp linux-3.0.7/grsecurity/gracl_cap.c linux-3.0.7/grsecurity/gracl_cap.c
52286--- linux-3.0.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
52287+++ linux-3.0.7/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
52288@@ -0,0 +1,101 @@
52289+#include <linux/kernel.h>
52290+#include <linux/module.h>
52291+#include <linux/sched.h>
52292+#include <linux/gracl.h>
52293+#include <linux/grsecurity.h>
52294+#include <linux/grinternal.h>
52295+
52296+extern const char *captab_log[];
52297+extern int captab_log_entries;
52298+
52299+int
52300+gr_acl_is_capable(const int cap)
52301+{
52302+ struct task_struct *task = current;
52303+ const struct cred *cred = current_cred();
52304+ struct acl_subject_label *curracl;
52305+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52306+ kernel_cap_t cap_audit = __cap_empty_set;
52307+
52308+ if (!gr_acl_is_enabled())
52309+ return 1;
52310+
52311+ curracl = task->acl;
52312+
52313+ cap_drop = curracl->cap_lower;
52314+ cap_mask = curracl->cap_mask;
52315+ cap_audit = curracl->cap_invert_audit;
52316+
52317+ while ((curracl = curracl->parent_subject)) {
52318+ /* if the cap isn't specified in the current computed mask but is specified in the
52319+ current level subject, and is lowered in the current level subject, then add
52320+ it to the set of dropped capabilities
52321+ otherwise, add the current level subject's mask to the current computed mask
52322+ */
52323+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52324+ cap_raise(cap_mask, cap);
52325+ if (cap_raised(curracl->cap_lower, cap))
52326+ cap_raise(cap_drop, cap);
52327+ if (cap_raised(curracl->cap_invert_audit, cap))
52328+ cap_raise(cap_audit, cap);
52329+ }
52330+ }
52331+
52332+ if (!cap_raised(cap_drop, cap)) {
52333+ if (cap_raised(cap_audit, cap))
52334+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52335+ return 1;
52336+ }
52337+
52338+ curracl = task->acl;
52339+
52340+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52341+ && cap_raised(cred->cap_effective, cap)) {
52342+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52343+ task->role->roletype, cred->uid,
52344+ cred->gid, task->exec_file ?
52345+ gr_to_filename(task->exec_file->f_path.dentry,
52346+ task->exec_file->f_path.mnt) : curracl->filename,
52347+ curracl->filename, 0UL,
52348+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52349+ return 1;
52350+ }
52351+
52352+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52353+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52354+ return 0;
52355+}
52356+
52357+int
52358+gr_acl_is_capable_nolog(const int cap)
52359+{
52360+ struct acl_subject_label *curracl;
52361+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52362+
52363+ if (!gr_acl_is_enabled())
52364+ return 1;
52365+
52366+ curracl = current->acl;
52367+
52368+ cap_drop = curracl->cap_lower;
52369+ cap_mask = curracl->cap_mask;
52370+
52371+ while ((curracl = curracl->parent_subject)) {
52372+ /* if the cap isn't specified in the current computed mask but is specified in the
52373+ current level subject, and is lowered in the current level subject, then add
52374+ it to the set of dropped capabilities
52375+ otherwise, add the current level subject's mask to the current computed mask
52376+ */
52377+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52378+ cap_raise(cap_mask, cap);
52379+ if (cap_raised(curracl->cap_lower, cap))
52380+ cap_raise(cap_drop, cap);
52381+ }
52382+ }
52383+
52384+ if (!cap_raised(cap_drop, cap))
52385+ return 1;
52386+
52387+ return 0;
52388+}
52389+
52390diff -urNp linux-3.0.7/grsecurity/gracl_fs.c linux-3.0.7/grsecurity/gracl_fs.c
52391--- linux-3.0.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
52392+++ linux-3.0.7/grsecurity/gracl_fs.c 2011-10-17 01:22:26.000000000 -0400
52393@@ -0,0 +1,431 @@
52394+#include <linux/kernel.h>
52395+#include <linux/sched.h>
52396+#include <linux/types.h>
52397+#include <linux/fs.h>
52398+#include <linux/file.h>
52399+#include <linux/stat.h>
52400+#include <linux/grsecurity.h>
52401+#include <linux/grinternal.h>
52402+#include <linux/gracl.h>
52403+
52404+__u32
52405+gr_acl_handle_hidden_file(const struct dentry * dentry,
52406+ const struct vfsmount * mnt)
52407+{
52408+ __u32 mode;
52409+
52410+ if (unlikely(!dentry->d_inode))
52411+ return GR_FIND;
52412+
52413+ mode =
52414+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52415+
52416+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52417+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52418+ return mode;
52419+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52420+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52421+ return 0;
52422+ } else if (unlikely(!(mode & GR_FIND)))
52423+ return 0;
52424+
52425+ return GR_FIND;
52426+}
52427+
52428+__u32
52429+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52430+ const int fmode)
52431+{
52432+ __u32 reqmode = GR_FIND;
52433+ __u32 mode;
52434+
52435+ if (unlikely(!dentry->d_inode))
52436+ return reqmode;
52437+
52438+ if (unlikely(fmode & O_APPEND))
52439+ reqmode |= GR_APPEND;
52440+ else if (unlikely(fmode & FMODE_WRITE))
52441+ reqmode |= GR_WRITE;
52442+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
52443+ reqmode |= GR_READ;
52444+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
52445+ reqmode &= ~GR_READ;
52446+ mode =
52447+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52448+ mnt);
52449+
52450+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52451+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52452+ reqmode & GR_READ ? " reading" : "",
52453+ reqmode & GR_WRITE ? " writing" : reqmode &
52454+ GR_APPEND ? " appending" : "");
52455+ return reqmode;
52456+ } else
52457+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52458+ {
52459+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52460+ reqmode & GR_READ ? " reading" : "",
52461+ reqmode & GR_WRITE ? " writing" : reqmode &
52462+ GR_APPEND ? " appending" : "");
52463+ return 0;
52464+ } else if (unlikely((mode & reqmode) != reqmode))
52465+ return 0;
52466+
52467+ return reqmode;
52468+}
52469+
52470+__u32
52471+gr_acl_handle_creat(const struct dentry * dentry,
52472+ const struct dentry * p_dentry,
52473+ const struct vfsmount * p_mnt, const int fmode,
52474+ const int imode)
52475+{
52476+ __u32 reqmode = GR_WRITE | GR_CREATE;
52477+ __u32 mode;
52478+
52479+ if (unlikely(fmode & O_APPEND))
52480+ reqmode |= GR_APPEND;
52481+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
52482+ reqmode |= GR_READ;
52483+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
52484+ reqmode |= GR_SETID;
52485+
52486+ mode =
52487+ gr_check_create(dentry, p_dentry, p_mnt,
52488+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52489+
52490+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52491+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52492+ reqmode & GR_READ ? " reading" : "",
52493+ reqmode & GR_WRITE ? " writing" : reqmode &
52494+ GR_APPEND ? " appending" : "");
52495+ return reqmode;
52496+ } else
52497+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52498+ {
52499+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52500+ reqmode & GR_READ ? " reading" : "",
52501+ reqmode & GR_WRITE ? " writing" : reqmode &
52502+ GR_APPEND ? " appending" : "");
52503+ return 0;
52504+ } else if (unlikely((mode & reqmode) != reqmode))
52505+ return 0;
52506+
52507+ return reqmode;
52508+}
52509+
52510+__u32
52511+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52512+ const int fmode)
52513+{
52514+ __u32 mode, reqmode = GR_FIND;
52515+
52516+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52517+ reqmode |= GR_EXEC;
52518+ if (fmode & S_IWOTH)
52519+ reqmode |= GR_WRITE;
52520+ if (fmode & S_IROTH)
52521+ reqmode |= GR_READ;
52522+
52523+ mode =
52524+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52525+ mnt);
52526+
52527+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52528+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52529+ reqmode & GR_READ ? " reading" : "",
52530+ reqmode & GR_WRITE ? " writing" : "",
52531+ reqmode & GR_EXEC ? " executing" : "");
52532+ return reqmode;
52533+ } else
52534+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52535+ {
52536+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52537+ reqmode & GR_READ ? " reading" : "",
52538+ reqmode & GR_WRITE ? " writing" : "",
52539+ reqmode & GR_EXEC ? " executing" : "");
52540+ return 0;
52541+ } else if (unlikely((mode & reqmode) != reqmode))
52542+ return 0;
52543+
52544+ return reqmode;
52545+}
52546+
52547+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52548+{
52549+ __u32 mode;
52550+
52551+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52552+
52553+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52554+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52555+ return mode;
52556+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52557+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52558+ return 0;
52559+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52560+ return 0;
52561+
52562+ return (reqmode);
52563+}
52564+
52565+__u32
52566+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52567+{
52568+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52569+}
52570+
52571+__u32
52572+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52573+{
52574+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52575+}
52576+
52577+__u32
52578+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52579+{
52580+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52581+}
52582+
52583+__u32
52584+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52585+{
52586+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52587+}
52588+
52589+__u32
52590+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52591+ mode_t mode)
52592+{
52593+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52594+ return 1;
52595+
52596+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52597+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52598+ GR_FCHMOD_ACL_MSG);
52599+ } else {
52600+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52601+ }
52602+}
52603+
52604+__u32
52605+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52606+ mode_t mode)
52607+{
52608+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52609+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52610+ GR_CHMOD_ACL_MSG);
52611+ } else {
52612+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52613+ }
52614+}
52615+
52616+__u32
52617+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52618+{
52619+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52620+}
52621+
52622+__u32
52623+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52624+{
52625+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52626+}
52627+
52628+__u32
52629+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52630+{
52631+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52632+}
52633+
52634+__u32
52635+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52636+{
52637+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52638+ GR_UNIXCONNECT_ACL_MSG);
52639+}
52640+
52641+/* hardlinks require at minimum create and link permission,
52642+ any additional privilege required is based on the
52643+ privilege of the file being linked to
52644+*/
52645+__u32
52646+gr_acl_handle_link(const struct dentry * new_dentry,
52647+ const struct dentry * parent_dentry,
52648+ const struct vfsmount * parent_mnt,
52649+ const struct dentry * old_dentry,
52650+ const struct vfsmount * old_mnt, const char *to)
52651+{
52652+ __u32 mode;
52653+ __u32 needmode = GR_CREATE | GR_LINK;
52654+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52655+
52656+ mode =
52657+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52658+ old_mnt);
52659+
52660+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52661+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52662+ return mode;
52663+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52664+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52665+ return 0;
52666+ } else if (unlikely((mode & needmode) != needmode))
52667+ return 0;
52668+
52669+ return 1;
52670+}
52671+
52672+__u32
52673+gr_acl_handle_symlink(const struct dentry * new_dentry,
52674+ const struct dentry * parent_dentry,
52675+ const struct vfsmount * parent_mnt, const char *from)
52676+{
52677+ __u32 needmode = GR_WRITE | GR_CREATE;
52678+ __u32 mode;
52679+
52680+ mode =
52681+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52682+ GR_CREATE | GR_AUDIT_CREATE |
52683+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52684+
52685+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52686+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52687+ return mode;
52688+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52689+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52690+ return 0;
52691+ } else if (unlikely((mode & needmode) != needmode))
52692+ return 0;
52693+
52694+ return (GR_WRITE | GR_CREATE);
52695+}
52696+
52697+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52698+{
52699+ __u32 mode;
52700+
52701+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52702+
52703+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52704+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52705+ return mode;
52706+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52707+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52708+ return 0;
52709+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52710+ return 0;
52711+
52712+ return (reqmode);
52713+}
52714+
52715+__u32
52716+gr_acl_handle_mknod(const struct dentry * new_dentry,
52717+ const struct dentry * parent_dentry,
52718+ const struct vfsmount * parent_mnt,
52719+ const int mode)
52720+{
52721+ __u32 reqmode = GR_WRITE | GR_CREATE;
52722+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52723+ reqmode |= GR_SETID;
52724+
52725+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52726+ reqmode, GR_MKNOD_ACL_MSG);
52727+}
52728+
52729+__u32
52730+gr_acl_handle_mkdir(const struct dentry *new_dentry,
52731+ const struct dentry *parent_dentry,
52732+ const struct vfsmount *parent_mnt)
52733+{
52734+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52735+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52736+}
52737+
52738+#define RENAME_CHECK_SUCCESS(old, new) \
52739+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52740+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52741+
52742+int
52743+gr_acl_handle_rename(struct dentry *new_dentry,
52744+ struct dentry *parent_dentry,
52745+ const struct vfsmount *parent_mnt,
52746+ struct dentry *old_dentry,
52747+ struct inode *old_parent_inode,
52748+ struct vfsmount *old_mnt, const char *newname)
52749+{
52750+ __u32 comp1, comp2;
52751+ int error = 0;
52752+
52753+ if (unlikely(!gr_acl_is_enabled()))
52754+ return 0;
52755+
52756+ if (!new_dentry->d_inode) {
52757+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52758+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52759+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52760+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52761+ GR_DELETE | GR_AUDIT_DELETE |
52762+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52763+ GR_SUPPRESS, old_mnt);
52764+ } else {
52765+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52766+ GR_CREATE | GR_DELETE |
52767+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52768+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52769+ GR_SUPPRESS, parent_mnt);
52770+ comp2 =
52771+ gr_search_file(old_dentry,
52772+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52773+ GR_DELETE | GR_AUDIT_DELETE |
52774+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52775+ }
52776+
52777+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52778+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52779+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52780+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52781+ && !(comp2 & GR_SUPPRESS)) {
52782+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52783+ error = -EACCES;
52784+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52785+ error = -EACCES;
52786+
52787+ return error;
52788+}
52789+
52790+void
52791+gr_acl_handle_exit(void)
52792+{
52793+ u16 id;
52794+ char *rolename;
52795+ struct file *exec_file;
52796+
52797+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52798+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52799+ id = current->acl_role_id;
52800+ rolename = current->role->rolename;
52801+ gr_set_acls(1);
52802+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52803+ }
52804+
52805+ write_lock(&grsec_exec_file_lock);
52806+ exec_file = current->exec_file;
52807+ current->exec_file = NULL;
52808+ write_unlock(&grsec_exec_file_lock);
52809+
52810+ if (exec_file)
52811+ fput(exec_file);
52812+}
52813+
52814+int
52815+gr_acl_handle_procpidmem(const struct task_struct *task)
52816+{
52817+ if (unlikely(!gr_acl_is_enabled()))
52818+ return 0;
52819+
52820+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52821+ return -EACCES;
52822+
52823+ return 0;
52824+}
52825diff -urNp linux-3.0.7/grsecurity/gracl_ip.c linux-3.0.7/grsecurity/gracl_ip.c
52826--- linux-3.0.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52827+++ linux-3.0.7/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
52828@@ -0,0 +1,381 @@
52829+#include <linux/kernel.h>
52830+#include <asm/uaccess.h>
52831+#include <asm/errno.h>
52832+#include <net/sock.h>
52833+#include <linux/file.h>
52834+#include <linux/fs.h>
52835+#include <linux/net.h>
52836+#include <linux/in.h>
52837+#include <linux/skbuff.h>
52838+#include <linux/ip.h>
52839+#include <linux/udp.h>
52840+#include <linux/types.h>
52841+#include <linux/sched.h>
52842+#include <linux/netdevice.h>
52843+#include <linux/inetdevice.h>
52844+#include <linux/gracl.h>
52845+#include <linux/grsecurity.h>
52846+#include <linux/grinternal.h>
52847+
52848+#define GR_BIND 0x01
52849+#define GR_CONNECT 0x02
52850+#define GR_INVERT 0x04
52851+#define GR_BINDOVERRIDE 0x08
52852+#define GR_CONNECTOVERRIDE 0x10
52853+#define GR_SOCK_FAMILY 0x20
52854+
52855+static const char * gr_protocols[IPPROTO_MAX] = {
52856+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52857+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52858+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52859+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52860+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52861+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52862+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52863+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52864+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52865+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52866+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52867+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52868+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52869+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52870+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52871+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52872+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52873+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52874+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52875+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52876+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52877+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52878+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52879+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52880+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52881+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52882+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52883+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52884+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52885+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52886+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52887+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52888+ };
52889+
52890+static const char * gr_socktypes[SOCK_MAX] = {
52891+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52892+ "unknown:7", "unknown:8", "unknown:9", "packet"
52893+ };
52894+
52895+static const char * gr_sockfamilies[AF_MAX+1] = {
52896+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52897+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52898+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52899+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52900+ };
52901+
52902+const char *
52903+gr_proto_to_name(unsigned char proto)
52904+{
52905+ return gr_protocols[proto];
52906+}
52907+
52908+const char *
52909+gr_socktype_to_name(unsigned char type)
52910+{
52911+ return gr_socktypes[type];
52912+}
52913+
52914+const char *
52915+gr_sockfamily_to_name(unsigned char family)
52916+{
52917+ return gr_sockfamilies[family];
52918+}
52919+
52920+int
52921+gr_search_socket(const int domain, const int type, const int protocol)
52922+{
52923+ struct acl_subject_label *curr;
52924+ const struct cred *cred = current_cred();
52925+
52926+ if (unlikely(!gr_acl_is_enabled()))
52927+ goto exit;
52928+
52929+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52930+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52931+ goto exit; // let the kernel handle it
52932+
52933+ curr = current->acl;
52934+
52935+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52936+ /* the family is allowed, if this is PF_INET allow it only if
52937+ the extra sock type/protocol checks pass */
52938+ if (domain == PF_INET)
52939+ goto inet_check;
52940+ goto exit;
52941+ } else {
52942+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52943+ __u32 fakeip = 0;
52944+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52945+ current->role->roletype, cred->uid,
52946+ cred->gid, current->exec_file ?
52947+ gr_to_filename(current->exec_file->f_path.dentry,
52948+ current->exec_file->f_path.mnt) :
52949+ curr->filename, curr->filename,
52950+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52951+ &current->signal->saved_ip);
52952+ goto exit;
52953+ }
52954+ goto exit_fail;
52955+ }
52956+
52957+inet_check:
52958+ /* the rest of this checking is for IPv4 only */
52959+ if (!curr->ips)
52960+ goto exit;
52961+
52962+ if ((curr->ip_type & (1 << type)) &&
52963+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52964+ goto exit;
52965+
52966+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52967+ /* we don't place acls on raw sockets , and sometimes
52968+ dgram/ip sockets are opened for ioctl and not
52969+ bind/connect, so we'll fake a bind learn log */
52970+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52971+ __u32 fakeip = 0;
52972+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52973+ current->role->roletype, cred->uid,
52974+ cred->gid, current->exec_file ?
52975+ gr_to_filename(current->exec_file->f_path.dentry,
52976+ current->exec_file->f_path.mnt) :
52977+ curr->filename, curr->filename,
52978+ &fakeip, 0, type,
52979+ protocol, GR_CONNECT, &current->signal->saved_ip);
52980+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52981+ __u32 fakeip = 0;
52982+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52983+ current->role->roletype, cred->uid,
52984+ cred->gid, current->exec_file ?
52985+ gr_to_filename(current->exec_file->f_path.dentry,
52986+ current->exec_file->f_path.mnt) :
52987+ curr->filename, curr->filename,
52988+ &fakeip, 0, type,
52989+ protocol, GR_BIND, &current->signal->saved_ip);
52990+ }
52991+ /* we'll log when they use connect or bind */
52992+ goto exit;
52993+ }
52994+
52995+exit_fail:
52996+ if (domain == PF_INET)
52997+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52998+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52999+ else
53000+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53001+ gr_socktype_to_name(type), protocol);
53002+
53003+ return 0;
53004+exit:
53005+ return 1;
53006+}
53007+
53008+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53009+{
53010+ if ((ip->mode & mode) &&
53011+ (ip_port >= ip->low) &&
53012+ (ip_port <= ip->high) &&
53013+ ((ntohl(ip_addr) & our_netmask) ==
53014+ (ntohl(our_addr) & our_netmask))
53015+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53016+ && (ip->type & (1 << type))) {
53017+ if (ip->mode & GR_INVERT)
53018+ return 2; // specifically denied
53019+ else
53020+ return 1; // allowed
53021+ }
53022+
53023+ return 0; // not specifically allowed, may continue parsing
53024+}
53025+
53026+static int
53027+gr_search_connectbind(const int full_mode, struct sock *sk,
53028+ struct sockaddr_in *addr, const int type)
53029+{
53030+ char iface[IFNAMSIZ] = {0};
53031+ struct acl_subject_label *curr;
53032+ struct acl_ip_label *ip;
53033+ struct inet_sock *isk;
53034+ struct net_device *dev;
53035+ struct in_device *idev;
53036+ unsigned long i;
53037+ int ret;
53038+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53039+ __u32 ip_addr = 0;
53040+ __u32 our_addr;
53041+ __u32 our_netmask;
53042+ char *p;
53043+ __u16 ip_port = 0;
53044+ const struct cred *cred = current_cred();
53045+
53046+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53047+ return 0;
53048+
53049+ curr = current->acl;
53050+ isk = inet_sk(sk);
53051+
53052+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53053+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53054+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53055+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53056+ struct sockaddr_in saddr;
53057+ int err;
53058+
53059+ saddr.sin_family = AF_INET;
53060+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53061+ saddr.sin_port = isk->inet_sport;
53062+
53063+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53064+ if (err)
53065+ return err;
53066+
53067+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53068+ if (err)
53069+ return err;
53070+ }
53071+
53072+ if (!curr->ips)
53073+ return 0;
53074+
53075+ ip_addr = addr->sin_addr.s_addr;
53076+ ip_port = ntohs(addr->sin_port);
53077+
53078+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53079+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53080+ current->role->roletype, cred->uid,
53081+ cred->gid, current->exec_file ?
53082+ gr_to_filename(current->exec_file->f_path.dentry,
53083+ current->exec_file->f_path.mnt) :
53084+ curr->filename, curr->filename,
53085+ &ip_addr, ip_port, type,
53086+ sk->sk_protocol, mode, &current->signal->saved_ip);
53087+ return 0;
53088+ }
53089+
53090+ for (i = 0; i < curr->ip_num; i++) {
53091+ ip = *(curr->ips + i);
53092+ if (ip->iface != NULL) {
53093+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53094+ p = strchr(iface, ':');
53095+ if (p != NULL)
53096+ *p = '\0';
53097+ dev = dev_get_by_name(sock_net(sk), iface);
53098+ if (dev == NULL)
53099+ continue;
53100+ idev = in_dev_get(dev);
53101+ if (idev == NULL) {
53102+ dev_put(dev);
53103+ continue;
53104+ }
53105+ rcu_read_lock();
53106+ for_ifa(idev) {
53107+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53108+ our_addr = ifa->ifa_address;
53109+ our_netmask = 0xffffffff;
53110+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53111+ if (ret == 1) {
53112+ rcu_read_unlock();
53113+ in_dev_put(idev);
53114+ dev_put(dev);
53115+ return 0;
53116+ } else if (ret == 2) {
53117+ rcu_read_unlock();
53118+ in_dev_put(idev);
53119+ dev_put(dev);
53120+ goto denied;
53121+ }
53122+ }
53123+ } endfor_ifa(idev);
53124+ rcu_read_unlock();
53125+ in_dev_put(idev);
53126+ dev_put(dev);
53127+ } else {
53128+ our_addr = ip->addr;
53129+ our_netmask = ip->netmask;
53130+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53131+ if (ret == 1)
53132+ return 0;
53133+ else if (ret == 2)
53134+ goto denied;
53135+ }
53136+ }
53137+
53138+denied:
53139+ if (mode == GR_BIND)
53140+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53141+ else if (mode == GR_CONNECT)
53142+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53143+
53144+ return -EACCES;
53145+}
53146+
53147+int
53148+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53149+{
53150+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53151+}
53152+
53153+int
53154+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53155+{
53156+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53157+}
53158+
53159+int gr_search_listen(struct socket *sock)
53160+{
53161+ struct sock *sk = sock->sk;
53162+ struct sockaddr_in addr;
53163+
53164+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53165+ addr.sin_port = inet_sk(sk)->inet_sport;
53166+
53167+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53168+}
53169+
53170+int gr_search_accept(struct socket *sock)
53171+{
53172+ struct sock *sk = sock->sk;
53173+ struct sockaddr_in addr;
53174+
53175+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53176+ addr.sin_port = inet_sk(sk)->inet_sport;
53177+
53178+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53179+}
53180+
53181+int
53182+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53183+{
53184+ if (addr)
53185+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53186+ else {
53187+ struct sockaddr_in sin;
53188+ const struct inet_sock *inet = inet_sk(sk);
53189+
53190+ sin.sin_addr.s_addr = inet->inet_daddr;
53191+ sin.sin_port = inet->inet_dport;
53192+
53193+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53194+ }
53195+}
53196+
53197+int
53198+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53199+{
53200+ struct sockaddr_in sin;
53201+
53202+ if (unlikely(skb->len < sizeof (struct udphdr)))
53203+ return 0; // skip this packet
53204+
53205+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53206+ sin.sin_port = udp_hdr(skb)->source;
53207+
53208+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53209+}
53210diff -urNp linux-3.0.7/grsecurity/gracl_learn.c linux-3.0.7/grsecurity/gracl_learn.c
53211--- linux-3.0.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
53212+++ linux-3.0.7/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
53213@@ -0,0 +1,207 @@
53214+#include <linux/kernel.h>
53215+#include <linux/mm.h>
53216+#include <linux/sched.h>
53217+#include <linux/poll.h>
53218+#include <linux/string.h>
53219+#include <linux/file.h>
53220+#include <linux/types.h>
53221+#include <linux/vmalloc.h>
53222+#include <linux/grinternal.h>
53223+
53224+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53225+ size_t count, loff_t *ppos);
53226+extern int gr_acl_is_enabled(void);
53227+
53228+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53229+static int gr_learn_attached;
53230+
53231+/* use a 512k buffer */
53232+#define LEARN_BUFFER_SIZE (512 * 1024)
53233+
53234+static DEFINE_SPINLOCK(gr_learn_lock);
53235+static DEFINE_MUTEX(gr_learn_user_mutex);
53236+
53237+/* we need to maintain two buffers, so that the kernel context of grlearn
53238+ uses a semaphore around the userspace copying, and the other kernel contexts
53239+ use a spinlock when copying into the buffer, since they cannot sleep
53240+*/
53241+static char *learn_buffer;
53242+static char *learn_buffer_user;
53243+static int learn_buffer_len;
53244+static int learn_buffer_user_len;
53245+
53246+static ssize_t
53247+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53248+{
53249+ DECLARE_WAITQUEUE(wait, current);
53250+ ssize_t retval = 0;
53251+
53252+ add_wait_queue(&learn_wait, &wait);
53253+ set_current_state(TASK_INTERRUPTIBLE);
53254+ do {
53255+ mutex_lock(&gr_learn_user_mutex);
53256+ spin_lock(&gr_learn_lock);
53257+ if (learn_buffer_len)
53258+ break;
53259+ spin_unlock(&gr_learn_lock);
53260+ mutex_unlock(&gr_learn_user_mutex);
53261+ if (file->f_flags & O_NONBLOCK) {
53262+ retval = -EAGAIN;
53263+ goto out;
53264+ }
53265+ if (signal_pending(current)) {
53266+ retval = -ERESTARTSYS;
53267+ goto out;
53268+ }
53269+
53270+ schedule();
53271+ } while (1);
53272+
53273+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53274+ learn_buffer_user_len = learn_buffer_len;
53275+ retval = learn_buffer_len;
53276+ learn_buffer_len = 0;
53277+
53278+ spin_unlock(&gr_learn_lock);
53279+
53280+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53281+ retval = -EFAULT;
53282+
53283+ mutex_unlock(&gr_learn_user_mutex);
53284+out:
53285+ set_current_state(TASK_RUNNING);
53286+ remove_wait_queue(&learn_wait, &wait);
53287+ return retval;
53288+}
53289+
53290+static unsigned int
53291+poll_learn(struct file * file, poll_table * wait)
53292+{
53293+ poll_wait(file, &learn_wait, wait);
53294+
53295+ if (learn_buffer_len)
53296+ return (POLLIN | POLLRDNORM);
53297+
53298+ return 0;
53299+}
53300+
53301+void
53302+gr_clear_learn_entries(void)
53303+{
53304+ char *tmp;
53305+
53306+ mutex_lock(&gr_learn_user_mutex);
53307+ spin_lock(&gr_learn_lock);
53308+ tmp = learn_buffer;
53309+ learn_buffer = NULL;
53310+ spin_unlock(&gr_learn_lock);
53311+ if (tmp)
53312+ vfree(tmp);
53313+ if (learn_buffer_user != NULL) {
53314+ vfree(learn_buffer_user);
53315+ learn_buffer_user = NULL;
53316+ }
53317+ learn_buffer_len = 0;
53318+ mutex_unlock(&gr_learn_user_mutex);
53319+
53320+ return;
53321+}
53322+
53323+void
53324+gr_add_learn_entry(const char *fmt, ...)
53325+{
53326+ va_list args;
53327+ unsigned int len;
53328+
53329+ if (!gr_learn_attached)
53330+ return;
53331+
53332+ spin_lock(&gr_learn_lock);
53333+
53334+ /* leave a gap at the end so we know when it's "full" but don't have to
53335+ compute the exact length of the string we're trying to append
53336+ */
53337+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53338+ spin_unlock(&gr_learn_lock);
53339+ wake_up_interruptible(&learn_wait);
53340+ return;
53341+ }
53342+ if (learn_buffer == NULL) {
53343+ spin_unlock(&gr_learn_lock);
53344+ return;
53345+ }
53346+
53347+ va_start(args, fmt);
53348+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53349+ va_end(args);
53350+
53351+ learn_buffer_len += len + 1;
53352+
53353+ spin_unlock(&gr_learn_lock);
53354+ wake_up_interruptible(&learn_wait);
53355+
53356+ return;
53357+}
53358+
53359+static int
53360+open_learn(struct inode *inode, struct file *file)
53361+{
53362+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53363+ return -EBUSY;
53364+ if (file->f_mode & FMODE_READ) {
53365+ int retval = 0;
53366+ mutex_lock(&gr_learn_user_mutex);
53367+ if (learn_buffer == NULL)
53368+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53369+ if (learn_buffer_user == NULL)
53370+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53371+ if (learn_buffer == NULL) {
53372+ retval = -ENOMEM;
53373+ goto out_error;
53374+ }
53375+ if (learn_buffer_user == NULL) {
53376+ retval = -ENOMEM;
53377+ goto out_error;
53378+ }
53379+ learn_buffer_len = 0;
53380+ learn_buffer_user_len = 0;
53381+ gr_learn_attached = 1;
53382+out_error:
53383+ mutex_unlock(&gr_learn_user_mutex);
53384+ return retval;
53385+ }
53386+ return 0;
53387+}
53388+
53389+static int
53390+close_learn(struct inode *inode, struct file *file)
53391+{
53392+ if (file->f_mode & FMODE_READ) {
53393+ char *tmp = NULL;
53394+ mutex_lock(&gr_learn_user_mutex);
53395+ spin_lock(&gr_learn_lock);
53396+ tmp = learn_buffer;
53397+ learn_buffer = NULL;
53398+ spin_unlock(&gr_learn_lock);
53399+ if (tmp)
53400+ vfree(tmp);
53401+ if (learn_buffer_user != NULL) {
53402+ vfree(learn_buffer_user);
53403+ learn_buffer_user = NULL;
53404+ }
53405+ learn_buffer_len = 0;
53406+ learn_buffer_user_len = 0;
53407+ gr_learn_attached = 0;
53408+ mutex_unlock(&gr_learn_user_mutex);
53409+ }
53410+
53411+ return 0;
53412+}
53413+
53414+const struct file_operations grsec_fops = {
53415+ .read = read_learn,
53416+ .write = write_grsec_handler,
53417+ .open = open_learn,
53418+ .release = close_learn,
53419+ .poll = poll_learn,
53420+};
53421diff -urNp linux-3.0.7/grsecurity/gracl_res.c linux-3.0.7/grsecurity/gracl_res.c
53422--- linux-3.0.7/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
53423+++ linux-3.0.7/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
53424@@ -0,0 +1,68 @@
53425+#include <linux/kernel.h>
53426+#include <linux/sched.h>
53427+#include <linux/gracl.h>
53428+#include <linux/grinternal.h>
53429+
53430+static const char *restab_log[] = {
53431+ [RLIMIT_CPU] = "RLIMIT_CPU",
53432+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53433+ [RLIMIT_DATA] = "RLIMIT_DATA",
53434+ [RLIMIT_STACK] = "RLIMIT_STACK",
53435+ [RLIMIT_CORE] = "RLIMIT_CORE",
53436+ [RLIMIT_RSS] = "RLIMIT_RSS",
53437+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53438+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53439+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53440+ [RLIMIT_AS] = "RLIMIT_AS",
53441+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53442+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53443+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53444+ [RLIMIT_NICE] = "RLIMIT_NICE",
53445+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53446+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53447+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53448+};
53449+
53450+void
53451+gr_log_resource(const struct task_struct *task,
53452+ const int res, const unsigned long wanted, const int gt)
53453+{
53454+ const struct cred *cred;
53455+ unsigned long rlim;
53456+
53457+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53458+ return;
53459+
53460+ // not yet supported resource
53461+ if (unlikely(!restab_log[res]))
53462+ return;
53463+
53464+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53465+ rlim = task_rlimit_max(task, res);
53466+ else
53467+ rlim = task_rlimit(task, res);
53468+
53469+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53470+ return;
53471+
53472+ rcu_read_lock();
53473+ cred = __task_cred(task);
53474+
53475+ if (res == RLIMIT_NPROC &&
53476+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53477+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53478+ goto out_rcu_unlock;
53479+ else if (res == RLIMIT_MEMLOCK &&
53480+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53481+ goto out_rcu_unlock;
53482+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53483+ goto out_rcu_unlock;
53484+ rcu_read_unlock();
53485+
53486+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53487+
53488+ return;
53489+out_rcu_unlock:
53490+ rcu_read_unlock();
53491+ return;
53492+}
53493diff -urNp linux-3.0.7/grsecurity/gracl_segv.c linux-3.0.7/grsecurity/gracl_segv.c
53494--- linux-3.0.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
53495+++ linux-3.0.7/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
53496@@ -0,0 +1,299 @@
53497+#include <linux/kernel.h>
53498+#include <linux/mm.h>
53499+#include <asm/uaccess.h>
53500+#include <asm/errno.h>
53501+#include <asm/mman.h>
53502+#include <net/sock.h>
53503+#include <linux/file.h>
53504+#include <linux/fs.h>
53505+#include <linux/net.h>
53506+#include <linux/in.h>
53507+#include <linux/slab.h>
53508+#include <linux/types.h>
53509+#include <linux/sched.h>
53510+#include <linux/timer.h>
53511+#include <linux/gracl.h>
53512+#include <linux/grsecurity.h>
53513+#include <linux/grinternal.h>
53514+
53515+static struct crash_uid *uid_set;
53516+static unsigned short uid_used;
53517+static DEFINE_SPINLOCK(gr_uid_lock);
53518+extern rwlock_t gr_inode_lock;
53519+extern struct acl_subject_label *
53520+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53521+ struct acl_role_label *role);
53522+
53523+#ifdef CONFIG_BTRFS_FS
53524+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53525+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53526+#endif
53527+
53528+static inline dev_t __get_dev(const struct dentry *dentry)
53529+{
53530+#ifdef CONFIG_BTRFS_FS
53531+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53532+ return get_btrfs_dev_from_inode(dentry->d_inode);
53533+ else
53534+#endif
53535+ return dentry->d_inode->i_sb->s_dev;
53536+}
53537+
53538+int
53539+gr_init_uidset(void)
53540+{
53541+ uid_set =
53542+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53543+ uid_used = 0;
53544+
53545+ return uid_set ? 1 : 0;
53546+}
53547+
53548+void
53549+gr_free_uidset(void)
53550+{
53551+ if (uid_set)
53552+ kfree(uid_set);
53553+
53554+ return;
53555+}
53556+
53557+int
53558+gr_find_uid(const uid_t uid)
53559+{
53560+ struct crash_uid *tmp = uid_set;
53561+ uid_t buid;
53562+ int low = 0, high = uid_used - 1, mid;
53563+
53564+ while (high >= low) {
53565+ mid = (low + high) >> 1;
53566+ buid = tmp[mid].uid;
53567+ if (buid == uid)
53568+ return mid;
53569+ if (buid > uid)
53570+ high = mid - 1;
53571+ if (buid < uid)
53572+ low = mid + 1;
53573+ }
53574+
53575+ return -1;
53576+}
53577+
53578+static __inline__ void
53579+gr_insertsort(void)
53580+{
53581+ unsigned short i, j;
53582+ struct crash_uid index;
53583+
53584+ for (i = 1; i < uid_used; i++) {
53585+ index = uid_set[i];
53586+ j = i;
53587+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53588+ uid_set[j] = uid_set[j - 1];
53589+ j--;
53590+ }
53591+ uid_set[j] = index;
53592+ }
53593+
53594+ return;
53595+}
53596+
53597+static __inline__ void
53598+gr_insert_uid(const uid_t uid, const unsigned long expires)
53599+{
53600+ int loc;
53601+
53602+ if (uid_used == GR_UIDTABLE_MAX)
53603+ return;
53604+
53605+ loc = gr_find_uid(uid);
53606+
53607+ if (loc >= 0) {
53608+ uid_set[loc].expires = expires;
53609+ return;
53610+ }
53611+
53612+ uid_set[uid_used].uid = uid;
53613+ uid_set[uid_used].expires = expires;
53614+ uid_used++;
53615+
53616+ gr_insertsort();
53617+
53618+ return;
53619+}
53620+
53621+void
53622+gr_remove_uid(const unsigned short loc)
53623+{
53624+ unsigned short i;
53625+
53626+ for (i = loc + 1; i < uid_used; i++)
53627+ uid_set[i - 1] = uid_set[i];
53628+
53629+ uid_used--;
53630+
53631+ return;
53632+}
53633+
53634+int
53635+gr_check_crash_uid(const uid_t uid)
53636+{
53637+ int loc;
53638+ int ret = 0;
53639+
53640+ if (unlikely(!gr_acl_is_enabled()))
53641+ return 0;
53642+
53643+ spin_lock(&gr_uid_lock);
53644+ loc = gr_find_uid(uid);
53645+
53646+ if (loc < 0)
53647+ goto out_unlock;
53648+
53649+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53650+ gr_remove_uid(loc);
53651+ else
53652+ ret = 1;
53653+
53654+out_unlock:
53655+ spin_unlock(&gr_uid_lock);
53656+ return ret;
53657+}
53658+
53659+static __inline__ int
53660+proc_is_setxid(const struct cred *cred)
53661+{
53662+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53663+ cred->uid != cred->fsuid)
53664+ return 1;
53665+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53666+ cred->gid != cred->fsgid)
53667+ return 1;
53668+
53669+ return 0;
53670+}
53671+
53672+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53673+
53674+void
53675+gr_handle_crash(struct task_struct *task, const int sig)
53676+{
53677+ struct acl_subject_label *curr;
53678+ struct acl_subject_label *curr2;
53679+ struct task_struct *tsk, *tsk2;
53680+ const struct cred *cred;
53681+ const struct cred *cred2;
53682+
53683+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53684+ return;
53685+
53686+ if (unlikely(!gr_acl_is_enabled()))
53687+ return;
53688+
53689+ curr = task->acl;
53690+
53691+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53692+ return;
53693+
53694+ if (time_before_eq(curr->expires, get_seconds())) {
53695+ curr->expires = 0;
53696+ curr->crashes = 0;
53697+ }
53698+
53699+ curr->crashes++;
53700+
53701+ if (!curr->expires)
53702+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53703+
53704+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53705+ time_after(curr->expires, get_seconds())) {
53706+ rcu_read_lock();
53707+ cred = __task_cred(task);
53708+ if (cred->uid && proc_is_setxid(cred)) {
53709+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53710+ spin_lock(&gr_uid_lock);
53711+ gr_insert_uid(cred->uid, curr->expires);
53712+ spin_unlock(&gr_uid_lock);
53713+ curr->expires = 0;
53714+ curr->crashes = 0;
53715+ read_lock(&tasklist_lock);
53716+ do_each_thread(tsk2, tsk) {
53717+ cred2 = __task_cred(tsk);
53718+ if (tsk != task && cred2->uid == cred->uid)
53719+ gr_fake_force_sig(SIGKILL, tsk);
53720+ } while_each_thread(tsk2, tsk);
53721+ read_unlock(&tasklist_lock);
53722+ } else {
53723+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53724+ read_lock(&tasklist_lock);
53725+ do_each_thread(tsk2, tsk) {
53726+ if (likely(tsk != task)) {
53727+ curr2 = tsk->acl;
53728+
53729+ if (curr2->device == curr->device &&
53730+ curr2->inode == curr->inode)
53731+ gr_fake_force_sig(SIGKILL, tsk);
53732+ }
53733+ } while_each_thread(tsk2, tsk);
53734+ read_unlock(&tasklist_lock);
53735+ }
53736+ rcu_read_unlock();
53737+ }
53738+
53739+ return;
53740+}
53741+
53742+int
53743+gr_check_crash_exec(const struct file *filp)
53744+{
53745+ struct acl_subject_label *curr;
53746+
53747+ if (unlikely(!gr_acl_is_enabled()))
53748+ return 0;
53749+
53750+ read_lock(&gr_inode_lock);
53751+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53752+ __get_dev(filp->f_path.dentry),
53753+ current->role);
53754+ read_unlock(&gr_inode_lock);
53755+
53756+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53757+ (!curr->crashes && !curr->expires))
53758+ return 0;
53759+
53760+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53761+ time_after(curr->expires, get_seconds()))
53762+ return 1;
53763+ else if (time_before_eq(curr->expires, get_seconds())) {
53764+ curr->crashes = 0;
53765+ curr->expires = 0;
53766+ }
53767+
53768+ return 0;
53769+}
53770+
53771+void
53772+gr_handle_alertkill(struct task_struct *task)
53773+{
53774+ struct acl_subject_label *curracl;
53775+ __u32 curr_ip;
53776+ struct task_struct *p, *p2;
53777+
53778+ if (unlikely(!gr_acl_is_enabled()))
53779+ return;
53780+
53781+ curracl = task->acl;
53782+ curr_ip = task->signal->curr_ip;
53783+
53784+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53785+ read_lock(&tasklist_lock);
53786+ do_each_thread(p2, p) {
53787+ if (p->signal->curr_ip == curr_ip)
53788+ gr_fake_force_sig(SIGKILL, p);
53789+ } while_each_thread(p2, p);
53790+ read_unlock(&tasklist_lock);
53791+ } else if (curracl->mode & GR_KILLPROC)
53792+ gr_fake_force_sig(SIGKILL, task);
53793+
53794+ return;
53795+}
53796diff -urNp linux-3.0.7/grsecurity/gracl_shm.c linux-3.0.7/grsecurity/gracl_shm.c
53797--- linux-3.0.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53798+++ linux-3.0.7/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
53799@@ -0,0 +1,40 @@
53800+#include <linux/kernel.h>
53801+#include <linux/mm.h>
53802+#include <linux/sched.h>
53803+#include <linux/file.h>
53804+#include <linux/ipc.h>
53805+#include <linux/gracl.h>
53806+#include <linux/grsecurity.h>
53807+#include <linux/grinternal.h>
53808+
53809+int
53810+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53811+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53812+{
53813+ struct task_struct *task;
53814+
53815+ if (!gr_acl_is_enabled())
53816+ return 1;
53817+
53818+ rcu_read_lock();
53819+ read_lock(&tasklist_lock);
53820+
53821+ task = find_task_by_vpid(shm_cprid);
53822+
53823+ if (unlikely(!task))
53824+ task = find_task_by_vpid(shm_lapid);
53825+
53826+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53827+ (task->pid == shm_lapid)) &&
53828+ (task->acl->mode & GR_PROTSHM) &&
53829+ (task->acl != current->acl))) {
53830+ read_unlock(&tasklist_lock);
53831+ rcu_read_unlock();
53832+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53833+ return 0;
53834+ }
53835+ read_unlock(&tasklist_lock);
53836+ rcu_read_unlock();
53837+
53838+ return 1;
53839+}
53840diff -urNp linux-3.0.7/grsecurity/grsec_chdir.c linux-3.0.7/grsecurity/grsec_chdir.c
53841--- linux-3.0.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53842+++ linux-3.0.7/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
53843@@ -0,0 +1,19 @@
53844+#include <linux/kernel.h>
53845+#include <linux/sched.h>
53846+#include <linux/fs.h>
53847+#include <linux/file.h>
53848+#include <linux/grsecurity.h>
53849+#include <linux/grinternal.h>
53850+
53851+void
53852+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53853+{
53854+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53855+ if ((grsec_enable_chdir && grsec_enable_group &&
53856+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53857+ !grsec_enable_group)) {
53858+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53859+ }
53860+#endif
53861+ return;
53862+}
53863diff -urNp linux-3.0.7/grsecurity/grsec_chroot.c linux-3.0.7/grsecurity/grsec_chroot.c
53864--- linux-3.0.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53865+++ linux-3.0.7/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
53866@@ -0,0 +1,351 @@
53867+#include <linux/kernel.h>
53868+#include <linux/module.h>
53869+#include <linux/sched.h>
53870+#include <linux/file.h>
53871+#include <linux/fs.h>
53872+#include <linux/mount.h>
53873+#include <linux/types.h>
53874+#include <linux/pid_namespace.h>
53875+#include <linux/grsecurity.h>
53876+#include <linux/grinternal.h>
53877+
53878+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53879+{
53880+#ifdef CONFIG_GRKERNSEC
53881+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53882+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53883+ task->gr_is_chrooted = 1;
53884+ else
53885+ task->gr_is_chrooted = 0;
53886+
53887+ task->gr_chroot_dentry = path->dentry;
53888+#endif
53889+ return;
53890+}
53891+
53892+void gr_clear_chroot_entries(struct task_struct *task)
53893+{
53894+#ifdef CONFIG_GRKERNSEC
53895+ task->gr_is_chrooted = 0;
53896+ task->gr_chroot_dentry = NULL;
53897+#endif
53898+ return;
53899+}
53900+
53901+int
53902+gr_handle_chroot_unix(const pid_t pid)
53903+{
53904+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53905+ struct task_struct *p;
53906+
53907+ if (unlikely(!grsec_enable_chroot_unix))
53908+ return 1;
53909+
53910+ if (likely(!proc_is_chrooted(current)))
53911+ return 1;
53912+
53913+ rcu_read_lock();
53914+ read_lock(&tasklist_lock);
53915+ p = find_task_by_vpid_unrestricted(pid);
53916+ if (unlikely(p && !have_same_root(current, p))) {
53917+ read_unlock(&tasklist_lock);
53918+ rcu_read_unlock();
53919+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53920+ return 0;
53921+ }
53922+ read_unlock(&tasklist_lock);
53923+ rcu_read_unlock();
53924+#endif
53925+ return 1;
53926+}
53927+
53928+int
53929+gr_handle_chroot_nice(void)
53930+{
53931+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53932+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53933+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53934+ return -EPERM;
53935+ }
53936+#endif
53937+ return 0;
53938+}
53939+
53940+int
53941+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53942+{
53943+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53944+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53945+ && proc_is_chrooted(current)) {
53946+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53947+ return -EACCES;
53948+ }
53949+#endif
53950+ return 0;
53951+}
53952+
53953+int
53954+gr_handle_chroot_rawio(const struct inode *inode)
53955+{
53956+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53957+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53958+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53959+ return 1;
53960+#endif
53961+ return 0;
53962+}
53963+
53964+int
53965+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53966+{
53967+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53968+ struct task_struct *p;
53969+ int ret = 0;
53970+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53971+ return ret;
53972+
53973+ read_lock(&tasklist_lock);
53974+ do_each_pid_task(pid, type, p) {
53975+ if (!have_same_root(current, p)) {
53976+ ret = 1;
53977+ goto out;
53978+ }
53979+ } while_each_pid_task(pid, type, p);
53980+out:
53981+ read_unlock(&tasklist_lock);
53982+ return ret;
53983+#endif
53984+ return 0;
53985+}
53986+
53987+int
53988+gr_pid_is_chrooted(struct task_struct *p)
53989+{
53990+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53991+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53992+ return 0;
53993+
53994+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53995+ !have_same_root(current, p)) {
53996+ return 1;
53997+ }
53998+#endif
53999+ return 0;
54000+}
54001+
54002+EXPORT_SYMBOL(gr_pid_is_chrooted);
54003+
54004+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54005+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54006+{
54007+ struct path path, currentroot;
54008+ int ret = 0;
54009+
54010+ path.dentry = (struct dentry *)u_dentry;
54011+ path.mnt = (struct vfsmount *)u_mnt;
54012+ get_fs_root(current->fs, &currentroot);
54013+ if (path_is_under(&path, &currentroot))
54014+ ret = 1;
54015+ path_put(&currentroot);
54016+
54017+ return ret;
54018+}
54019+#endif
54020+
54021+int
54022+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54023+{
54024+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54025+ if (!grsec_enable_chroot_fchdir)
54026+ return 1;
54027+
54028+ if (!proc_is_chrooted(current))
54029+ return 1;
54030+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54031+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54032+ return 0;
54033+ }
54034+#endif
54035+ return 1;
54036+}
54037+
54038+int
54039+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54040+ const time_t shm_createtime)
54041+{
54042+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54043+ struct task_struct *p;
54044+ time_t starttime;
54045+
54046+ if (unlikely(!grsec_enable_chroot_shmat))
54047+ return 1;
54048+
54049+ if (likely(!proc_is_chrooted(current)))
54050+ return 1;
54051+
54052+ rcu_read_lock();
54053+ read_lock(&tasklist_lock);
54054+
54055+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54056+ starttime = p->start_time.tv_sec;
54057+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54058+ if (have_same_root(current, p)) {
54059+ goto allow;
54060+ } else {
54061+ read_unlock(&tasklist_lock);
54062+ rcu_read_unlock();
54063+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54064+ return 0;
54065+ }
54066+ }
54067+ /* creator exited, pid reuse, fall through to next check */
54068+ }
54069+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54070+ if (unlikely(!have_same_root(current, p))) {
54071+ read_unlock(&tasklist_lock);
54072+ rcu_read_unlock();
54073+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54074+ return 0;
54075+ }
54076+ }
54077+
54078+allow:
54079+ read_unlock(&tasklist_lock);
54080+ rcu_read_unlock();
54081+#endif
54082+ return 1;
54083+}
54084+
54085+void
54086+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54087+{
54088+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54089+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54090+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54091+#endif
54092+ return;
54093+}
54094+
54095+int
54096+gr_handle_chroot_mknod(const struct dentry *dentry,
54097+ const struct vfsmount *mnt, const int mode)
54098+{
54099+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54100+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54101+ proc_is_chrooted(current)) {
54102+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54103+ return -EPERM;
54104+ }
54105+#endif
54106+ return 0;
54107+}
54108+
54109+int
54110+gr_handle_chroot_mount(const struct dentry *dentry,
54111+ const struct vfsmount *mnt, const char *dev_name)
54112+{
54113+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54114+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54115+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54116+ return -EPERM;
54117+ }
54118+#endif
54119+ return 0;
54120+}
54121+
54122+int
54123+gr_handle_chroot_pivot(void)
54124+{
54125+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54126+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54127+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54128+ return -EPERM;
54129+ }
54130+#endif
54131+ return 0;
54132+}
54133+
54134+int
54135+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54136+{
54137+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54138+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54139+ !gr_is_outside_chroot(dentry, mnt)) {
54140+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54141+ return -EPERM;
54142+ }
54143+#endif
54144+ return 0;
54145+}
54146+
54147+extern const char *captab_log[];
54148+extern int captab_log_entries;
54149+
54150+int
54151+gr_chroot_is_capable(const int cap)
54152+{
54153+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54154+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54155+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54156+ if (cap_raised(chroot_caps, cap)) {
54157+ const struct cred *creds = current_cred();
54158+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54159+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54160+ }
54161+ return 0;
54162+ }
54163+ }
54164+#endif
54165+ return 1;
54166+}
54167+
54168+int
54169+gr_chroot_is_capable_nolog(const int cap)
54170+{
54171+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54172+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54173+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54174+ if (cap_raised(chroot_caps, cap)) {
54175+ return 0;
54176+ }
54177+ }
54178+#endif
54179+ return 1;
54180+}
54181+
54182+int
54183+gr_handle_chroot_sysctl(const int op)
54184+{
54185+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54186+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54187+ proc_is_chrooted(current))
54188+ return -EACCES;
54189+#endif
54190+ return 0;
54191+}
54192+
54193+void
54194+gr_handle_chroot_chdir(struct path *path)
54195+{
54196+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54197+ if (grsec_enable_chroot_chdir)
54198+ set_fs_pwd(current->fs, path);
54199+#endif
54200+ return;
54201+}
54202+
54203+int
54204+gr_handle_chroot_chmod(const struct dentry *dentry,
54205+ const struct vfsmount *mnt, const int mode)
54206+{
54207+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54208+ /* allow chmod +s on directories, but not files */
54209+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54210+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54211+ proc_is_chrooted(current)) {
54212+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54213+ return -EPERM;
54214+ }
54215+#endif
54216+ return 0;
54217+}
54218diff -urNp linux-3.0.7/grsecurity/grsec_disabled.c linux-3.0.7/grsecurity/grsec_disabled.c
54219--- linux-3.0.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
54220+++ linux-3.0.7/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
54221@@ -0,0 +1,433 @@
54222+#include <linux/kernel.h>
54223+#include <linux/module.h>
54224+#include <linux/sched.h>
54225+#include <linux/file.h>
54226+#include <linux/fs.h>
54227+#include <linux/kdev_t.h>
54228+#include <linux/net.h>
54229+#include <linux/in.h>
54230+#include <linux/ip.h>
54231+#include <linux/skbuff.h>
54232+#include <linux/sysctl.h>
54233+
54234+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54235+void
54236+pax_set_initial_flags(struct linux_binprm *bprm)
54237+{
54238+ return;
54239+}
54240+#endif
54241+
54242+#ifdef CONFIG_SYSCTL
54243+__u32
54244+gr_handle_sysctl(const struct ctl_table * table, const int op)
54245+{
54246+ return 0;
54247+}
54248+#endif
54249+
54250+#ifdef CONFIG_TASKSTATS
54251+int gr_is_taskstats_denied(int pid)
54252+{
54253+ return 0;
54254+}
54255+#endif
54256+
54257+int
54258+gr_acl_is_enabled(void)
54259+{
54260+ return 0;
54261+}
54262+
54263+int
54264+gr_handle_rawio(const struct inode *inode)
54265+{
54266+ return 0;
54267+}
54268+
54269+void
54270+gr_acl_handle_psacct(struct task_struct *task, const long code)
54271+{
54272+ return;
54273+}
54274+
54275+int
54276+gr_handle_ptrace(struct task_struct *task, const long request)
54277+{
54278+ return 0;
54279+}
54280+
54281+int
54282+gr_handle_proc_ptrace(struct task_struct *task)
54283+{
54284+ return 0;
54285+}
54286+
54287+void
54288+gr_learn_resource(const struct task_struct *task,
54289+ const int res, const unsigned long wanted, const int gt)
54290+{
54291+ return;
54292+}
54293+
54294+int
54295+gr_set_acls(const int type)
54296+{
54297+ return 0;
54298+}
54299+
54300+int
54301+gr_check_hidden_task(const struct task_struct *tsk)
54302+{
54303+ return 0;
54304+}
54305+
54306+int
54307+gr_check_protected_task(const struct task_struct *task)
54308+{
54309+ return 0;
54310+}
54311+
54312+int
54313+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54314+{
54315+ return 0;
54316+}
54317+
54318+void
54319+gr_copy_label(struct task_struct *tsk)
54320+{
54321+ return;
54322+}
54323+
54324+void
54325+gr_set_pax_flags(struct task_struct *task)
54326+{
54327+ return;
54328+}
54329+
54330+int
54331+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54332+ const int unsafe_share)
54333+{
54334+ return 0;
54335+}
54336+
54337+void
54338+gr_handle_delete(const ino_t ino, const dev_t dev)
54339+{
54340+ return;
54341+}
54342+
54343+void
54344+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54345+{
54346+ return;
54347+}
54348+
54349+void
54350+gr_handle_crash(struct task_struct *task, const int sig)
54351+{
54352+ return;
54353+}
54354+
54355+int
54356+gr_check_crash_exec(const struct file *filp)
54357+{
54358+ return 0;
54359+}
54360+
54361+int
54362+gr_check_crash_uid(const uid_t uid)
54363+{
54364+ return 0;
54365+}
54366+
54367+void
54368+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54369+ struct dentry *old_dentry,
54370+ struct dentry *new_dentry,
54371+ struct vfsmount *mnt, const __u8 replace)
54372+{
54373+ return;
54374+}
54375+
54376+int
54377+gr_search_socket(const int family, const int type, const int protocol)
54378+{
54379+ return 1;
54380+}
54381+
54382+int
54383+gr_search_connectbind(const int mode, const struct socket *sock,
54384+ const struct sockaddr_in *addr)
54385+{
54386+ return 0;
54387+}
54388+
54389+void
54390+gr_handle_alertkill(struct task_struct *task)
54391+{
54392+ return;
54393+}
54394+
54395+__u32
54396+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54397+{
54398+ return 1;
54399+}
54400+
54401+__u32
54402+gr_acl_handle_hidden_file(const struct dentry * dentry,
54403+ const struct vfsmount * mnt)
54404+{
54405+ return 1;
54406+}
54407+
54408+__u32
54409+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54410+ const int fmode)
54411+{
54412+ return 1;
54413+}
54414+
54415+__u32
54416+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54417+{
54418+ return 1;
54419+}
54420+
54421+__u32
54422+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54423+{
54424+ return 1;
54425+}
54426+
54427+int
54428+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54429+ unsigned int *vm_flags)
54430+{
54431+ return 1;
54432+}
54433+
54434+__u32
54435+gr_acl_handle_truncate(const struct dentry * dentry,
54436+ const struct vfsmount * mnt)
54437+{
54438+ return 1;
54439+}
54440+
54441+__u32
54442+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54443+{
54444+ return 1;
54445+}
54446+
54447+__u32
54448+gr_acl_handle_access(const struct dentry * dentry,
54449+ const struct vfsmount * mnt, const int fmode)
54450+{
54451+ return 1;
54452+}
54453+
54454+__u32
54455+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54456+ mode_t mode)
54457+{
54458+ return 1;
54459+}
54460+
54461+__u32
54462+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54463+ mode_t mode)
54464+{
54465+ return 1;
54466+}
54467+
54468+__u32
54469+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54470+{
54471+ return 1;
54472+}
54473+
54474+__u32
54475+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54476+{
54477+ return 1;
54478+}
54479+
54480+void
54481+grsecurity_init(void)
54482+{
54483+ return;
54484+}
54485+
54486+__u32
54487+gr_acl_handle_mknod(const struct dentry * new_dentry,
54488+ const struct dentry * parent_dentry,
54489+ const struct vfsmount * parent_mnt,
54490+ const int mode)
54491+{
54492+ return 1;
54493+}
54494+
54495+__u32
54496+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54497+ const struct dentry * parent_dentry,
54498+ const struct vfsmount * parent_mnt)
54499+{
54500+ return 1;
54501+}
54502+
54503+__u32
54504+gr_acl_handle_symlink(const struct dentry * new_dentry,
54505+ const struct dentry * parent_dentry,
54506+ const struct vfsmount * parent_mnt, const char *from)
54507+{
54508+ return 1;
54509+}
54510+
54511+__u32
54512+gr_acl_handle_link(const struct dentry * new_dentry,
54513+ const struct dentry * parent_dentry,
54514+ const struct vfsmount * parent_mnt,
54515+ const struct dentry * old_dentry,
54516+ const struct vfsmount * old_mnt, const char *to)
54517+{
54518+ return 1;
54519+}
54520+
54521+int
54522+gr_acl_handle_rename(const struct dentry *new_dentry,
54523+ const struct dentry *parent_dentry,
54524+ const struct vfsmount *parent_mnt,
54525+ const struct dentry *old_dentry,
54526+ const struct inode *old_parent_inode,
54527+ const struct vfsmount *old_mnt, const char *newname)
54528+{
54529+ return 0;
54530+}
54531+
54532+int
54533+gr_acl_handle_filldir(const struct file *file, const char *name,
54534+ const int namelen, const ino_t ino)
54535+{
54536+ return 1;
54537+}
54538+
54539+int
54540+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54541+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54542+{
54543+ return 1;
54544+}
54545+
54546+int
54547+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54548+{
54549+ return 0;
54550+}
54551+
54552+int
54553+gr_search_accept(const struct socket *sock)
54554+{
54555+ return 0;
54556+}
54557+
54558+int
54559+gr_search_listen(const struct socket *sock)
54560+{
54561+ return 0;
54562+}
54563+
54564+int
54565+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54566+{
54567+ return 0;
54568+}
54569+
54570+__u32
54571+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54572+{
54573+ return 1;
54574+}
54575+
54576+__u32
54577+gr_acl_handle_creat(const struct dentry * dentry,
54578+ const struct dentry * p_dentry,
54579+ const struct vfsmount * p_mnt, const int fmode,
54580+ const int imode)
54581+{
54582+ return 1;
54583+}
54584+
54585+void
54586+gr_acl_handle_exit(void)
54587+{
54588+ return;
54589+}
54590+
54591+int
54592+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54593+{
54594+ return 1;
54595+}
54596+
54597+void
54598+gr_set_role_label(const uid_t uid, const gid_t gid)
54599+{
54600+ return;
54601+}
54602+
54603+int
54604+gr_acl_handle_procpidmem(const struct task_struct *task)
54605+{
54606+ return 0;
54607+}
54608+
54609+int
54610+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54611+{
54612+ return 0;
54613+}
54614+
54615+int
54616+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54617+{
54618+ return 0;
54619+}
54620+
54621+void
54622+gr_set_kernel_label(struct task_struct *task)
54623+{
54624+ return;
54625+}
54626+
54627+int
54628+gr_check_user_change(int real, int effective, int fs)
54629+{
54630+ return 0;
54631+}
54632+
54633+int
54634+gr_check_group_change(int real, int effective, int fs)
54635+{
54636+ return 0;
54637+}
54638+
54639+int gr_acl_enable_at_secure(void)
54640+{
54641+ return 0;
54642+}
54643+
54644+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54645+{
54646+ return dentry->d_inode->i_sb->s_dev;
54647+}
54648+
54649+EXPORT_SYMBOL(gr_learn_resource);
54650+EXPORT_SYMBOL(gr_set_kernel_label);
54651+#ifdef CONFIG_SECURITY
54652+EXPORT_SYMBOL(gr_check_user_change);
54653+EXPORT_SYMBOL(gr_check_group_change);
54654+#endif
54655diff -urNp linux-3.0.7/grsecurity/grsec_exec.c linux-3.0.7/grsecurity/grsec_exec.c
54656--- linux-3.0.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
54657+++ linux-3.0.7/grsecurity/grsec_exec.c 2011-10-20 00:50:54.000000000 -0400
54658@@ -0,0 +1,146 @@
54659+#include <linux/kernel.h>
54660+#include <linux/sched.h>
54661+#include <linux/file.h>
54662+#include <linux/binfmts.h>
54663+#include <linux/fs.h>
54664+#include <linux/types.h>
54665+#include <linux/grdefs.h>
54666+#include <linux/grsecurity.h>
54667+#include <linux/grinternal.h>
54668+#include <linux/capability.h>
54669+#include <linux/module.h>
54670+
54671+#include <asm/uaccess.h>
54672+
54673+#ifdef CONFIG_GRKERNSEC_EXECLOG
54674+static char gr_exec_arg_buf[132];
54675+static DEFINE_MUTEX(gr_exec_arg_mutex);
54676+#endif
54677+
54678+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54679+
54680+void
54681+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54682+{
54683+#ifdef CONFIG_GRKERNSEC_EXECLOG
54684+ char *grarg = gr_exec_arg_buf;
54685+ unsigned int i, x, execlen = 0;
54686+ char c;
54687+
54688+ if (!((grsec_enable_execlog && grsec_enable_group &&
54689+ in_group_p(grsec_audit_gid))
54690+ || (grsec_enable_execlog && !grsec_enable_group)))
54691+ return;
54692+
54693+ mutex_lock(&gr_exec_arg_mutex);
54694+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54695+
54696+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54697+ const char __user *p;
54698+ unsigned int len;
54699+
54700+ p = get_user_arg_ptr(argv, i);
54701+ if (IS_ERR(p))
54702+ goto log;
54703+
54704+ len = strnlen_user(p, 128 - execlen);
54705+ if (len > 128 - execlen)
54706+ len = 128 - execlen;
54707+ else if (len > 0)
54708+ len--;
54709+ if (copy_from_user(grarg + execlen, p, len))
54710+ goto log;
54711+
54712+ /* rewrite unprintable characters */
54713+ for (x = 0; x < len; x++) {
54714+ c = *(grarg + execlen + x);
54715+ if (c < 32 || c > 126)
54716+ *(grarg + execlen + x) = ' ';
54717+ }
54718+
54719+ execlen += len;
54720+ *(grarg + execlen) = ' ';
54721+ *(grarg + execlen + 1) = '\0';
54722+ execlen++;
54723+ }
54724+
54725+ log:
54726+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54727+ bprm->file->f_path.mnt, grarg);
54728+ mutex_unlock(&gr_exec_arg_mutex);
54729+#endif
54730+ return;
54731+}
54732+
54733+#ifdef CONFIG_GRKERNSEC
54734+extern int gr_acl_is_capable(const int cap);
54735+extern int gr_acl_is_capable_nolog(const int cap);
54736+extern int gr_chroot_is_capable(const int cap);
54737+extern int gr_chroot_is_capable_nolog(const int cap);
54738+#endif
54739+
54740+const char *captab_log[] = {
54741+ "CAP_CHOWN",
54742+ "CAP_DAC_OVERRIDE",
54743+ "CAP_DAC_READ_SEARCH",
54744+ "CAP_FOWNER",
54745+ "CAP_FSETID",
54746+ "CAP_KILL",
54747+ "CAP_SETGID",
54748+ "CAP_SETUID",
54749+ "CAP_SETPCAP",
54750+ "CAP_LINUX_IMMUTABLE",
54751+ "CAP_NET_BIND_SERVICE",
54752+ "CAP_NET_BROADCAST",
54753+ "CAP_NET_ADMIN",
54754+ "CAP_NET_RAW",
54755+ "CAP_IPC_LOCK",
54756+ "CAP_IPC_OWNER",
54757+ "CAP_SYS_MODULE",
54758+ "CAP_SYS_RAWIO",
54759+ "CAP_SYS_CHROOT",
54760+ "CAP_SYS_PTRACE",
54761+ "CAP_SYS_PACCT",
54762+ "CAP_SYS_ADMIN",
54763+ "CAP_SYS_BOOT",
54764+ "CAP_SYS_NICE",
54765+ "CAP_SYS_RESOURCE",
54766+ "CAP_SYS_TIME",
54767+ "CAP_SYS_TTY_CONFIG",
54768+ "CAP_MKNOD",
54769+ "CAP_LEASE",
54770+ "CAP_AUDIT_WRITE",
54771+ "CAP_AUDIT_CONTROL",
54772+ "CAP_SETFCAP",
54773+ "CAP_MAC_OVERRIDE",
54774+ "CAP_MAC_ADMIN",
54775+ "CAP_SYSLOG",
54776+ "CAP_WAKE_ALARM"
54777+};
54778+
54779+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54780+
54781+int gr_is_capable(const int cap)
54782+{
54783+#ifdef CONFIG_GRKERNSEC
54784+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54785+ return 1;
54786+ return 0;
54787+#else
54788+ return 1;
54789+#endif
54790+}
54791+
54792+int gr_is_capable_nolog(const int cap)
54793+{
54794+#ifdef CONFIG_GRKERNSEC
54795+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54796+ return 1;
54797+ return 0;
54798+#else
54799+ return 1;
54800+#endif
54801+}
54802+
54803+EXPORT_SYMBOL(gr_is_capable);
54804+EXPORT_SYMBOL(gr_is_capable_nolog);
54805diff -urNp linux-3.0.7/grsecurity/grsec_fifo.c linux-3.0.7/grsecurity/grsec_fifo.c
54806--- linux-3.0.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54807+++ linux-3.0.7/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
54808@@ -0,0 +1,24 @@
54809+#include <linux/kernel.h>
54810+#include <linux/sched.h>
54811+#include <linux/fs.h>
54812+#include <linux/file.h>
54813+#include <linux/grinternal.h>
54814+
54815+int
54816+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54817+ const struct dentry *dir, const int flag, const int acc_mode)
54818+{
54819+#ifdef CONFIG_GRKERNSEC_FIFO
54820+ const struct cred *cred = current_cred();
54821+
54822+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54823+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54824+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54825+ (cred->fsuid != dentry->d_inode->i_uid)) {
54826+ if (!inode_permission(dentry->d_inode, acc_mode))
54827+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54828+ return -EACCES;
54829+ }
54830+#endif
54831+ return 0;
54832+}
54833diff -urNp linux-3.0.7/grsecurity/grsec_fork.c linux-3.0.7/grsecurity/grsec_fork.c
54834--- linux-3.0.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54835+++ linux-3.0.7/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
54836@@ -0,0 +1,23 @@
54837+#include <linux/kernel.h>
54838+#include <linux/sched.h>
54839+#include <linux/grsecurity.h>
54840+#include <linux/grinternal.h>
54841+#include <linux/errno.h>
54842+
54843+void
54844+gr_log_forkfail(const int retval)
54845+{
54846+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54847+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54848+ switch (retval) {
54849+ case -EAGAIN:
54850+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54851+ break;
54852+ case -ENOMEM:
54853+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54854+ break;
54855+ }
54856+ }
54857+#endif
54858+ return;
54859+}
54860diff -urNp linux-3.0.7/grsecurity/grsec_init.c linux-3.0.7/grsecurity/grsec_init.c
54861--- linux-3.0.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54862+++ linux-3.0.7/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
54863@@ -0,0 +1,269 @@
54864+#include <linux/kernel.h>
54865+#include <linux/sched.h>
54866+#include <linux/mm.h>
54867+#include <linux/gracl.h>
54868+#include <linux/slab.h>
54869+#include <linux/vmalloc.h>
54870+#include <linux/percpu.h>
54871+#include <linux/module.h>
54872+
54873+int grsec_enable_brute;
54874+int grsec_enable_link;
54875+int grsec_enable_dmesg;
54876+int grsec_enable_harden_ptrace;
54877+int grsec_enable_fifo;
54878+int grsec_enable_execlog;
54879+int grsec_enable_signal;
54880+int grsec_enable_forkfail;
54881+int grsec_enable_audit_ptrace;
54882+int grsec_enable_time;
54883+int grsec_enable_audit_textrel;
54884+int grsec_enable_group;
54885+int grsec_audit_gid;
54886+int grsec_enable_chdir;
54887+int grsec_enable_mount;
54888+int grsec_enable_rofs;
54889+int grsec_enable_chroot_findtask;
54890+int grsec_enable_chroot_mount;
54891+int grsec_enable_chroot_shmat;
54892+int grsec_enable_chroot_fchdir;
54893+int grsec_enable_chroot_double;
54894+int grsec_enable_chroot_pivot;
54895+int grsec_enable_chroot_chdir;
54896+int grsec_enable_chroot_chmod;
54897+int grsec_enable_chroot_mknod;
54898+int grsec_enable_chroot_nice;
54899+int grsec_enable_chroot_execlog;
54900+int grsec_enable_chroot_caps;
54901+int grsec_enable_chroot_sysctl;
54902+int grsec_enable_chroot_unix;
54903+int grsec_enable_tpe;
54904+int grsec_tpe_gid;
54905+int grsec_enable_blackhole;
54906+#ifdef CONFIG_IPV6_MODULE
54907+EXPORT_SYMBOL(grsec_enable_blackhole);
54908+#endif
54909+int grsec_lastack_retries;
54910+int grsec_enable_tpe_all;
54911+int grsec_enable_tpe_invert;
54912+int grsec_enable_socket_all;
54913+int grsec_socket_all_gid;
54914+int grsec_enable_socket_client;
54915+int grsec_socket_client_gid;
54916+int grsec_enable_socket_server;
54917+int grsec_socket_server_gid;
54918+int grsec_resource_logging;
54919+int grsec_disable_privio;
54920+int grsec_enable_log_rwxmaps;
54921+int grsec_lock;
54922+
54923+DEFINE_SPINLOCK(grsec_alert_lock);
54924+unsigned long grsec_alert_wtime = 0;
54925+unsigned long grsec_alert_fyet = 0;
54926+
54927+DEFINE_SPINLOCK(grsec_audit_lock);
54928+
54929+DEFINE_RWLOCK(grsec_exec_file_lock);
54930+
54931+char *gr_shared_page[4];
54932+
54933+char *gr_alert_log_fmt;
54934+char *gr_audit_log_fmt;
54935+char *gr_alert_log_buf;
54936+char *gr_audit_log_buf;
54937+
54938+extern struct gr_arg *gr_usermode;
54939+extern unsigned char *gr_system_salt;
54940+extern unsigned char *gr_system_sum;
54941+
54942+void __init
54943+grsecurity_init(void)
54944+{
54945+ int j;
54946+ /* create the per-cpu shared pages */
54947+
54948+#ifdef CONFIG_X86
54949+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54950+#endif
54951+
54952+ for (j = 0; j < 4; j++) {
54953+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54954+ if (gr_shared_page[j] == NULL) {
54955+ panic("Unable to allocate grsecurity shared page");
54956+ return;
54957+ }
54958+ }
54959+
54960+ /* allocate log buffers */
54961+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54962+ if (!gr_alert_log_fmt) {
54963+ panic("Unable to allocate grsecurity alert log format buffer");
54964+ return;
54965+ }
54966+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54967+ if (!gr_audit_log_fmt) {
54968+ panic("Unable to allocate grsecurity audit log format buffer");
54969+ return;
54970+ }
54971+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54972+ if (!gr_alert_log_buf) {
54973+ panic("Unable to allocate grsecurity alert log buffer");
54974+ return;
54975+ }
54976+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54977+ if (!gr_audit_log_buf) {
54978+ panic("Unable to allocate grsecurity audit log buffer");
54979+ return;
54980+ }
54981+
54982+ /* allocate memory for authentication structure */
54983+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54984+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54985+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54986+
54987+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54988+ panic("Unable to allocate grsecurity authentication structure");
54989+ return;
54990+ }
54991+
54992+
54993+#ifdef CONFIG_GRKERNSEC_IO
54994+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54995+ grsec_disable_privio = 1;
54996+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54997+ grsec_disable_privio = 1;
54998+#else
54999+ grsec_disable_privio = 0;
55000+#endif
55001+#endif
55002+
55003+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55004+ /* for backward compatibility, tpe_invert always defaults to on if
55005+ enabled in the kernel
55006+ */
55007+ grsec_enable_tpe_invert = 1;
55008+#endif
55009+
55010+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55011+#ifndef CONFIG_GRKERNSEC_SYSCTL
55012+ grsec_lock = 1;
55013+#endif
55014+
55015+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55016+ grsec_enable_audit_textrel = 1;
55017+#endif
55018+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55019+ grsec_enable_log_rwxmaps = 1;
55020+#endif
55021+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55022+ grsec_enable_group = 1;
55023+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55024+#endif
55025+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55026+ grsec_enable_chdir = 1;
55027+#endif
55028+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55029+ grsec_enable_harden_ptrace = 1;
55030+#endif
55031+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55032+ grsec_enable_mount = 1;
55033+#endif
55034+#ifdef CONFIG_GRKERNSEC_LINK
55035+ grsec_enable_link = 1;
55036+#endif
55037+#ifdef CONFIG_GRKERNSEC_BRUTE
55038+ grsec_enable_brute = 1;
55039+#endif
55040+#ifdef CONFIG_GRKERNSEC_DMESG
55041+ grsec_enable_dmesg = 1;
55042+#endif
55043+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55044+ grsec_enable_blackhole = 1;
55045+ grsec_lastack_retries = 4;
55046+#endif
55047+#ifdef CONFIG_GRKERNSEC_FIFO
55048+ grsec_enable_fifo = 1;
55049+#endif
55050+#ifdef CONFIG_GRKERNSEC_EXECLOG
55051+ grsec_enable_execlog = 1;
55052+#endif
55053+#ifdef CONFIG_GRKERNSEC_SIGNAL
55054+ grsec_enable_signal = 1;
55055+#endif
55056+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55057+ grsec_enable_forkfail = 1;
55058+#endif
55059+#ifdef CONFIG_GRKERNSEC_TIME
55060+ grsec_enable_time = 1;
55061+#endif
55062+#ifdef CONFIG_GRKERNSEC_RESLOG
55063+ grsec_resource_logging = 1;
55064+#endif
55065+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55066+ grsec_enable_chroot_findtask = 1;
55067+#endif
55068+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55069+ grsec_enable_chroot_unix = 1;
55070+#endif
55071+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55072+ grsec_enable_chroot_mount = 1;
55073+#endif
55074+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55075+ grsec_enable_chroot_fchdir = 1;
55076+#endif
55077+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55078+ grsec_enable_chroot_shmat = 1;
55079+#endif
55080+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55081+ grsec_enable_audit_ptrace = 1;
55082+#endif
55083+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55084+ grsec_enable_chroot_double = 1;
55085+#endif
55086+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55087+ grsec_enable_chroot_pivot = 1;
55088+#endif
55089+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55090+ grsec_enable_chroot_chdir = 1;
55091+#endif
55092+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55093+ grsec_enable_chroot_chmod = 1;
55094+#endif
55095+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55096+ grsec_enable_chroot_mknod = 1;
55097+#endif
55098+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55099+ grsec_enable_chroot_nice = 1;
55100+#endif
55101+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55102+ grsec_enable_chroot_execlog = 1;
55103+#endif
55104+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55105+ grsec_enable_chroot_caps = 1;
55106+#endif
55107+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55108+ grsec_enable_chroot_sysctl = 1;
55109+#endif
55110+#ifdef CONFIG_GRKERNSEC_TPE
55111+ grsec_enable_tpe = 1;
55112+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55113+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55114+ grsec_enable_tpe_all = 1;
55115+#endif
55116+#endif
55117+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55118+ grsec_enable_socket_all = 1;
55119+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55120+#endif
55121+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55122+ grsec_enable_socket_client = 1;
55123+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55124+#endif
55125+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55126+ grsec_enable_socket_server = 1;
55127+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55128+#endif
55129+#endif
55130+
55131+ return;
55132+}
55133diff -urNp linux-3.0.7/grsecurity/grsec_link.c linux-3.0.7/grsecurity/grsec_link.c
55134--- linux-3.0.7/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
55135+++ linux-3.0.7/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
55136@@ -0,0 +1,43 @@
55137+#include <linux/kernel.h>
55138+#include <linux/sched.h>
55139+#include <linux/fs.h>
55140+#include <linux/file.h>
55141+#include <linux/grinternal.h>
55142+
55143+int
55144+gr_handle_follow_link(const struct inode *parent,
55145+ const struct inode *inode,
55146+ const struct dentry *dentry, const struct vfsmount *mnt)
55147+{
55148+#ifdef CONFIG_GRKERNSEC_LINK
55149+ const struct cred *cred = current_cred();
55150+
55151+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55152+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55153+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55154+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55155+ return -EACCES;
55156+ }
55157+#endif
55158+ return 0;
55159+}
55160+
55161+int
55162+gr_handle_hardlink(const struct dentry *dentry,
55163+ const struct vfsmount *mnt,
55164+ struct inode *inode, const int mode, const char *to)
55165+{
55166+#ifdef CONFIG_GRKERNSEC_LINK
55167+ const struct cred *cred = current_cred();
55168+
55169+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55170+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55171+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55172+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55173+ !capable(CAP_FOWNER) && cred->uid) {
55174+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55175+ return -EPERM;
55176+ }
55177+#endif
55178+ return 0;
55179+}
55180diff -urNp linux-3.0.7/grsecurity/grsec_log.c linux-3.0.7/grsecurity/grsec_log.c
55181--- linux-3.0.7/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
55182+++ linux-3.0.7/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
55183@@ -0,0 +1,315 @@
55184+#include <linux/kernel.h>
55185+#include <linux/sched.h>
55186+#include <linux/file.h>
55187+#include <linux/tty.h>
55188+#include <linux/fs.h>
55189+#include <linux/grinternal.h>
55190+
55191+#ifdef CONFIG_TREE_PREEMPT_RCU
55192+#define DISABLE_PREEMPT() preempt_disable()
55193+#define ENABLE_PREEMPT() preempt_enable()
55194+#else
55195+#define DISABLE_PREEMPT()
55196+#define ENABLE_PREEMPT()
55197+#endif
55198+
55199+#define BEGIN_LOCKS(x) \
55200+ DISABLE_PREEMPT(); \
55201+ rcu_read_lock(); \
55202+ read_lock(&tasklist_lock); \
55203+ read_lock(&grsec_exec_file_lock); \
55204+ if (x != GR_DO_AUDIT) \
55205+ spin_lock(&grsec_alert_lock); \
55206+ else \
55207+ spin_lock(&grsec_audit_lock)
55208+
55209+#define END_LOCKS(x) \
55210+ if (x != GR_DO_AUDIT) \
55211+ spin_unlock(&grsec_alert_lock); \
55212+ else \
55213+ spin_unlock(&grsec_audit_lock); \
55214+ read_unlock(&grsec_exec_file_lock); \
55215+ read_unlock(&tasklist_lock); \
55216+ rcu_read_unlock(); \
55217+ ENABLE_PREEMPT(); \
55218+ if (x == GR_DONT_AUDIT) \
55219+ gr_handle_alertkill(current)
55220+
55221+enum {
55222+ FLOODING,
55223+ NO_FLOODING
55224+};
55225+
55226+extern char *gr_alert_log_fmt;
55227+extern char *gr_audit_log_fmt;
55228+extern char *gr_alert_log_buf;
55229+extern char *gr_audit_log_buf;
55230+
55231+static int gr_log_start(int audit)
55232+{
55233+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55234+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55235+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55236+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55237+ unsigned long curr_secs = get_seconds();
55238+
55239+ if (audit == GR_DO_AUDIT)
55240+ goto set_fmt;
55241+
55242+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55243+ grsec_alert_wtime = curr_secs;
55244+ grsec_alert_fyet = 0;
55245+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55246+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55247+ grsec_alert_fyet++;
55248+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55249+ grsec_alert_wtime = curr_secs;
55250+ grsec_alert_fyet++;
55251+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55252+ return FLOODING;
55253+ }
55254+ else return FLOODING;
55255+
55256+set_fmt:
55257+#endif
55258+ memset(buf, 0, PAGE_SIZE);
55259+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55260+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55261+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55262+ } else if (current->signal->curr_ip) {
55263+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55264+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55265+ } else if (gr_acl_is_enabled()) {
55266+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55267+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55268+ } else {
55269+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55270+ strcpy(buf, fmt);
55271+ }
55272+
55273+ return NO_FLOODING;
55274+}
55275+
55276+static void gr_log_middle(int audit, const char *msg, va_list ap)
55277+ __attribute__ ((format (printf, 2, 0)));
55278+
55279+static void gr_log_middle(int audit, const char *msg, va_list ap)
55280+{
55281+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55282+ unsigned int len = strlen(buf);
55283+
55284+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55285+
55286+ return;
55287+}
55288+
55289+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55290+ __attribute__ ((format (printf, 2, 3)));
55291+
55292+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55293+{
55294+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55295+ unsigned int len = strlen(buf);
55296+ va_list ap;
55297+
55298+ va_start(ap, msg);
55299+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55300+ va_end(ap);
55301+
55302+ return;
55303+}
55304+
55305+static void gr_log_end(int audit)
55306+{
55307+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55308+ unsigned int len = strlen(buf);
55309+
55310+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55311+ printk("%s\n", buf);
55312+
55313+ return;
55314+}
55315+
55316+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55317+{
55318+ int logtype;
55319+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55320+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55321+ void *voidptr = NULL;
55322+ int num1 = 0, num2 = 0;
55323+ unsigned long ulong1 = 0, ulong2 = 0;
55324+ struct dentry *dentry = NULL;
55325+ struct vfsmount *mnt = NULL;
55326+ struct file *file = NULL;
55327+ struct task_struct *task = NULL;
55328+ const struct cred *cred, *pcred;
55329+ va_list ap;
55330+
55331+ BEGIN_LOCKS(audit);
55332+ logtype = gr_log_start(audit);
55333+ if (logtype == FLOODING) {
55334+ END_LOCKS(audit);
55335+ return;
55336+ }
55337+ va_start(ap, argtypes);
55338+ switch (argtypes) {
55339+ case GR_TTYSNIFF:
55340+ task = va_arg(ap, struct task_struct *);
55341+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55342+ break;
55343+ case GR_SYSCTL_HIDDEN:
55344+ str1 = va_arg(ap, char *);
55345+ gr_log_middle_varargs(audit, msg, result, str1);
55346+ break;
55347+ case GR_RBAC:
55348+ dentry = va_arg(ap, struct dentry *);
55349+ mnt = va_arg(ap, struct vfsmount *);
55350+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55351+ break;
55352+ case GR_RBAC_STR:
55353+ dentry = va_arg(ap, struct dentry *);
55354+ mnt = va_arg(ap, struct vfsmount *);
55355+ str1 = va_arg(ap, char *);
55356+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55357+ break;
55358+ case GR_STR_RBAC:
55359+ str1 = va_arg(ap, char *);
55360+ dentry = va_arg(ap, struct dentry *);
55361+ mnt = va_arg(ap, struct vfsmount *);
55362+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55363+ break;
55364+ case GR_RBAC_MODE2:
55365+ dentry = va_arg(ap, struct dentry *);
55366+ mnt = va_arg(ap, struct vfsmount *);
55367+ str1 = va_arg(ap, char *);
55368+ str2 = va_arg(ap, char *);
55369+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55370+ break;
55371+ case GR_RBAC_MODE3:
55372+ dentry = va_arg(ap, struct dentry *);
55373+ mnt = va_arg(ap, struct vfsmount *);
55374+ str1 = va_arg(ap, char *);
55375+ str2 = va_arg(ap, char *);
55376+ str3 = va_arg(ap, char *);
55377+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55378+ break;
55379+ case GR_FILENAME:
55380+ dentry = va_arg(ap, struct dentry *);
55381+ mnt = va_arg(ap, struct vfsmount *);
55382+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55383+ break;
55384+ case GR_STR_FILENAME:
55385+ str1 = va_arg(ap, char *);
55386+ dentry = va_arg(ap, struct dentry *);
55387+ mnt = va_arg(ap, struct vfsmount *);
55388+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55389+ break;
55390+ case GR_FILENAME_STR:
55391+ dentry = va_arg(ap, struct dentry *);
55392+ mnt = va_arg(ap, struct vfsmount *);
55393+ str1 = va_arg(ap, char *);
55394+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55395+ break;
55396+ case GR_FILENAME_TWO_INT:
55397+ dentry = va_arg(ap, struct dentry *);
55398+ mnt = va_arg(ap, struct vfsmount *);
55399+ num1 = va_arg(ap, int);
55400+ num2 = va_arg(ap, int);
55401+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55402+ break;
55403+ case GR_FILENAME_TWO_INT_STR:
55404+ dentry = va_arg(ap, struct dentry *);
55405+ mnt = va_arg(ap, struct vfsmount *);
55406+ num1 = va_arg(ap, int);
55407+ num2 = va_arg(ap, int);
55408+ str1 = va_arg(ap, char *);
55409+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55410+ break;
55411+ case GR_TEXTREL:
55412+ file = va_arg(ap, struct file *);
55413+ ulong1 = va_arg(ap, unsigned long);
55414+ ulong2 = va_arg(ap, unsigned long);
55415+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55416+ break;
55417+ case GR_PTRACE:
55418+ task = va_arg(ap, struct task_struct *);
55419+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55420+ break;
55421+ case GR_RESOURCE:
55422+ task = va_arg(ap, struct task_struct *);
55423+ cred = __task_cred(task);
55424+ pcred = __task_cred(task->real_parent);
55425+ ulong1 = va_arg(ap, unsigned long);
55426+ str1 = va_arg(ap, char *);
55427+ ulong2 = va_arg(ap, unsigned long);
55428+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55429+ break;
55430+ case GR_CAP:
55431+ task = va_arg(ap, struct task_struct *);
55432+ cred = __task_cred(task);
55433+ pcred = __task_cred(task->real_parent);
55434+ str1 = va_arg(ap, char *);
55435+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55436+ break;
55437+ case GR_SIG:
55438+ str1 = va_arg(ap, char *);
55439+ voidptr = va_arg(ap, void *);
55440+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55441+ break;
55442+ case GR_SIG2:
55443+ task = va_arg(ap, struct task_struct *);
55444+ cred = __task_cred(task);
55445+ pcred = __task_cred(task->real_parent);
55446+ num1 = va_arg(ap, int);
55447+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55448+ break;
55449+ case GR_CRASH1:
55450+ task = va_arg(ap, struct task_struct *);
55451+ cred = __task_cred(task);
55452+ pcred = __task_cred(task->real_parent);
55453+ ulong1 = va_arg(ap, unsigned long);
55454+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55455+ break;
55456+ case GR_CRASH2:
55457+ task = va_arg(ap, struct task_struct *);
55458+ cred = __task_cred(task);
55459+ pcred = __task_cred(task->real_parent);
55460+ ulong1 = va_arg(ap, unsigned long);
55461+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55462+ break;
55463+ case GR_RWXMAP:
55464+ file = va_arg(ap, struct file *);
55465+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55466+ break;
55467+ case GR_PSACCT:
55468+ {
55469+ unsigned int wday, cday;
55470+ __u8 whr, chr;
55471+ __u8 wmin, cmin;
55472+ __u8 wsec, csec;
55473+ char cur_tty[64] = { 0 };
55474+ char parent_tty[64] = { 0 };
55475+
55476+ task = va_arg(ap, struct task_struct *);
55477+ wday = va_arg(ap, unsigned int);
55478+ cday = va_arg(ap, unsigned int);
55479+ whr = va_arg(ap, int);
55480+ chr = va_arg(ap, int);
55481+ wmin = va_arg(ap, int);
55482+ cmin = va_arg(ap, int);
55483+ wsec = va_arg(ap, int);
55484+ csec = va_arg(ap, int);
55485+ ulong1 = va_arg(ap, unsigned long);
55486+ cred = __task_cred(task);
55487+ pcred = __task_cred(task->real_parent);
55488+
55489+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55490+ }
55491+ break;
55492+ default:
55493+ gr_log_middle(audit, msg, ap);
55494+ }
55495+ va_end(ap);
55496+ gr_log_end(audit);
55497+ END_LOCKS(audit);
55498+}
55499diff -urNp linux-3.0.7/grsecurity/grsec_mem.c linux-3.0.7/grsecurity/grsec_mem.c
55500--- linux-3.0.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
55501+++ linux-3.0.7/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
55502@@ -0,0 +1,33 @@
55503+#include <linux/kernel.h>
55504+#include <linux/sched.h>
55505+#include <linux/mm.h>
55506+#include <linux/mman.h>
55507+#include <linux/grinternal.h>
55508+
55509+void
55510+gr_handle_ioperm(void)
55511+{
55512+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55513+ return;
55514+}
55515+
55516+void
55517+gr_handle_iopl(void)
55518+{
55519+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55520+ return;
55521+}
55522+
55523+void
55524+gr_handle_mem_readwrite(u64 from, u64 to)
55525+{
55526+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55527+ return;
55528+}
55529+
55530+void
55531+gr_handle_vm86(void)
55532+{
55533+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55534+ return;
55535+}
55536diff -urNp linux-3.0.7/grsecurity/grsec_mount.c linux-3.0.7/grsecurity/grsec_mount.c
55537--- linux-3.0.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
55538+++ linux-3.0.7/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
55539@@ -0,0 +1,62 @@
55540+#include <linux/kernel.h>
55541+#include <linux/sched.h>
55542+#include <linux/mount.h>
55543+#include <linux/grsecurity.h>
55544+#include <linux/grinternal.h>
55545+
55546+void
55547+gr_log_remount(const char *devname, const int retval)
55548+{
55549+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55550+ if (grsec_enable_mount && (retval >= 0))
55551+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55552+#endif
55553+ return;
55554+}
55555+
55556+void
55557+gr_log_unmount(const char *devname, const int retval)
55558+{
55559+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55560+ if (grsec_enable_mount && (retval >= 0))
55561+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55562+#endif
55563+ return;
55564+}
55565+
55566+void
55567+gr_log_mount(const char *from, const char *to, const int retval)
55568+{
55569+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55570+ if (grsec_enable_mount && (retval >= 0))
55571+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55572+#endif
55573+ return;
55574+}
55575+
55576+int
55577+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55578+{
55579+#ifdef CONFIG_GRKERNSEC_ROFS
55580+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55581+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55582+ return -EPERM;
55583+ } else
55584+ return 0;
55585+#endif
55586+ return 0;
55587+}
55588+
55589+int
55590+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55591+{
55592+#ifdef CONFIG_GRKERNSEC_ROFS
55593+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55594+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55595+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55596+ return -EPERM;
55597+ } else
55598+ return 0;
55599+#endif
55600+ return 0;
55601+}
55602diff -urNp linux-3.0.7/grsecurity/grsec_pax.c linux-3.0.7/grsecurity/grsec_pax.c
55603--- linux-3.0.7/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
55604+++ linux-3.0.7/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
55605@@ -0,0 +1,36 @@
55606+#include <linux/kernel.h>
55607+#include <linux/sched.h>
55608+#include <linux/mm.h>
55609+#include <linux/file.h>
55610+#include <linux/grinternal.h>
55611+#include <linux/grsecurity.h>
55612+
55613+void
55614+gr_log_textrel(struct vm_area_struct * vma)
55615+{
55616+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55617+ if (grsec_enable_audit_textrel)
55618+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55619+#endif
55620+ return;
55621+}
55622+
55623+void
55624+gr_log_rwxmmap(struct file *file)
55625+{
55626+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55627+ if (grsec_enable_log_rwxmaps)
55628+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55629+#endif
55630+ return;
55631+}
55632+
55633+void
55634+gr_log_rwxmprotect(struct file *file)
55635+{
55636+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55637+ if (grsec_enable_log_rwxmaps)
55638+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55639+#endif
55640+ return;
55641+}
55642diff -urNp linux-3.0.7/grsecurity/grsec_ptrace.c linux-3.0.7/grsecurity/grsec_ptrace.c
55643--- linux-3.0.7/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
55644+++ linux-3.0.7/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
55645@@ -0,0 +1,14 @@
55646+#include <linux/kernel.h>
55647+#include <linux/sched.h>
55648+#include <linux/grinternal.h>
55649+#include <linux/grsecurity.h>
55650+
55651+void
55652+gr_audit_ptrace(struct task_struct *task)
55653+{
55654+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55655+ if (grsec_enable_audit_ptrace)
55656+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55657+#endif
55658+ return;
55659+}
55660diff -urNp linux-3.0.7/grsecurity/grsec_sig.c linux-3.0.7/grsecurity/grsec_sig.c
55661--- linux-3.0.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
55662+++ linux-3.0.7/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
55663@@ -0,0 +1,206 @@
55664+#include <linux/kernel.h>
55665+#include <linux/sched.h>
55666+#include <linux/delay.h>
55667+#include <linux/grsecurity.h>
55668+#include <linux/grinternal.h>
55669+#include <linux/hardirq.h>
55670+
55671+char *signames[] = {
55672+ [SIGSEGV] = "Segmentation fault",
55673+ [SIGILL] = "Illegal instruction",
55674+ [SIGABRT] = "Abort",
55675+ [SIGBUS] = "Invalid alignment/Bus error"
55676+};
55677+
55678+void
55679+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55680+{
55681+#ifdef CONFIG_GRKERNSEC_SIGNAL
55682+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55683+ (sig == SIGABRT) || (sig == SIGBUS))) {
55684+ if (t->pid == current->pid) {
55685+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55686+ } else {
55687+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55688+ }
55689+ }
55690+#endif
55691+ return;
55692+}
55693+
55694+int
55695+gr_handle_signal(const struct task_struct *p, const int sig)
55696+{
55697+#ifdef CONFIG_GRKERNSEC
55698+ if (current->pid > 1 && gr_check_protected_task(p)) {
55699+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55700+ return -EPERM;
55701+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55702+ return -EPERM;
55703+ }
55704+#endif
55705+ return 0;
55706+}
55707+
55708+#ifdef CONFIG_GRKERNSEC
55709+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55710+
55711+int gr_fake_force_sig(int sig, struct task_struct *t)
55712+{
55713+ unsigned long int flags;
55714+ int ret, blocked, ignored;
55715+ struct k_sigaction *action;
55716+
55717+ spin_lock_irqsave(&t->sighand->siglock, flags);
55718+ action = &t->sighand->action[sig-1];
55719+ ignored = action->sa.sa_handler == SIG_IGN;
55720+ blocked = sigismember(&t->blocked, sig);
55721+ if (blocked || ignored) {
55722+ action->sa.sa_handler = SIG_DFL;
55723+ if (blocked) {
55724+ sigdelset(&t->blocked, sig);
55725+ recalc_sigpending_and_wake(t);
55726+ }
55727+ }
55728+ if (action->sa.sa_handler == SIG_DFL)
55729+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
55730+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55731+
55732+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
55733+
55734+ return ret;
55735+}
55736+#endif
55737+
55738+#ifdef CONFIG_GRKERNSEC_BRUTE
55739+#define GR_USER_BAN_TIME (15 * 60)
55740+
55741+static int __get_dumpable(unsigned long mm_flags)
55742+{
55743+ int ret;
55744+
55745+ ret = mm_flags & MMF_DUMPABLE_MASK;
55746+ return (ret >= 2) ? 2 : ret;
55747+}
55748+#endif
55749+
55750+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55751+{
55752+#ifdef CONFIG_GRKERNSEC_BRUTE
55753+ uid_t uid = 0;
55754+
55755+ if (!grsec_enable_brute)
55756+ return;
55757+
55758+ rcu_read_lock();
55759+ read_lock(&tasklist_lock);
55760+ read_lock(&grsec_exec_file_lock);
55761+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55762+ p->real_parent->brute = 1;
55763+ else {
55764+ const struct cred *cred = __task_cred(p), *cred2;
55765+ struct task_struct *tsk, *tsk2;
55766+
55767+ if (!__get_dumpable(mm_flags) && cred->uid) {
55768+ struct user_struct *user;
55769+
55770+ uid = cred->uid;
55771+
55772+ /* this is put upon execution past expiration */
55773+ user = find_user(uid);
55774+ if (user == NULL)
55775+ goto unlock;
55776+ user->banned = 1;
55777+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55778+ if (user->ban_expires == ~0UL)
55779+ user->ban_expires--;
55780+
55781+ do_each_thread(tsk2, tsk) {
55782+ cred2 = __task_cred(tsk);
55783+ if (tsk != p && cred2->uid == uid)
55784+ gr_fake_force_sig(SIGKILL, tsk);
55785+ } while_each_thread(tsk2, tsk);
55786+ }
55787+ }
55788+unlock:
55789+ read_unlock(&grsec_exec_file_lock);
55790+ read_unlock(&tasklist_lock);
55791+ rcu_read_unlock();
55792+
55793+ if (uid)
55794+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55795+
55796+#endif
55797+ return;
55798+}
55799+
55800+void gr_handle_brute_check(void)
55801+{
55802+#ifdef CONFIG_GRKERNSEC_BRUTE
55803+ if (current->brute)
55804+ msleep(30 * 1000);
55805+#endif
55806+ return;
55807+}
55808+
55809+void gr_handle_kernel_exploit(void)
55810+{
55811+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55812+ const struct cred *cred;
55813+ struct task_struct *tsk, *tsk2;
55814+ struct user_struct *user;
55815+ uid_t uid;
55816+
55817+ if (in_irq() || in_serving_softirq() || in_nmi())
55818+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55819+
55820+ uid = current_uid();
55821+
55822+ if (uid == 0)
55823+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55824+ else {
55825+ /* kill all the processes of this user, hold a reference
55826+ to their creds struct, and prevent them from creating
55827+ another process until system reset
55828+ */
55829+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55830+ /* we intentionally leak this ref */
55831+ user = get_uid(current->cred->user);
55832+ if (user) {
55833+ user->banned = 1;
55834+ user->ban_expires = ~0UL;
55835+ }
55836+
55837+ read_lock(&tasklist_lock);
55838+ do_each_thread(tsk2, tsk) {
55839+ cred = __task_cred(tsk);
55840+ if (cred->uid == uid)
55841+ gr_fake_force_sig(SIGKILL, tsk);
55842+ } while_each_thread(tsk2, tsk);
55843+ read_unlock(&tasklist_lock);
55844+ }
55845+#endif
55846+}
55847+
55848+int __gr_process_user_ban(struct user_struct *user)
55849+{
55850+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55851+ if (unlikely(user->banned)) {
55852+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55853+ user->banned = 0;
55854+ user->ban_expires = 0;
55855+ free_uid(user);
55856+ } else
55857+ return -EPERM;
55858+ }
55859+#endif
55860+ return 0;
55861+}
55862+
55863+int gr_process_user_ban(void)
55864+{
55865+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55866+ return __gr_process_user_ban(current->cred->user);
55867+#endif
55868+ return 0;
55869+}
55870diff -urNp linux-3.0.7/grsecurity/grsec_sock.c linux-3.0.7/grsecurity/grsec_sock.c
55871--- linux-3.0.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55872+++ linux-3.0.7/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
55873@@ -0,0 +1,244 @@
55874+#include <linux/kernel.h>
55875+#include <linux/module.h>
55876+#include <linux/sched.h>
55877+#include <linux/file.h>
55878+#include <linux/net.h>
55879+#include <linux/in.h>
55880+#include <linux/ip.h>
55881+#include <net/sock.h>
55882+#include <net/inet_sock.h>
55883+#include <linux/grsecurity.h>
55884+#include <linux/grinternal.h>
55885+#include <linux/gracl.h>
55886+
55887+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55888+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55889+
55890+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55891+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55892+
55893+#ifdef CONFIG_UNIX_MODULE
55894+EXPORT_SYMBOL(gr_acl_handle_unix);
55895+EXPORT_SYMBOL(gr_acl_handle_mknod);
55896+EXPORT_SYMBOL(gr_handle_chroot_unix);
55897+EXPORT_SYMBOL(gr_handle_create);
55898+#endif
55899+
55900+#ifdef CONFIG_GRKERNSEC
55901+#define gr_conn_table_size 32749
55902+struct conn_table_entry {
55903+ struct conn_table_entry *next;
55904+ struct signal_struct *sig;
55905+};
55906+
55907+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55908+DEFINE_SPINLOCK(gr_conn_table_lock);
55909+
55910+extern const char * gr_socktype_to_name(unsigned char type);
55911+extern const char * gr_proto_to_name(unsigned char proto);
55912+extern const char * gr_sockfamily_to_name(unsigned char family);
55913+
55914+static __inline__ int
55915+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55916+{
55917+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55918+}
55919+
55920+static __inline__ int
55921+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55922+ __u16 sport, __u16 dport)
55923+{
55924+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55925+ sig->gr_sport == sport && sig->gr_dport == dport))
55926+ return 1;
55927+ else
55928+ return 0;
55929+}
55930+
55931+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55932+{
55933+ struct conn_table_entry **match;
55934+ unsigned int index;
55935+
55936+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55937+ sig->gr_sport, sig->gr_dport,
55938+ gr_conn_table_size);
55939+
55940+ newent->sig = sig;
55941+
55942+ match = &gr_conn_table[index];
55943+ newent->next = *match;
55944+ *match = newent;
55945+
55946+ return;
55947+}
55948+
55949+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55950+{
55951+ struct conn_table_entry *match, *last = NULL;
55952+ unsigned int index;
55953+
55954+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55955+ sig->gr_sport, sig->gr_dport,
55956+ gr_conn_table_size);
55957+
55958+ match = gr_conn_table[index];
55959+ while (match && !conn_match(match->sig,
55960+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55961+ sig->gr_dport)) {
55962+ last = match;
55963+ match = match->next;
55964+ }
55965+
55966+ if (match) {
55967+ if (last)
55968+ last->next = match->next;
55969+ else
55970+ gr_conn_table[index] = NULL;
55971+ kfree(match);
55972+ }
55973+
55974+ return;
55975+}
55976+
55977+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55978+ __u16 sport, __u16 dport)
55979+{
55980+ struct conn_table_entry *match;
55981+ unsigned int index;
55982+
55983+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55984+
55985+ match = gr_conn_table[index];
55986+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55987+ match = match->next;
55988+
55989+ if (match)
55990+ return match->sig;
55991+ else
55992+ return NULL;
55993+}
55994+
55995+#endif
55996+
55997+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55998+{
55999+#ifdef CONFIG_GRKERNSEC
56000+ struct signal_struct *sig = task->signal;
56001+ struct conn_table_entry *newent;
56002+
56003+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56004+ if (newent == NULL)
56005+ return;
56006+ /* no bh lock needed since we are called with bh disabled */
56007+ spin_lock(&gr_conn_table_lock);
56008+ gr_del_task_from_ip_table_nolock(sig);
56009+ sig->gr_saddr = inet->inet_rcv_saddr;
56010+ sig->gr_daddr = inet->inet_daddr;
56011+ sig->gr_sport = inet->inet_sport;
56012+ sig->gr_dport = inet->inet_dport;
56013+ gr_add_to_task_ip_table_nolock(sig, newent);
56014+ spin_unlock(&gr_conn_table_lock);
56015+#endif
56016+ return;
56017+}
56018+
56019+void gr_del_task_from_ip_table(struct task_struct *task)
56020+{
56021+#ifdef CONFIG_GRKERNSEC
56022+ spin_lock_bh(&gr_conn_table_lock);
56023+ gr_del_task_from_ip_table_nolock(task->signal);
56024+ spin_unlock_bh(&gr_conn_table_lock);
56025+#endif
56026+ return;
56027+}
56028+
56029+void
56030+gr_attach_curr_ip(const struct sock *sk)
56031+{
56032+#ifdef CONFIG_GRKERNSEC
56033+ struct signal_struct *p, *set;
56034+ const struct inet_sock *inet = inet_sk(sk);
56035+
56036+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56037+ return;
56038+
56039+ set = current->signal;
56040+
56041+ spin_lock_bh(&gr_conn_table_lock);
56042+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56043+ inet->inet_dport, inet->inet_sport);
56044+ if (unlikely(p != NULL)) {
56045+ set->curr_ip = p->curr_ip;
56046+ set->used_accept = 1;
56047+ gr_del_task_from_ip_table_nolock(p);
56048+ spin_unlock_bh(&gr_conn_table_lock);
56049+ return;
56050+ }
56051+ spin_unlock_bh(&gr_conn_table_lock);
56052+
56053+ set->curr_ip = inet->inet_daddr;
56054+ set->used_accept = 1;
56055+#endif
56056+ return;
56057+}
56058+
56059+int
56060+gr_handle_sock_all(const int family, const int type, const int protocol)
56061+{
56062+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56063+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56064+ (family != AF_UNIX)) {
56065+ if (family == AF_INET)
56066+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56067+ else
56068+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56069+ return -EACCES;
56070+ }
56071+#endif
56072+ return 0;
56073+}
56074+
56075+int
56076+gr_handle_sock_server(const struct sockaddr *sck)
56077+{
56078+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56079+ if (grsec_enable_socket_server &&
56080+ in_group_p(grsec_socket_server_gid) &&
56081+ sck && (sck->sa_family != AF_UNIX) &&
56082+ (sck->sa_family != AF_LOCAL)) {
56083+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56084+ return -EACCES;
56085+ }
56086+#endif
56087+ return 0;
56088+}
56089+
56090+int
56091+gr_handle_sock_server_other(const struct sock *sck)
56092+{
56093+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56094+ if (grsec_enable_socket_server &&
56095+ in_group_p(grsec_socket_server_gid) &&
56096+ sck && (sck->sk_family != AF_UNIX) &&
56097+ (sck->sk_family != AF_LOCAL)) {
56098+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56099+ return -EACCES;
56100+ }
56101+#endif
56102+ return 0;
56103+}
56104+
56105+int
56106+gr_handle_sock_client(const struct sockaddr *sck)
56107+{
56108+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56109+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56110+ sck && (sck->sa_family != AF_UNIX) &&
56111+ (sck->sa_family != AF_LOCAL)) {
56112+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56113+ return -EACCES;
56114+ }
56115+#endif
56116+ return 0;
56117+}
56118diff -urNp linux-3.0.7/grsecurity/grsec_sysctl.c linux-3.0.7/grsecurity/grsec_sysctl.c
56119--- linux-3.0.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
56120+++ linux-3.0.7/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
56121@@ -0,0 +1,433 @@
56122+#include <linux/kernel.h>
56123+#include <linux/sched.h>
56124+#include <linux/sysctl.h>
56125+#include <linux/grsecurity.h>
56126+#include <linux/grinternal.h>
56127+
56128+int
56129+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56130+{
56131+#ifdef CONFIG_GRKERNSEC_SYSCTL
56132+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56133+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56134+ return -EACCES;
56135+ }
56136+#endif
56137+ return 0;
56138+}
56139+
56140+#ifdef CONFIG_GRKERNSEC_ROFS
56141+static int __maybe_unused one = 1;
56142+#endif
56143+
56144+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56145+struct ctl_table grsecurity_table[] = {
56146+#ifdef CONFIG_GRKERNSEC_SYSCTL
56147+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56148+#ifdef CONFIG_GRKERNSEC_IO
56149+ {
56150+ .procname = "disable_priv_io",
56151+ .data = &grsec_disable_privio,
56152+ .maxlen = sizeof(int),
56153+ .mode = 0600,
56154+ .proc_handler = &proc_dointvec,
56155+ },
56156+#endif
56157+#endif
56158+#ifdef CONFIG_GRKERNSEC_LINK
56159+ {
56160+ .procname = "linking_restrictions",
56161+ .data = &grsec_enable_link,
56162+ .maxlen = sizeof(int),
56163+ .mode = 0600,
56164+ .proc_handler = &proc_dointvec,
56165+ },
56166+#endif
56167+#ifdef CONFIG_GRKERNSEC_BRUTE
56168+ {
56169+ .procname = "deter_bruteforce",
56170+ .data = &grsec_enable_brute,
56171+ .maxlen = sizeof(int),
56172+ .mode = 0600,
56173+ .proc_handler = &proc_dointvec,
56174+ },
56175+#endif
56176+#ifdef CONFIG_GRKERNSEC_FIFO
56177+ {
56178+ .procname = "fifo_restrictions",
56179+ .data = &grsec_enable_fifo,
56180+ .maxlen = sizeof(int),
56181+ .mode = 0600,
56182+ .proc_handler = &proc_dointvec,
56183+ },
56184+#endif
56185+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56186+ {
56187+ .procname = "ip_blackhole",
56188+ .data = &grsec_enable_blackhole,
56189+ .maxlen = sizeof(int),
56190+ .mode = 0600,
56191+ .proc_handler = &proc_dointvec,
56192+ },
56193+ {
56194+ .procname = "lastack_retries",
56195+ .data = &grsec_lastack_retries,
56196+ .maxlen = sizeof(int),
56197+ .mode = 0600,
56198+ .proc_handler = &proc_dointvec,
56199+ },
56200+#endif
56201+#ifdef CONFIG_GRKERNSEC_EXECLOG
56202+ {
56203+ .procname = "exec_logging",
56204+ .data = &grsec_enable_execlog,
56205+ .maxlen = sizeof(int),
56206+ .mode = 0600,
56207+ .proc_handler = &proc_dointvec,
56208+ },
56209+#endif
56210+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56211+ {
56212+ .procname = "rwxmap_logging",
56213+ .data = &grsec_enable_log_rwxmaps,
56214+ .maxlen = sizeof(int),
56215+ .mode = 0600,
56216+ .proc_handler = &proc_dointvec,
56217+ },
56218+#endif
56219+#ifdef CONFIG_GRKERNSEC_SIGNAL
56220+ {
56221+ .procname = "signal_logging",
56222+ .data = &grsec_enable_signal,
56223+ .maxlen = sizeof(int),
56224+ .mode = 0600,
56225+ .proc_handler = &proc_dointvec,
56226+ },
56227+#endif
56228+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56229+ {
56230+ .procname = "forkfail_logging",
56231+ .data = &grsec_enable_forkfail,
56232+ .maxlen = sizeof(int),
56233+ .mode = 0600,
56234+ .proc_handler = &proc_dointvec,
56235+ },
56236+#endif
56237+#ifdef CONFIG_GRKERNSEC_TIME
56238+ {
56239+ .procname = "timechange_logging",
56240+ .data = &grsec_enable_time,
56241+ .maxlen = sizeof(int),
56242+ .mode = 0600,
56243+ .proc_handler = &proc_dointvec,
56244+ },
56245+#endif
56246+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56247+ {
56248+ .procname = "chroot_deny_shmat",
56249+ .data = &grsec_enable_chroot_shmat,
56250+ .maxlen = sizeof(int),
56251+ .mode = 0600,
56252+ .proc_handler = &proc_dointvec,
56253+ },
56254+#endif
56255+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56256+ {
56257+ .procname = "chroot_deny_unix",
56258+ .data = &grsec_enable_chroot_unix,
56259+ .maxlen = sizeof(int),
56260+ .mode = 0600,
56261+ .proc_handler = &proc_dointvec,
56262+ },
56263+#endif
56264+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56265+ {
56266+ .procname = "chroot_deny_mount",
56267+ .data = &grsec_enable_chroot_mount,
56268+ .maxlen = sizeof(int),
56269+ .mode = 0600,
56270+ .proc_handler = &proc_dointvec,
56271+ },
56272+#endif
56273+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56274+ {
56275+ .procname = "chroot_deny_fchdir",
56276+ .data = &grsec_enable_chroot_fchdir,
56277+ .maxlen = sizeof(int),
56278+ .mode = 0600,
56279+ .proc_handler = &proc_dointvec,
56280+ },
56281+#endif
56282+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56283+ {
56284+ .procname = "chroot_deny_chroot",
56285+ .data = &grsec_enable_chroot_double,
56286+ .maxlen = sizeof(int),
56287+ .mode = 0600,
56288+ .proc_handler = &proc_dointvec,
56289+ },
56290+#endif
56291+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56292+ {
56293+ .procname = "chroot_deny_pivot",
56294+ .data = &grsec_enable_chroot_pivot,
56295+ .maxlen = sizeof(int),
56296+ .mode = 0600,
56297+ .proc_handler = &proc_dointvec,
56298+ },
56299+#endif
56300+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56301+ {
56302+ .procname = "chroot_enforce_chdir",
56303+ .data = &grsec_enable_chroot_chdir,
56304+ .maxlen = sizeof(int),
56305+ .mode = 0600,
56306+ .proc_handler = &proc_dointvec,
56307+ },
56308+#endif
56309+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56310+ {
56311+ .procname = "chroot_deny_chmod",
56312+ .data = &grsec_enable_chroot_chmod,
56313+ .maxlen = sizeof(int),
56314+ .mode = 0600,
56315+ .proc_handler = &proc_dointvec,
56316+ },
56317+#endif
56318+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56319+ {
56320+ .procname = "chroot_deny_mknod",
56321+ .data = &grsec_enable_chroot_mknod,
56322+ .maxlen = sizeof(int),
56323+ .mode = 0600,
56324+ .proc_handler = &proc_dointvec,
56325+ },
56326+#endif
56327+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56328+ {
56329+ .procname = "chroot_restrict_nice",
56330+ .data = &grsec_enable_chroot_nice,
56331+ .maxlen = sizeof(int),
56332+ .mode = 0600,
56333+ .proc_handler = &proc_dointvec,
56334+ },
56335+#endif
56336+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56337+ {
56338+ .procname = "chroot_execlog",
56339+ .data = &grsec_enable_chroot_execlog,
56340+ .maxlen = sizeof(int),
56341+ .mode = 0600,
56342+ .proc_handler = &proc_dointvec,
56343+ },
56344+#endif
56345+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56346+ {
56347+ .procname = "chroot_caps",
56348+ .data = &grsec_enable_chroot_caps,
56349+ .maxlen = sizeof(int),
56350+ .mode = 0600,
56351+ .proc_handler = &proc_dointvec,
56352+ },
56353+#endif
56354+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56355+ {
56356+ .procname = "chroot_deny_sysctl",
56357+ .data = &grsec_enable_chroot_sysctl,
56358+ .maxlen = sizeof(int),
56359+ .mode = 0600,
56360+ .proc_handler = &proc_dointvec,
56361+ },
56362+#endif
56363+#ifdef CONFIG_GRKERNSEC_TPE
56364+ {
56365+ .procname = "tpe",
56366+ .data = &grsec_enable_tpe,
56367+ .maxlen = sizeof(int),
56368+ .mode = 0600,
56369+ .proc_handler = &proc_dointvec,
56370+ },
56371+ {
56372+ .procname = "tpe_gid",
56373+ .data = &grsec_tpe_gid,
56374+ .maxlen = sizeof(int),
56375+ .mode = 0600,
56376+ .proc_handler = &proc_dointvec,
56377+ },
56378+#endif
56379+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56380+ {
56381+ .procname = "tpe_invert",
56382+ .data = &grsec_enable_tpe_invert,
56383+ .maxlen = sizeof(int),
56384+ .mode = 0600,
56385+ .proc_handler = &proc_dointvec,
56386+ },
56387+#endif
56388+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56389+ {
56390+ .procname = "tpe_restrict_all",
56391+ .data = &grsec_enable_tpe_all,
56392+ .maxlen = sizeof(int),
56393+ .mode = 0600,
56394+ .proc_handler = &proc_dointvec,
56395+ },
56396+#endif
56397+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56398+ {
56399+ .procname = "socket_all",
56400+ .data = &grsec_enable_socket_all,
56401+ .maxlen = sizeof(int),
56402+ .mode = 0600,
56403+ .proc_handler = &proc_dointvec,
56404+ },
56405+ {
56406+ .procname = "socket_all_gid",
56407+ .data = &grsec_socket_all_gid,
56408+ .maxlen = sizeof(int),
56409+ .mode = 0600,
56410+ .proc_handler = &proc_dointvec,
56411+ },
56412+#endif
56413+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56414+ {
56415+ .procname = "socket_client",
56416+ .data = &grsec_enable_socket_client,
56417+ .maxlen = sizeof(int),
56418+ .mode = 0600,
56419+ .proc_handler = &proc_dointvec,
56420+ },
56421+ {
56422+ .procname = "socket_client_gid",
56423+ .data = &grsec_socket_client_gid,
56424+ .maxlen = sizeof(int),
56425+ .mode = 0600,
56426+ .proc_handler = &proc_dointvec,
56427+ },
56428+#endif
56429+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56430+ {
56431+ .procname = "socket_server",
56432+ .data = &grsec_enable_socket_server,
56433+ .maxlen = sizeof(int),
56434+ .mode = 0600,
56435+ .proc_handler = &proc_dointvec,
56436+ },
56437+ {
56438+ .procname = "socket_server_gid",
56439+ .data = &grsec_socket_server_gid,
56440+ .maxlen = sizeof(int),
56441+ .mode = 0600,
56442+ .proc_handler = &proc_dointvec,
56443+ },
56444+#endif
56445+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56446+ {
56447+ .procname = "audit_group",
56448+ .data = &grsec_enable_group,
56449+ .maxlen = sizeof(int),
56450+ .mode = 0600,
56451+ .proc_handler = &proc_dointvec,
56452+ },
56453+ {
56454+ .procname = "audit_gid",
56455+ .data = &grsec_audit_gid,
56456+ .maxlen = sizeof(int),
56457+ .mode = 0600,
56458+ .proc_handler = &proc_dointvec,
56459+ },
56460+#endif
56461+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56462+ {
56463+ .procname = "audit_chdir",
56464+ .data = &grsec_enable_chdir,
56465+ .maxlen = sizeof(int),
56466+ .mode = 0600,
56467+ .proc_handler = &proc_dointvec,
56468+ },
56469+#endif
56470+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56471+ {
56472+ .procname = "audit_mount",
56473+ .data = &grsec_enable_mount,
56474+ .maxlen = sizeof(int),
56475+ .mode = 0600,
56476+ .proc_handler = &proc_dointvec,
56477+ },
56478+#endif
56479+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56480+ {
56481+ .procname = "audit_textrel",
56482+ .data = &grsec_enable_audit_textrel,
56483+ .maxlen = sizeof(int),
56484+ .mode = 0600,
56485+ .proc_handler = &proc_dointvec,
56486+ },
56487+#endif
56488+#ifdef CONFIG_GRKERNSEC_DMESG
56489+ {
56490+ .procname = "dmesg",
56491+ .data = &grsec_enable_dmesg,
56492+ .maxlen = sizeof(int),
56493+ .mode = 0600,
56494+ .proc_handler = &proc_dointvec,
56495+ },
56496+#endif
56497+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56498+ {
56499+ .procname = "chroot_findtask",
56500+ .data = &grsec_enable_chroot_findtask,
56501+ .maxlen = sizeof(int),
56502+ .mode = 0600,
56503+ .proc_handler = &proc_dointvec,
56504+ },
56505+#endif
56506+#ifdef CONFIG_GRKERNSEC_RESLOG
56507+ {
56508+ .procname = "resource_logging",
56509+ .data = &grsec_resource_logging,
56510+ .maxlen = sizeof(int),
56511+ .mode = 0600,
56512+ .proc_handler = &proc_dointvec,
56513+ },
56514+#endif
56515+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56516+ {
56517+ .procname = "audit_ptrace",
56518+ .data = &grsec_enable_audit_ptrace,
56519+ .maxlen = sizeof(int),
56520+ .mode = 0600,
56521+ .proc_handler = &proc_dointvec,
56522+ },
56523+#endif
56524+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56525+ {
56526+ .procname = "harden_ptrace",
56527+ .data = &grsec_enable_harden_ptrace,
56528+ .maxlen = sizeof(int),
56529+ .mode = 0600,
56530+ .proc_handler = &proc_dointvec,
56531+ },
56532+#endif
56533+ {
56534+ .procname = "grsec_lock",
56535+ .data = &grsec_lock,
56536+ .maxlen = sizeof(int),
56537+ .mode = 0600,
56538+ .proc_handler = &proc_dointvec,
56539+ },
56540+#endif
56541+#ifdef CONFIG_GRKERNSEC_ROFS
56542+ {
56543+ .procname = "romount_protect",
56544+ .data = &grsec_enable_rofs,
56545+ .maxlen = sizeof(int),
56546+ .mode = 0600,
56547+ .proc_handler = &proc_dointvec_minmax,
56548+ .extra1 = &one,
56549+ .extra2 = &one,
56550+ },
56551+#endif
56552+ { }
56553+};
56554+#endif
56555diff -urNp linux-3.0.7/grsecurity/grsec_time.c linux-3.0.7/grsecurity/grsec_time.c
56556--- linux-3.0.7/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
56557+++ linux-3.0.7/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
56558@@ -0,0 +1,16 @@
56559+#include <linux/kernel.h>
56560+#include <linux/sched.h>
56561+#include <linux/grinternal.h>
56562+#include <linux/module.h>
56563+
56564+void
56565+gr_log_timechange(void)
56566+{
56567+#ifdef CONFIG_GRKERNSEC_TIME
56568+ if (grsec_enable_time)
56569+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56570+#endif
56571+ return;
56572+}
56573+
56574+EXPORT_SYMBOL(gr_log_timechange);
56575diff -urNp linux-3.0.7/grsecurity/grsec_tpe.c linux-3.0.7/grsecurity/grsec_tpe.c
56576--- linux-3.0.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
56577+++ linux-3.0.7/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
56578@@ -0,0 +1,39 @@
56579+#include <linux/kernel.h>
56580+#include <linux/sched.h>
56581+#include <linux/file.h>
56582+#include <linux/fs.h>
56583+#include <linux/grinternal.h>
56584+
56585+extern int gr_acl_tpe_check(void);
56586+
56587+int
56588+gr_tpe_allow(const struct file *file)
56589+{
56590+#ifdef CONFIG_GRKERNSEC
56591+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56592+ const struct cred *cred = current_cred();
56593+
56594+ if (cred->uid && ((grsec_enable_tpe &&
56595+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56596+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
56597+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
56598+#else
56599+ in_group_p(grsec_tpe_gid)
56600+#endif
56601+ ) || gr_acl_tpe_check()) &&
56602+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
56603+ (inode->i_mode & S_IWOTH))))) {
56604+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56605+ return 0;
56606+ }
56607+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56608+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
56609+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
56610+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
56611+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56612+ return 0;
56613+ }
56614+#endif
56615+#endif
56616+ return 1;
56617+}
56618diff -urNp linux-3.0.7/grsecurity/grsum.c linux-3.0.7/grsecurity/grsum.c
56619--- linux-3.0.7/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
56620+++ linux-3.0.7/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
56621@@ -0,0 +1,61 @@
56622+#include <linux/err.h>
56623+#include <linux/kernel.h>
56624+#include <linux/sched.h>
56625+#include <linux/mm.h>
56626+#include <linux/scatterlist.h>
56627+#include <linux/crypto.h>
56628+#include <linux/gracl.h>
56629+
56630+
56631+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56632+#error "crypto and sha256 must be built into the kernel"
56633+#endif
56634+
56635+int
56636+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56637+{
56638+ char *p;
56639+ struct crypto_hash *tfm;
56640+ struct hash_desc desc;
56641+ struct scatterlist sg;
56642+ unsigned char temp_sum[GR_SHA_LEN];
56643+ volatile int retval = 0;
56644+ volatile int dummy = 0;
56645+ unsigned int i;
56646+
56647+ sg_init_table(&sg, 1);
56648+
56649+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56650+ if (IS_ERR(tfm)) {
56651+ /* should never happen, since sha256 should be built in */
56652+ return 1;
56653+ }
56654+
56655+ desc.tfm = tfm;
56656+ desc.flags = 0;
56657+
56658+ crypto_hash_init(&desc);
56659+
56660+ p = salt;
56661+ sg_set_buf(&sg, p, GR_SALT_LEN);
56662+ crypto_hash_update(&desc, &sg, sg.length);
56663+
56664+ p = entry->pw;
56665+ sg_set_buf(&sg, p, strlen(p));
56666+
56667+ crypto_hash_update(&desc, &sg, sg.length);
56668+
56669+ crypto_hash_final(&desc, temp_sum);
56670+
56671+ memset(entry->pw, 0, GR_PW_LEN);
56672+
56673+ for (i = 0; i < GR_SHA_LEN; i++)
56674+ if (sum[i] != temp_sum[i])
56675+ retval = 1;
56676+ else
56677+ dummy = 1; // waste a cycle
56678+
56679+ crypto_free_hash(tfm);
56680+
56681+ return retval;
56682+}
56683diff -urNp linux-3.0.7/include/acpi/acpi_bus.h linux-3.0.7/include/acpi/acpi_bus.h
56684--- linux-3.0.7/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
56685+++ linux-3.0.7/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
56686@@ -107,7 +107,7 @@ struct acpi_device_ops {
56687 acpi_op_bind bind;
56688 acpi_op_unbind unbind;
56689 acpi_op_notify notify;
56690-};
56691+} __no_const;
56692
56693 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56694
56695diff -urNp linux-3.0.7/include/asm-generic/atomic-long.h linux-3.0.7/include/asm-generic/atomic-long.h
56696--- linux-3.0.7/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
56697+++ linux-3.0.7/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
56698@@ -22,6 +22,12 @@
56699
56700 typedef atomic64_t atomic_long_t;
56701
56702+#ifdef CONFIG_PAX_REFCOUNT
56703+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56704+#else
56705+typedef atomic64_t atomic_long_unchecked_t;
56706+#endif
56707+
56708 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56709
56710 static inline long atomic_long_read(atomic_long_t *l)
56711@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56712 return (long)atomic64_read(v);
56713 }
56714
56715+#ifdef CONFIG_PAX_REFCOUNT
56716+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56717+{
56718+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56719+
56720+ return (long)atomic64_read_unchecked(v);
56721+}
56722+#endif
56723+
56724 static inline void atomic_long_set(atomic_long_t *l, long i)
56725 {
56726 atomic64_t *v = (atomic64_t *)l;
56727@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56728 atomic64_set(v, i);
56729 }
56730
56731+#ifdef CONFIG_PAX_REFCOUNT
56732+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56733+{
56734+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56735+
56736+ atomic64_set_unchecked(v, i);
56737+}
56738+#endif
56739+
56740 static inline void atomic_long_inc(atomic_long_t *l)
56741 {
56742 atomic64_t *v = (atomic64_t *)l;
56743@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56744 atomic64_inc(v);
56745 }
56746
56747+#ifdef CONFIG_PAX_REFCOUNT
56748+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56749+{
56750+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56751+
56752+ atomic64_inc_unchecked(v);
56753+}
56754+#endif
56755+
56756 static inline void atomic_long_dec(atomic_long_t *l)
56757 {
56758 atomic64_t *v = (atomic64_t *)l;
56759@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56760 atomic64_dec(v);
56761 }
56762
56763+#ifdef CONFIG_PAX_REFCOUNT
56764+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56765+{
56766+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56767+
56768+ atomic64_dec_unchecked(v);
56769+}
56770+#endif
56771+
56772 static inline void atomic_long_add(long i, atomic_long_t *l)
56773 {
56774 atomic64_t *v = (atomic64_t *)l;
56775@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56776 atomic64_add(i, v);
56777 }
56778
56779+#ifdef CONFIG_PAX_REFCOUNT
56780+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56781+{
56782+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56783+
56784+ atomic64_add_unchecked(i, v);
56785+}
56786+#endif
56787+
56788 static inline void atomic_long_sub(long i, atomic_long_t *l)
56789 {
56790 atomic64_t *v = (atomic64_t *)l;
56791@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
56792 atomic64_sub(i, v);
56793 }
56794
56795+#ifdef CONFIG_PAX_REFCOUNT
56796+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56797+{
56798+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56799+
56800+ atomic64_sub_unchecked(i, v);
56801+}
56802+#endif
56803+
56804 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56805 {
56806 atomic64_t *v = (atomic64_t *)l;
56807@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
56808 return (long)atomic64_inc_return(v);
56809 }
56810
56811+#ifdef CONFIG_PAX_REFCOUNT
56812+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56813+{
56814+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56815+
56816+ return (long)atomic64_inc_return_unchecked(v);
56817+}
56818+#endif
56819+
56820 static inline long atomic_long_dec_return(atomic_long_t *l)
56821 {
56822 atomic64_t *v = (atomic64_t *)l;
56823@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
56824
56825 typedef atomic_t atomic_long_t;
56826
56827+#ifdef CONFIG_PAX_REFCOUNT
56828+typedef atomic_unchecked_t atomic_long_unchecked_t;
56829+#else
56830+typedef atomic_t atomic_long_unchecked_t;
56831+#endif
56832+
56833 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56834 static inline long atomic_long_read(atomic_long_t *l)
56835 {
56836@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
56837 return (long)atomic_read(v);
56838 }
56839
56840+#ifdef CONFIG_PAX_REFCOUNT
56841+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56842+{
56843+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56844+
56845+ return (long)atomic_read_unchecked(v);
56846+}
56847+#endif
56848+
56849 static inline void atomic_long_set(atomic_long_t *l, long i)
56850 {
56851 atomic_t *v = (atomic_t *)l;
56852@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
56853 atomic_set(v, i);
56854 }
56855
56856+#ifdef CONFIG_PAX_REFCOUNT
56857+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56858+{
56859+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56860+
56861+ atomic_set_unchecked(v, i);
56862+}
56863+#endif
56864+
56865 static inline void atomic_long_inc(atomic_long_t *l)
56866 {
56867 atomic_t *v = (atomic_t *)l;
56868@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
56869 atomic_inc(v);
56870 }
56871
56872+#ifdef CONFIG_PAX_REFCOUNT
56873+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56874+{
56875+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56876+
56877+ atomic_inc_unchecked(v);
56878+}
56879+#endif
56880+
56881 static inline void atomic_long_dec(atomic_long_t *l)
56882 {
56883 atomic_t *v = (atomic_t *)l;
56884@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
56885 atomic_dec(v);
56886 }
56887
56888+#ifdef CONFIG_PAX_REFCOUNT
56889+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56890+{
56891+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56892+
56893+ atomic_dec_unchecked(v);
56894+}
56895+#endif
56896+
56897 static inline void atomic_long_add(long i, atomic_long_t *l)
56898 {
56899 atomic_t *v = (atomic_t *)l;
56900@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
56901 atomic_add(i, v);
56902 }
56903
56904+#ifdef CONFIG_PAX_REFCOUNT
56905+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56906+{
56907+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56908+
56909+ atomic_add_unchecked(i, v);
56910+}
56911+#endif
56912+
56913 static inline void atomic_long_sub(long i, atomic_long_t *l)
56914 {
56915 atomic_t *v = (atomic_t *)l;
56916@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
56917 atomic_sub(i, v);
56918 }
56919
56920+#ifdef CONFIG_PAX_REFCOUNT
56921+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56922+{
56923+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56924+
56925+ atomic_sub_unchecked(i, v);
56926+}
56927+#endif
56928+
56929 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56930 {
56931 atomic_t *v = (atomic_t *)l;
56932@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
56933 return (long)atomic_inc_return(v);
56934 }
56935
56936+#ifdef CONFIG_PAX_REFCOUNT
56937+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56938+{
56939+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56940+
56941+ return (long)atomic_inc_return_unchecked(v);
56942+}
56943+#endif
56944+
56945 static inline long atomic_long_dec_return(atomic_long_t *l)
56946 {
56947 atomic_t *v = (atomic_t *)l;
56948@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
56949
56950 #endif /* BITS_PER_LONG == 64 */
56951
56952+#ifdef CONFIG_PAX_REFCOUNT
56953+static inline void pax_refcount_needs_these_functions(void)
56954+{
56955+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56956+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56957+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56958+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56959+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56960+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56961+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56962+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56963+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56964+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56965+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56966+
56967+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56968+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56969+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56970+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
56971+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56972+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56973+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56974+}
56975+#else
56976+#define atomic_read_unchecked(v) atomic_read(v)
56977+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56978+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56979+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56980+#define atomic_inc_unchecked(v) atomic_inc(v)
56981+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56982+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56983+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56984+#define atomic_dec_unchecked(v) atomic_dec(v)
56985+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56986+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56987+
56988+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56989+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56990+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56991+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
56992+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56993+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56994+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56995+#endif
56996+
56997 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56998diff -urNp linux-3.0.7/include/asm-generic/cache.h linux-3.0.7/include/asm-generic/cache.h
56999--- linux-3.0.7/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
57000+++ linux-3.0.7/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
57001@@ -6,7 +6,7 @@
57002 * cache lines need to provide their own cache.h.
57003 */
57004
57005-#define L1_CACHE_SHIFT 5
57006-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57007+#define L1_CACHE_SHIFT 5UL
57008+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57009
57010 #endif /* __ASM_GENERIC_CACHE_H */
57011diff -urNp linux-3.0.7/include/asm-generic/int-l64.h linux-3.0.7/include/asm-generic/int-l64.h
57012--- linux-3.0.7/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
57013+++ linux-3.0.7/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
57014@@ -46,6 +46,8 @@ typedef unsigned int u32;
57015 typedef signed long s64;
57016 typedef unsigned long u64;
57017
57018+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57019+
57020 #define S8_C(x) x
57021 #define U8_C(x) x ## U
57022 #define S16_C(x) x
57023diff -urNp linux-3.0.7/include/asm-generic/int-ll64.h linux-3.0.7/include/asm-generic/int-ll64.h
57024--- linux-3.0.7/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
57025+++ linux-3.0.7/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
57026@@ -51,6 +51,8 @@ typedef unsigned int u32;
57027 typedef signed long long s64;
57028 typedef unsigned long long u64;
57029
57030+typedef unsigned long long intoverflow_t;
57031+
57032 #define S8_C(x) x
57033 #define U8_C(x) x ## U
57034 #define S16_C(x) x
57035diff -urNp linux-3.0.7/include/asm-generic/kmap_types.h linux-3.0.7/include/asm-generic/kmap_types.h
57036--- linux-3.0.7/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
57037+++ linux-3.0.7/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
57038@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57039 KMAP_D(17) KM_NMI,
57040 KMAP_D(18) KM_NMI_PTE,
57041 KMAP_D(19) KM_KDB,
57042+KMAP_D(20) KM_CLEARPAGE,
57043 /*
57044 * Remember to update debug_kmap_atomic() when adding new kmap types!
57045 */
57046-KMAP_D(20) KM_TYPE_NR
57047+KMAP_D(21) KM_TYPE_NR
57048 };
57049
57050 #undef KMAP_D
57051diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopmd.h linux-3.0.7/include/asm-generic/pgtable-nopmd.h
57052--- linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
57053+++ linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
57054@@ -1,14 +1,19 @@
57055 #ifndef _PGTABLE_NOPMD_H
57056 #define _PGTABLE_NOPMD_H
57057
57058-#ifndef __ASSEMBLY__
57059-
57060 #include <asm-generic/pgtable-nopud.h>
57061
57062-struct mm_struct;
57063-
57064 #define __PAGETABLE_PMD_FOLDED
57065
57066+#define PMD_SHIFT PUD_SHIFT
57067+#define PTRS_PER_PMD 1
57068+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57069+#define PMD_MASK (~(PMD_SIZE-1))
57070+
57071+#ifndef __ASSEMBLY__
57072+
57073+struct mm_struct;
57074+
57075 /*
57076 * Having the pmd type consist of a pud gets the size right, and allows
57077 * us to conceptually access the pud entry that this pmd is folded into
57078@@ -16,11 +21,6 @@ struct mm_struct;
57079 */
57080 typedef struct { pud_t pud; } pmd_t;
57081
57082-#define PMD_SHIFT PUD_SHIFT
57083-#define PTRS_PER_PMD 1
57084-#define PMD_SIZE (1UL << PMD_SHIFT)
57085-#define PMD_MASK (~(PMD_SIZE-1))
57086-
57087 /*
57088 * The "pud_xxx()" functions here are trivial for a folded two-level
57089 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57090diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopud.h linux-3.0.7/include/asm-generic/pgtable-nopud.h
57091--- linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
57092+++ linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
57093@@ -1,10 +1,15 @@
57094 #ifndef _PGTABLE_NOPUD_H
57095 #define _PGTABLE_NOPUD_H
57096
57097-#ifndef __ASSEMBLY__
57098-
57099 #define __PAGETABLE_PUD_FOLDED
57100
57101+#define PUD_SHIFT PGDIR_SHIFT
57102+#define PTRS_PER_PUD 1
57103+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57104+#define PUD_MASK (~(PUD_SIZE-1))
57105+
57106+#ifndef __ASSEMBLY__
57107+
57108 /*
57109 * Having the pud type consist of a pgd gets the size right, and allows
57110 * us to conceptually access the pgd entry that this pud is folded into
57111@@ -12,11 +17,6 @@
57112 */
57113 typedef struct { pgd_t pgd; } pud_t;
57114
57115-#define PUD_SHIFT PGDIR_SHIFT
57116-#define PTRS_PER_PUD 1
57117-#define PUD_SIZE (1UL << PUD_SHIFT)
57118-#define PUD_MASK (~(PUD_SIZE-1))
57119-
57120 /*
57121 * The "pgd_xxx()" functions here are trivial for a folded two-level
57122 * setup: the pud is never bad, and a pud always exists (as it's folded
57123diff -urNp linux-3.0.7/include/asm-generic/pgtable.h linux-3.0.7/include/asm-generic/pgtable.h
57124--- linux-3.0.7/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
57125+++ linux-3.0.7/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
57126@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57127 #endif /* __HAVE_ARCH_PMD_WRITE */
57128 #endif
57129
57130+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57131+static inline unsigned long pax_open_kernel(void) { return 0; }
57132+#endif
57133+
57134+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57135+static inline unsigned long pax_close_kernel(void) { return 0; }
57136+#endif
57137+
57138 #endif /* !__ASSEMBLY__ */
57139
57140 #endif /* _ASM_GENERIC_PGTABLE_H */
57141diff -urNp linux-3.0.7/include/asm-generic/vmlinux.lds.h linux-3.0.7/include/asm-generic/vmlinux.lds.h
57142--- linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
57143+++ linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
57144@@ -217,6 +217,7 @@
57145 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57146 VMLINUX_SYMBOL(__start_rodata) = .; \
57147 *(.rodata) *(.rodata.*) \
57148+ *(.data..read_only) \
57149 *(__vermagic) /* Kernel version magic */ \
57150 . = ALIGN(8); \
57151 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57152@@ -723,17 +724,18 @@
57153 * section in the linker script will go there too. @phdr should have
57154 * a leading colon.
57155 *
57156- * Note that this macros defines __per_cpu_load as an absolute symbol.
57157+ * Note that this macros defines per_cpu_load as an absolute symbol.
57158 * If there is no need to put the percpu section at a predetermined
57159 * address, use PERCPU_SECTION.
57160 */
57161 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57162- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57163- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57164+ per_cpu_load = .; \
57165+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57166 - LOAD_OFFSET) { \
57167+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57168 PERCPU_INPUT(cacheline) \
57169 } phdr \
57170- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57171+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57172
57173 /**
57174 * PERCPU_SECTION - define output section for percpu area, simple version
57175diff -urNp linux-3.0.7/include/drm/drmP.h linux-3.0.7/include/drm/drmP.h
57176--- linux-3.0.7/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
57177+++ linux-3.0.7/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
57178@@ -73,6 +73,7 @@
57179 #include <linux/workqueue.h>
57180 #include <linux/poll.h>
57181 #include <asm/pgalloc.h>
57182+#include <asm/local.h>
57183 #include "drm.h"
57184
57185 #include <linux/idr.h>
57186@@ -1033,7 +1034,7 @@ struct drm_device {
57187
57188 /** \name Usage Counters */
57189 /*@{ */
57190- int open_count; /**< Outstanding files open */
57191+ local_t open_count; /**< Outstanding files open */
57192 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57193 atomic_t vma_count; /**< Outstanding vma areas open */
57194 int buf_use; /**< Buffers in use -- cannot alloc */
57195@@ -1044,7 +1045,7 @@ struct drm_device {
57196 /*@{ */
57197 unsigned long counters;
57198 enum drm_stat_type types[15];
57199- atomic_t counts[15];
57200+ atomic_unchecked_t counts[15];
57201 /*@} */
57202
57203 struct list_head filelist;
57204diff -urNp linux-3.0.7/include/drm/drm_crtc_helper.h linux-3.0.7/include/drm/drm_crtc_helper.h
57205--- linux-3.0.7/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
57206+++ linux-3.0.7/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
57207@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57208
57209 /* disable crtc when not in use - more explicit than dpms off */
57210 void (*disable)(struct drm_crtc *crtc);
57211-};
57212+} __no_const;
57213
57214 struct drm_encoder_helper_funcs {
57215 void (*dpms)(struct drm_encoder *encoder, int mode);
57216@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57217 struct drm_connector *connector);
57218 /* disable encoder when not in use - more explicit than dpms off */
57219 void (*disable)(struct drm_encoder *encoder);
57220-};
57221+} __no_const;
57222
57223 struct drm_connector_helper_funcs {
57224 int (*get_modes)(struct drm_connector *connector);
57225diff -urNp linux-3.0.7/include/drm/ttm/ttm_memory.h linux-3.0.7/include/drm/ttm/ttm_memory.h
57226--- linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
57227+++ linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
57228@@ -47,7 +47,7 @@
57229
57230 struct ttm_mem_shrink {
57231 int (*do_shrink) (struct ttm_mem_shrink *);
57232-};
57233+} __no_const;
57234
57235 /**
57236 * struct ttm_mem_global - Global memory accounting structure.
57237diff -urNp linux-3.0.7/include/linux/a.out.h linux-3.0.7/include/linux/a.out.h
57238--- linux-3.0.7/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
57239+++ linux-3.0.7/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
57240@@ -39,6 +39,14 @@ enum machine_type {
57241 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57242 };
57243
57244+/* Constants for the N_FLAGS field */
57245+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57246+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57247+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57248+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57249+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57250+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57251+
57252 #if !defined (N_MAGIC)
57253 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57254 #endif
57255diff -urNp linux-3.0.7/include/linux/atmdev.h linux-3.0.7/include/linux/atmdev.h
57256--- linux-3.0.7/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
57257+++ linux-3.0.7/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
57258@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57259 #endif
57260
57261 struct k_atm_aal_stats {
57262-#define __HANDLE_ITEM(i) atomic_t i
57263+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57264 __AAL_STAT_ITEMS
57265 #undef __HANDLE_ITEM
57266 };
57267diff -urNp linux-3.0.7/include/linux/binfmts.h linux-3.0.7/include/linux/binfmts.h
57268--- linux-3.0.7/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
57269+++ linux-3.0.7/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
57270@@ -88,6 +88,7 @@ struct linux_binfmt {
57271 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57272 int (*load_shlib)(struct file *);
57273 int (*core_dump)(struct coredump_params *cprm);
57274+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57275 unsigned long min_coredump; /* minimal dump size */
57276 };
57277
57278diff -urNp linux-3.0.7/include/linux/blkdev.h linux-3.0.7/include/linux/blkdev.h
57279--- linux-3.0.7/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
57280+++ linux-3.0.7/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
57281@@ -1308,7 +1308,7 @@ struct block_device_operations {
57282 /* this callback is with swap_lock and sometimes page table lock held */
57283 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57284 struct module *owner;
57285-};
57286+} __do_const;
57287
57288 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57289 unsigned long);
57290diff -urNp linux-3.0.7/include/linux/blktrace_api.h linux-3.0.7/include/linux/blktrace_api.h
57291--- linux-3.0.7/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
57292+++ linux-3.0.7/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
57293@@ -161,7 +161,7 @@ struct blk_trace {
57294 struct dentry *dir;
57295 struct dentry *dropped_file;
57296 struct dentry *msg_file;
57297- atomic_t dropped;
57298+ atomic_unchecked_t dropped;
57299 };
57300
57301 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57302diff -urNp linux-3.0.7/include/linux/byteorder/little_endian.h linux-3.0.7/include/linux/byteorder/little_endian.h
57303--- linux-3.0.7/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
57304+++ linux-3.0.7/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
57305@@ -42,51 +42,51 @@
57306
57307 static inline __le64 __cpu_to_le64p(const __u64 *p)
57308 {
57309- return (__force __le64)*p;
57310+ return (__force const __le64)*p;
57311 }
57312 static inline __u64 __le64_to_cpup(const __le64 *p)
57313 {
57314- return (__force __u64)*p;
57315+ return (__force const __u64)*p;
57316 }
57317 static inline __le32 __cpu_to_le32p(const __u32 *p)
57318 {
57319- return (__force __le32)*p;
57320+ return (__force const __le32)*p;
57321 }
57322 static inline __u32 __le32_to_cpup(const __le32 *p)
57323 {
57324- return (__force __u32)*p;
57325+ return (__force const __u32)*p;
57326 }
57327 static inline __le16 __cpu_to_le16p(const __u16 *p)
57328 {
57329- return (__force __le16)*p;
57330+ return (__force const __le16)*p;
57331 }
57332 static inline __u16 __le16_to_cpup(const __le16 *p)
57333 {
57334- return (__force __u16)*p;
57335+ return (__force const __u16)*p;
57336 }
57337 static inline __be64 __cpu_to_be64p(const __u64 *p)
57338 {
57339- return (__force __be64)__swab64p(p);
57340+ return (__force const __be64)__swab64p(p);
57341 }
57342 static inline __u64 __be64_to_cpup(const __be64 *p)
57343 {
57344- return __swab64p((__u64 *)p);
57345+ return __swab64p((const __u64 *)p);
57346 }
57347 static inline __be32 __cpu_to_be32p(const __u32 *p)
57348 {
57349- return (__force __be32)__swab32p(p);
57350+ return (__force const __be32)__swab32p(p);
57351 }
57352 static inline __u32 __be32_to_cpup(const __be32 *p)
57353 {
57354- return __swab32p((__u32 *)p);
57355+ return __swab32p((const __u32 *)p);
57356 }
57357 static inline __be16 __cpu_to_be16p(const __u16 *p)
57358 {
57359- return (__force __be16)__swab16p(p);
57360+ return (__force const __be16)__swab16p(p);
57361 }
57362 static inline __u16 __be16_to_cpup(const __be16 *p)
57363 {
57364- return __swab16p((__u16 *)p);
57365+ return __swab16p((const __u16 *)p);
57366 }
57367 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57368 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57369diff -urNp linux-3.0.7/include/linux/cache.h linux-3.0.7/include/linux/cache.h
57370--- linux-3.0.7/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
57371+++ linux-3.0.7/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
57372@@ -16,6 +16,10 @@
57373 #define __read_mostly
57374 #endif
57375
57376+#ifndef __read_only
57377+#define __read_only __read_mostly
57378+#endif
57379+
57380 #ifndef ____cacheline_aligned
57381 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57382 #endif
57383diff -urNp linux-3.0.7/include/linux/capability.h linux-3.0.7/include/linux/capability.h
57384--- linux-3.0.7/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
57385+++ linux-3.0.7/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
57386@@ -547,6 +547,9 @@ extern bool capable(int cap);
57387 extern bool ns_capable(struct user_namespace *ns, int cap);
57388 extern bool task_ns_capable(struct task_struct *t, int cap);
57389 extern bool nsown_capable(int cap);
57390+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57391+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57392+extern bool capable_nolog(int cap);
57393
57394 /* audit system wants to get cap info from files as well */
57395 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57396diff -urNp linux-3.0.7/include/linux/cleancache.h linux-3.0.7/include/linux/cleancache.h
57397--- linux-3.0.7/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
57398+++ linux-3.0.7/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
57399@@ -31,7 +31,7 @@ struct cleancache_ops {
57400 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57401 void (*flush_inode)(int, struct cleancache_filekey);
57402 void (*flush_fs)(int);
57403-};
57404+} __no_const;
57405
57406 extern struct cleancache_ops
57407 cleancache_register_ops(struct cleancache_ops *ops);
57408diff -urNp linux-3.0.7/include/linux/compiler-gcc4.h linux-3.0.7/include/linux/compiler-gcc4.h
57409--- linux-3.0.7/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
57410+++ linux-3.0.7/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
57411@@ -31,6 +31,12 @@
57412
57413
57414 #if __GNUC_MINOR__ >= 5
57415+
57416+#ifdef CONSTIFY_PLUGIN
57417+#define __no_const __attribute__((no_const))
57418+#define __do_const __attribute__((do_const))
57419+#endif
57420+
57421 /*
57422 * Mark a position in code as unreachable. This can be used to
57423 * suppress control flow warnings after asm blocks that transfer
57424@@ -46,6 +52,11 @@
57425 #define __noclone __attribute__((__noclone__))
57426
57427 #endif
57428+
57429+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57430+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57431+#define __bos0(ptr) __bos((ptr), 0)
57432+#define __bos1(ptr) __bos((ptr), 1)
57433 #endif
57434
57435 #if __GNUC_MINOR__ > 0
57436diff -urNp linux-3.0.7/include/linux/compiler.h linux-3.0.7/include/linux/compiler.h
57437--- linux-3.0.7/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
57438+++ linux-3.0.7/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
57439@@ -5,31 +5,62 @@
57440
57441 #ifdef __CHECKER__
57442 # define __user __attribute__((noderef, address_space(1)))
57443+# define __force_user __force __user
57444 # define __kernel __attribute__((address_space(0)))
57445+# define __force_kernel __force __kernel
57446 # define __safe __attribute__((safe))
57447 # define __force __attribute__((force))
57448 # define __nocast __attribute__((nocast))
57449 # define __iomem __attribute__((noderef, address_space(2)))
57450+# define __force_iomem __force __iomem
57451 # define __acquires(x) __attribute__((context(x,0,1)))
57452 # define __releases(x) __attribute__((context(x,1,0)))
57453 # define __acquire(x) __context__(x,1)
57454 # define __release(x) __context__(x,-1)
57455 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57456 # define __percpu __attribute__((noderef, address_space(3)))
57457+# define __force_percpu __force __percpu
57458 #ifdef CONFIG_SPARSE_RCU_POINTER
57459 # define __rcu __attribute__((noderef, address_space(4)))
57460+# define __force_rcu __force __rcu
57461 #else
57462 # define __rcu
57463+# define __force_rcu
57464 #endif
57465 extern void __chk_user_ptr(const volatile void __user *);
57466 extern void __chk_io_ptr(const volatile void __iomem *);
57467+#elif defined(CHECKER_PLUGIN)
57468+//# define __user
57469+//# define __force_user
57470+//# define __kernel
57471+//# define __force_kernel
57472+# define __safe
57473+# define __force
57474+# define __nocast
57475+# define __iomem
57476+# define __force_iomem
57477+# define __chk_user_ptr(x) (void)0
57478+# define __chk_io_ptr(x) (void)0
57479+# define __builtin_warning(x, y...) (1)
57480+# define __acquires(x)
57481+# define __releases(x)
57482+# define __acquire(x) (void)0
57483+# define __release(x) (void)0
57484+# define __cond_lock(x,c) (c)
57485+# define __percpu
57486+# define __force_percpu
57487+# define __rcu
57488+# define __force_rcu
57489 #else
57490 # define __user
57491+# define __force_user
57492 # define __kernel
57493+# define __force_kernel
57494 # define __safe
57495 # define __force
57496 # define __nocast
57497 # define __iomem
57498+# define __force_iomem
57499 # define __chk_user_ptr(x) (void)0
57500 # define __chk_io_ptr(x) (void)0
57501 # define __builtin_warning(x, y...) (1)
57502@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57503 # define __release(x) (void)0
57504 # define __cond_lock(x,c) (c)
57505 # define __percpu
57506+# define __force_percpu
57507 # define __rcu
57508+# define __force_rcu
57509 #endif
57510
57511 #ifdef __KERNEL__
57512@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57513 # define __attribute_const__ /* unimplemented */
57514 #endif
57515
57516+#ifndef __no_const
57517+# define __no_const
57518+#endif
57519+
57520+#ifndef __do_const
57521+# define __do_const
57522+#endif
57523+
57524 /*
57525 * Tell gcc if a function is cold. The compiler will assume any path
57526 * directly leading to the call is unlikely.
57527@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57528 #define __cold
57529 #endif
57530
57531+#ifndef __alloc_size
57532+#define __alloc_size(...)
57533+#endif
57534+
57535+#ifndef __bos
57536+#define __bos(ptr, arg)
57537+#endif
57538+
57539+#ifndef __bos0
57540+#define __bos0(ptr)
57541+#endif
57542+
57543+#ifndef __bos1
57544+#define __bos1(ptr)
57545+#endif
57546+
57547 /* Simple shorthand for a section definition */
57548 #ifndef __section
57549 # define __section(S) __attribute__ ((__section__(#S)))
57550@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57551 * use is to mediate communication between process-level code and irq/NMI
57552 * handlers, all running on the same CPU.
57553 */
57554-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57555+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57556+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57557
57558 #endif /* __LINUX_COMPILER_H */
57559diff -urNp linux-3.0.7/include/linux/cpuset.h linux-3.0.7/include/linux/cpuset.h
57560--- linux-3.0.7/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
57561+++ linux-3.0.7/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
57562@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57563 * nodemask.
57564 */
57565 smp_mb();
57566- --ACCESS_ONCE(current->mems_allowed_change_disable);
57567+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57568 }
57569
57570 static inline void set_mems_allowed(nodemask_t nodemask)
57571diff -urNp linux-3.0.7/include/linux/crypto.h linux-3.0.7/include/linux/crypto.h
57572--- linux-3.0.7/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
57573+++ linux-3.0.7/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
57574@@ -361,7 +361,7 @@ struct cipher_tfm {
57575 const u8 *key, unsigned int keylen);
57576 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57577 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57578-};
57579+} __no_const;
57580
57581 struct hash_tfm {
57582 int (*init)(struct hash_desc *desc);
57583@@ -382,13 +382,13 @@ struct compress_tfm {
57584 int (*cot_decompress)(struct crypto_tfm *tfm,
57585 const u8 *src, unsigned int slen,
57586 u8 *dst, unsigned int *dlen);
57587-};
57588+} __no_const;
57589
57590 struct rng_tfm {
57591 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57592 unsigned int dlen);
57593 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57594-};
57595+} __no_const;
57596
57597 #define crt_ablkcipher crt_u.ablkcipher
57598 #define crt_aead crt_u.aead
57599diff -urNp linux-3.0.7/include/linux/decompress/mm.h linux-3.0.7/include/linux/decompress/mm.h
57600--- linux-3.0.7/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
57601+++ linux-3.0.7/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
57602@@ -77,7 +77,7 @@ static void free(void *where)
57603 * warnings when not needed (indeed large_malloc / large_free are not
57604 * needed by inflate */
57605
57606-#define malloc(a) kmalloc(a, GFP_KERNEL)
57607+#define malloc(a) kmalloc((a), GFP_KERNEL)
57608 #define free(a) kfree(a)
57609
57610 #define large_malloc(a) vmalloc(a)
57611diff -urNp linux-3.0.7/include/linux/dma-mapping.h linux-3.0.7/include/linux/dma-mapping.h
57612--- linux-3.0.7/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
57613+++ linux-3.0.7/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
57614@@ -50,7 +50,7 @@ struct dma_map_ops {
57615 int (*dma_supported)(struct device *dev, u64 mask);
57616 int (*set_dma_mask)(struct device *dev, u64 mask);
57617 int is_phys;
57618-};
57619+} __do_const;
57620
57621 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57622
57623diff -urNp linux-3.0.7/include/linux/efi.h linux-3.0.7/include/linux/efi.h
57624--- linux-3.0.7/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
57625+++ linux-3.0.7/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
57626@@ -410,7 +410,7 @@ struct efivar_operations {
57627 efi_get_variable_t *get_variable;
57628 efi_get_next_variable_t *get_next_variable;
57629 efi_set_variable_t *set_variable;
57630-};
57631+} __no_const;
57632
57633 struct efivars {
57634 /*
57635diff -urNp linux-3.0.7/include/linux/elf.h linux-3.0.7/include/linux/elf.h
57636--- linux-3.0.7/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
57637+++ linux-3.0.7/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
57638@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57639 #define PT_GNU_EH_FRAME 0x6474e550
57640
57641 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57642+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57643+
57644+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57645+
57646+/* Constants for the e_flags field */
57647+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57648+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57649+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57650+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57651+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57652+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57653
57654 /*
57655 * Extended Numbering
57656@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57657 #define DT_DEBUG 21
57658 #define DT_TEXTREL 22
57659 #define DT_JMPREL 23
57660+#define DT_FLAGS 30
57661+ #define DF_TEXTREL 0x00000004
57662 #define DT_ENCODING 32
57663 #define OLD_DT_LOOS 0x60000000
57664 #define DT_LOOS 0x6000000d
57665@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57666 #define PF_W 0x2
57667 #define PF_X 0x1
57668
57669+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57670+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57671+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57672+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57673+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57674+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57675+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57676+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57677+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57678+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57679+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57680+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57681+
57682 typedef struct elf32_phdr{
57683 Elf32_Word p_type;
57684 Elf32_Off p_offset;
57685@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57686 #define EI_OSABI 7
57687 #define EI_PAD 8
57688
57689+#define EI_PAX 14
57690+
57691 #define ELFMAG0 0x7f /* EI_MAG */
57692 #define ELFMAG1 'E'
57693 #define ELFMAG2 'L'
57694@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57695 #define elf_note elf32_note
57696 #define elf_addr_t Elf32_Off
57697 #define Elf_Half Elf32_Half
57698+#define elf_dyn Elf32_Dyn
57699
57700 #else
57701
57702@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57703 #define elf_note elf64_note
57704 #define elf_addr_t Elf64_Off
57705 #define Elf_Half Elf64_Half
57706+#define elf_dyn Elf64_Dyn
57707
57708 #endif
57709
57710diff -urNp linux-3.0.7/include/linux/firewire.h linux-3.0.7/include/linux/firewire.h
57711--- linux-3.0.7/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
57712+++ linux-3.0.7/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
57713@@ -428,7 +428,7 @@ struct fw_iso_context {
57714 union {
57715 fw_iso_callback_t sc;
57716 fw_iso_mc_callback_t mc;
57717- } callback;
57718+ } __no_const callback;
57719 void *callback_data;
57720 };
57721
57722diff -urNp linux-3.0.7/include/linux/fs.h linux-3.0.7/include/linux/fs.h
57723--- linux-3.0.7/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
57724+++ linux-3.0.7/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
57725@@ -109,6 +109,11 @@ struct inodes_stat_t {
57726 /* File was opened by fanotify and shouldn't generate fanotify events */
57727 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
57728
57729+/* Hack for grsec so as not to require read permission simply to execute
57730+ * a binary
57731+ */
57732+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57733+
57734 /*
57735 * The below are the various read and write types that we support. Some of
57736 * them include behavioral modifiers that send information down to the
57737@@ -1571,7 +1576,8 @@ struct file_operations {
57738 int (*setlease)(struct file *, long, struct file_lock **);
57739 long (*fallocate)(struct file *file, int mode, loff_t offset,
57740 loff_t len);
57741-};
57742+} __do_const;
57743+typedef struct file_operations __no_const file_operations_no_const;
57744
57745 #define IPERM_FLAG_RCU 0x0001
57746
57747diff -urNp linux-3.0.7/include/linux/fs_struct.h linux-3.0.7/include/linux/fs_struct.h
57748--- linux-3.0.7/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
57749+++ linux-3.0.7/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
57750@@ -6,7 +6,7 @@
57751 #include <linux/seqlock.h>
57752
57753 struct fs_struct {
57754- int users;
57755+ atomic_t users;
57756 spinlock_t lock;
57757 seqcount_t seq;
57758 int umask;
57759diff -urNp linux-3.0.7/include/linux/fscache-cache.h linux-3.0.7/include/linux/fscache-cache.h
57760--- linux-3.0.7/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
57761+++ linux-3.0.7/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
57762@@ -102,7 +102,7 @@ struct fscache_operation {
57763 fscache_operation_release_t release;
57764 };
57765
57766-extern atomic_t fscache_op_debug_id;
57767+extern atomic_unchecked_t fscache_op_debug_id;
57768 extern void fscache_op_work_func(struct work_struct *work);
57769
57770 extern void fscache_enqueue_operation(struct fscache_operation *);
57771@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57772 {
57773 INIT_WORK(&op->work, fscache_op_work_func);
57774 atomic_set(&op->usage, 1);
57775- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57776+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57777 op->processor = processor;
57778 op->release = release;
57779 INIT_LIST_HEAD(&op->pend_link);
57780diff -urNp linux-3.0.7/include/linux/fsnotify.h linux-3.0.7/include/linux/fsnotify.h
57781--- linux-3.0.7/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
57782+++ linux-3.0.7/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
57783@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57784 */
57785 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57786 {
57787- return kstrdup(name, GFP_KERNEL);
57788+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57789 }
57790
57791 /*
57792diff -urNp linux-3.0.7/include/linux/ftrace_event.h linux-3.0.7/include/linux/ftrace_event.h
57793--- linux-3.0.7/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
57794+++ linux-3.0.7/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
57795@@ -96,7 +96,7 @@ struct trace_event_functions {
57796 trace_print_func raw;
57797 trace_print_func hex;
57798 trace_print_func binary;
57799-};
57800+} __no_const;
57801
57802 struct trace_event {
57803 struct hlist_node node;
57804@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
57805 extern int trace_add_event_call(struct ftrace_event_call *call);
57806 extern void trace_remove_event_call(struct ftrace_event_call *call);
57807
57808-#define is_signed_type(type) (((type)(-1)) < 0)
57809+#define is_signed_type(type) (((type)(-1)) < (type)1)
57810
57811 int trace_set_clr_event(const char *system, const char *event, int set);
57812
57813diff -urNp linux-3.0.7/include/linux/genhd.h linux-3.0.7/include/linux/genhd.h
57814--- linux-3.0.7/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
57815+++ linux-3.0.7/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
57816@@ -184,7 +184,7 @@ struct gendisk {
57817 struct kobject *slave_dir;
57818
57819 struct timer_rand_state *random;
57820- atomic_t sync_io; /* RAID */
57821+ atomic_unchecked_t sync_io; /* RAID */
57822 struct disk_events *ev;
57823 #ifdef CONFIG_BLK_DEV_INTEGRITY
57824 struct blk_integrity *integrity;
57825diff -urNp linux-3.0.7/include/linux/gracl.h linux-3.0.7/include/linux/gracl.h
57826--- linux-3.0.7/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57827+++ linux-3.0.7/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
57828@@ -0,0 +1,317 @@
57829+#ifndef GR_ACL_H
57830+#define GR_ACL_H
57831+
57832+#include <linux/grdefs.h>
57833+#include <linux/resource.h>
57834+#include <linux/capability.h>
57835+#include <linux/dcache.h>
57836+#include <asm/resource.h>
57837+
57838+/* Major status information */
57839+
57840+#define GR_VERSION "grsecurity 2.2.2"
57841+#define GRSECURITY_VERSION 0x2202
57842+
57843+enum {
57844+ GR_SHUTDOWN = 0,
57845+ GR_ENABLE = 1,
57846+ GR_SPROLE = 2,
57847+ GR_RELOAD = 3,
57848+ GR_SEGVMOD = 4,
57849+ GR_STATUS = 5,
57850+ GR_UNSPROLE = 6,
57851+ GR_PASSSET = 7,
57852+ GR_SPROLEPAM = 8,
57853+};
57854+
57855+/* Password setup definitions
57856+ * kernel/grhash.c */
57857+enum {
57858+ GR_PW_LEN = 128,
57859+ GR_SALT_LEN = 16,
57860+ GR_SHA_LEN = 32,
57861+};
57862+
57863+enum {
57864+ GR_SPROLE_LEN = 64,
57865+};
57866+
57867+enum {
57868+ GR_NO_GLOB = 0,
57869+ GR_REG_GLOB,
57870+ GR_CREATE_GLOB
57871+};
57872+
57873+#define GR_NLIMITS 32
57874+
57875+/* Begin Data Structures */
57876+
57877+struct sprole_pw {
57878+ unsigned char *rolename;
57879+ unsigned char salt[GR_SALT_LEN];
57880+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57881+};
57882+
57883+struct name_entry {
57884+ __u32 key;
57885+ ino_t inode;
57886+ dev_t device;
57887+ char *name;
57888+ __u16 len;
57889+ __u8 deleted;
57890+ struct name_entry *prev;
57891+ struct name_entry *next;
57892+};
57893+
57894+struct inodev_entry {
57895+ struct name_entry *nentry;
57896+ struct inodev_entry *prev;
57897+ struct inodev_entry *next;
57898+};
57899+
57900+struct acl_role_db {
57901+ struct acl_role_label **r_hash;
57902+ __u32 r_size;
57903+};
57904+
57905+struct inodev_db {
57906+ struct inodev_entry **i_hash;
57907+ __u32 i_size;
57908+};
57909+
57910+struct name_db {
57911+ struct name_entry **n_hash;
57912+ __u32 n_size;
57913+};
57914+
57915+struct crash_uid {
57916+ uid_t uid;
57917+ unsigned long expires;
57918+};
57919+
57920+struct gr_hash_struct {
57921+ void **table;
57922+ void **nametable;
57923+ void *first;
57924+ __u32 table_size;
57925+ __u32 used_size;
57926+ int type;
57927+};
57928+
57929+/* Userspace Grsecurity ACL data structures */
57930+
57931+struct acl_subject_label {
57932+ char *filename;
57933+ ino_t inode;
57934+ dev_t device;
57935+ __u32 mode;
57936+ kernel_cap_t cap_mask;
57937+ kernel_cap_t cap_lower;
57938+ kernel_cap_t cap_invert_audit;
57939+
57940+ struct rlimit res[GR_NLIMITS];
57941+ __u32 resmask;
57942+
57943+ __u8 user_trans_type;
57944+ __u8 group_trans_type;
57945+ uid_t *user_transitions;
57946+ gid_t *group_transitions;
57947+ __u16 user_trans_num;
57948+ __u16 group_trans_num;
57949+
57950+ __u32 sock_families[2];
57951+ __u32 ip_proto[8];
57952+ __u32 ip_type;
57953+ struct acl_ip_label **ips;
57954+ __u32 ip_num;
57955+ __u32 inaddr_any_override;
57956+
57957+ __u32 crashes;
57958+ unsigned long expires;
57959+
57960+ struct acl_subject_label *parent_subject;
57961+ struct gr_hash_struct *hash;
57962+ struct acl_subject_label *prev;
57963+ struct acl_subject_label *next;
57964+
57965+ struct acl_object_label **obj_hash;
57966+ __u32 obj_hash_size;
57967+ __u16 pax_flags;
57968+};
57969+
57970+struct role_allowed_ip {
57971+ __u32 addr;
57972+ __u32 netmask;
57973+
57974+ struct role_allowed_ip *prev;
57975+ struct role_allowed_ip *next;
57976+};
57977+
57978+struct role_transition {
57979+ char *rolename;
57980+
57981+ struct role_transition *prev;
57982+ struct role_transition *next;
57983+};
57984+
57985+struct acl_role_label {
57986+ char *rolename;
57987+ uid_t uidgid;
57988+ __u16 roletype;
57989+
57990+ __u16 auth_attempts;
57991+ unsigned long expires;
57992+
57993+ struct acl_subject_label *root_label;
57994+ struct gr_hash_struct *hash;
57995+
57996+ struct acl_role_label *prev;
57997+ struct acl_role_label *next;
57998+
57999+ struct role_transition *transitions;
58000+ struct role_allowed_ip *allowed_ips;
58001+ uid_t *domain_children;
58002+ __u16 domain_child_num;
58003+
58004+ struct acl_subject_label **subj_hash;
58005+ __u32 subj_hash_size;
58006+};
58007+
58008+struct user_acl_role_db {
58009+ struct acl_role_label **r_table;
58010+ __u32 num_pointers; /* Number of allocations to track */
58011+ __u32 num_roles; /* Number of roles */
58012+ __u32 num_domain_children; /* Number of domain children */
58013+ __u32 num_subjects; /* Number of subjects */
58014+ __u32 num_objects; /* Number of objects */
58015+};
58016+
58017+struct acl_object_label {
58018+ char *filename;
58019+ ino_t inode;
58020+ dev_t device;
58021+ __u32 mode;
58022+
58023+ struct acl_subject_label *nested;
58024+ struct acl_object_label *globbed;
58025+
58026+ /* next two structures not used */
58027+
58028+ struct acl_object_label *prev;
58029+ struct acl_object_label *next;
58030+};
58031+
58032+struct acl_ip_label {
58033+ char *iface;
58034+ __u32 addr;
58035+ __u32 netmask;
58036+ __u16 low, high;
58037+ __u8 mode;
58038+ __u32 type;
58039+ __u32 proto[8];
58040+
58041+ /* next two structures not used */
58042+
58043+ struct acl_ip_label *prev;
58044+ struct acl_ip_label *next;
58045+};
58046+
58047+struct gr_arg {
58048+ struct user_acl_role_db role_db;
58049+ unsigned char pw[GR_PW_LEN];
58050+ unsigned char salt[GR_SALT_LEN];
58051+ unsigned char sum[GR_SHA_LEN];
58052+ unsigned char sp_role[GR_SPROLE_LEN];
58053+ struct sprole_pw *sprole_pws;
58054+ dev_t segv_device;
58055+ ino_t segv_inode;
58056+ uid_t segv_uid;
58057+ __u16 num_sprole_pws;
58058+ __u16 mode;
58059+};
58060+
58061+struct gr_arg_wrapper {
58062+ struct gr_arg *arg;
58063+ __u32 version;
58064+ __u32 size;
58065+};
58066+
58067+struct subject_map {
58068+ struct acl_subject_label *user;
58069+ struct acl_subject_label *kernel;
58070+ struct subject_map *prev;
58071+ struct subject_map *next;
58072+};
58073+
58074+struct acl_subj_map_db {
58075+ struct subject_map **s_hash;
58076+ __u32 s_size;
58077+};
58078+
58079+/* End Data Structures Section */
58080+
58081+/* Hash functions generated by empirical testing by Brad Spengler
58082+ Makes good use of the low bits of the inode. Generally 0-1 times
58083+ in loop for successful match. 0-3 for unsuccessful match.
58084+ Shift/add algorithm with modulus of table size and an XOR*/
58085+
58086+static __inline__ unsigned int
58087+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58088+{
58089+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58090+}
58091+
58092+ static __inline__ unsigned int
58093+shash(const struct acl_subject_label *userp, const unsigned int sz)
58094+{
58095+ return ((const unsigned long)userp % sz);
58096+}
58097+
58098+static __inline__ unsigned int
58099+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58100+{
58101+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58102+}
58103+
58104+static __inline__ unsigned int
58105+nhash(const char *name, const __u16 len, const unsigned int sz)
58106+{
58107+ return full_name_hash((const unsigned char *)name, len) % sz;
58108+}
58109+
58110+#define FOR_EACH_ROLE_START(role) \
58111+ role = role_list; \
58112+ while (role) {
58113+
58114+#define FOR_EACH_ROLE_END(role) \
58115+ role = role->prev; \
58116+ }
58117+
58118+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58119+ subj = NULL; \
58120+ iter = 0; \
58121+ while (iter < role->subj_hash_size) { \
58122+ if (subj == NULL) \
58123+ subj = role->subj_hash[iter]; \
58124+ if (subj == NULL) { \
58125+ iter++; \
58126+ continue; \
58127+ }
58128+
58129+#define FOR_EACH_SUBJECT_END(subj,iter) \
58130+ subj = subj->next; \
58131+ if (subj == NULL) \
58132+ iter++; \
58133+ }
58134+
58135+
58136+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58137+ subj = role->hash->first; \
58138+ while (subj != NULL) {
58139+
58140+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58141+ subj = subj->next; \
58142+ }
58143+
58144+#endif
58145+
58146diff -urNp linux-3.0.7/include/linux/gralloc.h linux-3.0.7/include/linux/gralloc.h
58147--- linux-3.0.7/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58148+++ linux-3.0.7/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
58149@@ -0,0 +1,9 @@
58150+#ifndef __GRALLOC_H
58151+#define __GRALLOC_H
58152+
58153+void acl_free_all(void);
58154+int acl_alloc_stack_init(unsigned long size);
58155+void *acl_alloc(unsigned long len);
58156+void *acl_alloc_num(unsigned long num, unsigned long len);
58157+
58158+#endif
58159diff -urNp linux-3.0.7/include/linux/grdefs.h linux-3.0.7/include/linux/grdefs.h
58160--- linux-3.0.7/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58161+++ linux-3.0.7/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
58162@@ -0,0 +1,140 @@
58163+#ifndef GRDEFS_H
58164+#define GRDEFS_H
58165+
58166+/* Begin grsecurity status declarations */
58167+
58168+enum {
58169+ GR_READY = 0x01,
58170+ GR_STATUS_INIT = 0x00 // disabled state
58171+};
58172+
58173+/* Begin ACL declarations */
58174+
58175+/* Role flags */
58176+
58177+enum {
58178+ GR_ROLE_USER = 0x0001,
58179+ GR_ROLE_GROUP = 0x0002,
58180+ GR_ROLE_DEFAULT = 0x0004,
58181+ GR_ROLE_SPECIAL = 0x0008,
58182+ GR_ROLE_AUTH = 0x0010,
58183+ GR_ROLE_NOPW = 0x0020,
58184+ GR_ROLE_GOD = 0x0040,
58185+ GR_ROLE_LEARN = 0x0080,
58186+ GR_ROLE_TPE = 0x0100,
58187+ GR_ROLE_DOMAIN = 0x0200,
58188+ GR_ROLE_PAM = 0x0400,
58189+ GR_ROLE_PERSIST = 0x0800
58190+};
58191+
58192+/* ACL Subject and Object mode flags */
58193+enum {
58194+ GR_DELETED = 0x80000000
58195+};
58196+
58197+/* ACL Object-only mode flags */
58198+enum {
58199+ GR_READ = 0x00000001,
58200+ GR_APPEND = 0x00000002,
58201+ GR_WRITE = 0x00000004,
58202+ GR_EXEC = 0x00000008,
58203+ GR_FIND = 0x00000010,
58204+ GR_INHERIT = 0x00000020,
58205+ GR_SETID = 0x00000040,
58206+ GR_CREATE = 0x00000080,
58207+ GR_DELETE = 0x00000100,
58208+ GR_LINK = 0x00000200,
58209+ GR_AUDIT_READ = 0x00000400,
58210+ GR_AUDIT_APPEND = 0x00000800,
58211+ GR_AUDIT_WRITE = 0x00001000,
58212+ GR_AUDIT_EXEC = 0x00002000,
58213+ GR_AUDIT_FIND = 0x00004000,
58214+ GR_AUDIT_INHERIT= 0x00008000,
58215+ GR_AUDIT_SETID = 0x00010000,
58216+ GR_AUDIT_CREATE = 0x00020000,
58217+ GR_AUDIT_DELETE = 0x00040000,
58218+ GR_AUDIT_LINK = 0x00080000,
58219+ GR_PTRACERD = 0x00100000,
58220+ GR_NOPTRACE = 0x00200000,
58221+ GR_SUPPRESS = 0x00400000,
58222+ GR_NOLEARN = 0x00800000,
58223+ GR_INIT_TRANSFER= 0x01000000
58224+};
58225+
58226+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58227+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58228+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58229+
58230+/* ACL subject-only mode flags */
58231+enum {
58232+ GR_KILL = 0x00000001,
58233+ GR_VIEW = 0x00000002,
58234+ GR_PROTECTED = 0x00000004,
58235+ GR_LEARN = 0x00000008,
58236+ GR_OVERRIDE = 0x00000010,
58237+ /* just a placeholder, this mode is only used in userspace */
58238+ GR_DUMMY = 0x00000020,
58239+ GR_PROTSHM = 0x00000040,
58240+ GR_KILLPROC = 0x00000080,
58241+ GR_KILLIPPROC = 0x00000100,
58242+ /* just a placeholder, this mode is only used in userspace */
58243+ GR_NOTROJAN = 0x00000200,
58244+ GR_PROTPROCFD = 0x00000400,
58245+ GR_PROCACCT = 0x00000800,
58246+ GR_RELAXPTRACE = 0x00001000,
58247+ GR_NESTED = 0x00002000,
58248+ GR_INHERITLEARN = 0x00004000,
58249+ GR_PROCFIND = 0x00008000,
58250+ GR_POVERRIDE = 0x00010000,
58251+ GR_KERNELAUTH = 0x00020000,
58252+ GR_ATSECURE = 0x00040000,
58253+ GR_SHMEXEC = 0x00080000
58254+};
58255+
58256+enum {
58257+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58258+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58259+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58260+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58261+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58262+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58263+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58264+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58265+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58266+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58267+};
58268+
58269+enum {
58270+ GR_ID_USER = 0x01,
58271+ GR_ID_GROUP = 0x02,
58272+};
58273+
58274+enum {
58275+ GR_ID_ALLOW = 0x01,
58276+ GR_ID_DENY = 0x02,
58277+};
58278+
58279+#define GR_CRASH_RES 31
58280+#define GR_UIDTABLE_MAX 500
58281+
58282+/* begin resource learning section */
58283+enum {
58284+ GR_RLIM_CPU_BUMP = 60,
58285+ GR_RLIM_FSIZE_BUMP = 50000,
58286+ GR_RLIM_DATA_BUMP = 10000,
58287+ GR_RLIM_STACK_BUMP = 1000,
58288+ GR_RLIM_CORE_BUMP = 10000,
58289+ GR_RLIM_RSS_BUMP = 500000,
58290+ GR_RLIM_NPROC_BUMP = 1,
58291+ GR_RLIM_NOFILE_BUMP = 5,
58292+ GR_RLIM_MEMLOCK_BUMP = 50000,
58293+ GR_RLIM_AS_BUMP = 500000,
58294+ GR_RLIM_LOCKS_BUMP = 2,
58295+ GR_RLIM_SIGPENDING_BUMP = 5,
58296+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58297+ GR_RLIM_NICE_BUMP = 1,
58298+ GR_RLIM_RTPRIO_BUMP = 1,
58299+ GR_RLIM_RTTIME_BUMP = 1000000
58300+};
58301+
58302+#endif
58303diff -urNp linux-3.0.7/include/linux/grinternal.h linux-3.0.7/include/linux/grinternal.h
58304--- linux-3.0.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58305+++ linux-3.0.7/include/linux/grinternal.h 2011-10-20 00:47:28.000000000 -0400
58306@@ -0,0 +1,220 @@
58307+#ifndef __GRINTERNAL_H
58308+#define __GRINTERNAL_H
58309+
58310+#ifdef CONFIG_GRKERNSEC
58311+
58312+#include <linux/fs.h>
58313+#include <linux/mnt_namespace.h>
58314+#include <linux/nsproxy.h>
58315+#include <linux/gracl.h>
58316+#include <linux/grdefs.h>
58317+#include <linux/grmsg.h>
58318+
58319+void gr_add_learn_entry(const char *fmt, ...)
58320+ __attribute__ ((format (printf, 1, 2)));
58321+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58322+ const struct vfsmount *mnt);
58323+__u32 gr_check_create(const struct dentry *new_dentry,
58324+ const struct dentry *parent,
58325+ const struct vfsmount *mnt, const __u32 mode);
58326+int gr_check_protected_task(const struct task_struct *task);
58327+__u32 to_gr_audit(const __u32 reqmode);
58328+int gr_set_acls(const int type);
58329+int gr_apply_subject_to_task(struct task_struct *task);
58330+int gr_acl_is_enabled(void);
58331+char gr_roletype_to_char(void);
58332+
58333+void gr_handle_alertkill(struct task_struct *task);
58334+char *gr_to_filename(const struct dentry *dentry,
58335+ const struct vfsmount *mnt);
58336+char *gr_to_filename1(const struct dentry *dentry,
58337+ const struct vfsmount *mnt);
58338+char *gr_to_filename2(const struct dentry *dentry,
58339+ const struct vfsmount *mnt);
58340+char *gr_to_filename3(const struct dentry *dentry,
58341+ const struct vfsmount *mnt);
58342+
58343+extern int grsec_enable_harden_ptrace;
58344+extern int grsec_enable_link;
58345+extern int grsec_enable_fifo;
58346+extern int grsec_enable_execve;
58347+extern int grsec_enable_shm;
58348+extern int grsec_enable_execlog;
58349+extern int grsec_enable_signal;
58350+extern int grsec_enable_audit_ptrace;
58351+extern int grsec_enable_forkfail;
58352+extern int grsec_enable_time;
58353+extern int grsec_enable_rofs;
58354+extern int grsec_enable_chroot_shmat;
58355+extern int grsec_enable_chroot_mount;
58356+extern int grsec_enable_chroot_double;
58357+extern int grsec_enable_chroot_pivot;
58358+extern int grsec_enable_chroot_chdir;
58359+extern int grsec_enable_chroot_chmod;
58360+extern int grsec_enable_chroot_mknod;
58361+extern int grsec_enable_chroot_fchdir;
58362+extern int grsec_enable_chroot_nice;
58363+extern int grsec_enable_chroot_execlog;
58364+extern int grsec_enable_chroot_caps;
58365+extern int grsec_enable_chroot_sysctl;
58366+extern int grsec_enable_chroot_unix;
58367+extern int grsec_enable_tpe;
58368+extern int grsec_tpe_gid;
58369+extern int grsec_enable_tpe_all;
58370+extern int grsec_enable_tpe_invert;
58371+extern int grsec_enable_socket_all;
58372+extern int grsec_socket_all_gid;
58373+extern int grsec_enable_socket_client;
58374+extern int grsec_socket_client_gid;
58375+extern int grsec_enable_socket_server;
58376+extern int grsec_socket_server_gid;
58377+extern int grsec_audit_gid;
58378+extern int grsec_enable_group;
58379+extern int grsec_enable_audit_textrel;
58380+extern int grsec_enable_log_rwxmaps;
58381+extern int grsec_enable_mount;
58382+extern int grsec_enable_chdir;
58383+extern int grsec_resource_logging;
58384+extern int grsec_enable_blackhole;
58385+extern int grsec_lastack_retries;
58386+extern int grsec_enable_brute;
58387+extern int grsec_lock;
58388+
58389+extern spinlock_t grsec_alert_lock;
58390+extern unsigned long grsec_alert_wtime;
58391+extern unsigned long grsec_alert_fyet;
58392+
58393+extern spinlock_t grsec_audit_lock;
58394+
58395+extern rwlock_t grsec_exec_file_lock;
58396+
58397+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58398+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58399+ (tsk)->exec_file->f_vfsmnt) : "/")
58400+
58401+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58402+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58403+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58404+
58405+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58406+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58407+ (tsk)->exec_file->f_vfsmnt) : "/")
58408+
58409+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58410+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58411+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58412+
58413+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58414+
58415+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58416+
58417+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58418+ (task)->pid, (cred)->uid, \
58419+ (cred)->euid, (cred)->gid, (cred)->egid, \
58420+ gr_parent_task_fullpath(task), \
58421+ (task)->real_parent->comm, (task)->real_parent->pid, \
58422+ (pcred)->uid, (pcred)->euid, \
58423+ (pcred)->gid, (pcred)->egid
58424+
58425+#define GR_CHROOT_CAPS {{ \
58426+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58427+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58428+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58429+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58430+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58431+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58432+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58433+
58434+#define security_learn(normal_msg,args...) \
58435+({ \
58436+ read_lock(&grsec_exec_file_lock); \
58437+ gr_add_learn_entry(normal_msg "\n", ## args); \
58438+ read_unlock(&grsec_exec_file_lock); \
58439+})
58440+
58441+enum {
58442+ GR_DO_AUDIT,
58443+ GR_DONT_AUDIT,
58444+ /* used for non-audit messages that we shouldn't kill the task on */
58445+ GR_DONT_AUDIT_GOOD
58446+};
58447+
58448+enum {
58449+ GR_TTYSNIFF,
58450+ GR_RBAC,
58451+ GR_RBAC_STR,
58452+ GR_STR_RBAC,
58453+ GR_RBAC_MODE2,
58454+ GR_RBAC_MODE3,
58455+ GR_FILENAME,
58456+ GR_SYSCTL_HIDDEN,
58457+ GR_NOARGS,
58458+ GR_ONE_INT,
58459+ GR_ONE_INT_TWO_STR,
58460+ GR_ONE_STR,
58461+ GR_STR_INT,
58462+ GR_TWO_STR_INT,
58463+ GR_TWO_INT,
58464+ GR_TWO_U64,
58465+ GR_THREE_INT,
58466+ GR_FIVE_INT_TWO_STR,
58467+ GR_TWO_STR,
58468+ GR_THREE_STR,
58469+ GR_FOUR_STR,
58470+ GR_STR_FILENAME,
58471+ GR_FILENAME_STR,
58472+ GR_FILENAME_TWO_INT,
58473+ GR_FILENAME_TWO_INT_STR,
58474+ GR_TEXTREL,
58475+ GR_PTRACE,
58476+ GR_RESOURCE,
58477+ GR_CAP,
58478+ GR_SIG,
58479+ GR_SIG2,
58480+ GR_CRASH1,
58481+ GR_CRASH2,
58482+ GR_PSACCT,
58483+ GR_RWXMAP
58484+};
58485+
58486+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58487+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58488+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58489+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58490+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58491+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58492+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58493+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58494+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58495+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58496+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58497+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58498+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58499+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58500+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58501+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58502+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58503+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58504+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58505+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58506+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58507+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58508+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58509+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58510+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58511+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58512+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58513+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58514+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58515+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58516+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58517+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58518+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58519+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58520+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58521+
58522+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58523+
58524+#endif
58525+
58526+#endif
58527diff -urNp linux-3.0.7/include/linux/grmsg.h linux-3.0.7/include/linux/grmsg.h
58528--- linux-3.0.7/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58529+++ linux-3.0.7/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
58530@@ -0,0 +1,108 @@
58531+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58532+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58533+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58534+#define GR_STOPMOD_MSG "denied modification of module state by "
58535+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58536+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58537+#define GR_IOPERM_MSG "denied use of ioperm() by "
58538+#define GR_IOPL_MSG "denied use of iopl() by "
58539+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58540+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58541+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58542+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58543+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58544+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58545+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58546+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58547+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58548+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58549+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58550+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58551+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58552+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58553+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58554+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58555+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58556+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58557+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58558+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58559+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58560+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58561+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58562+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58563+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58564+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58565+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58566+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58567+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58568+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58569+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58570+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58571+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58572+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58573+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58574+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58575+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58576+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58577+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58578+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58579+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58580+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58581+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58582+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58583+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58584+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58585+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58586+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58587+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58588+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58589+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58590+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58591+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58592+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58593+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58594+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58595+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58596+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58597+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58598+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58599+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58600+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58601+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58602+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58603+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58604+#define GR_NICE_CHROOT_MSG "denied priority change by "
58605+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58606+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58607+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58608+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58609+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58610+#define GR_TIME_MSG "time set by "
58611+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58612+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58613+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58614+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58615+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58616+#define GR_BIND_MSG "denied bind() by "
58617+#define GR_CONNECT_MSG "denied connect() by "
58618+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58619+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58620+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58621+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58622+#define GR_CAP_ACL_MSG "use of %s denied for "
58623+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58624+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58625+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58626+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58627+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58628+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58629+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58630+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58631+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58632+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58633+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58634+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58635+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58636+#define GR_VM86_MSG "denied use of vm86 by "
58637+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58638+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58639diff -urNp linux-3.0.7/include/linux/grsecurity.h linux-3.0.7/include/linux/grsecurity.h
58640--- linux-3.0.7/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58641+++ linux-3.0.7/include/linux/grsecurity.h 2011-10-17 06:35:30.000000000 -0400
58642@@ -0,0 +1,228 @@
58643+#ifndef GR_SECURITY_H
58644+#define GR_SECURITY_H
58645+#include <linux/fs.h>
58646+#include <linux/fs_struct.h>
58647+#include <linux/binfmts.h>
58648+#include <linux/gracl.h>
58649+
58650+/* notify of brain-dead configs */
58651+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58652+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58653+#endif
58654+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58655+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58656+#endif
58657+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58658+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58659+#endif
58660+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58661+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58662+#endif
58663+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58664+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58665+#endif
58666+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58667+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58668+#endif
58669+
58670+#include <linux/compat.h>
58671+
58672+struct user_arg_ptr {
58673+#ifdef CONFIG_COMPAT
58674+ bool is_compat;
58675+#endif
58676+ union {
58677+ const char __user *const __user *native;
58678+#ifdef CONFIG_COMPAT
58679+ compat_uptr_t __user *compat;
58680+#endif
58681+ } ptr;
58682+};
58683+
58684+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58685+void gr_handle_brute_check(void);
58686+void gr_handle_kernel_exploit(void);
58687+int gr_process_user_ban(void);
58688+
58689+char gr_roletype_to_char(void);
58690+
58691+int gr_acl_enable_at_secure(void);
58692+
58693+int gr_check_user_change(int real, int effective, int fs);
58694+int gr_check_group_change(int real, int effective, int fs);
58695+
58696+void gr_del_task_from_ip_table(struct task_struct *p);
58697+
58698+int gr_pid_is_chrooted(struct task_struct *p);
58699+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58700+int gr_handle_chroot_nice(void);
58701+int gr_handle_chroot_sysctl(const int op);
58702+int gr_handle_chroot_setpriority(struct task_struct *p,
58703+ const int niceval);
58704+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58705+int gr_handle_chroot_chroot(const struct dentry *dentry,
58706+ const struct vfsmount *mnt);
58707+void gr_handle_chroot_chdir(struct path *path);
58708+int gr_handle_chroot_chmod(const struct dentry *dentry,
58709+ const struct vfsmount *mnt, const int mode);
58710+int gr_handle_chroot_mknod(const struct dentry *dentry,
58711+ const struct vfsmount *mnt, const int mode);
58712+int gr_handle_chroot_mount(const struct dentry *dentry,
58713+ const struct vfsmount *mnt,
58714+ const char *dev_name);
58715+int gr_handle_chroot_pivot(void);
58716+int gr_handle_chroot_unix(const pid_t pid);
58717+
58718+int gr_handle_rawio(const struct inode *inode);
58719+
58720+void gr_handle_ioperm(void);
58721+void gr_handle_iopl(void);
58722+
58723+int gr_tpe_allow(const struct file *file);
58724+
58725+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58726+void gr_clear_chroot_entries(struct task_struct *task);
58727+
58728+void gr_log_forkfail(const int retval);
58729+void gr_log_timechange(void);
58730+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58731+void gr_log_chdir(const struct dentry *dentry,
58732+ const struct vfsmount *mnt);
58733+void gr_log_chroot_exec(const struct dentry *dentry,
58734+ const struct vfsmount *mnt);
58735+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58736+void gr_log_remount(const char *devname, const int retval);
58737+void gr_log_unmount(const char *devname, const int retval);
58738+void gr_log_mount(const char *from, const char *to, const int retval);
58739+void gr_log_textrel(struct vm_area_struct *vma);
58740+void gr_log_rwxmmap(struct file *file);
58741+void gr_log_rwxmprotect(struct file *file);
58742+
58743+int gr_handle_follow_link(const struct inode *parent,
58744+ const struct inode *inode,
58745+ const struct dentry *dentry,
58746+ const struct vfsmount *mnt);
58747+int gr_handle_fifo(const struct dentry *dentry,
58748+ const struct vfsmount *mnt,
58749+ const struct dentry *dir, const int flag,
58750+ const int acc_mode);
58751+int gr_handle_hardlink(const struct dentry *dentry,
58752+ const struct vfsmount *mnt,
58753+ struct inode *inode,
58754+ const int mode, const char *to);
58755+
58756+int gr_is_capable(const int cap);
58757+int gr_is_capable_nolog(const int cap);
58758+void gr_learn_resource(const struct task_struct *task, const int limit,
58759+ const unsigned long wanted, const int gt);
58760+void gr_copy_label(struct task_struct *tsk);
58761+void gr_handle_crash(struct task_struct *task, const int sig);
58762+int gr_handle_signal(const struct task_struct *p, const int sig);
58763+int gr_check_crash_uid(const uid_t uid);
58764+int gr_check_protected_task(const struct task_struct *task);
58765+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58766+int gr_acl_handle_mmap(const struct file *file,
58767+ const unsigned long prot);
58768+int gr_acl_handle_mprotect(const struct file *file,
58769+ const unsigned long prot);
58770+int gr_check_hidden_task(const struct task_struct *tsk);
58771+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58772+ const struct vfsmount *mnt);
58773+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58774+ const struct vfsmount *mnt);
58775+__u32 gr_acl_handle_access(const struct dentry *dentry,
58776+ const struct vfsmount *mnt, const int fmode);
58777+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58778+ const struct vfsmount *mnt, mode_t mode);
58779+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58780+ const struct vfsmount *mnt, mode_t mode);
58781+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58782+ const struct vfsmount *mnt);
58783+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58784+ const struct vfsmount *mnt);
58785+int gr_handle_ptrace(struct task_struct *task, const long request);
58786+int gr_handle_proc_ptrace(struct task_struct *task);
58787+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58788+ const struct vfsmount *mnt);
58789+int gr_check_crash_exec(const struct file *filp);
58790+int gr_acl_is_enabled(void);
58791+void gr_set_kernel_label(struct task_struct *task);
58792+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58793+ const gid_t gid);
58794+int gr_set_proc_label(const struct dentry *dentry,
58795+ const struct vfsmount *mnt,
58796+ const int unsafe_share);
58797+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58798+ const struct vfsmount *mnt);
58799+__u32 gr_acl_handle_open(const struct dentry *dentry,
58800+ const struct vfsmount *mnt, const int fmode);
58801+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58802+ const struct dentry *p_dentry,
58803+ const struct vfsmount *p_mnt, const int fmode,
58804+ const int imode);
58805+void gr_handle_create(const struct dentry *dentry,
58806+ const struct vfsmount *mnt);
58807+void gr_handle_proc_create(const struct dentry *dentry,
58808+ const struct inode *inode);
58809+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58810+ const struct dentry *parent_dentry,
58811+ const struct vfsmount *parent_mnt,
58812+ const int mode);
58813+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58814+ const struct dentry *parent_dentry,
58815+ const struct vfsmount *parent_mnt);
58816+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58817+ const struct vfsmount *mnt);
58818+void gr_handle_delete(const ino_t ino, const dev_t dev);
58819+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58820+ const struct vfsmount *mnt);
58821+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58822+ const struct dentry *parent_dentry,
58823+ const struct vfsmount *parent_mnt,
58824+ const char *from);
58825+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58826+ const struct dentry *parent_dentry,
58827+ const struct vfsmount *parent_mnt,
58828+ const struct dentry *old_dentry,
58829+ const struct vfsmount *old_mnt, const char *to);
58830+int gr_acl_handle_rename(struct dentry *new_dentry,
58831+ struct dentry *parent_dentry,
58832+ const struct vfsmount *parent_mnt,
58833+ struct dentry *old_dentry,
58834+ struct inode *old_parent_inode,
58835+ struct vfsmount *old_mnt, const char *newname);
58836+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58837+ struct dentry *old_dentry,
58838+ struct dentry *new_dentry,
58839+ struct vfsmount *mnt, const __u8 replace);
58840+__u32 gr_check_link(const struct dentry *new_dentry,
58841+ const struct dentry *parent_dentry,
58842+ const struct vfsmount *parent_mnt,
58843+ const struct dentry *old_dentry,
58844+ const struct vfsmount *old_mnt);
58845+int gr_acl_handle_filldir(const struct file *file, const char *name,
58846+ const unsigned int namelen, const ino_t ino);
58847+
58848+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58849+ const struct vfsmount *mnt);
58850+void gr_acl_handle_exit(void);
58851+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58852+int gr_acl_handle_procpidmem(const struct task_struct *task);
58853+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58854+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58855+void gr_audit_ptrace(struct task_struct *task);
58856+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58857+
58858+#ifdef CONFIG_GRKERNSEC
58859+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58860+void gr_handle_vm86(void);
58861+void gr_handle_mem_readwrite(u64 from, u64 to);
58862+
58863+extern int grsec_enable_dmesg;
58864+extern int grsec_disable_privio;
58865+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58866+extern int grsec_enable_chroot_findtask;
58867+#endif
58868+#endif
58869+
58870+#endif
58871diff -urNp linux-3.0.7/include/linux/grsock.h linux-3.0.7/include/linux/grsock.h
58872--- linux-3.0.7/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
58873+++ linux-3.0.7/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
58874@@ -0,0 +1,19 @@
58875+#ifndef __GRSOCK_H
58876+#define __GRSOCK_H
58877+
58878+extern void gr_attach_curr_ip(const struct sock *sk);
58879+extern int gr_handle_sock_all(const int family, const int type,
58880+ const int protocol);
58881+extern int gr_handle_sock_server(const struct sockaddr *sck);
58882+extern int gr_handle_sock_server_other(const struct sock *sck);
58883+extern int gr_handle_sock_client(const struct sockaddr *sck);
58884+extern int gr_search_connect(struct socket * sock,
58885+ struct sockaddr_in * addr);
58886+extern int gr_search_bind(struct socket * sock,
58887+ struct sockaddr_in * addr);
58888+extern int gr_search_listen(struct socket * sock);
58889+extern int gr_search_accept(struct socket * sock);
58890+extern int gr_search_socket(const int domain, const int type,
58891+ const int protocol);
58892+
58893+#endif
58894diff -urNp linux-3.0.7/include/linux/hid.h linux-3.0.7/include/linux/hid.h
58895--- linux-3.0.7/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
58896+++ linux-3.0.7/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
58897@@ -675,7 +675,7 @@ struct hid_ll_driver {
58898 unsigned int code, int value);
58899
58900 int (*parse)(struct hid_device *hdev);
58901-};
58902+} __no_const;
58903
58904 #define PM_HINT_FULLON 1<<5
58905 #define PM_HINT_NORMAL 1<<1
58906diff -urNp linux-3.0.7/include/linux/highmem.h linux-3.0.7/include/linux/highmem.h
58907--- linux-3.0.7/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
58908+++ linux-3.0.7/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
58909@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
58910 kunmap_atomic(kaddr, KM_USER0);
58911 }
58912
58913+static inline void sanitize_highpage(struct page *page)
58914+{
58915+ void *kaddr;
58916+ unsigned long flags;
58917+
58918+ local_irq_save(flags);
58919+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58920+ clear_page(kaddr);
58921+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58922+ local_irq_restore(flags);
58923+}
58924+
58925 static inline void zero_user_segments(struct page *page,
58926 unsigned start1, unsigned end1,
58927 unsigned start2, unsigned end2)
58928diff -urNp linux-3.0.7/include/linux/i2c.h linux-3.0.7/include/linux/i2c.h
58929--- linux-3.0.7/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
58930+++ linux-3.0.7/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
58931@@ -346,6 +346,7 @@ struct i2c_algorithm {
58932 /* To determine what the adapter supports */
58933 u32 (*functionality) (struct i2c_adapter *);
58934 };
58935+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58936
58937 /*
58938 * i2c_adapter is the structure used to identify a physical i2c bus along
58939diff -urNp linux-3.0.7/include/linux/i2o.h linux-3.0.7/include/linux/i2o.h
58940--- linux-3.0.7/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
58941+++ linux-3.0.7/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
58942@@ -564,7 +564,7 @@ struct i2o_controller {
58943 struct i2o_device *exec; /* Executive */
58944 #if BITS_PER_LONG == 64
58945 spinlock_t context_list_lock; /* lock for context_list */
58946- atomic_t context_list_counter; /* needed for unique contexts */
58947+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58948 struct list_head context_list; /* list of context id's
58949 and pointers */
58950 #endif
58951diff -urNp linux-3.0.7/include/linux/init.h linux-3.0.7/include/linux/init.h
58952--- linux-3.0.7/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
58953+++ linux-3.0.7/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
58954@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
58955
58956 /* Each module must use one module_init(). */
58957 #define module_init(initfn) \
58958- static inline initcall_t __inittest(void) \
58959+ static inline __used initcall_t __inittest(void) \
58960 { return initfn; } \
58961 int init_module(void) __attribute__((alias(#initfn)));
58962
58963 /* This is only required if you want to be unloadable. */
58964 #define module_exit(exitfn) \
58965- static inline exitcall_t __exittest(void) \
58966+ static inline __used exitcall_t __exittest(void) \
58967 { return exitfn; } \
58968 void cleanup_module(void) __attribute__((alias(#exitfn)));
58969
58970diff -urNp linux-3.0.7/include/linux/init_task.h linux-3.0.7/include/linux/init_task.h
58971--- linux-3.0.7/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
58972+++ linux-3.0.7/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
58973@@ -126,6 +126,12 @@ extern struct cred init_cred;
58974 # define INIT_PERF_EVENTS(tsk)
58975 #endif
58976
58977+#ifdef CONFIG_X86
58978+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58979+#else
58980+#define INIT_TASK_THREAD_INFO
58981+#endif
58982+
58983 /*
58984 * INIT_TASK is used to set up the first task table, touch at
58985 * your own risk!. Base=0, limit=0x1fffff (=2MB)
58986@@ -164,6 +170,7 @@ extern struct cred init_cred;
58987 RCU_INIT_POINTER(.cred, &init_cred), \
58988 .comm = "swapper", \
58989 .thread = INIT_THREAD, \
58990+ INIT_TASK_THREAD_INFO \
58991 .fs = &init_fs, \
58992 .files = &init_files, \
58993 .signal = &init_signals, \
58994diff -urNp linux-3.0.7/include/linux/intel-iommu.h linux-3.0.7/include/linux/intel-iommu.h
58995--- linux-3.0.7/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
58996+++ linux-3.0.7/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
58997@@ -296,7 +296,7 @@ struct iommu_flush {
58998 u8 fm, u64 type);
58999 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59000 unsigned int size_order, u64 type);
59001-};
59002+} __no_const;
59003
59004 enum {
59005 SR_DMAR_FECTL_REG,
59006diff -urNp linux-3.0.7/include/linux/interrupt.h linux-3.0.7/include/linux/interrupt.h
59007--- linux-3.0.7/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
59008+++ linux-3.0.7/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
59009@@ -422,7 +422,7 @@ enum
59010 /* map softirq index to softirq name. update 'softirq_to_name' in
59011 * kernel/softirq.c when adding a new softirq.
59012 */
59013-extern char *softirq_to_name[NR_SOFTIRQS];
59014+extern const char * const softirq_to_name[NR_SOFTIRQS];
59015
59016 /* softirq mask and active fields moved to irq_cpustat_t in
59017 * asm/hardirq.h to get better cache usage. KAO
59018@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59019
59020 struct softirq_action
59021 {
59022- void (*action)(struct softirq_action *);
59023+ void (*action)(void);
59024 };
59025
59026 asmlinkage void do_softirq(void);
59027 asmlinkage void __do_softirq(void);
59028-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59029+extern void open_softirq(int nr, void (*action)(void));
59030 extern void softirq_init(void);
59031 static inline void __raise_softirq_irqoff(unsigned int nr)
59032 {
59033diff -urNp linux-3.0.7/include/linux/kallsyms.h linux-3.0.7/include/linux/kallsyms.h
59034--- linux-3.0.7/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
59035+++ linux-3.0.7/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
59036@@ -15,7 +15,8 @@
59037
59038 struct module;
59039
59040-#ifdef CONFIG_KALLSYMS
59041+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59042+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59043 /* Lookup the address for a symbol. Returns 0 if not found. */
59044 unsigned long kallsyms_lookup_name(const char *name);
59045
59046@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59047 /* Stupid that this does nothing, but I didn't create this mess. */
59048 #define __print_symbol(fmt, addr)
59049 #endif /*CONFIG_KALLSYMS*/
59050+#else /* when included by kallsyms.c, vsnprintf.c, or
59051+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59052+extern void __print_symbol(const char *fmt, unsigned long address);
59053+extern int sprint_backtrace(char *buffer, unsigned long address);
59054+extern int sprint_symbol(char *buffer, unsigned long address);
59055+const char *kallsyms_lookup(unsigned long addr,
59056+ unsigned long *symbolsize,
59057+ unsigned long *offset,
59058+ char **modname, char *namebuf);
59059+#endif
59060
59061 /* This macro allows us to keep printk typechecking */
59062 static void __check_printsym_format(const char *fmt, ...)
59063diff -urNp linux-3.0.7/include/linux/kgdb.h linux-3.0.7/include/linux/kgdb.h
59064--- linux-3.0.7/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
59065+++ linux-3.0.7/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
59066@@ -53,7 +53,7 @@ extern int kgdb_connected;
59067 extern int kgdb_io_module_registered;
59068
59069 extern atomic_t kgdb_setting_breakpoint;
59070-extern atomic_t kgdb_cpu_doing_single_step;
59071+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59072
59073 extern struct task_struct *kgdb_usethread;
59074 extern struct task_struct *kgdb_contthread;
59075@@ -251,7 +251,7 @@ struct kgdb_arch {
59076 void (*disable_hw_break)(struct pt_regs *regs);
59077 void (*remove_all_hw_break)(void);
59078 void (*correct_hw_break)(void);
59079-};
59080+} __do_const;
59081
59082 /**
59083 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59084@@ -276,7 +276,7 @@ struct kgdb_io {
59085 void (*pre_exception) (void);
59086 void (*post_exception) (void);
59087 int is_console;
59088-};
59089+} __do_const;
59090
59091 extern struct kgdb_arch arch_kgdb_ops;
59092
59093diff -urNp linux-3.0.7/include/linux/kmod.h linux-3.0.7/include/linux/kmod.h
59094--- linux-3.0.7/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
59095+++ linux-3.0.7/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
59096@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59097 * usually useless though. */
59098 extern int __request_module(bool wait, const char *name, ...) \
59099 __attribute__((format(printf, 2, 3)));
59100+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59101+ __attribute__((format(printf, 3, 4)));
59102 #define request_module(mod...) __request_module(true, mod)
59103 #define request_module_nowait(mod...) __request_module(false, mod)
59104 #define try_then_request_module(x, mod...) \
59105diff -urNp linux-3.0.7/include/linux/kvm_host.h linux-3.0.7/include/linux/kvm_host.h
59106--- linux-3.0.7/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
59107+++ linux-3.0.7/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
59108@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59109 void vcpu_load(struct kvm_vcpu *vcpu);
59110 void vcpu_put(struct kvm_vcpu *vcpu);
59111
59112-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59113+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59114 struct module *module);
59115 void kvm_exit(void);
59116
59117@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59118 struct kvm_guest_debug *dbg);
59119 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59120
59121-int kvm_arch_init(void *opaque);
59122+int kvm_arch_init(const void *opaque);
59123 void kvm_arch_exit(void);
59124
59125 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59126diff -urNp linux-3.0.7/include/linux/libata.h linux-3.0.7/include/linux/libata.h
59127--- linux-3.0.7/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
59128+++ linux-3.0.7/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
59129@@ -899,7 +899,7 @@ struct ata_port_operations {
59130 * fields must be pointers.
59131 */
59132 const struct ata_port_operations *inherits;
59133-};
59134+} __do_const;
59135
59136 struct ata_port_info {
59137 unsigned long flags;
59138diff -urNp linux-3.0.7/include/linux/linkage.h linux-3.0.7/include/linux/linkage.h
59139--- linux-3.0.7/include/linux/linkage.h 2011-07-21 22:17:23.000000000 -0400
59140+++ linux-3.0.7/include/linux/linkage.h 2011-10-11 10:44:33.000000000 -0400
59141@@ -82,6 +82,7 @@
59142 */
59143 #ifndef ENDPROC
59144 #define ENDPROC(name) \
59145+ .size name, .-name; \
59146 .type name, @function; \
59147 END(name)
59148 #endif
59149diff -urNp linux-3.0.7/include/linux/mca.h linux-3.0.7/include/linux/mca.h
59150--- linux-3.0.7/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
59151+++ linux-3.0.7/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
59152@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59153 int region);
59154 void * (*mca_transform_memory)(struct mca_device *,
59155 void *memory);
59156-};
59157+} __no_const;
59158
59159 struct mca_bus {
59160 u64 default_dma_mask;
59161diff -urNp linux-3.0.7/include/linux/memory.h linux-3.0.7/include/linux/memory.h
59162--- linux-3.0.7/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
59163+++ linux-3.0.7/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
59164@@ -144,7 +144,7 @@ struct memory_accessor {
59165 size_t count);
59166 ssize_t (*write)(struct memory_accessor *, const char *buf,
59167 off_t offset, size_t count);
59168-};
59169+} __no_const;
59170
59171 /*
59172 * Kernel text modification mutex, used for code patching. Users of this lock
59173diff -urNp linux-3.0.7/include/linux/mfd/abx500.h linux-3.0.7/include/linux/mfd/abx500.h
59174--- linux-3.0.7/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
59175+++ linux-3.0.7/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
59176@@ -234,6 +234,7 @@ struct abx500_ops {
59177 int (*event_registers_startup_state_get) (struct device *, u8 *);
59178 int (*startup_irq_enabled) (struct device *, unsigned int);
59179 };
59180+typedef struct abx500_ops __no_const abx500_ops_no_const;
59181
59182 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59183 void abx500_remove_ops(struct device *dev);
59184diff -urNp linux-3.0.7/include/linux/mm.h linux-3.0.7/include/linux/mm.h
59185--- linux-3.0.7/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
59186+++ linux-3.0.7/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
59187@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
59188
59189 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59190 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59191+
59192+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59193+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59194+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59195+#else
59196 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59197+#endif
59198+
59199 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59200 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59201
59202@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
59203 int set_page_dirty_lock(struct page *page);
59204 int clear_page_dirty_for_io(struct page *page);
59205
59206-/* Is the vma a continuation of the stack vma above it? */
59207-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59208-{
59209- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59210-}
59211-
59212-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59213- unsigned long addr)
59214-{
59215- return (vma->vm_flags & VM_GROWSDOWN) &&
59216- (vma->vm_start == addr) &&
59217- !vma_growsdown(vma->vm_prev, addr);
59218-}
59219-
59220-/* Is the vma a continuation of the stack vma below it? */
59221-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59222-{
59223- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59224-}
59225-
59226-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59227- unsigned long addr)
59228-{
59229- return (vma->vm_flags & VM_GROWSUP) &&
59230- (vma->vm_end == addr) &&
59231- !vma_growsup(vma->vm_next, addr);
59232-}
59233-
59234 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59235 unsigned long old_addr, struct vm_area_struct *new_vma,
59236 unsigned long new_addr, unsigned long len);
59237@@ -1169,6 +1148,15 @@ struct shrinker {
59238 extern void register_shrinker(struct shrinker *);
59239 extern void unregister_shrinker(struct shrinker *);
59240
59241+#ifdef CONFIG_MMU
59242+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59243+#else
59244+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59245+{
59246+ return __pgprot(0);
59247+}
59248+#endif
59249+
59250 int vma_wants_writenotify(struct vm_area_struct *vma);
59251
59252 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59253@@ -1452,6 +1440,7 @@ out:
59254 }
59255
59256 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59257+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59258
59259 extern unsigned long do_brk(unsigned long, unsigned long);
59260
59261@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
59262 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59263 struct vm_area_struct **pprev);
59264
59265+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59266+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59267+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59268+
59269 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59270 NULL if none. Assume start_addr < end_addr. */
59271 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59272@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
59273 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59274 }
59275
59276-#ifdef CONFIG_MMU
59277-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59278-#else
59279-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59280-{
59281- return __pgprot(0);
59282-}
59283-#endif
59284-
59285 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59286 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59287 unsigned long pfn, unsigned long size, pgprot_t);
59288@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
59289 extern int sysctl_memory_failure_early_kill;
59290 extern int sysctl_memory_failure_recovery;
59291 extern void shake_page(struct page *p, int access);
59292-extern atomic_long_t mce_bad_pages;
59293+extern atomic_long_unchecked_t mce_bad_pages;
59294 extern int soft_offline_page(struct page *page, int flags);
59295
59296 extern void dump_page(struct page *page);
59297@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
59298 unsigned int pages_per_huge_page);
59299 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59300
59301+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59302+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59303+#else
59304+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59305+#endif
59306+
59307 #endif /* __KERNEL__ */
59308 #endif /* _LINUX_MM_H */
59309diff -urNp linux-3.0.7/include/linux/mm_types.h linux-3.0.7/include/linux/mm_types.h
59310--- linux-3.0.7/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
59311+++ linux-3.0.7/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
59312@@ -184,6 +184,8 @@ struct vm_area_struct {
59313 #ifdef CONFIG_NUMA
59314 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59315 #endif
59316+
59317+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59318 };
59319
59320 struct core_thread {
59321@@ -316,6 +318,24 @@ struct mm_struct {
59322 #ifdef CONFIG_CPUMASK_OFFSTACK
59323 struct cpumask cpumask_allocation;
59324 #endif
59325+
59326+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59327+ unsigned long pax_flags;
59328+#endif
59329+
59330+#ifdef CONFIG_PAX_DLRESOLVE
59331+ unsigned long call_dl_resolve;
59332+#endif
59333+
59334+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59335+ unsigned long call_syscall;
59336+#endif
59337+
59338+#ifdef CONFIG_PAX_ASLR
59339+ unsigned long delta_mmap; /* randomized offset */
59340+ unsigned long delta_stack; /* randomized offset */
59341+#endif
59342+
59343 };
59344
59345 static inline void mm_init_cpumask(struct mm_struct *mm)
59346diff -urNp linux-3.0.7/include/linux/mmu_notifier.h linux-3.0.7/include/linux/mmu_notifier.h
59347--- linux-3.0.7/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
59348+++ linux-3.0.7/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
59349@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59350 */
59351 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59352 ({ \
59353- pte_t __pte; \
59354+ pte_t ___pte; \
59355 struct vm_area_struct *___vma = __vma; \
59356 unsigned long ___address = __address; \
59357- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59358+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59359 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59360- __pte; \
59361+ ___pte; \
59362 })
59363
59364 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59365diff -urNp linux-3.0.7/include/linux/mmzone.h linux-3.0.7/include/linux/mmzone.h
59366--- linux-3.0.7/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
59367+++ linux-3.0.7/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
59368@@ -350,7 +350,7 @@ struct zone {
59369 unsigned long flags; /* zone flags, see below */
59370
59371 /* Zone statistics */
59372- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59373+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59374
59375 /*
59376 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59377diff -urNp linux-3.0.7/include/linux/mod_devicetable.h linux-3.0.7/include/linux/mod_devicetable.h
59378--- linux-3.0.7/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
59379+++ linux-3.0.7/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
59380@@ -12,7 +12,7 @@
59381 typedef unsigned long kernel_ulong_t;
59382 #endif
59383
59384-#define PCI_ANY_ID (~0)
59385+#define PCI_ANY_ID ((__u16)~0)
59386
59387 struct pci_device_id {
59388 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59389@@ -131,7 +131,7 @@ struct usb_device_id {
59390 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59391 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59392
59393-#define HID_ANY_ID (~0)
59394+#define HID_ANY_ID (~0U)
59395
59396 struct hid_device_id {
59397 __u16 bus;
59398diff -urNp linux-3.0.7/include/linux/module.h linux-3.0.7/include/linux/module.h
59399--- linux-3.0.7/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
59400+++ linux-3.0.7/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
59401@@ -16,6 +16,7 @@
59402 #include <linux/kobject.h>
59403 #include <linux/moduleparam.h>
59404 #include <linux/tracepoint.h>
59405+#include <linux/fs.h>
59406
59407 #include <linux/percpu.h>
59408 #include <asm/module.h>
59409@@ -325,19 +326,16 @@ struct module
59410 int (*init)(void);
59411
59412 /* If this is non-NULL, vfree after init() returns */
59413- void *module_init;
59414+ void *module_init_rx, *module_init_rw;
59415
59416 /* Here is the actual code + data, vfree'd on unload. */
59417- void *module_core;
59418+ void *module_core_rx, *module_core_rw;
59419
59420 /* Here are the sizes of the init and core sections */
59421- unsigned int init_size, core_size;
59422+ unsigned int init_size_rw, core_size_rw;
59423
59424 /* The size of the executable code in each section. */
59425- unsigned int init_text_size, core_text_size;
59426-
59427- /* Size of RO sections of the module (text+rodata) */
59428- unsigned int init_ro_size, core_ro_size;
59429+ unsigned int init_size_rx, core_size_rx;
59430
59431 /* Arch-specific module values */
59432 struct mod_arch_specific arch;
59433@@ -393,6 +391,10 @@ struct module
59434 #ifdef CONFIG_EVENT_TRACING
59435 struct ftrace_event_call **trace_events;
59436 unsigned int num_trace_events;
59437+ struct file_operations trace_id;
59438+ struct file_operations trace_enable;
59439+ struct file_operations trace_format;
59440+ struct file_operations trace_filter;
59441 #endif
59442 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59443 unsigned int num_ftrace_callsites;
59444@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
59445 bool is_module_percpu_address(unsigned long addr);
59446 bool is_module_text_address(unsigned long addr);
59447
59448+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59449+{
59450+
59451+#ifdef CONFIG_PAX_KERNEXEC
59452+ if (ktla_ktva(addr) >= (unsigned long)start &&
59453+ ktla_ktva(addr) < (unsigned long)start + size)
59454+ return 1;
59455+#endif
59456+
59457+ return ((void *)addr >= start && (void *)addr < start + size);
59458+}
59459+
59460+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59461+{
59462+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59463+}
59464+
59465+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59466+{
59467+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59468+}
59469+
59470+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59471+{
59472+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59473+}
59474+
59475+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59476+{
59477+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59478+}
59479+
59480 static inline int within_module_core(unsigned long addr, struct module *mod)
59481 {
59482- return (unsigned long)mod->module_core <= addr &&
59483- addr < (unsigned long)mod->module_core + mod->core_size;
59484+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59485 }
59486
59487 static inline int within_module_init(unsigned long addr, struct module *mod)
59488 {
59489- return (unsigned long)mod->module_init <= addr &&
59490- addr < (unsigned long)mod->module_init + mod->init_size;
59491+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59492 }
59493
59494 /* Search for module by name: must hold module_mutex. */
59495diff -urNp linux-3.0.7/include/linux/moduleloader.h linux-3.0.7/include/linux/moduleloader.h
59496--- linux-3.0.7/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
59497+++ linux-3.0.7/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
59498@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59499 sections. Returns NULL on failure. */
59500 void *module_alloc(unsigned long size);
59501
59502+#ifdef CONFIG_PAX_KERNEXEC
59503+void *module_alloc_exec(unsigned long size);
59504+#else
59505+#define module_alloc_exec(x) module_alloc(x)
59506+#endif
59507+
59508 /* Free memory returned from module_alloc. */
59509 void module_free(struct module *mod, void *module_region);
59510
59511+#ifdef CONFIG_PAX_KERNEXEC
59512+void module_free_exec(struct module *mod, void *module_region);
59513+#else
59514+#define module_free_exec(x, y) module_free((x), (y))
59515+#endif
59516+
59517 /* Apply the given relocation to the (simplified) ELF. Return -error
59518 or 0. */
59519 int apply_relocate(Elf_Shdr *sechdrs,
59520diff -urNp linux-3.0.7/include/linux/moduleparam.h linux-3.0.7/include/linux/moduleparam.h
59521--- linux-3.0.7/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
59522+++ linux-3.0.7/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
59523@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59524 * @len is usually just sizeof(string).
59525 */
59526 #define module_param_string(name, string, len, perm) \
59527- static const struct kparam_string __param_string_##name \
59528+ static const struct kparam_string __param_string_##name __used \
59529 = { len, string }; \
59530 __module_param_call(MODULE_PARAM_PREFIX, name, \
59531 &param_ops_string, \
59532@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59533 * module_param_named() for why this might be necessary.
59534 */
59535 #define module_param_array_named(name, array, type, nump, perm) \
59536- static const struct kparam_array __param_arr_##name \
59537+ static const struct kparam_array __param_arr_##name __used \
59538 = { .max = ARRAY_SIZE(array), .num = nump, \
59539 .ops = &param_ops_##type, \
59540 .elemsize = sizeof(array[0]), .elem = array }; \
59541diff -urNp linux-3.0.7/include/linux/namei.h linux-3.0.7/include/linux/namei.h
59542--- linux-3.0.7/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
59543+++ linux-3.0.7/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
59544@@ -24,7 +24,7 @@ struct nameidata {
59545 unsigned seq;
59546 int last_type;
59547 unsigned depth;
59548- char *saved_names[MAX_NESTED_LINKS + 1];
59549+ const char *saved_names[MAX_NESTED_LINKS + 1];
59550
59551 /* Intent data */
59552 union {
59553@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
59554 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59555 extern void unlock_rename(struct dentry *, struct dentry *);
59556
59557-static inline void nd_set_link(struct nameidata *nd, char *path)
59558+static inline void nd_set_link(struct nameidata *nd, const char *path)
59559 {
59560 nd->saved_names[nd->depth] = path;
59561 }
59562
59563-static inline char *nd_get_link(struct nameidata *nd)
59564+static inline const char *nd_get_link(const struct nameidata *nd)
59565 {
59566 return nd->saved_names[nd->depth];
59567 }
59568diff -urNp linux-3.0.7/include/linux/netdevice.h linux-3.0.7/include/linux/netdevice.h
59569--- linux-3.0.7/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
59570+++ linux-3.0.7/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
59571@@ -979,6 +979,7 @@ struct net_device_ops {
59572 int (*ndo_set_features)(struct net_device *dev,
59573 u32 features);
59574 };
59575+typedef struct net_device_ops __no_const net_device_ops_no_const;
59576
59577 /*
59578 * The DEVICE structure.
59579diff -urNp linux-3.0.7/include/linux/netfilter/xt_gradm.h linux-3.0.7/include/linux/netfilter/xt_gradm.h
59580--- linux-3.0.7/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59581+++ linux-3.0.7/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
59582@@ -0,0 +1,9 @@
59583+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59584+#define _LINUX_NETFILTER_XT_GRADM_H 1
59585+
59586+struct xt_gradm_mtinfo {
59587+ __u16 flags;
59588+ __u16 invflags;
59589+};
59590+
59591+#endif
59592diff -urNp linux-3.0.7/include/linux/of_pdt.h linux-3.0.7/include/linux/of_pdt.h
59593--- linux-3.0.7/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
59594+++ linux-3.0.7/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
59595@@ -32,7 +32,7 @@ struct of_pdt_ops {
59596
59597 /* return 0 on success; fill in 'len' with number of bytes in path */
59598 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59599-};
59600+} __no_const;
59601
59602 extern void *prom_early_alloc(unsigned long size);
59603
59604diff -urNp linux-3.0.7/include/linux/oprofile.h linux-3.0.7/include/linux/oprofile.h
59605--- linux-3.0.7/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
59606+++ linux-3.0.7/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
59607@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59608 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59609 char const * name, ulong * val);
59610
59611-/** Create a file for read-only access to an atomic_t. */
59612+/** Create a file for read-only access to an atomic_unchecked_t. */
59613 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59614- char const * name, atomic_t * val);
59615+ char const * name, atomic_unchecked_t * val);
59616
59617 /** create a directory */
59618 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59619diff -urNp linux-3.0.7/include/linux/padata.h linux-3.0.7/include/linux/padata.h
59620--- linux-3.0.7/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
59621+++ linux-3.0.7/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
59622@@ -129,7 +129,7 @@ struct parallel_data {
59623 struct padata_instance *pinst;
59624 struct padata_parallel_queue __percpu *pqueue;
59625 struct padata_serial_queue __percpu *squeue;
59626- atomic_t seq_nr;
59627+ atomic_unchecked_t seq_nr;
59628 atomic_t reorder_objects;
59629 atomic_t refcnt;
59630 unsigned int max_seq_nr;
59631diff -urNp linux-3.0.7/include/linux/perf_event.h linux-3.0.7/include/linux/perf_event.h
59632--- linux-3.0.7/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
59633+++ linux-3.0.7/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
59634@@ -761,8 +761,8 @@ struct perf_event {
59635
59636 enum perf_event_active_state state;
59637 unsigned int attach_state;
59638- local64_t count;
59639- atomic64_t child_count;
59640+ local64_t count; /* PaX: fix it one day */
59641+ atomic64_unchecked_t child_count;
59642
59643 /*
59644 * These are the total time in nanoseconds that the event
59645@@ -813,8 +813,8 @@ struct perf_event {
59646 * These accumulate total time (in nanoseconds) that children
59647 * events have been enabled and running, respectively.
59648 */
59649- atomic64_t child_total_time_enabled;
59650- atomic64_t child_total_time_running;
59651+ atomic64_unchecked_t child_total_time_enabled;
59652+ atomic64_unchecked_t child_total_time_running;
59653
59654 /*
59655 * Protect attach/detach and child_list:
59656diff -urNp linux-3.0.7/include/linux/pipe_fs_i.h linux-3.0.7/include/linux/pipe_fs_i.h
59657--- linux-3.0.7/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
59658+++ linux-3.0.7/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
59659@@ -46,9 +46,9 @@ struct pipe_buffer {
59660 struct pipe_inode_info {
59661 wait_queue_head_t wait;
59662 unsigned int nrbufs, curbuf, buffers;
59663- unsigned int readers;
59664- unsigned int writers;
59665- unsigned int waiting_writers;
59666+ atomic_t readers;
59667+ atomic_t writers;
59668+ atomic_t waiting_writers;
59669 unsigned int r_counter;
59670 unsigned int w_counter;
59671 struct page *tmp_page;
59672diff -urNp linux-3.0.7/include/linux/pm_runtime.h linux-3.0.7/include/linux/pm_runtime.h
59673--- linux-3.0.7/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
59674+++ linux-3.0.7/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
59675@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
59676
59677 static inline void pm_runtime_mark_last_busy(struct device *dev)
59678 {
59679- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59680+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59681 }
59682
59683 #else /* !CONFIG_PM_RUNTIME */
59684diff -urNp linux-3.0.7/include/linux/poison.h linux-3.0.7/include/linux/poison.h
59685--- linux-3.0.7/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
59686+++ linux-3.0.7/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
59687@@ -19,8 +19,8 @@
59688 * under normal circumstances, used to verify that nobody uses
59689 * non-initialized list entries.
59690 */
59691-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59692-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59693+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59694+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59695
59696 /********** include/linux/timer.h **********/
59697 /*
59698diff -urNp linux-3.0.7/include/linux/preempt.h linux-3.0.7/include/linux/preempt.h
59699--- linux-3.0.7/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
59700+++ linux-3.0.7/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
59701@@ -115,7 +115,7 @@ struct preempt_ops {
59702 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59703 void (*sched_out)(struct preempt_notifier *notifier,
59704 struct task_struct *next);
59705-};
59706+} __no_const;
59707
59708 /**
59709 * preempt_notifier - key for installing preemption notifiers
59710diff -urNp linux-3.0.7/include/linux/proc_fs.h linux-3.0.7/include/linux/proc_fs.h
59711--- linux-3.0.7/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
59712+++ linux-3.0.7/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
59713@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59714 return proc_create_data(name, mode, parent, proc_fops, NULL);
59715 }
59716
59717+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59718+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59719+{
59720+#ifdef CONFIG_GRKERNSEC_PROC_USER
59721+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59722+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59723+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59724+#else
59725+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59726+#endif
59727+}
59728+
59729+
59730 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59731 mode_t mode, struct proc_dir_entry *base,
59732 read_proc_t *read_proc, void * data)
59733@@ -258,7 +271,7 @@ union proc_op {
59734 int (*proc_show)(struct seq_file *m,
59735 struct pid_namespace *ns, struct pid *pid,
59736 struct task_struct *task);
59737-};
59738+} __no_const;
59739
59740 struct ctl_table_header;
59741 struct ctl_table;
59742diff -urNp linux-3.0.7/include/linux/ptrace.h linux-3.0.7/include/linux/ptrace.h
59743--- linux-3.0.7/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
59744+++ linux-3.0.7/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
59745@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
59746 extern void exit_ptrace(struct task_struct *tracer);
59747 #define PTRACE_MODE_READ 1
59748 #define PTRACE_MODE_ATTACH 2
59749-/* Returns 0 on success, -errno on denial. */
59750-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59751 /* Returns true on success, false on denial. */
59752 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59753+/* Returns true on success, false on denial. */
59754+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59755
59756 static inline int ptrace_reparented(struct task_struct *child)
59757 {
59758diff -urNp linux-3.0.7/include/linux/random.h linux-3.0.7/include/linux/random.h
59759--- linux-3.0.7/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
59760+++ linux-3.0.7/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
59761@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59762
59763 u32 prandom32(struct rnd_state *);
59764
59765+static inline unsigned long pax_get_random_long(void)
59766+{
59767+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59768+}
59769+
59770 /*
59771 * Handle minimum values for seeds
59772 */
59773 static inline u32 __seed(u32 x, u32 m)
59774 {
59775- return (x < m) ? x + m : x;
59776+ return (x <= m) ? x + m + 1 : x;
59777 }
59778
59779 /**
59780diff -urNp linux-3.0.7/include/linux/reboot.h linux-3.0.7/include/linux/reboot.h
59781--- linux-3.0.7/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
59782+++ linux-3.0.7/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
59783@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59784 * Architecture-specific implementations of sys_reboot commands.
59785 */
59786
59787-extern void machine_restart(char *cmd);
59788-extern void machine_halt(void);
59789-extern void machine_power_off(void);
59790+extern void machine_restart(char *cmd) __noreturn;
59791+extern void machine_halt(void) __noreturn;
59792+extern void machine_power_off(void) __noreturn;
59793
59794 extern void machine_shutdown(void);
59795 struct pt_regs;
59796@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59797 */
59798
59799 extern void kernel_restart_prepare(char *cmd);
59800-extern void kernel_restart(char *cmd);
59801-extern void kernel_halt(void);
59802-extern void kernel_power_off(void);
59803+extern void kernel_restart(char *cmd) __noreturn;
59804+extern void kernel_halt(void) __noreturn;
59805+extern void kernel_power_off(void) __noreturn;
59806
59807 extern int C_A_D; /* for sysctl */
59808 void ctrl_alt_del(void);
59809@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
59810 * Emergency restart, callable from an interrupt handler.
59811 */
59812
59813-extern void emergency_restart(void);
59814+extern void emergency_restart(void) __noreturn;
59815 #include <asm/emergency-restart.h>
59816
59817 #endif
59818diff -urNp linux-3.0.7/include/linux/reiserfs_fs.h linux-3.0.7/include/linux/reiserfs_fs.h
59819--- linux-3.0.7/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
59820+++ linux-3.0.7/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
59821@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
59822 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59823
59824 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59825-#define get_generation(s) atomic_read (&fs_generation(s))
59826+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59827 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59828 #define __fs_changed(gen,s) (gen != get_generation (s))
59829 #define fs_changed(gen,s) \
59830diff -urNp linux-3.0.7/include/linux/reiserfs_fs_sb.h linux-3.0.7/include/linux/reiserfs_fs_sb.h
59831--- linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
59832+++ linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
59833@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59834 /* Comment? -Hans */
59835 wait_queue_head_t s_wait;
59836 /* To be obsoleted soon by per buffer seals.. -Hans */
59837- atomic_t s_generation_counter; // increased by one every time the
59838+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59839 // tree gets re-balanced
59840 unsigned long s_properties; /* File system properties. Currently holds
59841 on-disk FS format */
59842diff -urNp linux-3.0.7/include/linux/relay.h linux-3.0.7/include/linux/relay.h
59843--- linux-3.0.7/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
59844+++ linux-3.0.7/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
59845@@ -159,7 +159,7 @@ struct rchan_callbacks
59846 * The callback should return 0 if successful, negative if not.
59847 */
59848 int (*remove_buf_file)(struct dentry *dentry);
59849-};
59850+} __no_const;
59851
59852 /*
59853 * CONFIG_RELAY kernel API, kernel/relay.c
59854diff -urNp linux-3.0.7/include/linux/rfkill.h linux-3.0.7/include/linux/rfkill.h
59855--- linux-3.0.7/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
59856+++ linux-3.0.7/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
59857@@ -147,6 +147,7 @@ struct rfkill_ops {
59858 void (*query)(struct rfkill *rfkill, void *data);
59859 int (*set_block)(void *data, bool blocked);
59860 };
59861+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59862
59863 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59864 /**
59865diff -urNp linux-3.0.7/include/linux/rmap.h linux-3.0.7/include/linux/rmap.h
59866--- linux-3.0.7/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
59867+++ linux-3.0.7/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
59868@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
59869 void anon_vma_init(void); /* create anon_vma_cachep */
59870 int anon_vma_prepare(struct vm_area_struct *);
59871 void unlink_anon_vmas(struct vm_area_struct *);
59872-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
59873-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
59874+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
59875+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
59876 void __anon_vma_link(struct vm_area_struct *);
59877
59878 static inline void anon_vma_merge(struct vm_area_struct *vma,
59879diff -urNp linux-3.0.7/include/linux/sched.h linux-3.0.7/include/linux/sched.h
59880--- linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:09.000000000 -0400
59881+++ linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:19.000000000 -0400
59882@@ -100,6 +100,7 @@ struct bio_list;
59883 struct fs_struct;
59884 struct perf_event_context;
59885 struct blk_plug;
59886+struct linux_binprm;
59887
59888 /*
59889 * List of flags we want to share for kernel threads,
59890@@ -380,10 +381,13 @@ struct user_namespace;
59891 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59892
59893 extern int sysctl_max_map_count;
59894+extern unsigned long sysctl_heap_stack_gap;
59895
59896 #include <linux/aio.h>
59897
59898 #ifdef CONFIG_MMU
59899+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59900+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59901 extern void arch_pick_mmap_layout(struct mm_struct *mm);
59902 extern unsigned long
59903 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59904@@ -629,6 +633,17 @@ struct signal_struct {
59905 #ifdef CONFIG_TASKSTATS
59906 struct taskstats *stats;
59907 #endif
59908+
59909+#ifdef CONFIG_GRKERNSEC
59910+ u32 curr_ip;
59911+ u32 saved_ip;
59912+ u32 gr_saddr;
59913+ u32 gr_daddr;
59914+ u16 gr_sport;
59915+ u16 gr_dport;
59916+ u8 used_accept:1;
59917+#endif
59918+
59919 #ifdef CONFIG_AUDIT
59920 unsigned audit_tty;
59921 struct tty_audit_buf *tty_audit_buf;
59922@@ -710,6 +725,11 @@ struct user_struct {
59923 struct key *session_keyring; /* UID's default session keyring */
59924 #endif
59925
59926+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59927+ unsigned int banned;
59928+ unsigned long ban_expires;
59929+#endif
59930+
59931 /* Hash table maintenance information */
59932 struct hlist_node uidhash_node;
59933 uid_t uid;
59934@@ -1340,8 +1360,8 @@ struct task_struct {
59935 struct list_head thread_group;
59936
59937 struct completion *vfork_done; /* for vfork() */
59938- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59939- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59940+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59941+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59942
59943 cputime_t utime, stime, utimescaled, stimescaled;
59944 cputime_t gtime;
59945@@ -1357,13 +1377,6 @@ struct task_struct {
59946 struct task_cputime cputime_expires;
59947 struct list_head cpu_timers[3];
59948
59949-/* process credentials */
59950- const struct cred __rcu *real_cred; /* objective and real subjective task
59951- * credentials (COW) */
59952- const struct cred __rcu *cred; /* effective (overridable) subjective task
59953- * credentials (COW) */
59954- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59955-
59956 char comm[TASK_COMM_LEN]; /* executable name excluding path
59957 - access with [gs]et_task_comm (which lock
59958 it with task_lock())
59959@@ -1380,8 +1393,16 @@ struct task_struct {
59960 #endif
59961 /* CPU-specific state of this task */
59962 struct thread_struct thread;
59963+/* thread_info moved to task_struct */
59964+#ifdef CONFIG_X86
59965+ struct thread_info tinfo;
59966+#endif
59967 /* filesystem information */
59968 struct fs_struct *fs;
59969+
59970+ const struct cred __rcu *cred; /* effective (overridable) subjective task
59971+ * credentials (COW) */
59972+
59973 /* open file information */
59974 struct files_struct *files;
59975 /* namespaces */
59976@@ -1428,6 +1449,11 @@ struct task_struct {
59977 struct rt_mutex_waiter *pi_blocked_on;
59978 #endif
59979
59980+/* process credentials */
59981+ const struct cred __rcu *real_cred; /* objective and real subjective task
59982+ * credentials (COW) */
59983+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59984+
59985 #ifdef CONFIG_DEBUG_MUTEXES
59986 /* mutex deadlock detection */
59987 struct mutex_waiter *blocked_on;
59988@@ -1538,6 +1564,21 @@ struct task_struct {
59989 unsigned long default_timer_slack_ns;
59990
59991 struct list_head *scm_work_list;
59992+
59993+#ifdef CONFIG_GRKERNSEC
59994+ /* grsecurity */
59995+ struct dentry *gr_chroot_dentry;
59996+ struct acl_subject_label *acl;
59997+ struct acl_role_label *role;
59998+ struct file *exec_file;
59999+ u16 acl_role_id;
60000+ /* is this the task that authenticated to the special role */
60001+ u8 acl_sp_role;
60002+ u8 is_writable;
60003+ u8 brute;
60004+ u8 gr_is_chrooted;
60005+#endif
60006+
60007 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60008 /* Index of current stored address in ret_stack */
60009 int curr_ret_stack;
60010@@ -1572,6 +1613,57 @@ struct task_struct {
60011 #endif
60012 };
60013
60014+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60015+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60016+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60017+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60018+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60019+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60020+
60021+#ifdef CONFIG_PAX_SOFTMODE
60022+extern int pax_softmode;
60023+#endif
60024+
60025+extern int pax_check_flags(unsigned long *);
60026+
60027+/* if tsk != current then task_lock must be held on it */
60028+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60029+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60030+{
60031+ if (likely(tsk->mm))
60032+ return tsk->mm->pax_flags;
60033+ else
60034+ return 0UL;
60035+}
60036+
60037+/* if tsk != current then task_lock must be held on it */
60038+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60039+{
60040+ if (likely(tsk->mm)) {
60041+ tsk->mm->pax_flags = flags;
60042+ return 0;
60043+ }
60044+ return -EINVAL;
60045+}
60046+#endif
60047+
60048+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60049+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60050+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60051+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60052+#endif
60053+
60054+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60055+extern void pax_report_insns(void *pc, void *sp);
60056+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60057+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60058+
60059+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60060+extern void pax_track_stack(void);
60061+#else
60062+static inline void pax_track_stack(void) {}
60063+#endif
60064+
60065 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60066 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60067
60068@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
60069 #define PF_DUMPCORE 0x00000200 /* dumped core */
60070 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60071 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60072+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60073 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60074 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60075 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60076@@ -2055,7 +2148,9 @@ void yield(void);
60077 extern struct exec_domain default_exec_domain;
60078
60079 union thread_union {
60080+#ifndef CONFIG_X86
60081 struct thread_info thread_info;
60082+#endif
60083 unsigned long stack[THREAD_SIZE/sizeof(long)];
60084 };
60085
60086@@ -2088,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
60087 */
60088
60089 extern struct task_struct *find_task_by_vpid(pid_t nr);
60090+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60091 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60092 struct pid_namespace *ns);
60093
60094@@ -2224,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
60095 extern void exit_itimers(struct signal_struct *);
60096 extern void flush_itimer_signals(void);
60097
60098-extern NORET_TYPE void do_group_exit(int);
60099+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60100
60101 extern void daemonize(const char *, ...);
60102 extern int allow_signal(int);
60103@@ -2392,13 +2488,17 @@ static inline unsigned long *end_of_stac
60104
60105 #endif
60106
60107-static inline int object_is_on_stack(void *obj)
60108+static inline int object_starts_on_stack(void *obj)
60109 {
60110- void *stack = task_stack_page(current);
60111+ const void *stack = task_stack_page(current);
60112
60113 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60114 }
60115
60116+#ifdef CONFIG_PAX_USERCOPY
60117+extern int object_is_on_stack(const void *obj, unsigned long len);
60118+#endif
60119+
60120 extern void thread_info_cache_init(void);
60121
60122 #ifdef CONFIG_DEBUG_STACK_USAGE
60123diff -urNp linux-3.0.7/include/linux/screen_info.h linux-3.0.7/include/linux/screen_info.h
60124--- linux-3.0.7/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
60125+++ linux-3.0.7/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
60126@@ -43,7 +43,8 @@ struct screen_info {
60127 __u16 pages; /* 0x32 */
60128 __u16 vesa_attributes; /* 0x34 */
60129 __u32 capabilities; /* 0x36 */
60130- __u8 _reserved[6]; /* 0x3a */
60131+ __u16 vesapm_size; /* 0x3a */
60132+ __u8 _reserved[4]; /* 0x3c */
60133 } __attribute__((packed));
60134
60135 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60136diff -urNp linux-3.0.7/include/linux/security.h linux-3.0.7/include/linux/security.h
60137--- linux-3.0.7/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
60138+++ linux-3.0.7/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
60139@@ -36,6 +36,7 @@
60140 #include <linux/key.h>
60141 #include <linux/xfrm.h>
60142 #include <linux/slab.h>
60143+#include <linux/grsecurity.h>
60144 #include <net/flow.h>
60145
60146 /* Maximum number of letters for an LSM name string */
60147diff -urNp linux-3.0.7/include/linux/seq_file.h linux-3.0.7/include/linux/seq_file.h
60148--- linux-3.0.7/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
60149+++ linux-3.0.7/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
60150@@ -32,6 +32,7 @@ struct seq_operations {
60151 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60152 int (*show) (struct seq_file *m, void *v);
60153 };
60154+typedef struct seq_operations __no_const seq_operations_no_const;
60155
60156 #define SEQ_SKIP 1
60157
60158diff -urNp linux-3.0.7/include/linux/shm.h linux-3.0.7/include/linux/shm.h
60159--- linux-3.0.7/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
60160+++ linux-3.0.7/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
60161@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60162 pid_t shm_cprid;
60163 pid_t shm_lprid;
60164 struct user_struct *mlock_user;
60165+#ifdef CONFIG_GRKERNSEC
60166+ time_t shm_createtime;
60167+ pid_t shm_lapid;
60168+#endif
60169 };
60170
60171 /* shm_mode upper byte flags */
60172diff -urNp linux-3.0.7/include/linux/shmem_fs.h linux-3.0.7/include/linux/shmem_fs.h
60173--- linux-3.0.7/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
60174+++ linux-3.0.7/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
60175@@ -10,7 +10,7 @@
60176
60177 #define SHMEM_NR_DIRECT 16
60178
60179-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
60180+#define SHMEM_SYMLINK_INLINE_LEN 64
60181
60182 struct shmem_inode_info {
60183 spinlock_t lock;
60184diff -urNp linux-3.0.7/include/linux/skbuff.h linux-3.0.7/include/linux/skbuff.h
60185--- linux-3.0.7/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
60186+++ linux-3.0.7/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
60187@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
60188 */
60189 static inline int skb_queue_empty(const struct sk_buff_head *list)
60190 {
60191- return list->next == (struct sk_buff *)list;
60192+ return list->next == (const struct sk_buff *)list;
60193 }
60194
60195 /**
60196@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
60197 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60198 const struct sk_buff *skb)
60199 {
60200- return skb->next == (struct sk_buff *)list;
60201+ return skb->next == (const struct sk_buff *)list;
60202 }
60203
60204 /**
60205@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
60206 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60207 const struct sk_buff *skb)
60208 {
60209- return skb->prev == (struct sk_buff *)list;
60210+ return skb->prev == (const struct sk_buff *)list;
60211 }
60212
60213 /**
60214@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
60215 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60216 */
60217 #ifndef NET_SKB_PAD
60218-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60219+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60220 #endif
60221
60222 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60223diff -urNp linux-3.0.7/include/linux/slab.h linux-3.0.7/include/linux/slab.h
60224--- linux-3.0.7/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
60225+++ linux-3.0.7/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
60226@@ -11,12 +11,20 @@
60227
60228 #include <linux/gfp.h>
60229 #include <linux/types.h>
60230+#include <linux/err.h>
60231
60232 /*
60233 * Flags to pass to kmem_cache_create().
60234 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60235 */
60236 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60237+
60238+#ifdef CONFIG_PAX_USERCOPY
60239+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60240+#else
60241+#define SLAB_USERCOPY 0x00000000UL
60242+#endif
60243+
60244 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60245 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60246 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60247@@ -87,10 +95,13 @@
60248 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60249 * Both make kfree a no-op.
60250 */
60251-#define ZERO_SIZE_PTR ((void *)16)
60252+#define ZERO_SIZE_PTR \
60253+({ \
60254+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60255+ (void *)(-MAX_ERRNO-1L); \
60256+})
60257
60258-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60259- (unsigned long)ZERO_SIZE_PTR)
60260+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60261
60262 /*
60263 * struct kmem_cache related prototypes
60264@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
60265 void kfree(const void *);
60266 void kzfree(const void *);
60267 size_t ksize(const void *);
60268+void check_object_size(const void *ptr, unsigned long n, bool to);
60269
60270 /*
60271 * Allocator specific definitions. These are mainly used to establish optimized
60272@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
60273
60274 void __init kmem_cache_init_late(void);
60275
60276+#define kmalloc(x, y) \
60277+({ \
60278+ void *___retval; \
60279+ intoverflow_t ___x = (intoverflow_t)x; \
60280+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60281+ ___retval = NULL; \
60282+ else \
60283+ ___retval = kmalloc((size_t)___x, (y)); \
60284+ ___retval; \
60285+})
60286+
60287+#define kmalloc_node(x, y, z) \
60288+({ \
60289+ void *___retval; \
60290+ intoverflow_t ___x = (intoverflow_t)x; \
60291+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60292+ ___retval = NULL; \
60293+ else \
60294+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60295+ ___retval; \
60296+})
60297+
60298+#define kzalloc(x, y) \
60299+({ \
60300+ void *___retval; \
60301+ intoverflow_t ___x = (intoverflow_t)x; \
60302+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60303+ ___retval = NULL; \
60304+ else \
60305+ ___retval = kzalloc((size_t)___x, (y)); \
60306+ ___retval; \
60307+})
60308+
60309+#define __krealloc(x, y, z) \
60310+({ \
60311+ void *___retval; \
60312+ intoverflow_t ___y = (intoverflow_t)y; \
60313+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60314+ ___retval = NULL; \
60315+ else \
60316+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60317+ ___retval; \
60318+})
60319+
60320+#define krealloc(x, y, z) \
60321+({ \
60322+ void *___retval; \
60323+ intoverflow_t ___y = (intoverflow_t)y; \
60324+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60325+ ___retval = NULL; \
60326+ else \
60327+ ___retval = krealloc((x), (size_t)___y, (z)); \
60328+ ___retval; \
60329+})
60330+
60331 #endif /* _LINUX_SLAB_H */
60332diff -urNp linux-3.0.7/include/linux/slab_def.h linux-3.0.7/include/linux/slab_def.h
60333--- linux-3.0.7/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
60334+++ linux-3.0.7/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
60335@@ -96,10 +96,10 @@ struct kmem_cache {
60336 unsigned long node_allocs;
60337 unsigned long node_frees;
60338 unsigned long node_overflow;
60339- atomic_t allochit;
60340- atomic_t allocmiss;
60341- atomic_t freehit;
60342- atomic_t freemiss;
60343+ atomic_unchecked_t allochit;
60344+ atomic_unchecked_t allocmiss;
60345+ atomic_unchecked_t freehit;
60346+ atomic_unchecked_t freemiss;
60347
60348 /*
60349 * If debugging is enabled, then the allocator can add additional
60350diff -urNp linux-3.0.7/include/linux/slub_def.h linux-3.0.7/include/linux/slub_def.h
60351--- linux-3.0.7/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
60352+++ linux-3.0.7/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
60353@@ -82,7 +82,7 @@ struct kmem_cache {
60354 struct kmem_cache_order_objects max;
60355 struct kmem_cache_order_objects min;
60356 gfp_t allocflags; /* gfp flags to use on each alloc */
60357- int refcount; /* Refcount for slab cache destroy */
60358+ atomic_t refcount; /* Refcount for slab cache destroy */
60359 void (*ctor)(void *);
60360 int inuse; /* Offset to metadata */
60361 int align; /* Alignment */
60362@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
60363 }
60364
60365 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60366-void *__kmalloc(size_t size, gfp_t flags);
60367+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60368
60369 static __always_inline void *
60370 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60371diff -urNp linux-3.0.7/include/linux/sonet.h linux-3.0.7/include/linux/sonet.h
60372--- linux-3.0.7/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
60373+++ linux-3.0.7/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
60374@@ -61,7 +61,7 @@ struct sonet_stats {
60375 #include <asm/atomic.h>
60376
60377 struct k_sonet_stats {
60378-#define __HANDLE_ITEM(i) atomic_t i
60379+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60380 __SONET_ITEMS
60381 #undef __HANDLE_ITEM
60382 };
60383diff -urNp linux-3.0.7/include/linux/sunrpc/clnt.h linux-3.0.7/include/linux/sunrpc/clnt.h
60384--- linux-3.0.7/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
60385+++ linux-3.0.7/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
60386@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60387 {
60388 switch (sap->sa_family) {
60389 case AF_INET:
60390- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60391+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60392 case AF_INET6:
60393- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60394+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60395 }
60396 return 0;
60397 }
60398@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60399 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60400 const struct sockaddr *src)
60401 {
60402- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60403+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60404 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60405
60406 dsin->sin_family = ssin->sin_family;
60407@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60408 if (sa->sa_family != AF_INET6)
60409 return 0;
60410
60411- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60412+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60413 }
60414
60415 #endif /* __KERNEL__ */
60416diff -urNp linux-3.0.7/include/linux/sunrpc/svc_rdma.h linux-3.0.7/include/linux/sunrpc/svc_rdma.h
60417--- linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
60418+++ linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
60419@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60420 extern unsigned int svcrdma_max_requests;
60421 extern unsigned int svcrdma_max_req_size;
60422
60423-extern atomic_t rdma_stat_recv;
60424-extern atomic_t rdma_stat_read;
60425-extern atomic_t rdma_stat_write;
60426-extern atomic_t rdma_stat_sq_starve;
60427-extern atomic_t rdma_stat_rq_starve;
60428-extern atomic_t rdma_stat_rq_poll;
60429-extern atomic_t rdma_stat_rq_prod;
60430-extern atomic_t rdma_stat_sq_poll;
60431-extern atomic_t rdma_stat_sq_prod;
60432+extern atomic_unchecked_t rdma_stat_recv;
60433+extern atomic_unchecked_t rdma_stat_read;
60434+extern atomic_unchecked_t rdma_stat_write;
60435+extern atomic_unchecked_t rdma_stat_sq_starve;
60436+extern atomic_unchecked_t rdma_stat_rq_starve;
60437+extern atomic_unchecked_t rdma_stat_rq_poll;
60438+extern atomic_unchecked_t rdma_stat_rq_prod;
60439+extern atomic_unchecked_t rdma_stat_sq_poll;
60440+extern atomic_unchecked_t rdma_stat_sq_prod;
60441
60442 #define RPCRDMA_VERSION 1
60443
60444diff -urNp linux-3.0.7/include/linux/sysctl.h linux-3.0.7/include/linux/sysctl.h
60445--- linux-3.0.7/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
60446+++ linux-3.0.7/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
60447@@ -155,7 +155,11 @@ enum
60448 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60449 };
60450
60451-
60452+#ifdef CONFIG_PAX_SOFTMODE
60453+enum {
60454+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60455+};
60456+#endif
60457
60458 /* CTL_VM names: */
60459 enum
60460@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60461
60462 extern int proc_dostring(struct ctl_table *, int,
60463 void __user *, size_t *, loff_t *);
60464+extern int proc_dostring_modpriv(struct ctl_table *, int,
60465+ void __user *, size_t *, loff_t *);
60466 extern int proc_dointvec(struct ctl_table *, int,
60467 void __user *, size_t *, loff_t *);
60468 extern int proc_dointvec_minmax(struct ctl_table *, int,
60469diff -urNp linux-3.0.7/include/linux/tty_ldisc.h linux-3.0.7/include/linux/tty_ldisc.h
60470--- linux-3.0.7/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
60471+++ linux-3.0.7/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
60472@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60473
60474 struct module *owner;
60475
60476- int refcount;
60477+ atomic_t refcount;
60478 };
60479
60480 struct tty_ldisc {
60481diff -urNp linux-3.0.7/include/linux/types.h linux-3.0.7/include/linux/types.h
60482--- linux-3.0.7/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
60483+++ linux-3.0.7/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
60484@@ -213,10 +213,26 @@ typedef struct {
60485 int counter;
60486 } atomic_t;
60487
60488+#ifdef CONFIG_PAX_REFCOUNT
60489+typedef struct {
60490+ int counter;
60491+} atomic_unchecked_t;
60492+#else
60493+typedef atomic_t atomic_unchecked_t;
60494+#endif
60495+
60496 #ifdef CONFIG_64BIT
60497 typedef struct {
60498 long counter;
60499 } atomic64_t;
60500+
60501+#ifdef CONFIG_PAX_REFCOUNT
60502+typedef struct {
60503+ long counter;
60504+} atomic64_unchecked_t;
60505+#else
60506+typedef atomic64_t atomic64_unchecked_t;
60507+#endif
60508 #endif
60509
60510 struct list_head {
60511diff -urNp linux-3.0.7/include/linux/uaccess.h linux-3.0.7/include/linux/uaccess.h
60512--- linux-3.0.7/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
60513+++ linux-3.0.7/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
60514@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60515 long ret; \
60516 mm_segment_t old_fs = get_fs(); \
60517 \
60518- set_fs(KERNEL_DS); \
60519 pagefault_disable(); \
60520- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60521- pagefault_enable(); \
60522+ set_fs(KERNEL_DS); \
60523+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60524 set_fs(old_fs); \
60525+ pagefault_enable(); \
60526 ret; \
60527 })
60528
60529diff -urNp linux-3.0.7/include/linux/unaligned/access_ok.h linux-3.0.7/include/linux/unaligned/access_ok.h
60530--- linux-3.0.7/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
60531+++ linux-3.0.7/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
60532@@ -6,32 +6,32 @@
60533
60534 static inline u16 get_unaligned_le16(const void *p)
60535 {
60536- return le16_to_cpup((__le16 *)p);
60537+ return le16_to_cpup((const __le16 *)p);
60538 }
60539
60540 static inline u32 get_unaligned_le32(const void *p)
60541 {
60542- return le32_to_cpup((__le32 *)p);
60543+ return le32_to_cpup((const __le32 *)p);
60544 }
60545
60546 static inline u64 get_unaligned_le64(const void *p)
60547 {
60548- return le64_to_cpup((__le64 *)p);
60549+ return le64_to_cpup((const __le64 *)p);
60550 }
60551
60552 static inline u16 get_unaligned_be16(const void *p)
60553 {
60554- return be16_to_cpup((__be16 *)p);
60555+ return be16_to_cpup((const __be16 *)p);
60556 }
60557
60558 static inline u32 get_unaligned_be32(const void *p)
60559 {
60560- return be32_to_cpup((__be32 *)p);
60561+ return be32_to_cpup((const __be32 *)p);
60562 }
60563
60564 static inline u64 get_unaligned_be64(const void *p)
60565 {
60566- return be64_to_cpup((__be64 *)p);
60567+ return be64_to_cpup((const __be64 *)p);
60568 }
60569
60570 static inline void put_unaligned_le16(u16 val, void *p)
60571diff -urNp linux-3.0.7/include/linux/vermagic.h linux-3.0.7/include/linux/vermagic.h
60572--- linux-3.0.7/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
60573+++ linux-3.0.7/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
60574@@ -26,9 +26,28 @@
60575 #define MODULE_ARCH_VERMAGIC ""
60576 #endif
60577
60578+#ifdef CONFIG_PAX_REFCOUNT
60579+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60580+#else
60581+#define MODULE_PAX_REFCOUNT ""
60582+#endif
60583+
60584+#ifdef CONSTIFY_PLUGIN
60585+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60586+#else
60587+#define MODULE_CONSTIFY_PLUGIN ""
60588+#endif
60589+
60590+#ifdef CONFIG_GRKERNSEC
60591+#define MODULE_GRSEC "GRSEC "
60592+#else
60593+#define MODULE_GRSEC ""
60594+#endif
60595+
60596 #define VERMAGIC_STRING \
60597 UTS_RELEASE " " \
60598 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60599 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60600- MODULE_ARCH_VERMAGIC
60601+ MODULE_ARCH_VERMAGIC \
60602+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
60603
60604diff -urNp linux-3.0.7/include/linux/vmalloc.h linux-3.0.7/include/linux/vmalloc.h
60605--- linux-3.0.7/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
60606+++ linux-3.0.7/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
60607@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60608 #define VM_MAP 0x00000004 /* vmap()ed pages */
60609 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60610 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60611+
60612+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60613+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60614+#endif
60615+
60616 /* bits [20..32] reserved for arch specific ioremap internals */
60617
60618 /*
60619@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60620 # endif
60621 #endif
60622
60623+#define vmalloc(x) \
60624+({ \
60625+ void *___retval; \
60626+ intoverflow_t ___x = (intoverflow_t)x; \
60627+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60628+ ___retval = NULL; \
60629+ else \
60630+ ___retval = vmalloc((unsigned long)___x); \
60631+ ___retval; \
60632+})
60633+
60634+#define vzalloc(x) \
60635+({ \
60636+ void *___retval; \
60637+ intoverflow_t ___x = (intoverflow_t)x; \
60638+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60639+ ___retval = NULL; \
60640+ else \
60641+ ___retval = vzalloc((unsigned long)___x); \
60642+ ___retval; \
60643+})
60644+
60645+#define __vmalloc(x, y, z) \
60646+({ \
60647+ void *___retval; \
60648+ intoverflow_t ___x = (intoverflow_t)x; \
60649+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60650+ ___retval = NULL; \
60651+ else \
60652+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60653+ ___retval; \
60654+})
60655+
60656+#define vmalloc_user(x) \
60657+({ \
60658+ void *___retval; \
60659+ intoverflow_t ___x = (intoverflow_t)x; \
60660+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60661+ ___retval = NULL; \
60662+ else \
60663+ ___retval = vmalloc_user((unsigned long)___x); \
60664+ ___retval; \
60665+})
60666+
60667+#define vmalloc_exec(x) \
60668+({ \
60669+ void *___retval; \
60670+ intoverflow_t ___x = (intoverflow_t)x; \
60671+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60672+ ___retval = NULL; \
60673+ else \
60674+ ___retval = vmalloc_exec((unsigned long)___x); \
60675+ ___retval; \
60676+})
60677+
60678+#define vmalloc_node(x, y) \
60679+({ \
60680+ void *___retval; \
60681+ intoverflow_t ___x = (intoverflow_t)x; \
60682+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60683+ ___retval = NULL; \
60684+ else \
60685+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60686+ ___retval; \
60687+})
60688+
60689+#define vzalloc_node(x, y) \
60690+({ \
60691+ void *___retval; \
60692+ intoverflow_t ___x = (intoverflow_t)x; \
60693+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60694+ ___retval = NULL; \
60695+ else \
60696+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60697+ ___retval; \
60698+})
60699+
60700+#define vmalloc_32(x) \
60701+({ \
60702+ void *___retval; \
60703+ intoverflow_t ___x = (intoverflow_t)x; \
60704+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60705+ ___retval = NULL; \
60706+ else \
60707+ ___retval = vmalloc_32((unsigned long)___x); \
60708+ ___retval; \
60709+})
60710+
60711+#define vmalloc_32_user(x) \
60712+({ \
60713+void *___retval; \
60714+ intoverflow_t ___x = (intoverflow_t)x; \
60715+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60716+ ___retval = NULL; \
60717+ else \
60718+ ___retval = vmalloc_32_user((unsigned long)___x);\
60719+ ___retval; \
60720+})
60721+
60722 #endif /* _LINUX_VMALLOC_H */
60723diff -urNp linux-3.0.7/include/linux/vmstat.h linux-3.0.7/include/linux/vmstat.h
60724--- linux-3.0.7/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
60725+++ linux-3.0.7/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
60726@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60727 /*
60728 * Zone based page accounting with per cpu differentials.
60729 */
60730-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60731+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60732
60733 static inline void zone_page_state_add(long x, struct zone *zone,
60734 enum zone_stat_item item)
60735 {
60736- atomic_long_add(x, &zone->vm_stat[item]);
60737- atomic_long_add(x, &vm_stat[item]);
60738+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60739+ atomic_long_add_unchecked(x, &vm_stat[item]);
60740 }
60741
60742 static inline unsigned long global_page_state(enum zone_stat_item item)
60743 {
60744- long x = atomic_long_read(&vm_stat[item]);
60745+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60746 #ifdef CONFIG_SMP
60747 if (x < 0)
60748 x = 0;
60749@@ -109,7 +109,7 @@ static inline unsigned long global_page_
60750 static inline unsigned long zone_page_state(struct zone *zone,
60751 enum zone_stat_item item)
60752 {
60753- long x = atomic_long_read(&zone->vm_stat[item]);
60754+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60755 #ifdef CONFIG_SMP
60756 if (x < 0)
60757 x = 0;
60758@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60759 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60760 enum zone_stat_item item)
60761 {
60762- long x = atomic_long_read(&zone->vm_stat[item]);
60763+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60764
60765 #ifdef CONFIG_SMP
60766 int cpu;
60767@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60768
60769 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60770 {
60771- atomic_long_inc(&zone->vm_stat[item]);
60772- atomic_long_inc(&vm_stat[item]);
60773+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60774+ atomic_long_inc_unchecked(&vm_stat[item]);
60775 }
60776
60777 static inline void __inc_zone_page_state(struct page *page,
60778@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60779
60780 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60781 {
60782- atomic_long_dec(&zone->vm_stat[item]);
60783- atomic_long_dec(&vm_stat[item]);
60784+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60785+ atomic_long_dec_unchecked(&vm_stat[item]);
60786 }
60787
60788 static inline void __dec_zone_page_state(struct page *page,
60789diff -urNp linux-3.0.7/include/media/saa7146_vv.h linux-3.0.7/include/media/saa7146_vv.h
60790--- linux-3.0.7/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
60791+++ linux-3.0.7/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
60792@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60793 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60794
60795 /* the extension can override this */
60796- struct v4l2_ioctl_ops ops;
60797+ v4l2_ioctl_ops_no_const ops;
60798 /* pointer to the saa7146 core ops */
60799 const struct v4l2_ioctl_ops *core_ops;
60800
60801diff -urNp linux-3.0.7/include/media/v4l2-dev.h linux-3.0.7/include/media/v4l2-dev.h
60802--- linux-3.0.7/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
60803+++ linux-3.0.7/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
60804@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
60805
60806
60807 struct v4l2_file_operations {
60808- struct module *owner;
60809+ struct module * const owner;
60810 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60811 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60812 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60813@@ -68,6 +68,7 @@ struct v4l2_file_operations {
60814 int (*open) (struct file *);
60815 int (*release) (struct file *);
60816 };
60817+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
60818
60819 /*
60820 * Newer version of video_device, handled by videodev2.c
60821diff -urNp linux-3.0.7/include/media/v4l2-ioctl.h linux-3.0.7/include/media/v4l2-ioctl.h
60822--- linux-3.0.7/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
60823+++ linux-3.0.7/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
60824@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
60825 long (*vidioc_default) (struct file *file, void *fh,
60826 bool valid_prio, int cmd, void *arg);
60827 };
60828+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60829
60830
60831 /* v4l debugging and diagnostics */
60832diff -urNp linux-3.0.7/include/net/caif/cfctrl.h linux-3.0.7/include/net/caif/cfctrl.h
60833--- linux-3.0.7/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
60834+++ linux-3.0.7/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
60835@@ -52,7 +52,7 @@ struct cfctrl_rsp {
60836 void (*radioset_rsp)(void);
60837 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
60838 struct cflayer *client_layer);
60839-};
60840+} __no_const;
60841
60842 /* Link Setup Parameters for CAIF-Links. */
60843 struct cfctrl_link_param {
60844@@ -101,8 +101,8 @@ struct cfctrl_request_info {
60845 struct cfctrl {
60846 struct cfsrvl serv;
60847 struct cfctrl_rsp res;
60848- atomic_t req_seq_no;
60849- atomic_t rsp_seq_no;
60850+ atomic_unchecked_t req_seq_no;
60851+ atomic_unchecked_t rsp_seq_no;
60852 struct list_head list;
60853 /* Protects from simultaneous access to first_req list */
60854 spinlock_t info_list_lock;
60855diff -urNp linux-3.0.7/include/net/flow.h linux-3.0.7/include/net/flow.h
60856--- linux-3.0.7/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
60857+++ linux-3.0.7/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
60858@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
60859 u8 dir, flow_resolve_t resolver, void *ctx);
60860
60861 extern void flow_cache_flush(void);
60862-extern atomic_t flow_cache_genid;
60863+extern atomic_unchecked_t flow_cache_genid;
60864
60865 #endif
60866diff -urNp linux-3.0.7/include/net/inetpeer.h linux-3.0.7/include/net/inetpeer.h
60867--- linux-3.0.7/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
60868+++ linux-3.0.7/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
60869@@ -43,8 +43,8 @@ struct inet_peer {
60870 */
60871 union {
60872 struct {
60873- atomic_t rid; /* Frag reception counter */
60874- atomic_t ip_id_count; /* IP ID for the next packet */
60875+ atomic_unchecked_t rid; /* Frag reception counter */
60876+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
60877 __u32 tcp_ts;
60878 __u32 tcp_ts_stamp;
60879 u32 metrics[RTAX_MAX];
60880@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
60881 {
60882 more++;
60883 inet_peer_refcheck(p);
60884- return atomic_add_return(more, &p->ip_id_count) - more;
60885+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
60886 }
60887
60888 #endif /* _NET_INETPEER_H */
60889diff -urNp linux-3.0.7/include/net/ip_fib.h linux-3.0.7/include/net/ip_fib.h
60890--- linux-3.0.7/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
60891+++ linux-3.0.7/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
60892@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
60893
60894 #define FIB_RES_SADDR(net, res) \
60895 ((FIB_RES_NH(res).nh_saddr_genid == \
60896- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
60897+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
60898 FIB_RES_NH(res).nh_saddr : \
60899 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
60900 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
60901diff -urNp linux-3.0.7/include/net/ip_vs.h linux-3.0.7/include/net/ip_vs.h
60902--- linux-3.0.7/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
60903+++ linux-3.0.7/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
60904@@ -509,7 +509,7 @@ struct ip_vs_conn {
60905 struct ip_vs_conn *control; /* Master control connection */
60906 atomic_t n_control; /* Number of controlled ones */
60907 struct ip_vs_dest *dest; /* real server */
60908- atomic_t in_pkts; /* incoming packet counter */
60909+ atomic_unchecked_t in_pkts; /* incoming packet counter */
60910
60911 /* packet transmitter for different forwarding methods. If it
60912 mangles the packet, it must return NF_DROP or better NF_STOLEN,
60913@@ -647,7 +647,7 @@ struct ip_vs_dest {
60914 __be16 port; /* port number of the server */
60915 union nf_inet_addr addr; /* IP address of the server */
60916 volatile unsigned flags; /* dest status flags */
60917- atomic_t conn_flags; /* flags to copy to conn */
60918+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
60919 atomic_t weight; /* server weight */
60920
60921 atomic_t refcnt; /* reference counter */
60922diff -urNp linux-3.0.7/include/net/irda/ircomm_core.h linux-3.0.7/include/net/irda/ircomm_core.h
60923--- linux-3.0.7/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
60924+++ linux-3.0.7/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
60925@@ -51,7 +51,7 @@ typedef struct {
60926 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
60927 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
60928 struct ircomm_info *);
60929-} call_t;
60930+} __no_const call_t;
60931
60932 struct ircomm_cb {
60933 irda_queue_t queue;
60934diff -urNp linux-3.0.7/include/net/irda/ircomm_tty.h linux-3.0.7/include/net/irda/ircomm_tty.h
60935--- linux-3.0.7/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
60936+++ linux-3.0.7/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
60937@@ -35,6 +35,7 @@
60938 #include <linux/termios.h>
60939 #include <linux/timer.h>
60940 #include <linux/tty.h> /* struct tty_struct */
60941+#include <asm/local.h>
60942
60943 #include <net/irda/irias_object.h>
60944 #include <net/irda/ircomm_core.h>
60945@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
60946 unsigned short close_delay;
60947 unsigned short closing_wait; /* time to wait before closing */
60948
60949- int open_count;
60950- int blocked_open; /* # of blocked opens */
60951+ local_t open_count;
60952+ local_t blocked_open; /* # of blocked opens */
60953
60954 /* Protect concurent access to :
60955 * o self->open_count
60956diff -urNp linux-3.0.7/include/net/iucv/af_iucv.h linux-3.0.7/include/net/iucv/af_iucv.h
60957--- linux-3.0.7/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
60958+++ linux-3.0.7/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
60959@@ -87,7 +87,7 @@ struct iucv_sock {
60960 struct iucv_sock_list {
60961 struct hlist_head head;
60962 rwlock_t lock;
60963- atomic_t autobind_name;
60964+ atomic_unchecked_t autobind_name;
60965 };
60966
60967 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
60968diff -urNp linux-3.0.7/include/net/lapb.h linux-3.0.7/include/net/lapb.h
60969--- linux-3.0.7/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
60970+++ linux-3.0.7/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
60971@@ -95,7 +95,7 @@ struct lapb_cb {
60972 struct sk_buff_head write_queue;
60973 struct sk_buff_head ack_queue;
60974 unsigned char window;
60975- struct lapb_register_struct callbacks;
60976+ struct lapb_register_struct *callbacks;
60977
60978 /* FRMR control information */
60979 struct lapb_frame frmr_data;
60980diff -urNp linux-3.0.7/include/net/neighbour.h linux-3.0.7/include/net/neighbour.h
60981--- linux-3.0.7/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
60982+++ linux-3.0.7/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
60983@@ -124,7 +124,7 @@ struct neigh_ops {
60984 int (*connected_output)(struct sk_buff*);
60985 int (*hh_output)(struct sk_buff*);
60986 int (*queue_xmit)(struct sk_buff*);
60987-};
60988+} __do_const;
60989
60990 struct pneigh_entry {
60991 struct pneigh_entry *next;
60992diff -urNp linux-3.0.7/include/net/netlink.h linux-3.0.7/include/net/netlink.h
60993--- linux-3.0.7/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
60994+++ linux-3.0.7/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
60995@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
60996 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
60997 {
60998 if (mark)
60999- skb_trim(skb, (unsigned char *) mark - skb->data);
61000+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61001 }
61002
61003 /**
61004diff -urNp linux-3.0.7/include/net/netns/ipv4.h linux-3.0.7/include/net/netns/ipv4.h
61005--- linux-3.0.7/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
61006+++ linux-3.0.7/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
61007@@ -56,8 +56,8 @@ struct netns_ipv4 {
61008
61009 unsigned int sysctl_ping_group_range[2];
61010
61011- atomic_t rt_genid;
61012- atomic_t dev_addr_genid;
61013+ atomic_unchecked_t rt_genid;
61014+ atomic_unchecked_t dev_addr_genid;
61015
61016 #ifdef CONFIG_IP_MROUTE
61017 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61018diff -urNp linux-3.0.7/include/net/sctp/sctp.h linux-3.0.7/include/net/sctp/sctp.h
61019--- linux-3.0.7/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
61020+++ linux-3.0.7/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
61021@@ -315,9 +315,9 @@ do { \
61022
61023 #else /* SCTP_DEBUG */
61024
61025-#define SCTP_DEBUG_PRINTK(whatever...)
61026-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61027-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61028+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61029+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61030+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61031 #define SCTP_ENABLE_DEBUG
61032 #define SCTP_DISABLE_DEBUG
61033 #define SCTP_ASSERT(expr, str, func)
61034diff -urNp linux-3.0.7/include/net/sock.h linux-3.0.7/include/net/sock.h
61035--- linux-3.0.7/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
61036+++ linux-3.0.7/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
61037@@ -277,7 +277,7 @@ struct sock {
61038 #ifdef CONFIG_RPS
61039 __u32 sk_rxhash;
61040 #endif
61041- atomic_t sk_drops;
61042+ atomic_unchecked_t sk_drops;
61043 int sk_rcvbuf;
61044
61045 struct sk_filter __rcu *sk_filter;
61046@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
61047 }
61048
61049 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61050- char __user *from, char *to,
61051+ char __user *from, unsigned char *to,
61052 int copy, int offset)
61053 {
61054 if (skb->ip_summed == CHECKSUM_NONE) {
61055diff -urNp linux-3.0.7/include/net/tcp.h linux-3.0.7/include/net/tcp.h
61056--- linux-3.0.7/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
61057+++ linux-3.0.7/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
61058@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
61059 struct tcp_seq_afinfo {
61060 char *name;
61061 sa_family_t family;
61062- struct file_operations seq_fops;
61063- struct seq_operations seq_ops;
61064+ file_operations_no_const seq_fops;
61065+ seq_operations_no_const seq_ops;
61066 };
61067
61068 struct tcp_iter_state {
61069diff -urNp linux-3.0.7/include/net/udp.h linux-3.0.7/include/net/udp.h
61070--- linux-3.0.7/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
61071+++ linux-3.0.7/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
61072@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61073 char *name;
61074 sa_family_t family;
61075 struct udp_table *udp_table;
61076- struct file_operations seq_fops;
61077- struct seq_operations seq_ops;
61078+ file_operations_no_const seq_fops;
61079+ seq_operations_no_const seq_ops;
61080 };
61081
61082 struct udp_iter_state {
61083diff -urNp linux-3.0.7/include/net/xfrm.h linux-3.0.7/include/net/xfrm.h
61084--- linux-3.0.7/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
61085+++ linux-3.0.7/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
61086@@ -505,7 +505,7 @@ struct xfrm_policy {
61087 struct timer_list timer;
61088
61089 struct flow_cache_object flo;
61090- atomic_t genid;
61091+ atomic_unchecked_t genid;
61092 u32 priority;
61093 u32 index;
61094 struct xfrm_mark mark;
61095diff -urNp linux-3.0.7/include/rdma/iw_cm.h linux-3.0.7/include/rdma/iw_cm.h
61096--- linux-3.0.7/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
61097+++ linux-3.0.7/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
61098@@ -120,7 +120,7 @@ struct iw_cm_verbs {
61099 int backlog);
61100
61101 int (*destroy_listen)(struct iw_cm_id *cm_id);
61102-};
61103+} __no_const;
61104
61105 /**
61106 * iw_create_cm_id - Create an IW CM identifier.
61107diff -urNp linux-3.0.7/include/scsi/libfc.h linux-3.0.7/include/scsi/libfc.h
61108--- linux-3.0.7/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
61109+++ linux-3.0.7/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
61110@@ -750,6 +750,7 @@ struct libfc_function_template {
61111 */
61112 void (*disc_stop_final) (struct fc_lport *);
61113 };
61114+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61115
61116 /**
61117 * struct fc_disc - Discovery context
61118@@ -853,7 +854,7 @@ struct fc_lport {
61119 struct fc_vport *vport;
61120
61121 /* Operational Information */
61122- struct libfc_function_template tt;
61123+ libfc_function_template_no_const tt;
61124 u8 link_up;
61125 u8 qfull;
61126 enum fc_lport_state state;
61127diff -urNp linux-3.0.7/include/scsi/scsi_device.h linux-3.0.7/include/scsi/scsi_device.h
61128--- linux-3.0.7/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
61129+++ linux-3.0.7/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
61130@@ -161,9 +161,9 @@ struct scsi_device {
61131 unsigned int max_device_blocked; /* what device_blocked counts down from */
61132 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61133
61134- atomic_t iorequest_cnt;
61135- atomic_t iodone_cnt;
61136- atomic_t ioerr_cnt;
61137+ atomic_unchecked_t iorequest_cnt;
61138+ atomic_unchecked_t iodone_cnt;
61139+ atomic_unchecked_t ioerr_cnt;
61140
61141 struct device sdev_gendev,
61142 sdev_dev;
61143diff -urNp linux-3.0.7/include/scsi/scsi_transport_fc.h linux-3.0.7/include/scsi/scsi_transport_fc.h
61144--- linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
61145+++ linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
61146@@ -711,7 +711,7 @@ struct fc_function_template {
61147 unsigned long show_host_system_hostname:1;
61148
61149 unsigned long disable_target_scan:1;
61150-};
61151+} __do_const;
61152
61153
61154 /**
61155diff -urNp linux-3.0.7/include/sound/ak4xxx-adda.h linux-3.0.7/include/sound/ak4xxx-adda.h
61156--- linux-3.0.7/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
61157+++ linux-3.0.7/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
61158@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61159 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61160 unsigned char val);
61161 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61162-};
61163+} __no_const;
61164
61165 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61166
61167diff -urNp linux-3.0.7/include/sound/hwdep.h linux-3.0.7/include/sound/hwdep.h
61168--- linux-3.0.7/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
61169+++ linux-3.0.7/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
61170@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61171 struct snd_hwdep_dsp_status *status);
61172 int (*dsp_load)(struct snd_hwdep *hw,
61173 struct snd_hwdep_dsp_image *image);
61174-};
61175+} __no_const;
61176
61177 struct snd_hwdep {
61178 struct snd_card *card;
61179diff -urNp linux-3.0.7/include/sound/info.h linux-3.0.7/include/sound/info.h
61180--- linux-3.0.7/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
61181+++ linux-3.0.7/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
61182@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61183 struct snd_info_buffer *buffer);
61184 void (*write)(struct snd_info_entry *entry,
61185 struct snd_info_buffer *buffer);
61186-};
61187+} __no_const;
61188
61189 struct snd_info_entry_ops {
61190 int (*open)(struct snd_info_entry *entry,
61191diff -urNp linux-3.0.7/include/sound/pcm.h linux-3.0.7/include/sound/pcm.h
61192--- linux-3.0.7/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
61193+++ linux-3.0.7/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
61194@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61195 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61196 int (*ack)(struct snd_pcm_substream *substream);
61197 };
61198+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61199
61200 /*
61201 *
61202diff -urNp linux-3.0.7/include/sound/sb16_csp.h linux-3.0.7/include/sound/sb16_csp.h
61203--- linux-3.0.7/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
61204+++ linux-3.0.7/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
61205@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61206 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61207 int (*csp_stop) (struct snd_sb_csp * p);
61208 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61209-};
61210+} __no_const;
61211
61212 /*
61213 * CSP private data
61214diff -urNp linux-3.0.7/include/sound/soc.h linux-3.0.7/include/sound/soc.h
61215--- linux-3.0.7/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
61216+++ linux-3.0.7/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
61217@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
61218
61219 /* platform stream ops */
61220 struct snd_pcm_ops *ops;
61221-};
61222+} __do_const;
61223
61224 struct snd_soc_platform {
61225 const char *name;
61226diff -urNp linux-3.0.7/include/sound/ymfpci.h linux-3.0.7/include/sound/ymfpci.h
61227--- linux-3.0.7/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
61228+++ linux-3.0.7/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
61229@@ -358,7 +358,7 @@ struct snd_ymfpci {
61230 spinlock_t reg_lock;
61231 spinlock_t voice_lock;
61232 wait_queue_head_t interrupt_sleep;
61233- atomic_t interrupt_sleep_count;
61234+ atomic_unchecked_t interrupt_sleep_count;
61235 struct snd_info_entry *proc_entry;
61236 const struct firmware *dsp_microcode;
61237 const struct firmware *controller_microcode;
61238diff -urNp linux-3.0.7/include/target/target_core_base.h linux-3.0.7/include/target/target_core_base.h
61239--- linux-3.0.7/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
61240+++ linux-3.0.7/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
61241@@ -364,7 +364,7 @@ struct t10_reservation_ops {
61242 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61243 int (*t10_pr_register)(struct se_cmd *);
61244 int (*t10_pr_clear)(struct se_cmd *);
61245-};
61246+} __no_const;
61247
61248 struct t10_reservation_template {
61249 /* Reservation effects all target ports */
61250@@ -432,8 +432,8 @@ struct se_transport_task {
61251 atomic_t t_task_cdbs_left;
61252 atomic_t t_task_cdbs_ex_left;
61253 atomic_t t_task_cdbs_timeout_left;
61254- atomic_t t_task_cdbs_sent;
61255- atomic_t t_transport_aborted;
61256+ atomic_unchecked_t t_task_cdbs_sent;
61257+ atomic_unchecked_t t_transport_aborted;
61258 atomic_t t_transport_active;
61259 atomic_t t_transport_complete;
61260 atomic_t t_transport_queue_active;
61261@@ -774,7 +774,7 @@ struct se_device {
61262 atomic_t active_cmds;
61263 atomic_t simple_cmds;
61264 atomic_t depth_left;
61265- atomic_t dev_ordered_id;
61266+ atomic_unchecked_t dev_ordered_id;
61267 atomic_t dev_tur_active;
61268 atomic_t execute_tasks;
61269 atomic_t dev_status_thr_count;
61270diff -urNp linux-3.0.7/include/trace/events/irq.h linux-3.0.7/include/trace/events/irq.h
61271--- linux-3.0.7/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
61272+++ linux-3.0.7/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
61273@@ -36,7 +36,7 @@ struct softirq_action;
61274 */
61275 TRACE_EVENT(irq_handler_entry,
61276
61277- TP_PROTO(int irq, struct irqaction *action),
61278+ TP_PROTO(int irq, const struct irqaction *action),
61279
61280 TP_ARGS(irq, action),
61281
61282@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61283 */
61284 TRACE_EVENT(irq_handler_exit,
61285
61286- TP_PROTO(int irq, struct irqaction *action, int ret),
61287+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61288
61289 TP_ARGS(irq, action, ret),
61290
61291diff -urNp linux-3.0.7/include/video/udlfb.h linux-3.0.7/include/video/udlfb.h
61292--- linux-3.0.7/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
61293+++ linux-3.0.7/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
61294@@ -51,10 +51,10 @@ struct dlfb_data {
61295 int base8;
61296 u32 pseudo_palette[256];
61297 /* blit-only rendering path metrics, exposed through sysfs */
61298- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61299- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61300- atomic_t bytes_sent; /* to usb, after compression including overhead */
61301- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61302+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61303+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61304+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61305+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61306 };
61307
61308 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61309diff -urNp linux-3.0.7/include/video/uvesafb.h linux-3.0.7/include/video/uvesafb.h
61310--- linux-3.0.7/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
61311+++ linux-3.0.7/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
61312@@ -177,6 +177,7 @@ struct uvesafb_par {
61313 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61314 u8 pmi_setpal; /* PMI for palette changes */
61315 u16 *pmi_base; /* protected mode interface location */
61316+ u8 *pmi_code; /* protected mode code location */
61317 void *pmi_start;
61318 void *pmi_pal;
61319 u8 *vbe_state_orig; /*
61320diff -urNp linux-3.0.7/init/Kconfig linux-3.0.7/init/Kconfig
61321--- linux-3.0.7/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
61322+++ linux-3.0.7/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
61323@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
61324
61325 config COMPAT_BRK
61326 bool "Disable heap randomization"
61327- default y
61328+ default n
61329 help
61330 Randomizing heap placement makes heap exploits harder, but it
61331 also breaks ancient binaries (including anything libc5 based).
61332diff -urNp linux-3.0.7/init/do_mounts.c linux-3.0.7/init/do_mounts.c
61333--- linux-3.0.7/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
61334+++ linux-3.0.7/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
61335@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61336
61337 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61338 {
61339- int err = sys_mount(name, "/root", fs, flags, data);
61340+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61341 if (err)
61342 return err;
61343
61344- sys_chdir((const char __user __force *)"/root");
61345+ sys_chdir((const char __force_user*)"/root");
61346 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61347 printk(KERN_INFO
61348 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61349@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61350 va_start(args, fmt);
61351 vsprintf(buf, fmt, args);
61352 va_end(args);
61353- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61354+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61355 if (fd >= 0) {
61356 sys_ioctl(fd, FDEJECT, 0);
61357 sys_close(fd);
61358 }
61359 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61360- fd = sys_open("/dev/console", O_RDWR, 0);
61361+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61362 if (fd >= 0) {
61363 sys_ioctl(fd, TCGETS, (long)&termios);
61364 termios.c_lflag &= ~ICANON;
61365 sys_ioctl(fd, TCSETSF, (long)&termios);
61366- sys_read(fd, &c, 1);
61367+ sys_read(fd, (char __user *)&c, 1);
61368 termios.c_lflag |= ICANON;
61369 sys_ioctl(fd, TCSETSF, (long)&termios);
61370 sys_close(fd);
61371@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61372 mount_root();
61373 out:
61374 devtmpfs_mount("dev");
61375- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61376- sys_chroot((const char __user __force *)".");
61377+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61378+ sys_chroot((const char __force_user *)".");
61379 }
61380diff -urNp linux-3.0.7/init/do_mounts.h linux-3.0.7/init/do_mounts.h
61381--- linux-3.0.7/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
61382+++ linux-3.0.7/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
61383@@ -15,15 +15,15 @@ extern int root_mountflags;
61384
61385 static inline int create_dev(char *name, dev_t dev)
61386 {
61387- sys_unlink(name);
61388- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61389+ sys_unlink((char __force_user *)name);
61390+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61391 }
61392
61393 #if BITS_PER_LONG == 32
61394 static inline u32 bstat(char *name)
61395 {
61396 struct stat64 stat;
61397- if (sys_stat64(name, &stat) != 0)
61398+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61399 return 0;
61400 if (!S_ISBLK(stat.st_mode))
61401 return 0;
61402@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61403 static inline u32 bstat(char *name)
61404 {
61405 struct stat stat;
61406- if (sys_newstat(name, &stat) != 0)
61407+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61408 return 0;
61409 if (!S_ISBLK(stat.st_mode))
61410 return 0;
61411diff -urNp linux-3.0.7/init/do_mounts_initrd.c linux-3.0.7/init/do_mounts_initrd.c
61412--- linux-3.0.7/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
61413+++ linux-3.0.7/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
61414@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61415 create_dev("/dev/root.old", Root_RAM0);
61416 /* mount initrd on rootfs' /root */
61417 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61418- sys_mkdir("/old", 0700);
61419- root_fd = sys_open("/", 0, 0);
61420- old_fd = sys_open("/old", 0, 0);
61421+ sys_mkdir((const char __force_user *)"/old", 0700);
61422+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61423+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61424 /* move initrd over / and chdir/chroot in initrd root */
61425- sys_chdir("/root");
61426- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61427- sys_chroot(".");
61428+ sys_chdir((const char __force_user *)"/root");
61429+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61430+ sys_chroot((const char __force_user *)".");
61431
61432 /*
61433 * In case that a resume from disk is carried out by linuxrc or one of
61434@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61435
61436 /* move initrd to rootfs' /old */
61437 sys_fchdir(old_fd);
61438- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61439+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61440 /* switch root and cwd back to / of rootfs */
61441 sys_fchdir(root_fd);
61442- sys_chroot(".");
61443+ sys_chroot((const char __force_user *)".");
61444 sys_close(old_fd);
61445 sys_close(root_fd);
61446
61447 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61448- sys_chdir("/old");
61449+ sys_chdir((const char __force_user *)"/old");
61450 return;
61451 }
61452
61453@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61454 mount_root();
61455
61456 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61457- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61458+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61459 if (!error)
61460 printk("okay\n");
61461 else {
61462- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61463+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61464 if (error == -ENOENT)
61465 printk("/initrd does not exist. Ignored.\n");
61466 else
61467 printk("failed\n");
61468 printk(KERN_NOTICE "Unmounting old root\n");
61469- sys_umount("/old", MNT_DETACH);
61470+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61471 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61472 if (fd < 0) {
61473 error = fd;
61474@@ -116,11 +116,11 @@ int __init initrd_load(void)
61475 * mounted in the normal path.
61476 */
61477 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61478- sys_unlink("/initrd.image");
61479+ sys_unlink((const char __force_user *)"/initrd.image");
61480 handle_initrd();
61481 return 1;
61482 }
61483 }
61484- sys_unlink("/initrd.image");
61485+ sys_unlink((const char __force_user *)"/initrd.image");
61486 return 0;
61487 }
61488diff -urNp linux-3.0.7/init/do_mounts_md.c linux-3.0.7/init/do_mounts_md.c
61489--- linux-3.0.7/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
61490+++ linux-3.0.7/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
61491@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61492 partitioned ? "_d" : "", minor,
61493 md_setup_args[ent].device_names);
61494
61495- fd = sys_open(name, 0, 0);
61496+ fd = sys_open((char __force_user *)name, 0, 0);
61497 if (fd < 0) {
61498 printk(KERN_ERR "md: open failed - cannot start "
61499 "array %s\n", name);
61500@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61501 * array without it
61502 */
61503 sys_close(fd);
61504- fd = sys_open(name, 0, 0);
61505+ fd = sys_open((char __force_user *)name, 0, 0);
61506 sys_ioctl(fd, BLKRRPART, 0);
61507 }
61508 sys_close(fd);
61509@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61510
61511 wait_for_device_probe();
61512
61513- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61514+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61515 if (fd >= 0) {
61516 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61517 sys_close(fd);
61518diff -urNp linux-3.0.7/init/initramfs.c linux-3.0.7/init/initramfs.c
61519--- linux-3.0.7/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
61520+++ linux-3.0.7/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
61521@@ -74,7 +74,7 @@ static void __init free_hash(void)
61522 }
61523 }
61524
61525-static long __init do_utime(char __user *filename, time_t mtime)
61526+static long __init do_utime(__force char __user *filename, time_t mtime)
61527 {
61528 struct timespec t[2];
61529
61530@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61531 struct dir_entry *de, *tmp;
61532 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61533 list_del(&de->list);
61534- do_utime(de->name, de->mtime);
61535+ do_utime((char __force_user *)de->name, de->mtime);
61536 kfree(de->name);
61537 kfree(de);
61538 }
61539@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61540 if (nlink >= 2) {
61541 char *old = find_link(major, minor, ino, mode, collected);
61542 if (old)
61543- return (sys_link(old, collected) < 0) ? -1 : 1;
61544+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61545 }
61546 return 0;
61547 }
61548@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61549 {
61550 struct stat st;
61551
61552- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61553+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61554 if (S_ISDIR(st.st_mode))
61555- sys_rmdir(path);
61556+ sys_rmdir((char __force_user *)path);
61557 else
61558- sys_unlink(path);
61559+ sys_unlink((char __force_user *)path);
61560 }
61561 }
61562
61563@@ -305,7 +305,7 @@ static int __init do_name(void)
61564 int openflags = O_WRONLY|O_CREAT;
61565 if (ml != 1)
61566 openflags |= O_TRUNC;
61567- wfd = sys_open(collected, openflags, mode);
61568+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61569
61570 if (wfd >= 0) {
61571 sys_fchown(wfd, uid, gid);
61572@@ -317,17 +317,17 @@ static int __init do_name(void)
61573 }
61574 }
61575 } else if (S_ISDIR(mode)) {
61576- sys_mkdir(collected, mode);
61577- sys_chown(collected, uid, gid);
61578- sys_chmod(collected, mode);
61579+ sys_mkdir((char __force_user *)collected, mode);
61580+ sys_chown((char __force_user *)collected, uid, gid);
61581+ sys_chmod((char __force_user *)collected, mode);
61582 dir_add(collected, mtime);
61583 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61584 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61585 if (maybe_link() == 0) {
61586- sys_mknod(collected, mode, rdev);
61587- sys_chown(collected, uid, gid);
61588- sys_chmod(collected, mode);
61589- do_utime(collected, mtime);
61590+ sys_mknod((char __force_user *)collected, mode, rdev);
61591+ sys_chown((char __force_user *)collected, uid, gid);
61592+ sys_chmod((char __force_user *)collected, mode);
61593+ do_utime((char __force_user *)collected, mtime);
61594 }
61595 }
61596 return 0;
61597@@ -336,15 +336,15 @@ static int __init do_name(void)
61598 static int __init do_copy(void)
61599 {
61600 if (count >= body_len) {
61601- sys_write(wfd, victim, body_len);
61602+ sys_write(wfd, (char __force_user *)victim, body_len);
61603 sys_close(wfd);
61604- do_utime(vcollected, mtime);
61605+ do_utime((char __force_user *)vcollected, mtime);
61606 kfree(vcollected);
61607 eat(body_len);
61608 state = SkipIt;
61609 return 0;
61610 } else {
61611- sys_write(wfd, victim, count);
61612+ sys_write(wfd, (char __force_user *)victim, count);
61613 body_len -= count;
61614 eat(count);
61615 return 1;
61616@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61617 {
61618 collected[N_ALIGN(name_len) + body_len] = '\0';
61619 clean_path(collected, 0);
61620- sys_symlink(collected + N_ALIGN(name_len), collected);
61621- sys_lchown(collected, uid, gid);
61622- do_utime(collected, mtime);
61623+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61624+ sys_lchown((char __force_user *)collected, uid, gid);
61625+ do_utime((char __force_user *)collected, mtime);
61626 state = SkipIt;
61627 next_state = Reset;
61628 return 0;
61629diff -urNp linux-3.0.7/init/main.c linux-3.0.7/init/main.c
61630--- linux-3.0.7/init/main.c 2011-07-21 22:17:23.000000000 -0400
61631+++ linux-3.0.7/init/main.c 2011-10-06 04:17:55.000000000 -0400
61632@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61633 extern void tc_init(void);
61634 #endif
61635
61636+extern void grsecurity_init(void);
61637+
61638 /*
61639 * Debug helper: via this flag we know that we are in 'early bootup code'
61640 * where only the boot processor is running with IRQ disabled. This means
61641@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61642
61643 __setup("reset_devices", set_reset_devices);
61644
61645+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61646+extern char pax_enter_kernel_user[];
61647+extern char pax_exit_kernel_user[];
61648+extern pgdval_t clone_pgd_mask;
61649+#endif
61650+
61651+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61652+static int __init setup_pax_nouderef(char *str)
61653+{
61654+#ifdef CONFIG_X86_32
61655+ unsigned int cpu;
61656+ struct desc_struct *gdt;
61657+
61658+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61659+ gdt = get_cpu_gdt_table(cpu);
61660+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61661+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61662+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61663+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61664+ }
61665+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61666+#else
61667+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61668+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61669+ clone_pgd_mask = ~(pgdval_t)0UL;
61670+#endif
61671+
61672+ return 0;
61673+}
61674+early_param("pax_nouderef", setup_pax_nouderef);
61675+#endif
61676+
61677+#ifdef CONFIG_PAX_SOFTMODE
61678+int pax_softmode;
61679+
61680+static int __init setup_pax_softmode(char *str)
61681+{
61682+ get_option(&str, &pax_softmode);
61683+ return 1;
61684+}
61685+__setup("pax_softmode=", setup_pax_softmode);
61686+#endif
61687+
61688 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61689 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61690 static const char *panic_later, *panic_param;
61691@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
61692 {
61693 int count = preempt_count();
61694 int ret;
61695+ const char *msg1 = "", *msg2 = "";
61696
61697 if (initcall_debug)
61698 ret = do_one_initcall_debug(fn);
61699@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
61700 sprintf(msgbuf, "error code %d ", ret);
61701
61702 if (preempt_count() != count) {
61703- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61704+ msg1 = " preemption imbalance";
61705 preempt_count() = count;
61706 }
61707 if (irqs_disabled()) {
61708- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61709+ msg2 = " disabled interrupts";
61710 local_irq_enable();
61711 }
61712- if (msgbuf[0]) {
61713- printk("initcall %pF returned with %s\n", fn, msgbuf);
61714+ if (msgbuf[0] || *msg1 || *msg2) {
61715+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61716 }
61717
61718 return ret;
61719@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
61720 do_basic_setup();
61721
61722 /* Open the /dev/console on the rootfs, this should never fail */
61723- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61724+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61725 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61726
61727 (void) sys_dup(0);
61728@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
61729 if (!ramdisk_execute_command)
61730 ramdisk_execute_command = "/init";
61731
61732- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61733+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61734 ramdisk_execute_command = NULL;
61735 prepare_namespace();
61736 }
61737
61738+ grsecurity_init();
61739+
61740 /*
61741 * Ok, we have completed the initial bootup, and
61742 * we're essentially up and running. Get rid of the
61743diff -urNp linux-3.0.7/ipc/mqueue.c linux-3.0.7/ipc/mqueue.c
61744--- linux-3.0.7/ipc/mqueue.c 2011-10-16 21:54:54.000000000 -0400
61745+++ linux-3.0.7/ipc/mqueue.c 2011-10-16 21:59:31.000000000 -0400
61746@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61747 mq_bytes = (mq_msg_tblsz +
61748 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61749
61750+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61751 spin_lock(&mq_lock);
61752 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61753 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61754diff -urNp linux-3.0.7/ipc/msg.c linux-3.0.7/ipc/msg.c
61755--- linux-3.0.7/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
61756+++ linux-3.0.7/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
61757@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61758 return security_msg_queue_associate(msq, msgflg);
61759 }
61760
61761+static struct ipc_ops msg_ops = {
61762+ .getnew = newque,
61763+ .associate = msg_security,
61764+ .more_checks = NULL
61765+};
61766+
61767 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61768 {
61769 struct ipc_namespace *ns;
61770- struct ipc_ops msg_ops;
61771 struct ipc_params msg_params;
61772
61773 ns = current->nsproxy->ipc_ns;
61774
61775- msg_ops.getnew = newque;
61776- msg_ops.associate = msg_security;
61777- msg_ops.more_checks = NULL;
61778-
61779 msg_params.key = key;
61780 msg_params.flg = msgflg;
61781
61782diff -urNp linux-3.0.7/ipc/sem.c linux-3.0.7/ipc/sem.c
61783--- linux-3.0.7/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
61784+++ linux-3.0.7/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
61785@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
61786 return 0;
61787 }
61788
61789+static struct ipc_ops sem_ops = {
61790+ .getnew = newary,
61791+ .associate = sem_security,
61792+ .more_checks = sem_more_checks
61793+};
61794+
61795 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61796 {
61797 struct ipc_namespace *ns;
61798- struct ipc_ops sem_ops;
61799 struct ipc_params sem_params;
61800
61801 ns = current->nsproxy->ipc_ns;
61802@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61803 if (nsems < 0 || nsems > ns->sc_semmsl)
61804 return -EINVAL;
61805
61806- sem_ops.getnew = newary;
61807- sem_ops.associate = sem_security;
61808- sem_ops.more_checks = sem_more_checks;
61809-
61810 sem_params.key = key;
61811 sem_params.flg = semflg;
61812 sem_params.u.nsems = nsems;
61813@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
61814 int nsems;
61815 struct list_head tasks;
61816
61817+ pax_track_stack();
61818+
61819 sma = sem_lock_check(ns, semid);
61820 if (IS_ERR(sma))
61821 return PTR_ERR(sma);
61822@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61823 struct ipc_namespace *ns;
61824 struct list_head tasks;
61825
61826+ pax_track_stack();
61827+
61828 ns = current->nsproxy->ipc_ns;
61829
61830 if (nsops < 1 || semid < 0)
61831diff -urNp linux-3.0.7/ipc/shm.c linux-3.0.7/ipc/shm.c
61832--- linux-3.0.7/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
61833+++ linux-3.0.7/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
61834@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
61835 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
61836 #endif
61837
61838+#ifdef CONFIG_GRKERNSEC
61839+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61840+ const time_t shm_createtime, const uid_t cuid,
61841+ const int shmid);
61842+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61843+ const time_t shm_createtime);
61844+#endif
61845+
61846 void shm_init_ns(struct ipc_namespace *ns)
61847 {
61848 ns->shm_ctlmax = SHMMAX;
61849@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
61850 shp->shm_lprid = 0;
61851 shp->shm_atim = shp->shm_dtim = 0;
61852 shp->shm_ctim = get_seconds();
61853+#ifdef CONFIG_GRKERNSEC
61854+ {
61855+ struct timespec timeval;
61856+ do_posix_clock_monotonic_gettime(&timeval);
61857+
61858+ shp->shm_createtime = timeval.tv_sec;
61859+ }
61860+#endif
61861 shp->shm_segsz = size;
61862 shp->shm_nattch = 0;
61863 shp->shm_file = file;
61864@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
61865 return 0;
61866 }
61867
61868+static struct ipc_ops shm_ops = {
61869+ .getnew = newseg,
61870+ .associate = shm_security,
61871+ .more_checks = shm_more_checks
61872+};
61873+
61874 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
61875 {
61876 struct ipc_namespace *ns;
61877- struct ipc_ops shm_ops;
61878 struct ipc_params shm_params;
61879
61880 ns = current->nsproxy->ipc_ns;
61881
61882- shm_ops.getnew = newseg;
61883- shm_ops.associate = shm_security;
61884- shm_ops.more_checks = shm_more_checks;
61885-
61886 shm_params.key = key;
61887 shm_params.flg = shmflg;
61888 shm_params.u.size = size;
61889@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
61890 case SHM_LOCK:
61891 case SHM_UNLOCK:
61892 {
61893- struct file *uninitialized_var(shm_file);
61894-
61895 lru_add_drain_all(); /* drain pagevecs to lru lists */
61896
61897 shp = shm_lock_check(ns, shmid);
61898@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
61899 if (err)
61900 goto out_unlock;
61901
61902+#ifdef CONFIG_GRKERNSEC
61903+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
61904+ shp->shm_perm.cuid, shmid) ||
61905+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
61906+ err = -EACCES;
61907+ goto out_unlock;
61908+ }
61909+#endif
61910+
61911 path = shp->shm_file->f_path;
61912 path_get(&path);
61913 shp->shm_nattch++;
61914+#ifdef CONFIG_GRKERNSEC
61915+ shp->shm_lapid = current->pid;
61916+#endif
61917 size = i_size_read(path.dentry->d_inode);
61918 shm_unlock(shp);
61919
61920diff -urNp linux-3.0.7/kernel/acct.c linux-3.0.7/kernel/acct.c
61921--- linux-3.0.7/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
61922+++ linux-3.0.7/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
61923@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
61924 */
61925 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
61926 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
61927- file->f_op->write(file, (char *)&ac,
61928+ file->f_op->write(file, (char __force_user *)&ac,
61929 sizeof(acct_t), &file->f_pos);
61930 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
61931 set_fs(fs);
61932diff -urNp linux-3.0.7/kernel/audit.c linux-3.0.7/kernel/audit.c
61933--- linux-3.0.7/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
61934+++ linux-3.0.7/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
61935@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
61936 3) suppressed due to audit_rate_limit
61937 4) suppressed due to audit_backlog_limit
61938 */
61939-static atomic_t audit_lost = ATOMIC_INIT(0);
61940+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
61941
61942 /* The netlink socket. */
61943 static struct sock *audit_sock;
61944@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
61945 unsigned long now;
61946 int print;
61947
61948- atomic_inc(&audit_lost);
61949+ atomic_inc_unchecked(&audit_lost);
61950
61951 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
61952
61953@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
61954 printk(KERN_WARNING
61955 "audit: audit_lost=%d audit_rate_limit=%d "
61956 "audit_backlog_limit=%d\n",
61957- atomic_read(&audit_lost),
61958+ atomic_read_unchecked(&audit_lost),
61959 audit_rate_limit,
61960 audit_backlog_limit);
61961 audit_panic(message);
61962@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
61963 status_set.pid = audit_pid;
61964 status_set.rate_limit = audit_rate_limit;
61965 status_set.backlog_limit = audit_backlog_limit;
61966- status_set.lost = atomic_read(&audit_lost);
61967+ status_set.lost = atomic_read_unchecked(&audit_lost);
61968 status_set.backlog = skb_queue_len(&audit_skb_queue);
61969 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
61970 &status_set, sizeof(status_set));
61971diff -urNp linux-3.0.7/kernel/auditsc.c linux-3.0.7/kernel/auditsc.c
61972--- linux-3.0.7/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
61973+++ linux-3.0.7/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
61974@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
61975 }
61976
61977 /* global counter which is incremented every time something logs in */
61978-static atomic_t session_id = ATOMIC_INIT(0);
61979+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
61980
61981 /**
61982 * audit_set_loginuid - set a task's audit_context loginuid
61983@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
61984 */
61985 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
61986 {
61987- unsigned int sessionid = atomic_inc_return(&session_id);
61988+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
61989 struct audit_context *context = task->audit_context;
61990
61991 if (context && context->in_syscall) {
61992diff -urNp linux-3.0.7/kernel/capability.c linux-3.0.7/kernel/capability.c
61993--- linux-3.0.7/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
61994+++ linux-3.0.7/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
61995@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
61996 * before modification is attempted and the application
61997 * fails.
61998 */
61999+ if (tocopy > ARRAY_SIZE(kdata))
62000+ return -EFAULT;
62001+
62002 if (copy_to_user(dataptr, kdata, tocopy
62003 * sizeof(struct __user_cap_data_struct))) {
62004 return -EFAULT;
62005@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62006 BUG();
62007 }
62008
62009- if (security_capable(ns, current_cred(), cap) == 0) {
62010+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62011 current->flags |= PF_SUPERPRIV;
62012 return true;
62013 }
62014@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62015 }
62016 EXPORT_SYMBOL(ns_capable);
62017
62018+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62019+{
62020+ if (unlikely(!cap_valid(cap))) {
62021+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62022+ BUG();
62023+ }
62024+
62025+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62026+ current->flags |= PF_SUPERPRIV;
62027+ return true;
62028+ }
62029+ return false;
62030+}
62031+EXPORT_SYMBOL(ns_capable_nolog);
62032+
62033+bool capable_nolog(int cap)
62034+{
62035+ return ns_capable_nolog(&init_user_ns, cap);
62036+}
62037+EXPORT_SYMBOL(capable_nolog);
62038+
62039 /**
62040 * task_ns_capable - Determine whether current task has a superior
62041 * capability targeted at a specific task's user namespace.
62042@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62043 }
62044 EXPORT_SYMBOL(task_ns_capable);
62045
62046+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62047+{
62048+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62049+}
62050+EXPORT_SYMBOL(task_ns_capable_nolog);
62051+
62052 /**
62053 * nsown_capable - Check superior capability to one's own user_ns
62054 * @cap: The capability in question
62055diff -urNp linux-3.0.7/kernel/cgroup.c linux-3.0.7/kernel/cgroup.c
62056--- linux-3.0.7/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
62057+++ linux-3.0.7/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
62058@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
62059 struct hlist_head *hhead;
62060 struct cg_cgroup_link *link;
62061
62062+ pax_track_stack();
62063+
62064 /* First see if we already have a cgroup group that matches
62065 * the desired set */
62066 read_lock(&css_set_lock);
62067diff -urNp linux-3.0.7/kernel/compat.c linux-3.0.7/kernel/compat.c
62068--- linux-3.0.7/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
62069+++ linux-3.0.7/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
62070@@ -13,6 +13,7 @@
62071
62072 #include <linux/linkage.h>
62073 #include <linux/compat.h>
62074+#include <linux/module.h>
62075 #include <linux/errno.h>
62076 #include <linux/time.h>
62077 #include <linux/signal.h>
62078@@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
62079 mm_segment_t oldfs;
62080 long ret;
62081
62082- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62083+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62084 oldfs = get_fs();
62085 set_fs(KERNEL_DS);
62086 ret = hrtimer_nanosleep_restart(restart);
62087@@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
62088 oldfs = get_fs();
62089 set_fs(KERNEL_DS);
62090 ret = hrtimer_nanosleep(&tu,
62091- rmtp ? (struct timespec __user *)&rmt : NULL,
62092+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62093 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62094 set_fs(oldfs);
62095
62096@@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
62097 mm_segment_t old_fs = get_fs();
62098
62099 set_fs(KERNEL_DS);
62100- ret = sys_sigpending((old_sigset_t __user *) &s);
62101+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62102 set_fs(old_fs);
62103 if (ret == 0)
62104 ret = put_user(s, set);
62105@@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
62106 old_fs = get_fs();
62107 set_fs(KERNEL_DS);
62108 ret = sys_sigprocmask(how,
62109- set ? (old_sigset_t __user *) &s : NULL,
62110- oset ? (old_sigset_t __user *) &s : NULL);
62111+ set ? (old_sigset_t __force_user *) &s : NULL,
62112+ oset ? (old_sigset_t __force_user *) &s : NULL);
62113 set_fs(old_fs);
62114 if (ret == 0)
62115 if (oset)
62116@@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
62117 mm_segment_t old_fs = get_fs();
62118
62119 set_fs(KERNEL_DS);
62120- ret = sys_old_getrlimit(resource, &r);
62121+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62122 set_fs(old_fs);
62123
62124 if (!ret) {
62125@@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
62126 mm_segment_t old_fs = get_fs();
62127
62128 set_fs(KERNEL_DS);
62129- ret = sys_getrusage(who, (struct rusage __user *) &r);
62130+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62131 set_fs(old_fs);
62132
62133 if (ret)
62134@@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62135 set_fs (KERNEL_DS);
62136 ret = sys_wait4(pid,
62137 (stat_addr ?
62138- (unsigned int __user *) &status : NULL),
62139- options, (struct rusage __user *) &r);
62140+ (unsigned int __force_user *) &status : NULL),
62141+ options, (struct rusage __force_user *) &r);
62142 set_fs (old_fs);
62143
62144 if (ret > 0) {
62145@@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
62146 memset(&info, 0, sizeof(info));
62147
62148 set_fs(KERNEL_DS);
62149- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62150- uru ? (struct rusage __user *)&ru : NULL);
62151+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62152+ uru ? (struct rusage __force_user *)&ru : NULL);
62153 set_fs(old_fs);
62154
62155 if ((ret < 0) || (info.si_signo == 0))
62156@@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
62157 oldfs = get_fs();
62158 set_fs(KERNEL_DS);
62159 err = sys_timer_settime(timer_id, flags,
62160- (struct itimerspec __user *) &newts,
62161- (struct itimerspec __user *) &oldts);
62162+ (struct itimerspec __force_user *) &newts,
62163+ (struct itimerspec __force_user *) &oldts);
62164 set_fs(oldfs);
62165 if (!err && old && put_compat_itimerspec(old, &oldts))
62166 return -EFAULT;
62167@@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
62168 oldfs = get_fs();
62169 set_fs(KERNEL_DS);
62170 err = sys_timer_gettime(timer_id,
62171- (struct itimerspec __user *) &ts);
62172+ (struct itimerspec __force_user *) &ts);
62173 set_fs(oldfs);
62174 if (!err && put_compat_itimerspec(setting, &ts))
62175 return -EFAULT;
62176@@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
62177 oldfs = get_fs();
62178 set_fs(KERNEL_DS);
62179 err = sys_clock_settime(which_clock,
62180- (struct timespec __user *) &ts);
62181+ (struct timespec __force_user *) &ts);
62182 set_fs(oldfs);
62183 return err;
62184 }
62185@@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
62186 oldfs = get_fs();
62187 set_fs(KERNEL_DS);
62188 err = sys_clock_gettime(which_clock,
62189- (struct timespec __user *) &ts);
62190+ (struct timespec __force_user *) &ts);
62191 set_fs(oldfs);
62192 if (!err && put_compat_timespec(&ts, tp))
62193 return -EFAULT;
62194@@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
62195
62196 oldfs = get_fs();
62197 set_fs(KERNEL_DS);
62198- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62199+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62200 set_fs(oldfs);
62201
62202 err = compat_put_timex(utp, &txc);
62203@@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
62204 oldfs = get_fs();
62205 set_fs(KERNEL_DS);
62206 err = sys_clock_getres(which_clock,
62207- (struct timespec __user *) &ts);
62208+ (struct timespec __force_user *) &ts);
62209 set_fs(oldfs);
62210 if (!err && tp && put_compat_timespec(&ts, tp))
62211 return -EFAULT;
62212@@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
62213 long err;
62214 mm_segment_t oldfs;
62215 struct timespec tu;
62216- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62217+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62218
62219- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62220+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62221 oldfs = get_fs();
62222 set_fs(KERNEL_DS);
62223 err = clock_nanosleep_restart(restart);
62224@@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
62225 oldfs = get_fs();
62226 set_fs(KERNEL_DS);
62227 err = sys_clock_nanosleep(which_clock, flags,
62228- (struct timespec __user *) &in,
62229- (struct timespec __user *) &out);
62230+ (struct timespec __force_user *) &in,
62231+ (struct timespec __force_user *) &out);
62232 set_fs(oldfs);
62233
62234 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62235diff -urNp linux-3.0.7/kernel/configs.c linux-3.0.7/kernel/configs.c
62236--- linux-3.0.7/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
62237+++ linux-3.0.7/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
62238@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62239 struct proc_dir_entry *entry;
62240
62241 /* create the current config file */
62242+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62243+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62244+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62245+ &ikconfig_file_ops);
62246+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62247+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62248+ &ikconfig_file_ops);
62249+#endif
62250+#else
62251 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62252 &ikconfig_file_ops);
62253+#endif
62254+
62255 if (!entry)
62256 return -ENOMEM;
62257
62258diff -urNp linux-3.0.7/kernel/cred.c linux-3.0.7/kernel/cred.c
62259--- linux-3.0.7/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
62260+++ linux-3.0.7/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
62261@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62262 */
62263 void __put_cred(struct cred *cred)
62264 {
62265+ pax_track_stack();
62266+
62267 kdebug("__put_cred(%p{%d,%d})", cred,
62268 atomic_read(&cred->usage),
62269 read_cred_subscribers(cred));
62270@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62271 {
62272 struct cred *cred;
62273
62274+ pax_track_stack();
62275+
62276 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62277 atomic_read(&tsk->cred->usage),
62278 read_cred_subscribers(tsk->cred));
62279@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62280 {
62281 const struct cred *cred;
62282
62283+ pax_track_stack();
62284+
62285 rcu_read_lock();
62286
62287 do {
62288@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62289 {
62290 struct cred *new;
62291
62292+ pax_track_stack();
62293+
62294 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62295 if (!new)
62296 return NULL;
62297@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62298 const struct cred *old;
62299 struct cred *new;
62300
62301+ pax_track_stack();
62302+
62303 validate_process_creds();
62304
62305 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62306@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62307 struct thread_group_cred *tgcred = NULL;
62308 struct cred *new;
62309
62310+ pax_track_stack();
62311+
62312 #ifdef CONFIG_KEYS
62313 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62314 if (!tgcred)
62315@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62316 struct cred *new;
62317 int ret;
62318
62319+ pax_track_stack();
62320+
62321 if (
62322 #ifdef CONFIG_KEYS
62323 !p->cred->thread_keyring &&
62324@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62325 struct task_struct *task = current;
62326 const struct cred *old = task->real_cred;
62327
62328+ pax_track_stack();
62329+
62330 kdebug("commit_creds(%p{%d,%d})", new,
62331 atomic_read(&new->usage),
62332 read_cred_subscribers(new));
62333@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62334
62335 get_cred(new); /* we will require a ref for the subj creds too */
62336
62337+ gr_set_role_label(task, new->uid, new->gid);
62338+
62339 /* dumpability changes */
62340 if (old->euid != new->euid ||
62341 old->egid != new->egid ||
62342@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
62343 key_fsgid_changed(task);
62344
62345 /* do it
62346- * - What if a process setreuid()'s and this brings the
62347- * new uid over his NPROC rlimit? We can check this now
62348- * cheaply with the new uid cache, so if it matters
62349- * we should be checking for it. -DaveM
62350+ * RLIMIT_NPROC limits on user->processes have already been checked
62351+ * in set_user().
62352 */
62353 alter_cred_subscribers(new, 2);
62354 if (new->user != old->user)
62355@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62356 */
62357 void abort_creds(struct cred *new)
62358 {
62359+ pax_track_stack();
62360+
62361 kdebug("abort_creds(%p{%d,%d})", new,
62362 atomic_read(&new->usage),
62363 read_cred_subscribers(new));
62364@@ -574,6 +592,8 @@ const struct cred *override_creds(const
62365 {
62366 const struct cred *old = current->cred;
62367
62368+ pax_track_stack();
62369+
62370 kdebug("override_creds(%p{%d,%d})", new,
62371 atomic_read(&new->usage),
62372 read_cred_subscribers(new));
62373@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
62374 {
62375 const struct cred *override = current->cred;
62376
62377+ pax_track_stack();
62378+
62379 kdebug("revert_creds(%p{%d,%d})", old,
62380 atomic_read(&old->usage),
62381 read_cred_subscribers(old));
62382@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62383 const struct cred *old;
62384 struct cred *new;
62385
62386+ pax_track_stack();
62387+
62388 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62389 if (!new)
62390 return NULL;
62391@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62392 */
62393 int set_security_override(struct cred *new, u32 secid)
62394 {
62395+ pax_track_stack();
62396+
62397 return security_kernel_act_as(new, secid);
62398 }
62399 EXPORT_SYMBOL(set_security_override);
62400@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
62401 u32 secid;
62402 int ret;
62403
62404+ pax_track_stack();
62405+
62406 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62407 if (ret < 0)
62408 return ret;
62409diff -urNp linux-3.0.7/kernel/debug/debug_core.c linux-3.0.7/kernel/debug/debug_core.c
62410--- linux-3.0.7/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
62411+++ linux-3.0.7/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
62412@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62413 */
62414 static atomic_t masters_in_kgdb;
62415 static atomic_t slaves_in_kgdb;
62416-static atomic_t kgdb_break_tasklet_var;
62417+static atomic_unchecked_t kgdb_break_tasklet_var;
62418 atomic_t kgdb_setting_breakpoint;
62419
62420 struct task_struct *kgdb_usethread;
62421@@ -129,7 +129,7 @@ int kgdb_single_step;
62422 static pid_t kgdb_sstep_pid;
62423
62424 /* to keep track of the CPU which is doing the single stepping*/
62425-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62426+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62427
62428 /*
62429 * If you are debugging a problem where roundup (the collection of
62430@@ -542,7 +542,7 @@ return_normal:
62431 * kernel will only try for the value of sstep_tries before
62432 * giving up and continuing on.
62433 */
62434- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62435+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62436 (kgdb_info[cpu].task &&
62437 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62438 atomic_set(&kgdb_active, -1);
62439@@ -636,8 +636,8 @@ cpu_master_loop:
62440 }
62441
62442 kgdb_restore:
62443- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62444- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62445+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62446+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62447 if (kgdb_info[sstep_cpu].task)
62448 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62449 else
62450@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62451 static void kgdb_tasklet_bpt(unsigned long ing)
62452 {
62453 kgdb_breakpoint();
62454- atomic_set(&kgdb_break_tasklet_var, 0);
62455+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62456 }
62457
62458 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62459
62460 void kgdb_schedule_breakpoint(void)
62461 {
62462- if (atomic_read(&kgdb_break_tasklet_var) ||
62463+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62464 atomic_read(&kgdb_active) != -1 ||
62465 atomic_read(&kgdb_setting_breakpoint))
62466 return;
62467- atomic_inc(&kgdb_break_tasklet_var);
62468+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62469 tasklet_schedule(&kgdb_tasklet_breakpoint);
62470 }
62471 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62472diff -urNp linux-3.0.7/kernel/debug/kdb/kdb_main.c linux-3.0.7/kernel/debug/kdb/kdb_main.c
62473--- linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
62474+++ linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
62475@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62476 list_for_each_entry(mod, kdb_modules, list) {
62477
62478 kdb_printf("%-20s%8u 0x%p ", mod->name,
62479- mod->core_size, (void *)mod);
62480+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62481 #ifdef CONFIG_MODULE_UNLOAD
62482 kdb_printf("%4d ", module_refcount(mod));
62483 #endif
62484@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62485 kdb_printf(" (Loading)");
62486 else
62487 kdb_printf(" (Live)");
62488- kdb_printf(" 0x%p", mod->module_core);
62489+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62490
62491 #ifdef CONFIG_MODULE_UNLOAD
62492 {
62493diff -urNp linux-3.0.7/kernel/events/core.c linux-3.0.7/kernel/events/core.c
62494--- linux-3.0.7/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
62495+++ linux-3.0.7/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
62496@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
62497 return 0;
62498 }
62499
62500-static atomic64_t perf_event_id;
62501+static atomic64_unchecked_t perf_event_id;
62502
62503 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62504 enum event_type_t event_type);
62505@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
62506
62507 static inline u64 perf_event_count(struct perf_event *event)
62508 {
62509- return local64_read(&event->count) + atomic64_read(&event->child_count);
62510+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62511 }
62512
62513 static u64 perf_event_read(struct perf_event *event)
62514@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
62515 mutex_lock(&event->child_mutex);
62516 total += perf_event_read(event);
62517 *enabled += event->total_time_enabled +
62518- atomic64_read(&event->child_total_time_enabled);
62519+ atomic64_read_unchecked(&event->child_total_time_enabled);
62520 *running += event->total_time_running +
62521- atomic64_read(&event->child_total_time_running);
62522+ atomic64_read_unchecked(&event->child_total_time_running);
62523
62524 list_for_each_entry(child, &event->child_list, child_list) {
62525 total += perf_event_read(child);
62526@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
62527 userpg->offset -= local64_read(&event->hw.prev_count);
62528
62529 userpg->time_enabled = event->total_time_enabled +
62530- atomic64_read(&event->child_total_time_enabled);
62531+ atomic64_read_unchecked(&event->child_total_time_enabled);
62532
62533 userpg->time_running = event->total_time_running +
62534- atomic64_read(&event->child_total_time_running);
62535+ atomic64_read_unchecked(&event->child_total_time_running);
62536
62537 barrier();
62538 ++userpg->lock;
62539@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
62540 values[n++] = perf_event_count(event);
62541 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62542 values[n++] = enabled +
62543- atomic64_read(&event->child_total_time_enabled);
62544+ atomic64_read_unchecked(&event->child_total_time_enabled);
62545 }
62546 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62547 values[n++] = running +
62548- atomic64_read(&event->child_total_time_running);
62549+ atomic64_read_unchecked(&event->child_total_time_running);
62550 }
62551 if (read_format & PERF_FORMAT_ID)
62552 values[n++] = primary_event_id(event);
62553@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
62554 * need to add enough zero bytes after the string to handle
62555 * the 64bit alignment we do later.
62556 */
62557- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62558+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62559 if (!buf) {
62560 name = strncpy(tmp, "//enomem", sizeof(tmp));
62561 goto got_name;
62562 }
62563- name = d_path(&file->f_path, buf, PATH_MAX);
62564+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62565 if (IS_ERR(name)) {
62566 name = strncpy(tmp, "//toolong", sizeof(tmp));
62567 goto got_name;
62568@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
62569 event->parent = parent_event;
62570
62571 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62572- event->id = atomic64_inc_return(&perf_event_id);
62573+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62574
62575 event->state = PERF_EVENT_STATE_INACTIVE;
62576
62577@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
62578 /*
62579 * Add back the child's count to the parent's count:
62580 */
62581- atomic64_add(child_val, &parent_event->child_count);
62582- atomic64_add(child_event->total_time_enabled,
62583+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62584+ atomic64_add_unchecked(child_event->total_time_enabled,
62585 &parent_event->child_total_time_enabled);
62586- atomic64_add(child_event->total_time_running,
62587+ atomic64_add_unchecked(child_event->total_time_running,
62588 &parent_event->child_total_time_running);
62589
62590 /*
62591diff -urNp linux-3.0.7/kernel/exit.c linux-3.0.7/kernel/exit.c
62592--- linux-3.0.7/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
62593+++ linux-3.0.7/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
62594@@ -57,6 +57,10 @@
62595 #include <asm/pgtable.h>
62596 #include <asm/mmu_context.h>
62597
62598+#ifdef CONFIG_GRKERNSEC
62599+extern rwlock_t grsec_exec_file_lock;
62600+#endif
62601+
62602 static void exit_mm(struct task_struct * tsk);
62603
62604 static void __unhash_process(struct task_struct *p, bool group_dead)
62605@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
62606 struct task_struct *leader;
62607 int zap_leader;
62608 repeat:
62609+#ifdef CONFIG_NET
62610+ gr_del_task_from_ip_table(p);
62611+#endif
62612+
62613 tracehook_prepare_release_task(p);
62614 /* don't need to get the RCU readlock here - the process is dead and
62615 * can't be modifying its own credentials. But shut RCU-lockdep up */
62616@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
62617 {
62618 write_lock_irq(&tasklist_lock);
62619
62620+#ifdef CONFIG_GRKERNSEC
62621+ write_lock(&grsec_exec_file_lock);
62622+ if (current->exec_file) {
62623+ fput(current->exec_file);
62624+ current->exec_file = NULL;
62625+ }
62626+ write_unlock(&grsec_exec_file_lock);
62627+#endif
62628+
62629 ptrace_unlink(current);
62630 /* Reparent to init */
62631 current->real_parent = current->parent = kthreadd_task;
62632 list_move_tail(&current->sibling, &current->real_parent->children);
62633
62634+ gr_set_kernel_label(current);
62635+
62636 /* Set the exit signal to SIGCHLD so we signal init on exit */
62637 current->exit_signal = SIGCHLD;
62638
62639@@ -394,7 +413,7 @@ int allow_signal(int sig)
62640 * know it'll be handled, so that they don't get converted to
62641 * SIGKILL or just silently dropped.
62642 */
62643- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62644+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62645 recalc_sigpending();
62646 spin_unlock_irq(&current->sighand->siglock);
62647 return 0;
62648@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
62649 vsnprintf(current->comm, sizeof(current->comm), name, args);
62650 va_end(args);
62651
62652+#ifdef CONFIG_GRKERNSEC
62653+ write_lock(&grsec_exec_file_lock);
62654+ if (current->exec_file) {
62655+ fput(current->exec_file);
62656+ current->exec_file = NULL;
62657+ }
62658+ write_unlock(&grsec_exec_file_lock);
62659+#endif
62660+
62661+ gr_set_kernel_label(current);
62662+
62663 /*
62664 * If we were started as result of loading a module, close all of the
62665 * user space pages. We don't need them, and if we didn't close them
62666@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
62667 struct task_struct *tsk = current;
62668 int group_dead;
62669
62670- profile_task_exit(tsk);
62671-
62672- WARN_ON(atomic_read(&tsk->fs_excl));
62673- WARN_ON(blk_needs_flush_plug(tsk));
62674-
62675 if (unlikely(in_interrupt()))
62676 panic("Aiee, killing interrupt handler!");
62677- if (unlikely(!tsk->pid))
62678- panic("Attempted to kill the idle task!");
62679
62680 /*
62681 * If do_exit is called because this processes oopsed, it's possible
62682@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
62683 */
62684 set_fs(USER_DS);
62685
62686+ profile_task_exit(tsk);
62687+
62688+ WARN_ON(atomic_read(&tsk->fs_excl));
62689+ WARN_ON(blk_needs_flush_plug(tsk));
62690+
62691+ if (unlikely(!tsk->pid))
62692+ panic("Attempted to kill the idle task!");
62693+
62694 tracehook_report_exit(&code);
62695
62696 validate_creds_for_do_exit(tsk);
62697@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
62698 tsk->exit_code = code;
62699 taskstats_exit(tsk, group_dead);
62700
62701+ gr_acl_handle_psacct(tsk, code);
62702+ gr_acl_handle_exit();
62703+
62704 exit_mm(tsk);
62705
62706 if (group_dead)
62707diff -urNp linux-3.0.7/kernel/fork.c linux-3.0.7/kernel/fork.c
62708--- linux-3.0.7/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
62709+++ linux-3.0.7/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
62710@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
62711 *stackend = STACK_END_MAGIC; /* for overflow detection */
62712
62713 #ifdef CONFIG_CC_STACKPROTECTOR
62714- tsk->stack_canary = get_random_int();
62715+ tsk->stack_canary = pax_get_random_long();
62716 #endif
62717
62718 /* One for us, one for whoever does the "release_task()" (usually parent) */
62719@@ -308,13 +308,77 @@ out:
62720 }
62721
62722 #ifdef CONFIG_MMU
62723+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62724+{
62725+ struct vm_area_struct *tmp;
62726+ unsigned long charge;
62727+ struct mempolicy *pol;
62728+ struct file *file;
62729+
62730+ charge = 0;
62731+ if (mpnt->vm_flags & VM_ACCOUNT) {
62732+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62733+ if (security_vm_enough_memory(len))
62734+ goto fail_nomem;
62735+ charge = len;
62736+ }
62737+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62738+ if (!tmp)
62739+ goto fail_nomem;
62740+ *tmp = *mpnt;
62741+ tmp->vm_mm = mm;
62742+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62743+ pol = mpol_dup(vma_policy(mpnt));
62744+ if (IS_ERR(pol))
62745+ goto fail_nomem_policy;
62746+ vma_set_policy(tmp, pol);
62747+ if (anon_vma_fork(tmp, mpnt))
62748+ goto fail_nomem_anon_vma_fork;
62749+ tmp->vm_flags &= ~VM_LOCKED;
62750+ tmp->vm_next = tmp->vm_prev = NULL;
62751+ tmp->vm_mirror = NULL;
62752+ file = tmp->vm_file;
62753+ if (file) {
62754+ struct inode *inode = file->f_path.dentry->d_inode;
62755+ struct address_space *mapping = file->f_mapping;
62756+
62757+ get_file(file);
62758+ if (tmp->vm_flags & VM_DENYWRITE)
62759+ atomic_dec(&inode->i_writecount);
62760+ mutex_lock(&mapping->i_mmap_mutex);
62761+ if (tmp->vm_flags & VM_SHARED)
62762+ mapping->i_mmap_writable++;
62763+ flush_dcache_mmap_lock(mapping);
62764+ /* insert tmp into the share list, just after mpnt */
62765+ vma_prio_tree_add(tmp, mpnt);
62766+ flush_dcache_mmap_unlock(mapping);
62767+ mutex_unlock(&mapping->i_mmap_mutex);
62768+ }
62769+
62770+ /*
62771+ * Clear hugetlb-related page reserves for children. This only
62772+ * affects MAP_PRIVATE mappings. Faults generated by the child
62773+ * are not guaranteed to succeed, even if read-only
62774+ */
62775+ if (is_vm_hugetlb_page(tmp))
62776+ reset_vma_resv_huge_pages(tmp);
62777+
62778+ return tmp;
62779+
62780+fail_nomem_anon_vma_fork:
62781+ mpol_put(pol);
62782+fail_nomem_policy:
62783+ kmem_cache_free(vm_area_cachep, tmp);
62784+fail_nomem:
62785+ vm_unacct_memory(charge);
62786+ return NULL;
62787+}
62788+
62789 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62790 {
62791 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62792 struct rb_node **rb_link, *rb_parent;
62793 int retval;
62794- unsigned long charge;
62795- struct mempolicy *pol;
62796
62797 down_write(&oldmm->mmap_sem);
62798 flush_cache_dup_mm(oldmm);
62799@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
62800 mm->locked_vm = 0;
62801 mm->mmap = NULL;
62802 mm->mmap_cache = NULL;
62803- mm->free_area_cache = oldmm->mmap_base;
62804- mm->cached_hole_size = ~0UL;
62805+ mm->free_area_cache = oldmm->free_area_cache;
62806+ mm->cached_hole_size = oldmm->cached_hole_size;
62807 mm->map_count = 0;
62808 cpumask_clear(mm_cpumask(mm));
62809 mm->mm_rb = RB_ROOT;
62810@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
62811
62812 prev = NULL;
62813 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
62814- struct file *file;
62815-
62816 if (mpnt->vm_flags & VM_DONTCOPY) {
62817 long pages = vma_pages(mpnt);
62818 mm->total_vm -= pages;
62819@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
62820 -pages);
62821 continue;
62822 }
62823- charge = 0;
62824- if (mpnt->vm_flags & VM_ACCOUNT) {
62825- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62826- if (security_vm_enough_memory(len))
62827- goto fail_nomem;
62828- charge = len;
62829- }
62830- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62831- if (!tmp)
62832- goto fail_nomem;
62833- *tmp = *mpnt;
62834- INIT_LIST_HEAD(&tmp->anon_vma_chain);
62835- pol = mpol_dup(vma_policy(mpnt));
62836- retval = PTR_ERR(pol);
62837- if (IS_ERR(pol))
62838- goto fail_nomem_policy;
62839- vma_set_policy(tmp, pol);
62840- tmp->vm_mm = mm;
62841- if (anon_vma_fork(tmp, mpnt))
62842- goto fail_nomem_anon_vma_fork;
62843- tmp->vm_flags &= ~VM_LOCKED;
62844- tmp->vm_next = tmp->vm_prev = NULL;
62845- file = tmp->vm_file;
62846- if (file) {
62847- struct inode *inode = file->f_path.dentry->d_inode;
62848- struct address_space *mapping = file->f_mapping;
62849-
62850- get_file(file);
62851- if (tmp->vm_flags & VM_DENYWRITE)
62852- atomic_dec(&inode->i_writecount);
62853- mutex_lock(&mapping->i_mmap_mutex);
62854- if (tmp->vm_flags & VM_SHARED)
62855- mapping->i_mmap_writable++;
62856- flush_dcache_mmap_lock(mapping);
62857- /* insert tmp into the share list, just after mpnt */
62858- vma_prio_tree_add(tmp, mpnt);
62859- flush_dcache_mmap_unlock(mapping);
62860- mutex_unlock(&mapping->i_mmap_mutex);
62861+ tmp = dup_vma(mm, mpnt);
62862+ if (!tmp) {
62863+ retval = -ENOMEM;
62864+ goto out;
62865 }
62866
62867 /*
62868- * Clear hugetlb-related page reserves for children. This only
62869- * affects MAP_PRIVATE mappings. Faults generated by the child
62870- * are not guaranteed to succeed, even if read-only
62871- */
62872- if (is_vm_hugetlb_page(tmp))
62873- reset_vma_resv_huge_pages(tmp);
62874-
62875- /*
62876 * Link in the new vma and copy the page table entries.
62877 */
62878 *pprev = tmp;
62879@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
62880 if (retval)
62881 goto out;
62882 }
62883+
62884+#ifdef CONFIG_PAX_SEGMEXEC
62885+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62886+ struct vm_area_struct *mpnt_m;
62887+
62888+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62889+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62890+
62891+ if (!mpnt->vm_mirror)
62892+ continue;
62893+
62894+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62895+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62896+ mpnt->vm_mirror = mpnt_m;
62897+ } else {
62898+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62899+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62900+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62901+ mpnt->vm_mirror->vm_mirror = mpnt;
62902+ }
62903+ }
62904+ BUG_ON(mpnt_m);
62905+ }
62906+#endif
62907+
62908 /* a new mm has just been created */
62909 arch_dup_mmap(oldmm, mm);
62910 retval = 0;
62911@@ -429,14 +474,6 @@ out:
62912 flush_tlb_mm(oldmm);
62913 up_write(&oldmm->mmap_sem);
62914 return retval;
62915-fail_nomem_anon_vma_fork:
62916- mpol_put(pol);
62917-fail_nomem_policy:
62918- kmem_cache_free(vm_area_cachep, tmp);
62919-fail_nomem:
62920- retval = -ENOMEM;
62921- vm_unacct_memory(charge);
62922- goto out;
62923 }
62924
62925 static inline int mm_alloc_pgd(struct mm_struct * mm)
62926@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
62927 spin_unlock(&fs->lock);
62928 return -EAGAIN;
62929 }
62930- fs->users++;
62931+ atomic_inc(&fs->users);
62932 spin_unlock(&fs->lock);
62933 return 0;
62934 }
62935 tsk->fs = copy_fs_struct(fs);
62936 if (!tsk->fs)
62937 return -ENOMEM;
62938+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62939 return 0;
62940 }
62941
62942@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
62943 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62944 #endif
62945 retval = -EAGAIN;
62946+
62947+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62948+
62949 if (atomic_read(&p->real_cred->user->processes) >=
62950 task_rlimit(p, RLIMIT_NPROC)) {
62951- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62952- p->real_cred->user != INIT_USER)
62953+ if (p->real_cred->user != INIT_USER &&
62954+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
62955 goto bad_fork_free;
62956 }
62957+ current->flags &= ~PF_NPROC_EXCEEDED;
62958
62959 retval = copy_creds(p, clone_flags);
62960 if (retval < 0)
62961@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
62962 if (clone_flags & CLONE_THREAD)
62963 p->tgid = current->tgid;
62964
62965+ gr_copy_label(p);
62966+
62967 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62968 /*
62969 * Clear TID on mm_release()?
62970@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
62971 bad_fork_free:
62972 free_task(p);
62973 fork_out:
62974+ gr_log_forkfail(retval);
62975+
62976 return ERR_PTR(retval);
62977 }
62978
62979@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
62980 if (clone_flags & CLONE_PARENT_SETTID)
62981 put_user(nr, parent_tidptr);
62982
62983+ gr_handle_brute_check();
62984+
62985 if (clone_flags & CLONE_VFORK) {
62986 p->vfork_done = &vfork;
62987 init_completion(&vfork);
62988@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
62989 return 0;
62990
62991 /* don't need lock here; in the worst case we'll do useless copy */
62992- if (fs->users == 1)
62993+ if (atomic_read(&fs->users) == 1)
62994 return 0;
62995
62996 *new_fsp = copy_fs_struct(fs);
62997@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62998 fs = current->fs;
62999 spin_lock(&fs->lock);
63000 current->fs = new_fs;
63001- if (--fs->users)
63002+ gr_set_chroot_entries(current, &current->fs->root);
63003+ if (atomic_dec_return(&fs->users))
63004 new_fs = NULL;
63005 else
63006 new_fs = fs;
63007diff -urNp linux-3.0.7/kernel/futex.c linux-3.0.7/kernel/futex.c
63008--- linux-3.0.7/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
63009+++ linux-3.0.7/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
63010@@ -54,6 +54,7 @@
63011 #include <linux/mount.h>
63012 #include <linux/pagemap.h>
63013 #include <linux/syscalls.h>
63014+#include <linux/ptrace.h>
63015 #include <linux/signal.h>
63016 #include <linux/module.h>
63017 #include <linux/magic.h>
63018@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63019 struct page *page, *page_head;
63020 int err, ro = 0;
63021
63022+#ifdef CONFIG_PAX_SEGMEXEC
63023+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63024+ return -EFAULT;
63025+#endif
63026+
63027 /*
63028 * The futex address must be "naturally" aligned.
63029 */
63030@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63031 struct futex_q q = futex_q_init;
63032 int ret;
63033
63034+ pax_track_stack();
63035+
63036 if (!bitset)
63037 return -EINVAL;
63038 q.bitset = bitset;
63039@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63040 struct futex_q q = futex_q_init;
63041 int res, ret;
63042
63043+ pax_track_stack();
63044+
63045 if (!bitset)
63046 return -EINVAL;
63047
63048@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63049 {
63050 struct robust_list_head __user *head;
63051 unsigned long ret;
63052+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63053 const struct cred *cred = current_cred(), *pcred;
63054+#endif
63055
63056 if (!futex_cmpxchg_enabled)
63057 return -ENOSYS;
63058@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63059 if (!p)
63060 goto err_unlock;
63061 ret = -EPERM;
63062+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63063+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63064+ goto err_unlock;
63065+#else
63066 pcred = __task_cred(p);
63067 /* If victim is in different user_ns, then uids are not
63068 comparable, so we must have CAP_SYS_PTRACE */
63069@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63070 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63071 goto err_unlock;
63072 ok:
63073+#endif
63074 head = p->robust_list;
63075 rcu_read_unlock();
63076 }
63077@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63078 {
63079 u32 curval;
63080 int i;
63081+ mm_segment_t oldfs;
63082
63083 /*
63084 * This will fail and we want it. Some arch implementations do
63085@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63086 * implementation, the non-functional ones will return
63087 * -ENOSYS.
63088 */
63089+ oldfs = get_fs();
63090+ set_fs(USER_DS);
63091 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63092 futex_cmpxchg_enabled = 1;
63093+ set_fs(oldfs);
63094
63095 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63096 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
63097diff -urNp linux-3.0.7/kernel/futex_compat.c linux-3.0.7/kernel/futex_compat.c
63098--- linux-3.0.7/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
63099+++ linux-3.0.7/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
63100@@ -10,6 +10,7 @@
63101 #include <linux/compat.h>
63102 #include <linux/nsproxy.h>
63103 #include <linux/futex.h>
63104+#include <linux/ptrace.h>
63105
63106 #include <asm/uaccess.h>
63107
63108@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63109 {
63110 struct compat_robust_list_head __user *head;
63111 unsigned long ret;
63112- const struct cred *cred = current_cred(), *pcred;
63113+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63114+ const struct cred *cred = current_cred();
63115+ const struct cred *pcred;
63116+#endif
63117
63118 if (!futex_cmpxchg_enabled)
63119 return -ENOSYS;
63120@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63121 if (!p)
63122 goto err_unlock;
63123 ret = -EPERM;
63124+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63125+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63126+ goto err_unlock;
63127+#else
63128 pcred = __task_cred(p);
63129 /* If victim is in different user_ns, then uids are not
63130 comparable, so we must have CAP_SYS_PTRACE */
63131@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63132 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63133 goto err_unlock;
63134 ok:
63135+#endif
63136 head = p->compat_robust_list;
63137 rcu_read_unlock();
63138 }
63139diff -urNp linux-3.0.7/kernel/gcov/base.c linux-3.0.7/kernel/gcov/base.c
63140--- linux-3.0.7/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
63141+++ linux-3.0.7/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
63142@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63143 }
63144
63145 #ifdef CONFIG_MODULES
63146-static inline int within(void *addr, void *start, unsigned long size)
63147-{
63148- return ((addr >= start) && (addr < start + size));
63149-}
63150-
63151 /* Update list and generate events when modules are unloaded. */
63152 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63153 void *data)
63154@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63155 prev = NULL;
63156 /* Remove entries located in module from linked list. */
63157 for (info = gcov_info_head; info; info = info->next) {
63158- if (within(info, mod->module_core, mod->core_size)) {
63159+ if (within_module_core_rw((unsigned long)info, mod)) {
63160 if (prev)
63161 prev->next = info->next;
63162 else
63163diff -urNp linux-3.0.7/kernel/hrtimer.c linux-3.0.7/kernel/hrtimer.c
63164--- linux-3.0.7/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
63165+++ linux-3.0.7/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
63166@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63167 local_irq_restore(flags);
63168 }
63169
63170-static void run_hrtimer_softirq(struct softirq_action *h)
63171+static void run_hrtimer_softirq(void)
63172 {
63173 hrtimer_peek_ahead_timers();
63174 }
63175diff -urNp linux-3.0.7/kernel/jump_label.c linux-3.0.7/kernel/jump_label.c
63176--- linux-3.0.7/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
63177+++ linux-3.0.7/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
63178@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63179
63180 size = (((unsigned long)stop - (unsigned long)start)
63181 / sizeof(struct jump_entry));
63182+ pax_open_kernel();
63183 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63184+ pax_close_kernel();
63185 }
63186
63187 static void jump_label_update(struct jump_label_key *key, int enable);
63188@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63189 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63190 struct jump_entry *iter;
63191
63192+ pax_open_kernel();
63193 for (iter = iter_start; iter < iter_stop; iter++) {
63194 if (within_module_init(iter->code, mod))
63195 iter->code = 0;
63196 }
63197+ pax_close_kernel();
63198 }
63199
63200 static int
63201diff -urNp linux-3.0.7/kernel/kallsyms.c linux-3.0.7/kernel/kallsyms.c
63202--- linux-3.0.7/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
63203+++ linux-3.0.7/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
63204@@ -11,6 +11,9 @@
63205 * Changed the compression method from stem compression to "table lookup"
63206 * compression (see scripts/kallsyms.c for a more complete description)
63207 */
63208+#ifdef CONFIG_GRKERNSEC_HIDESYM
63209+#define __INCLUDED_BY_HIDESYM 1
63210+#endif
63211 #include <linux/kallsyms.h>
63212 #include <linux/module.h>
63213 #include <linux/init.h>
63214@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63215
63216 static inline int is_kernel_inittext(unsigned long addr)
63217 {
63218+ if (system_state != SYSTEM_BOOTING)
63219+ return 0;
63220+
63221 if (addr >= (unsigned long)_sinittext
63222 && addr <= (unsigned long)_einittext)
63223 return 1;
63224 return 0;
63225 }
63226
63227+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63228+#ifdef CONFIG_MODULES
63229+static inline int is_module_text(unsigned long addr)
63230+{
63231+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63232+ return 1;
63233+
63234+ addr = ktla_ktva(addr);
63235+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63236+}
63237+#else
63238+static inline int is_module_text(unsigned long addr)
63239+{
63240+ return 0;
63241+}
63242+#endif
63243+#endif
63244+
63245 static inline int is_kernel_text(unsigned long addr)
63246 {
63247 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63248@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63249
63250 static inline int is_kernel(unsigned long addr)
63251 {
63252+
63253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63254+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63255+ return 1;
63256+
63257+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63258+#else
63259 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63260+#endif
63261+
63262 return 1;
63263 return in_gate_area_no_mm(addr);
63264 }
63265
63266 static int is_ksym_addr(unsigned long addr)
63267 {
63268+
63269+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63270+ if (is_module_text(addr))
63271+ return 0;
63272+#endif
63273+
63274 if (all_var)
63275 return is_kernel(addr);
63276
63277@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63278
63279 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63280 {
63281- iter->name[0] = '\0';
63282 iter->nameoff = get_symbol_offset(new_pos);
63283 iter->pos = new_pos;
63284 }
63285@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63286 {
63287 struct kallsym_iter *iter = m->private;
63288
63289+#ifdef CONFIG_GRKERNSEC_HIDESYM
63290+ if (current_uid())
63291+ return 0;
63292+#endif
63293+
63294 /* Some debugging symbols have no name. Ignore them. */
63295 if (!iter->name[0])
63296 return 0;
63297@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63298 struct kallsym_iter *iter;
63299 int ret;
63300
63301- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63302+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63303 if (!iter)
63304 return -ENOMEM;
63305 reset_iter(iter, 0);
63306diff -urNp linux-3.0.7/kernel/kexec.c linux-3.0.7/kernel/kexec.c
63307--- linux-3.0.7/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
63308+++ linux-3.0.7/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
63309@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63310 unsigned long flags)
63311 {
63312 struct compat_kexec_segment in;
63313- struct kexec_segment out, __user *ksegments;
63314+ struct kexec_segment out;
63315+ struct kexec_segment __user *ksegments;
63316 unsigned long i, result;
63317
63318 /* Don't allow clients that don't understand the native
63319diff -urNp linux-3.0.7/kernel/kmod.c linux-3.0.7/kernel/kmod.c
63320--- linux-3.0.7/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
63321+++ linux-3.0.7/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
63322@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63323 * If module auto-loading support is disabled then this function
63324 * becomes a no-operation.
63325 */
63326-int __request_module(bool wait, const char *fmt, ...)
63327+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63328 {
63329- va_list args;
63330 char module_name[MODULE_NAME_LEN];
63331 unsigned int max_modprobes;
63332 int ret;
63333- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63334+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63335 static char *envp[] = { "HOME=/",
63336 "TERM=linux",
63337 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63338@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63339 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63340 static int kmod_loop_msg;
63341
63342- va_start(args, fmt);
63343- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63344- va_end(args);
63345+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63346 if (ret >= MODULE_NAME_LEN)
63347 return -ENAMETOOLONG;
63348
63349@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63350 if (ret)
63351 return ret;
63352
63353+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63354+ if (!current_uid()) {
63355+ /* hack to workaround consolekit/udisks stupidity */
63356+ read_lock(&tasklist_lock);
63357+ if (!strcmp(current->comm, "mount") &&
63358+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63359+ read_unlock(&tasklist_lock);
63360+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63361+ return -EPERM;
63362+ }
63363+ read_unlock(&tasklist_lock);
63364+ }
63365+#endif
63366+
63367 /* If modprobe needs a service that is in a module, we get a recursive
63368 * loop. Limit the number of running kmod threads to max_threads/2 or
63369 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63370@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
63371 atomic_dec(&kmod_concurrent);
63372 return ret;
63373 }
63374+
63375+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63376+{
63377+ va_list args;
63378+ int ret;
63379+
63380+ va_start(args, fmt);
63381+ ret = ____request_module(wait, module_param, fmt, args);
63382+ va_end(args);
63383+
63384+ return ret;
63385+}
63386+
63387+int __request_module(bool wait, const char *fmt, ...)
63388+{
63389+ va_list args;
63390+ int ret;
63391+
63392+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63393+ if (current_uid()) {
63394+ char module_param[MODULE_NAME_LEN];
63395+
63396+ memset(module_param, 0, sizeof(module_param));
63397+
63398+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63399+
63400+ va_start(args, fmt);
63401+ ret = ____request_module(wait, module_param, fmt, args);
63402+ va_end(args);
63403+
63404+ return ret;
63405+ }
63406+#endif
63407+
63408+ va_start(args, fmt);
63409+ ret = ____request_module(wait, NULL, fmt, args);
63410+ va_end(args);
63411+
63412+ return ret;
63413+}
63414+
63415 EXPORT_SYMBOL(__request_module);
63416 #endif /* CONFIG_MODULES */
63417
63418@@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
63419 *
63420 * Thus the __user pointer cast is valid here.
63421 */
63422- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63423+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63424
63425 /*
63426 * If ret is 0, either ____call_usermodehelper failed and the
63427diff -urNp linux-3.0.7/kernel/kprobes.c linux-3.0.7/kernel/kprobes.c
63428--- linux-3.0.7/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
63429+++ linux-3.0.7/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
63430@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63431 * kernel image and loaded module images reside. This is required
63432 * so x86_64 can correctly handle the %rip-relative fixups.
63433 */
63434- kip->insns = module_alloc(PAGE_SIZE);
63435+ kip->insns = module_alloc_exec(PAGE_SIZE);
63436 if (!kip->insns) {
63437 kfree(kip);
63438 return NULL;
63439@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63440 */
63441 if (!list_is_singular(&kip->list)) {
63442 list_del(&kip->list);
63443- module_free(NULL, kip->insns);
63444+ module_free_exec(NULL, kip->insns);
63445 kfree(kip);
63446 }
63447 return 1;
63448@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
63449 {
63450 int i, err = 0;
63451 unsigned long offset = 0, size = 0;
63452- char *modname, namebuf[128];
63453+ char *modname, namebuf[KSYM_NAME_LEN];
63454 const char *symbol_name;
63455 void *addr;
63456 struct kprobe_blackpoint *kb;
63457@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
63458 const char *sym = NULL;
63459 unsigned int i = *(loff_t *) v;
63460 unsigned long offset = 0;
63461- char *modname, namebuf[128];
63462+ char *modname, namebuf[KSYM_NAME_LEN];
63463
63464 head = &kprobe_table[i];
63465 preempt_disable();
63466diff -urNp linux-3.0.7/kernel/lockdep.c linux-3.0.7/kernel/lockdep.c
63467--- linux-3.0.7/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
63468+++ linux-3.0.7/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
63469@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63470 end = (unsigned long) &_end,
63471 addr = (unsigned long) obj;
63472
63473+#ifdef CONFIG_PAX_KERNEXEC
63474+ start = ktla_ktva(start);
63475+#endif
63476+
63477 /*
63478 * static variable?
63479 */
63480@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63481 if (!static_obj(lock->key)) {
63482 debug_locks_off();
63483 printk("INFO: trying to register non-static key.\n");
63484+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63485 printk("the code is fine but needs lockdep annotation.\n");
63486 printk("turning off the locking correctness validator.\n");
63487 dump_stack();
63488@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
63489 if (!class)
63490 return 0;
63491 }
63492- atomic_inc((atomic_t *)&class->ops);
63493+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63494 if (very_verbose(class)) {
63495 printk("\nacquire class [%p] %s", class->key, class->name);
63496 if (class->name_version > 1)
63497diff -urNp linux-3.0.7/kernel/lockdep_proc.c linux-3.0.7/kernel/lockdep_proc.c
63498--- linux-3.0.7/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
63499+++ linux-3.0.7/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
63500@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63501
63502 static void print_name(struct seq_file *m, struct lock_class *class)
63503 {
63504- char str[128];
63505+ char str[KSYM_NAME_LEN];
63506 const char *name = class->name;
63507
63508 if (!name) {
63509diff -urNp linux-3.0.7/kernel/module.c linux-3.0.7/kernel/module.c
63510--- linux-3.0.7/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
63511+++ linux-3.0.7/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
63512@@ -58,6 +58,7 @@
63513 #include <linux/jump_label.h>
63514 #include <linux/pfn.h>
63515 #include <linux/bsearch.h>
63516+#include <linux/grsecurity.h>
63517
63518 #define CREATE_TRACE_POINTS
63519 #include <trace/events/module.h>
63520@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63521
63522 /* Bounds of module allocation, for speeding __module_address.
63523 * Protected by module_mutex. */
63524-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63525+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63526+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63527
63528 int register_module_notifier(struct notifier_block * nb)
63529 {
63530@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63531 return true;
63532
63533 list_for_each_entry_rcu(mod, &modules, list) {
63534- struct symsearch arr[] = {
63535+ struct symsearch modarr[] = {
63536 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63537 NOT_GPL_ONLY, false },
63538 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63539@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63540 #endif
63541 };
63542
63543- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63544+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63545 return true;
63546 }
63547 return false;
63548@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63549 static int percpu_modalloc(struct module *mod,
63550 unsigned long size, unsigned long align)
63551 {
63552- if (align > PAGE_SIZE) {
63553+ if (align-1 >= PAGE_SIZE) {
63554 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63555 mod->name, align, PAGE_SIZE);
63556 align = PAGE_SIZE;
63557@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
63558 */
63559 #ifdef CONFIG_SYSFS
63560
63561-#ifdef CONFIG_KALLSYMS
63562+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63563 static inline bool sect_empty(const Elf_Shdr *sect)
63564 {
63565 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63566@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
63567
63568 static void unset_module_core_ro_nx(struct module *mod)
63569 {
63570- set_page_attributes(mod->module_core + mod->core_text_size,
63571- mod->module_core + mod->core_size,
63572+ set_page_attributes(mod->module_core_rw,
63573+ mod->module_core_rw + mod->core_size_rw,
63574 set_memory_x);
63575- set_page_attributes(mod->module_core,
63576- mod->module_core + mod->core_ro_size,
63577+ set_page_attributes(mod->module_core_rx,
63578+ mod->module_core_rx + mod->core_size_rx,
63579 set_memory_rw);
63580 }
63581
63582 static void unset_module_init_ro_nx(struct module *mod)
63583 {
63584- set_page_attributes(mod->module_init + mod->init_text_size,
63585- mod->module_init + mod->init_size,
63586+ set_page_attributes(mod->module_init_rw,
63587+ mod->module_init_rw + mod->init_size_rw,
63588 set_memory_x);
63589- set_page_attributes(mod->module_init,
63590- mod->module_init + mod->init_ro_size,
63591+ set_page_attributes(mod->module_init_rx,
63592+ mod->module_init_rx + mod->init_size_rx,
63593 set_memory_rw);
63594 }
63595
63596@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
63597
63598 mutex_lock(&module_mutex);
63599 list_for_each_entry_rcu(mod, &modules, list) {
63600- if ((mod->module_core) && (mod->core_text_size)) {
63601- set_page_attributes(mod->module_core,
63602- mod->module_core + mod->core_text_size,
63603+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63604+ set_page_attributes(mod->module_core_rx,
63605+ mod->module_core_rx + mod->core_size_rx,
63606 set_memory_rw);
63607 }
63608- if ((mod->module_init) && (mod->init_text_size)) {
63609- set_page_attributes(mod->module_init,
63610- mod->module_init + mod->init_text_size,
63611+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63612+ set_page_attributes(mod->module_init_rx,
63613+ mod->module_init_rx + mod->init_size_rx,
63614 set_memory_rw);
63615 }
63616 }
63617@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
63618
63619 mutex_lock(&module_mutex);
63620 list_for_each_entry_rcu(mod, &modules, list) {
63621- if ((mod->module_core) && (mod->core_text_size)) {
63622- set_page_attributes(mod->module_core,
63623- mod->module_core + mod->core_text_size,
63624+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63625+ set_page_attributes(mod->module_core_rx,
63626+ mod->module_core_rx + mod->core_size_rx,
63627 set_memory_ro);
63628 }
63629- if ((mod->module_init) && (mod->init_text_size)) {
63630- set_page_attributes(mod->module_init,
63631- mod->module_init + mod->init_text_size,
63632+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63633+ set_page_attributes(mod->module_init_rx,
63634+ mod->module_init_rx + mod->init_size_rx,
63635 set_memory_ro);
63636 }
63637 }
63638@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
63639
63640 /* This may be NULL, but that's OK */
63641 unset_module_init_ro_nx(mod);
63642- module_free(mod, mod->module_init);
63643+ module_free(mod, mod->module_init_rw);
63644+ module_free_exec(mod, mod->module_init_rx);
63645 kfree(mod->args);
63646 percpu_modfree(mod);
63647
63648 /* Free lock-classes: */
63649- lockdep_free_key_range(mod->module_core, mod->core_size);
63650+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63651+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63652
63653 /* Finally, free the core (containing the module structure) */
63654 unset_module_core_ro_nx(mod);
63655- module_free(mod, mod->module_core);
63656+ module_free_exec(mod, mod->module_core_rx);
63657+ module_free(mod, mod->module_core_rw);
63658
63659 #ifdef CONFIG_MPU
63660 update_protections(current->mm);
63661@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
63662 unsigned int i;
63663 int ret = 0;
63664 const struct kernel_symbol *ksym;
63665+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63666+ int is_fs_load = 0;
63667+ int register_filesystem_found = 0;
63668+ char *p;
63669+
63670+ p = strstr(mod->args, "grsec_modharden_fs");
63671+ if (p) {
63672+ char *endptr = p + strlen("grsec_modharden_fs");
63673+ /* copy \0 as well */
63674+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63675+ is_fs_load = 1;
63676+ }
63677+#endif
63678
63679 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63680 const char *name = info->strtab + sym[i].st_name;
63681
63682+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63683+ /* it's a real shame this will never get ripped and copied
63684+ upstream! ;(
63685+ */
63686+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63687+ register_filesystem_found = 1;
63688+#endif
63689+
63690 switch (sym[i].st_shndx) {
63691 case SHN_COMMON:
63692 /* We compiled with -fno-common. These are not
63693@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
63694 ksym = resolve_symbol_wait(mod, info, name);
63695 /* Ok if resolved. */
63696 if (ksym && !IS_ERR(ksym)) {
63697+ pax_open_kernel();
63698 sym[i].st_value = ksym->value;
63699+ pax_close_kernel();
63700 break;
63701 }
63702
63703@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
63704 secbase = (unsigned long)mod_percpu(mod);
63705 else
63706 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63707+ pax_open_kernel();
63708 sym[i].st_value += secbase;
63709+ pax_close_kernel();
63710 break;
63711 }
63712 }
63713
63714+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63715+ if (is_fs_load && !register_filesystem_found) {
63716+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63717+ ret = -EPERM;
63718+ }
63719+#endif
63720+
63721 return ret;
63722 }
63723
63724@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
63725 || s->sh_entsize != ~0UL
63726 || strstarts(sname, ".init"))
63727 continue;
63728- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63729+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63730+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63731+ else
63732+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63733 DEBUGP("\t%s\n", name);
63734 }
63735- switch (m) {
63736- case 0: /* executable */
63737- mod->core_size = debug_align(mod->core_size);
63738- mod->core_text_size = mod->core_size;
63739- break;
63740- case 1: /* RO: text and ro-data */
63741- mod->core_size = debug_align(mod->core_size);
63742- mod->core_ro_size = mod->core_size;
63743- break;
63744- case 3: /* whole core */
63745- mod->core_size = debug_align(mod->core_size);
63746- break;
63747- }
63748 }
63749
63750 DEBUGP("Init section allocation order:\n");
63751@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
63752 || s->sh_entsize != ~0UL
63753 || !strstarts(sname, ".init"))
63754 continue;
63755- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63756- | INIT_OFFSET_MASK);
63757+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63758+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63759+ else
63760+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63761+ s->sh_entsize |= INIT_OFFSET_MASK;
63762 DEBUGP("\t%s\n", sname);
63763 }
63764- switch (m) {
63765- case 0: /* executable */
63766- mod->init_size = debug_align(mod->init_size);
63767- mod->init_text_size = mod->init_size;
63768- break;
63769- case 1: /* RO: text and ro-data */
63770- mod->init_size = debug_align(mod->init_size);
63771- mod->init_ro_size = mod->init_size;
63772- break;
63773- case 3: /* whole init */
63774- mod->init_size = debug_align(mod->init_size);
63775- break;
63776- }
63777 }
63778 }
63779
63780@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
63781
63782 /* Put symbol section at end of init part of module. */
63783 symsect->sh_flags |= SHF_ALLOC;
63784- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63785+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63786 info->index.sym) | INIT_OFFSET_MASK;
63787 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63788
63789@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
63790 }
63791
63792 /* Append room for core symbols at end of core part. */
63793- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63794- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63795+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63796+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63797
63798 /* Put string table section at end of init part of module. */
63799 strsect->sh_flags |= SHF_ALLOC;
63800- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63801+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63802 info->index.str) | INIT_OFFSET_MASK;
63803 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63804
63805 /* Append room for core symbols' strings at end of core part. */
63806- info->stroffs = mod->core_size;
63807+ info->stroffs = mod->core_size_rx;
63808 __set_bit(0, info->strmap);
63809- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63810+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63811 }
63812
63813 static void add_kallsyms(struct module *mod, const struct load_info *info)
63814@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
63815 /* Make sure we get permanent strtab: don't use info->strtab. */
63816 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63817
63818+ pax_open_kernel();
63819+
63820 /* Set types up while we still have access to sections. */
63821 for (i = 0; i < mod->num_symtab; i++)
63822 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63823
63824- mod->core_symtab = dst = mod->module_core + info->symoffs;
63825+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63826 src = mod->symtab;
63827 *dst = *src;
63828 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63829@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
63830 }
63831 mod->core_num_syms = ndst;
63832
63833- mod->core_strtab = s = mod->module_core + info->stroffs;
63834+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63835 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63836 if (test_bit(i, info->strmap))
63837 *++s = mod->strtab[i];
63838+
63839+ pax_close_kernel();
63840 }
63841 #else
63842 static inline void layout_symtab(struct module *mod, struct load_info *info)
63843@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
63844 ddebug_remove_module(debug->modname);
63845 }
63846
63847-static void *module_alloc_update_bounds(unsigned long size)
63848+static void *module_alloc_update_bounds_rw(unsigned long size)
63849 {
63850 void *ret = module_alloc(size);
63851
63852 if (ret) {
63853 mutex_lock(&module_mutex);
63854 /* Update module bounds. */
63855- if ((unsigned long)ret < module_addr_min)
63856- module_addr_min = (unsigned long)ret;
63857- if ((unsigned long)ret + size > module_addr_max)
63858- module_addr_max = (unsigned long)ret + size;
63859+ if ((unsigned long)ret < module_addr_min_rw)
63860+ module_addr_min_rw = (unsigned long)ret;
63861+ if ((unsigned long)ret + size > module_addr_max_rw)
63862+ module_addr_max_rw = (unsigned long)ret + size;
63863+ mutex_unlock(&module_mutex);
63864+ }
63865+ return ret;
63866+}
63867+
63868+static void *module_alloc_update_bounds_rx(unsigned long size)
63869+{
63870+ void *ret = module_alloc_exec(size);
63871+
63872+ if (ret) {
63873+ mutex_lock(&module_mutex);
63874+ /* Update module bounds. */
63875+ if ((unsigned long)ret < module_addr_min_rx)
63876+ module_addr_min_rx = (unsigned long)ret;
63877+ if ((unsigned long)ret + size > module_addr_max_rx)
63878+ module_addr_max_rx = (unsigned long)ret + size;
63879 mutex_unlock(&module_mutex);
63880 }
63881 return ret;
63882@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
63883 void *ptr;
63884
63885 /* Do the allocs. */
63886- ptr = module_alloc_update_bounds(mod->core_size);
63887+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63888 /*
63889 * The pointer to this block is stored in the module structure
63890 * which is inside the block. Just mark it as not being a
63891@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
63892 if (!ptr)
63893 return -ENOMEM;
63894
63895- memset(ptr, 0, mod->core_size);
63896- mod->module_core = ptr;
63897+ memset(ptr, 0, mod->core_size_rw);
63898+ mod->module_core_rw = ptr;
63899
63900- ptr = module_alloc_update_bounds(mod->init_size);
63901+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63902 /*
63903 * The pointer to this block is stored in the module structure
63904 * which is inside the block. This block doesn't need to be
63905 * scanned as it contains data and code that will be freed
63906 * after the module is initialized.
63907 */
63908- kmemleak_ignore(ptr);
63909- if (!ptr && mod->init_size) {
63910- module_free(mod, mod->module_core);
63911+ kmemleak_not_leak(ptr);
63912+ if (!ptr && mod->init_size_rw) {
63913+ module_free(mod, mod->module_core_rw);
63914 return -ENOMEM;
63915 }
63916- memset(ptr, 0, mod->init_size);
63917- mod->module_init = ptr;
63918+ memset(ptr, 0, mod->init_size_rw);
63919+ mod->module_init_rw = ptr;
63920+
63921+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63922+ kmemleak_not_leak(ptr);
63923+ if (!ptr) {
63924+ module_free(mod, mod->module_init_rw);
63925+ module_free(mod, mod->module_core_rw);
63926+ return -ENOMEM;
63927+ }
63928+
63929+ pax_open_kernel();
63930+ memset(ptr, 0, mod->core_size_rx);
63931+ pax_close_kernel();
63932+ mod->module_core_rx = ptr;
63933+
63934+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63935+ kmemleak_not_leak(ptr);
63936+ if (!ptr && mod->init_size_rx) {
63937+ module_free_exec(mod, mod->module_core_rx);
63938+ module_free(mod, mod->module_init_rw);
63939+ module_free(mod, mod->module_core_rw);
63940+ return -ENOMEM;
63941+ }
63942+
63943+ pax_open_kernel();
63944+ memset(ptr, 0, mod->init_size_rx);
63945+ pax_close_kernel();
63946+ mod->module_init_rx = ptr;
63947
63948 /* Transfer each section which specifies SHF_ALLOC */
63949 DEBUGP("final section addresses:\n");
63950@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
63951 if (!(shdr->sh_flags & SHF_ALLOC))
63952 continue;
63953
63954- if (shdr->sh_entsize & INIT_OFFSET_MASK)
63955- dest = mod->module_init
63956- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63957- else
63958- dest = mod->module_core + shdr->sh_entsize;
63959+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
63960+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63961+ dest = mod->module_init_rw
63962+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63963+ else
63964+ dest = mod->module_init_rx
63965+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63966+ } else {
63967+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63968+ dest = mod->module_core_rw + shdr->sh_entsize;
63969+ else
63970+ dest = mod->module_core_rx + shdr->sh_entsize;
63971+ }
63972+
63973+ if (shdr->sh_type != SHT_NOBITS) {
63974+
63975+#ifdef CONFIG_PAX_KERNEXEC
63976+#ifdef CONFIG_X86_64
63977+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
63978+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63979+#endif
63980+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
63981+ pax_open_kernel();
63982+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63983+ pax_close_kernel();
63984+ } else
63985+#endif
63986
63987- if (shdr->sh_type != SHT_NOBITS)
63988 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63989+ }
63990 /* Update sh_addr to point to copy in image. */
63991- shdr->sh_addr = (unsigned long)dest;
63992+
63993+#ifdef CONFIG_PAX_KERNEXEC
63994+ if (shdr->sh_flags & SHF_EXECINSTR)
63995+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
63996+ else
63997+#endif
63998+
63999+ shdr->sh_addr = (unsigned long)dest;
64000 DEBUGP("\t0x%lx %s\n",
64001 shdr->sh_addr, info->secstrings + shdr->sh_name);
64002 }
64003@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
64004 * Do it before processing of module parameters, so the module
64005 * can provide parameter accessor functions of its own.
64006 */
64007- if (mod->module_init)
64008- flush_icache_range((unsigned long)mod->module_init,
64009- (unsigned long)mod->module_init
64010- + mod->init_size);
64011- flush_icache_range((unsigned long)mod->module_core,
64012- (unsigned long)mod->module_core + mod->core_size);
64013+ if (mod->module_init_rx)
64014+ flush_icache_range((unsigned long)mod->module_init_rx,
64015+ (unsigned long)mod->module_init_rx
64016+ + mod->init_size_rx);
64017+ flush_icache_range((unsigned long)mod->module_core_rx,
64018+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64019
64020 set_fs(old_fs);
64021 }
64022@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
64023 {
64024 kfree(info->strmap);
64025 percpu_modfree(mod);
64026- module_free(mod, mod->module_init);
64027- module_free(mod, mod->module_core);
64028+ module_free_exec(mod, mod->module_init_rx);
64029+ module_free_exec(mod, mod->module_core_rx);
64030+ module_free(mod, mod->module_init_rw);
64031+ module_free(mod, mod->module_core_rw);
64032 }
64033
64034 static int post_relocation(struct module *mod, const struct load_info *info)
64035@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
64036 if (err)
64037 goto free_unload;
64038
64039+ /* Now copy in args */
64040+ mod->args = strndup_user(uargs, ~0UL >> 1);
64041+ if (IS_ERR(mod->args)) {
64042+ err = PTR_ERR(mod->args);
64043+ goto free_unload;
64044+ }
64045+
64046 /* Set up MODINFO_ATTR fields */
64047 setup_modinfo(mod, &info);
64048
64049+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64050+ {
64051+ char *p, *p2;
64052+
64053+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64054+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64055+ err = -EPERM;
64056+ goto free_modinfo;
64057+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64058+ p += strlen("grsec_modharden_normal");
64059+ p2 = strstr(p, "_");
64060+ if (p2) {
64061+ *p2 = '\0';
64062+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64063+ *p2 = '_';
64064+ }
64065+ err = -EPERM;
64066+ goto free_modinfo;
64067+ }
64068+ }
64069+#endif
64070+
64071 /* Fix up syms, so that st_value is a pointer to location. */
64072 err = simplify_symbols(mod, &info);
64073 if (err < 0)
64074@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
64075
64076 flush_module_icache(mod);
64077
64078- /* Now copy in args */
64079- mod->args = strndup_user(uargs, ~0UL >> 1);
64080- if (IS_ERR(mod->args)) {
64081- err = PTR_ERR(mod->args);
64082- goto free_arch_cleanup;
64083- }
64084-
64085 /* Mark state as coming so strong_try_module_get() ignores us. */
64086 mod->state = MODULE_STATE_COMING;
64087
64088@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
64089 unlock:
64090 mutex_unlock(&module_mutex);
64091 synchronize_sched();
64092- kfree(mod->args);
64093- free_arch_cleanup:
64094 module_arch_cleanup(mod);
64095 free_modinfo:
64096 free_modinfo(mod);
64097+ kfree(mod->args);
64098 free_unload:
64099 module_unload_free(mod);
64100 free_module:
64101@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
64102 MODULE_STATE_COMING, mod);
64103
64104 /* Set RO and NX regions for core */
64105- set_section_ro_nx(mod->module_core,
64106- mod->core_text_size,
64107- mod->core_ro_size,
64108- mod->core_size);
64109+ set_section_ro_nx(mod->module_core_rx,
64110+ mod->core_size_rx,
64111+ mod->core_size_rx,
64112+ mod->core_size_rx);
64113
64114 /* Set RO and NX regions for init */
64115- set_section_ro_nx(mod->module_init,
64116- mod->init_text_size,
64117- mod->init_ro_size,
64118- mod->init_size);
64119+ set_section_ro_nx(mod->module_init_rx,
64120+ mod->init_size_rx,
64121+ mod->init_size_rx,
64122+ mod->init_size_rx);
64123
64124 do_mod_ctors(mod);
64125 /* Start the module */
64126@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
64127 mod->strtab = mod->core_strtab;
64128 #endif
64129 unset_module_init_ro_nx(mod);
64130- module_free(mod, mod->module_init);
64131- mod->module_init = NULL;
64132- mod->init_size = 0;
64133- mod->init_ro_size = 0;
64134- mod->init_text_size = 0;
64135+ module_free(mod, mod->module_init_rw);
64136+ module_free_exec(mod, mod->module_init_rx);
64137+ mod->module_init_rw = NULL;
64138+ mod->module_init_rx = NULL;
64139+ mod->init_size_rw = 0;
64140+ mod->init_size_rx = 0;
64141 mutex_unlock(&module_mutex);
64142
64143 return 0;
64144@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
64145 unsigned long nextval;
64146
64147 /* At worse, next value is at end of module */
64148- if (within_module_init(addr, mod))
64149- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64150+ if (within_module_init_rx(addr, mod))
64151+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64152+ else if (within_module_init_rw(addr, mod))
64153+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64154+ else if (within_module_core_rx(addr, mod))
64155+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64156+ else if (within_module_core_rw(addr, mod))
64157+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64158 else
64159- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64160+ return NULL;
64161
64162 /* Scan for closest preceding symbol, and next symbol. (ELF
64163 starts real symbols at 1). */
64164@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
64165 char buf[8];
64166
64167 seq_printf(m, "%s %u",
64168- mod->name, mod->init_size + mod->core_size);
64169+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64170 print_unload_info(m, mod);
64171
64172 /* Informative for users. */
64173@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
64174 mod->state == MODULE_STATE_COMING ? "Loading":
64175 "Live");
64176 /* Used by oprofile and other similar tools. */
64177- seq_printf(m, " 0x%pK", mod->module_core);
64178+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64179
64180 /* Taints info */
64181 if (mod->taints)
64182@@ -3283,7 +3406,17 @@ static const struct file_operations proc
64183
64184 static int __init proc_modules_init(void)
64185 {
64186+#ifndef CONFIG_GRKERNSEC_HIDESYM
64187+#ifdef CONFIG_GRKERNSEC_PROC_USER
64188+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64189+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64190+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64191+#else
64192 proc_create("modules", 0, NULL, &proc_modules_operations);
64193+#endif
64194+#else
64195+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64196+#endif
64197 return 0;
64198 }
64199 module_init(proc_modules_init);
64200@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
64201 {
64202 struct module *mod;
64203
64204- if (addr < module_addr_min || addr > module_addr_max)
64205+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64206+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64207 return NULL;
64208
64209 list_for_each_entry_rcu(mod, &modules, list)
64210- if (within_module_core(addr, mod)
64211- || within_module_init(addr, mod))
64212+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64213 return mod;
64214 return NULL;
64215 }
64216@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
64217 */
64218 struct module *__module_text_address(unsigned long addr)
64219 {
64220- struct module *mod = __module_address(addr);
64221+ struct module *mod;
64222+
64223+#ifdef CONFIG_X86_32
64224+ addr = ktla_ktva(addr);
64225+#endif
64226+
64227+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64228+ return NULL;
64229+
64230+ mod = __module_address(addr);
64231+
64232 if (mod) {
64233 /* Make sure it's within the text section. */
64234- if (!within(addr, mod->module_init, mod->init_text_size)
64235- && !within(addr, mod->module_core, mod->core_text_size))
64236+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64237 mod = NULL;
64238 }
64239 return mod;
64240diff -urNp linux-3.0.7/kernel/mutex-debug.c linux-3.0.7/kernel/mutex-debug.c
64241--- linux-3.0.7/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
64242+++ linux-3.0.7/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
64243@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64244 }
64245
64246 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64247- struct thread_info *ti)
64248+ struct task_struct *task)
64249 {
64250 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64251
64252 /* Mark the current thread as blocked on the lock: */
64253- ti->task->blocked_on = waiter;
64254+ task->blocked_on = waiter;
64255 }
64256
64257 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64258- struct thread_info *ti)
64259+ struct task_struct *task)
64260 {
64261 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64262- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64263- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64264- ti->task->blocked_on = NULL;
64265+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64266+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64267+ task->blocked_on = NULL;
64268
64269 list_del_init(&waiter->list);
64270 waiter->task = NULL;
64271diff -urNp linux-3.0.7/kernel/mutex-debug.h linux-3.0.7/kernel/mutex-debug.h
64272--- linux-3.0.7/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
64273+++ linux-3.0.7/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
64274@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64275 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64276 extern void debug_mutex_add_waiter(struct mutex *lock,
64277 struct mutex_waiter *waiter,
64278- struct thread_info *ti);
64279+ struct task_struct *task);
64280 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64281- struct thread_info *ti);
64282+ struct task_struct *task);
64283 extern void debug_mutex_unlock(struct mutex *lock);
64284 extern void debug_mutex_init(struct mutex *lock, const char *name,
64285 struct lock_class_key *key);
64286diff -urNp linux-3.0.7/kernel/mutex.c linux-3.0.7/kernel/mutex.c
64287--- linux-3.0.7/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
64288+++ linux-3.0.7/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
64289@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64290 spin_lock_mutex(&lock->wait_lock, flags);
64291
64292 debug_mutex_lock_common(lock, &waiter);
64293- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64294+ debug_mutex_add_waiter(lock, &waiter, task);
64295
64296 /* add waiting tasks to the end of the waitqueue (FIFO): */
64297 list_add_tail(&waiter.list, &lock->wait_list);
64298@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64299 * TASK_UNINTERRUPTIBLE case.)
64300 */
64301 if (unlikely(signal_pending_state(state, task))) {
64302- mutex_remove_waiter(lock, &waiter,
64303- task_thread_info(task));
64304+ mutex_remove_waiter(lock, &waiter, task);
64305 mutex_release(&lock->dep_map, 1, ip);
64306 spin_unlock_mutex(&lock->wait_lock, flags);
64307
64308@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64309 done:
64310 lock_acquired(&lock->dep_map, ip);
64311 /* got the lock - rejoice! */
64312- mutex_remove_waiter(lock, &waiter, current_thread_info());
64313+ mutex_remove_waiter(lock, &waiter, task);
64314 mutex_set_owner(lock);
64315
64316 /* set it to 0 if there are no waiters left: */
64317diff -urNp linux-3.0.7/kernel/padata.c linux-3.0.7/kernel/padata.c
64318--- linux-3.0.7/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
64319+++ linux-3.0.7/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
64320@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64321 padata->pd = pd;
64322 padata->cb_cpu = cb_cpu;
64323
64324- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64325- atomic_set(&pd->seq_nr, -1);
64326+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64327+ atomic_set_unchecked(&pd->seq_nr, -1);
64328
64329- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64330+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64331
64332 target_cpu = padata_cpu_hash(padata);
64333 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64334@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64335 padata_init_pqueues(pd);
64336 padata_init_squeues(pd);
64337 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64338- atomic_set(&pd->seq_nr, -1);
64339+ atomic_set_unchecked(&pd->seq_nr, -1);
64340 atomic_set(&pd->reorder_objects, 0);
64341 atomic_set(&pd->refcnt, 0);
64342 pd->pinst = pinst;
64343diff -urNp linux-3.0.7/kernel/panic.c linux-3.0.7/kernel/panic.c
64344--- linux-3.0.7/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
64345+++ linux-3.0.7/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
64346@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
64347 const char *board;
64348
64349 printk(KERN_WARNING "------------[ cut here ]------------\n");
64350- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64351+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64352 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64353 if (board)
64354 printk(KERN_WARNING "Hardware name: %s\n", board);
64355@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64356 */
64357 void __stack_chk_fail(void)
64358 {
64359- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64360+ dump_stack();
64361+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64362 __builtin_return_address(0));
64363 }
64364 EXPORT_SYMBOL(__stack_chk_fail);
64365diff -urNp linux-3.0.7/kernel/pid.c linux-3.0.7/kernel/pid.c
64366--- linux-3.0.7/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
64367+++ linux-3.0.7/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
64368@@ -33,6 +33,7 @@
64369 #include <linux/rculist.h>
64370 #include <linux/bootmem.h>
64371 #include <linux/hash.h>
64372+#include <linux/security.h>
64373 #include <linux/pid_namespace.h>
64374 #include <linux/init_task.h>
64375 #include <linux/syscalls.h>
64376@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64377
64378 int pid_max = PID_MAX_DEFAULT;
64379
64380-#define RESERVED_PIDS 300
64381+#define RESERVED_PIDS 500
64382
64383 int pid_max_min = RESERVED_PIDS + 1;
64384 int pid_max_max = PID_MAX_LIMIT;
64385@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
64386 */
64387 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64388 {
64389+ struct task_struct *task;
64390+
64391 rcu_lockdep_assert(rcu_read_lock_held());
64392- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64393+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64394+
64395+ if (gr_pid_is_chrooted(task))
64396+ return NULL;
64397+
64398+ return task;
64399 }
64400
64401 struct task_struct *find_task_by_vpid(pid_t vnr)
64402@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
64403 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64404 }
64405
64406+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64407+{
64408+ rcu_lockdep_assert(rcu_read_lock_held());
64409+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64410+}
64411+
64412 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64413 {
64414 struct pid *pid;
64415diff -urNp linux-3.0.7/kernel/posix-cpu-timers.c linux-3.0.7/kernel/posix-cpu-timers.c
64416--- linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:09.000000000 -0400
64417+++ linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:19.000000000 -0400
64418@@ -6,6 +6,7 @@
64419 #include <linux/posix-timers.h>
64420 #include <linux/errno.h>
64421 #include <linux/math64.h>
64422+#include <linux/security.h>
64423 #include <asm/uaccess.h>
64424 #include <linux/kernel_stat.h>
64425 #include <trace/events/timer.h>
64426@@ -1605,14 +1606,14 @@ struct k_clock clock_posix_cpu = {
64427
64428 static __init int init_posix_cpu_timers(void)
64429 {
64430- struct k_clock process = {
64431+ static struct k_clock process = {
64432 .clock_getres = process_cpu_clock_getres,
64433 .clock_get = process_cpu_clock_get,
64434 .timer_create = process_cpu_timer_create,
64435 .nsleep = process_cpu_nsleep,
64436 .nsleep_restart = process_cpu_nsleep_restart,
64437 };
64438- struct k_clock thread = {
64439+ static struct k_clock thread = {
64440 .clock_getres = thread_cpu_clock_getres,
64441 .clock_get = thread_cpu_clock_get,
64442 .timer_create = thread_cpu_timer_create,
64443diff -urNp linux-3.0.7/kernel/posix-timers.c linux-3.0.7/kernel/posix-timers.c
64444--- linux-3.0.7/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
64445+++ linux-3.0.7/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
64446@@ -43,6 +43,7 @@
64447 #include <linux/idr.h>
64448 #include <linux/posix-clock.h>
64449 #include <linux/posix-timers.h>
64450+#include <linux/grsecurity.h>
64451 #include <linux/syscalls.h>
64452 #include <linux/wait.h>
64453 #include <linux/workqueue.h>
64454@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64455 * which we beg off on and pass to do_sys_settimeofday().
64456 */
64457
64458-static struct k_clock posix_clocks[MAX_CLOCKS];
64459+static struct k_clock *posix_clocks[MAX_CLOCKS];
64460
64461 /*
64462 * These ones are defined below.
64463@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64464 */
64465 static __init int init_posix_timers(void)
64466 {
64467- struct k_clock clock_realtime = {
64468+ static struct k_clock clock_realtime = {
64469 .clock_getres = hrtimer_get_res,
64470 .clock_get = posix_clock_realtime_get,
64471 .clock_set = posix_clock_realtime_set,
64472@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64473 .timer_get = common_timer_get,
64474 .timer_del = common_timer_del,
64475 };
64476- struct k_clock clock_monotonic = {
64477+ static struct k_clock clock_monotonic = {
64478 .clock_getres = hrtimer_get_res,
64479 .clock_get = posix_ktime_get_ts,
64480 .nsleep = common_nsleep,
64481@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64482 .timer_get = common_timer_get,
64483 .timer_del = common_timer_del,
64484 };
64485- struct k_clock clock_monotonic_raw = {
64486+ static struct k_clock clock_monotonic_raw = {
64487 .clock_getres = hrtimer_get_res,
64488 .clock_get = posix_get_monotonic_raw,
64489 };
64490- struct k_clock clock_realtime_coarse = {
64491+ static struct k_clock clock_realtime_coarse = {
64492 .clock_getres = posix_get_coarse_res,
64493 .clock_get = posix_get_realtime_coarse,
64494 };
64495- struct k_clock clock_monotonic_coarse = {
64496+ static struct k_clock clock_monotonic_coarse = {
64497 .clock_getres = posix_get_coarse_res,
64498 .clock_get = posix_get_monotonic_coarse,
64499 };
64500- struct k_clock clock_boottime = {
64501+ static struct k_clock clock_boottime = {
64502 .clock_getres = hrtimer_get_res,
64503 .clock_get = posix_get_boottime,
64504 .nsleep = common_nsleep,
64505@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64506 .timer_del = common_timer_del,
64507 };
64508
64509+ pax_track_stack();
64510+
64511 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64512 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64513 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64514@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64515 return;
64516 }
64517
64518- posix_clocks[clock_id] = *new_clock;
64519+ posix_clocks[clock_id] = new_clock;
64520 }
64521 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64522
64523@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64524 return (id & CLOCKFD_MASK) == CLOCKFD ?
64525 &clock_posix_dynamic : &clock_posix_cpu;
64526
64527- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64528+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64529 return NULL;
64530- return &posix_clocks[id];
64531+ return posix_clocks[id];
64532 }
64533
64534 static int common_timer_create(struct k_itimer *new_timer)
64535@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64536 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64537 return -EFAULT;
64538
64539+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64540+ have their clock_set fptr set to a nosettime dummy function
64541+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64542+ call common_clock_set, which calls do_sys_settimeofday, which
64543+ we hook
64544+ */
64545+
64546 return kc->clock_set(which_clock, &new_tp);
64547 }
64548
64549diff -urNp linux-3.0.7/kernel/power/poweroff.c linux-3.0.7/kernel/power/poweroff.c
64550--- linux-3.0.7/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
64551+++ linux-3.0.7/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
64552@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64553 .enable_mask = SYSRQ_ENABLE_BOOT,
64554 };
64555
64556-static int pm_sysrq_init(void)
64557+static int __init pm_sysrq_init(void)
64558 {
64559 register_sysrq_key('o', &sysrq_poweroff_op);
64560 return 0;
64561diff -urNp linux-3.0.7/kernel/power/process.c linux-3.0.7/kernel/power/process.c
64562--- linux-3.0.7/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
64563+++ linux-3.0.7/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
64564@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64565 u64 elapsed_csecs64;
64566 unsigned int elapsed_csecs;
64567 bool wakeup = false;
64568+ bool timedout = false;
64569
64570 do_gettimeofday(&start);
64571
64572@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64573
64574 while (true) {
64575 todo = 0;
64576+ if (time_after(jiffies, end_time))
64577+ timedout = true;
64578 read_lock(&tasklist_lock);
64579 do_each_thread(g, p) {
64580 if (frozen(p) || !freezable(p))
64581@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64582 * try_to_stop() after schedule() in ptrace/signal
64583 * stop sees TIF_FREEZE.
64584 */
64585- if (!task_is_stopped_or_traced(p) &&
64586- !freezer_should_skip(p))
64587+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64588 todo++;
64589+ if (timedout) {
64590+ printk(KERN_ERR "Task refusing to freeze:\n");
64591+ sched_show_task(p);
64592+ }
64593+ }
64594 } while_each_thread(g, p);
64595 read_unlock(&tasklist_lock);
64596
64597@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64598 todo += wq_busy;
64599 }
64600
64601- if (!todo || time_after(jiffies, end_time))
64602+ if (!todo || timedout)
64603 break;
64604
64605 if (pm_wakeup_pending()) {
64606diff -urNp linux-3.0.7/kernel/printk.c linux-3.0.7/kernel/printk.c
64607--- linux-3.0.7/kernel/printk.c 2011-10-16 21:54:54.000000000 -0400
64608+++ linux-3.0.7/kernel/printk.c 2011-10-16 21:55:28.000000000 -0400
64609@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
64610 if (from_file && type != SYSLOG_ACTION_OPEN)
64611 return 0;
64612
64613+#ifdef CONFIG_GRKERNSEC_DMESG
64614+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64615+ return -EPERM;
64616+#endif
64617+
64618 if (syslog_action_restricted(type)) {
64619 if (capable(CAP_SYSLOG))
64620 return 0;
64621 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
64622 if (capable(CAP_SYS_ADMIN)) {
64623- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
64624+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
64625 "but no CAP_SYSLOG (deprecated).\n");
64626 return 0;
64627 }
64628diff -urNp linux-3.0.7/kernel/profile.c linux-3.0.7/kernel/profile.c
64629--- linux-3.0.7/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
64630+++ linux-3.0.7/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
64631@@ -39,7 +39,7 @@ struct profile_hit {
64632 /* Oprofile timer tick hook */
64633 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64634
64635-static atomic_t *prof_buffer;
64636+static atomic_unchecked_t *prof_buffer;
64637 static unsigned long prof_len, prof_shift;
64638
64639 int prof_on __read_mostly;
64640@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64641 hits[i].pc = 0;
64642 continue;
64643 }
64644- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64645+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64646 hits[i].hits = hits[i].pc = 0;
64647 }
64648 }
64649@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64650 * Add the current hit(s) and flush the write-queue out
64651 * to the global buffer:
64652 */
64653- atomic_add(nr_hits, &prof_buffer[pc]);
64654+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64655 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64656- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64657+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64658 hits[i].pc = hits[i].hits = 0;
64659 }
64660 out:
64661@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64662 {
64663 unsigned long pc;
64664 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64665- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64666+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64667 }
64668 #endif /* !CONFIG_SMP */
64669
64670@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64671 return -EFAULT;
64672 buf++; p++; count--; read++;
64673 }
64674- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64675+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64676 if (copy_to_user(buf, (void *)pnt, count))
64677 return -EFAULT;
64678 read += count;
64679@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64680 }
64681 #endif
64682 profile_discard_flip_buffers();
64683- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64684+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64685 return count;
64686 }
64687
64688diff -urNp linux-3.0.7/kernel/ptrace.c linux-3.0.7/kernel/ptrace.c
64689--- linux-3.0.7/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
64690+++ linux-3.0.7/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
64691@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
64692 return ret;
64693 }
64694
64695-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64696+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64697+ unsigned int log)
64698 {
64699 const struct cred *cred = current_cred(), *tcred;
64700
64701@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
64702 cred->gid == tcred->sgid &&
64703 cred->gid == tcred->gid))
64704 goto ok;
64705- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64706+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64707+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64708 goto ok;
64709 rcu_read_unlock();
64710 return -EPERM;
64711@@ -167,7 +169,9 @@ ok:
64712 smp_rmb();
64713 if (task->mm)
64714 dumpable = get_dumpable(task->mm);
64715- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64716+ if (!dumpable &&
64717+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64718+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64719 return -EPERM;
64720
64721 return security_ptrace_access_check(task, mode);
64722@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
64723 {
64724 int err;
64725 task_lock(task);
64726- err = __ptrace_may_access(task, mode);
64727+ err = __ptrace_may_access(task, mode, 0);
64728+ task_unlock(task);
64729+ return !err;
64730+}
64731+
64732+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64733+{
64734+ int err;
64735+ task_lock(task);
64736+ err = __ptrace_may_access(task, mode, 1);
64737 task_unlock(task);
64738 return !err;
64739 }
64740@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
64741 goto out;
64742
64743 task_lock(task);
64744- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64745+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64746 task_unlock(task);
64747 if (retval)
64748 goto unlock_creds;
64749@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
64750 goto unlock_tasklist;
64751
64752 task->ptrace = PT_PTRACED;
64753- if (task_ns_capable(task, CAP_SYS_PTRACE))
64754+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64755 task->ptrace |= PT_PTRACE_CAP;
64756
64757 __ptrace_link(task, current);
64758@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
64759 {
64760 int copied = 0;
64761
64762+ pax_track_stack();
64763+
64764 while (len > 0) {
64765 char buf[128];
64766 int this_len, retval;
64767@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
64768 break;
64769 return -EIO;
64770 }
64771- if (copy_to_user(dst, buf, retval))
64772+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64773 return -EFAULT;
64774 copied += retval;
64775 src += retval;
64776@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
64777 {
64778 int copied = 0;
64779
64780+ pax_track_stack();
64781+
64782 while (len > 0) {
64783 char buf[128];
64784 int this_len, retval;
64785@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
64786 {
64787 int ret = -EIO;
64788 siginfo_t siginfo;
64789- void __user *datavp = (void __user *) data;
64790+ void __user *datavp = (__force void __user *) data;
64791 unsigned long __user *datalp = datavp;
64792
64793+ pax_track_stack();
64794+
64795 switch (request) {
64796 case PTRACE_PEEKTEXT:
64797 case PTRACE_PEEKDATA:
64798@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64799 goto out;
64800 }
64801
64802+ if (gr_handle_ptrace(child, request)) {
64803+ ret = -EPERM;
64804+ goto out_put_task_struct;
64805+ }
64806+
64807 if (request == PTRACE_ATTACH) {
64808 ret = ptrace_attach(child);
64809 /*
64810 * Some architectures need to do book-keeping after
64811 * a ptrace attach.
64812 */
64813- if (!ret)
64814+ if (!ret) {
64815 arch_ptrace_attach(child);
64816+ gr_audit_ptrace(child);
64817+ }
64818 goto out_put_task_struct;
64819 }
64820
64821@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
64822 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64823 if (copied != sizeof(tmp))
64824 return -EIO;
64825- return put_user(tmp, (unsigned long __user *)data);
64826+ return put_user(tmp, (__force unsigned long __user *)data);
64827 }
64828
64829 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
64830@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
64831 siginfo_t siginfo;
64832 int ret;
64833
64834+ pax_track_stack();
64835+
64836 switch (request) {
64837 case PTRACE_PEEKTEXT:
64838 case PTRACE_PEEKDATA:
64839@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
64840 goto out;
64841 }
64842
64843+ if (gr_handle_ptrace(child, request)) {
64844+ ret = -EPERM;
64845+ goto out_put_task_struct;
64846+ }
64847+
64848 if (request == PTRACE_ATTACH) {
64849 ret = ptrace_attach(child);
64850 /*
64851 * Some architectures need to do book-keeping after
64852 * a ptrace attach.
64853 */
64854- if (!ret)
64855+ if (!ret) {
64856 arch_ptrace_attach(child);
64857+ gr_audit_ptrace(child);
64858+ }
64859 goto out_put_task_struct;
64860 }
64861
64862diff -urNp linux-3.0.7/kernel/rcutorture.c linux-3.0.7/kernel/rcutorture.c
64863--- linux-3.0.7/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
64864+++ linux-3.0.7/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
64865@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64866 { 0 };
64867 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64868 { 0 };
64869-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64870-static atomic_t n_rcu_torture_alloc;
64871-static atomic_t n_rcu_torture_alloc_fail;
64872-static atomic_t n_rcu_torture_free;
64873-static atomic_t n_rcu_torture_mberror;
64874-static atomic_t n_rcu_torture_error;
64875+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64876+static atomic_unchecked_t n_rcu_torture_alloc;
64877+static atomic_unchecked_t n_rcu_torture_alloc_fail;
64878+static atomic_unchecked_t n_rcu_torture_free;
64879+static atomic_unchecked_t n_rcu_torture_mberror;
64880+static atomic_unchecked_t n_rcu_torture_error;
64881 static long n_rcu_torture_boost_ktrerror;
64882 static long n_rcu_torture_boost_rterror;
64883 static long n_rcu_torture_boost_failure;
64884@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
64885
64886 spin_lock_bh(&rcu_torture_lock);
64887 if (list_empty(&rcu_torture_freelist)) {
64888- atomic_inc(&n_rcu_torture_alloc_fail);
64889+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64890 spin_unlock_bh(&rcu_torture_lock);
64891 return NULL;
64892 }
64893- atomic_inc(&n_rcu_torture_alloc);
64894+ atomic_inc_unchecked(&n_rcu_torture_alloc);
64895 p = rcu_torture_freelist.next;
64896 list_del_init(p);
64897 spin_unlock_bh(&rcu_torture_lock);
64898@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
64899 static void
64900 rcu_torture_free(struct rcu_torture *p)
64901 {
64902- atomic_inc(&n_rcu_torture_free);
64903+ atomic_inc_unchecked(&n_rcu_torture_free);
64904 spin_lock_bh(&rcu_torture_lock);
64905 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64906 spin_unlock_bh(&rcu_torture_lock);
64907@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
64908 i = rp->rtort_pipe_count;
64909 if (i > RCU_TORTURE_PIPE_LEN)
64910 i = RCU_TORTURE_PIPE_LEN;
64911- atomic_inc(&rcu_torture_wcount[i]);
64912+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64913 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64914 rp->rtort_mbtest = 0;
64915 rcu_torture_free(rp);
64916@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
64917 i = rp->rtort_pipe_count;
64918 if (i > RCU_TORTURE_PIPE_LEN)
64919 i = RCU_TORTURE_PIPE_LEN;
64920- atomic_inc(&rcu_torture_wcount[i]);
64921+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64922 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64923 rp->rtort_mbtest = 0;
64924 list_del(&rp->rtort_free);
64925@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
64926 i = old_rp->rtort_pipe_count;
64927 if (i > RCU_TORTURE_PIPE_LEN)
64928 i = RCU_TORTURE_PIPE_LEN;
64929- atomic_inc(&rcu_torture_wcount[i]);
64930+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64931 old_rp->rtort_pipe_count++;
64932 cur_ops->deferred_free(old_rp);
64933 }
64934@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
64935 return;
64936 }
64937 if (p->rtort_mbtest == 0)
64938- atomic_inc(&n_rcu_torture_mberror);
64939+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64940 spin_lock(&rand_lock);
64941 cur_ops->read_delay(&rand);
64942 n_rcu_torture_timers++;
64943@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
64944 continue;
64945 }
64946 if (p->rtort_mbtest == 0)
64947- atomic_inc(&n_rcu_torture_mberror);
64948+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64949 cur_ops->read_delay(&rand);
64950 preempt_disable();
64951 pipe_count = p->rtort_pipe_count;
64952@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
64953 rcu_torture_current,
64954 rcu_torture_current_version,
64955 list_empty(&rcu_torture_freelist),
64956- atomic_read(&n_rcu_torture_alloc),
64957- atomic_read(&n_rcu_torture_alloc_fail),
64958- atomic_read(&n_rcu_torture_free),
64959- atomic_read(&n_rcu_torture_mberror),
64960+ atomic_read_unchecked(&n_rcu_torture_alloc),
64961+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64962+ atomic_read_unchecked(&n_rcu_torture_free),
64963+ atomic_read_unchecked(&n_rcu_torture_mberror),
64964 n_rcu_torture_boost_ktrerror,
64965 n_rcu_torture_boost_rterror,
64966 n_rcu_torture_boost_failure,
64967 n_rcu_torture_boosts,
64968 n_rcu_torture_timers);
64969- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
64970+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
64971 n_rcu_torture_boost_ktrerror != 0 ||
64972 n_rcu_torture_boost_rterror != 0 ||
64973 n_rcu_torture_boost_failure != 0)
64974@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
64975 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64976 if (i > 1) {
64977 cnt += sprintf(&page[cnt], "!!! ");
64978- atomic_inc(&n_rcu_torture_error);
64979+ atomic_inc_unchecked(&n_rcu_torture_error);
64980 WARN_ON_ONCE(1);
64981 }
64982 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64983@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
64984 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
64985 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64986 cnt += sprintf(&page[cnt], " %d",
64987- atomic_read(&rcu_torture_wcount[i]));
64988+ atomic_read_unchecked(&rcu_torture_wcount[i]));
64989 }
64990 cnt += sprintf(&page[cnt], "\n");
64991 if (cur_ops->stats)
64992@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
64993
64994 if (cur_ops->cleanup)
64995 cur_ops->cleanup();
64996- if (atomic_read(&n_rcu_torture_error))
64997+ if (atomic_read_unchecked(&n_rcu_torture_error))
64998 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
64999 else
65000 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65001@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
65002
65003 rcu_torture_current = NULL;
65004 rcu_torture_current_version = 0;
65005- atomic_set(&n_rcu_torture_alloc, 0);
65006- atomic_set(&n_rcu_torture_alloc_fail, 0);
65007- atomic_set(&n_rcu_torture_free, 0);
65008- atomic_set(&n_rcu_torture_mberror, 0);
65009- atomic_set(&n_rcu_torture_error, 0);
65010+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65011+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65012+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65013+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65014+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65015 n_rcu_torture_boost_ktrerror = 0;
65016 n_rcu_torture_boost_rterror = 0;
65017 n_rcu_torture_boost_failure = 0;
65018 n_rcu_torture_boosts = 0;
65019 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65020- atomic_set(&rcu_torture_wcount[i], 0);
65021+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65022 for_each_possible_cpu(cpu) {
65023 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65024 per_cpu(rcu_torture_count, cpu)[i] = 0;
65025diff -urNp linux-3.0.7/kernel/rcutree.c linux-3.0.7/kernel/rcutree.c
65026--- linux-3.0.7/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
65027+++ linux-3.0.7/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
65028@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65029 }
65030 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65031 smp_mb__before_atomic_inc(); /* See above. */
65032- atomic_inc(&rdtp->dynticks);
65033+ atomic_inc_unchecked(&rdtp->dynticks);
65034 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65035- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65036+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65037 local_irq_restore(flags);
65038
65039 /* If the interrupt queued a callback, get out of dyntick mode. */
65040@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65041 return;
65042 }
65043 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65044- atomic_inc(&rdtp->dynticks);
65045+ atomic_inc_unchecked(&rdtp->dynticks);
65046 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65047 smp_mb__after_atomic_inc(); /* See above. */
65048- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65049+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65050 local_irq_restore(flags);
65051 }
65052
65053@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65054 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65055
65056 if (rdtp->dynticks_nmi_nesting == 0 &&
65057- (atomic_read(&rdtp->dynticks) & 0x1))
65058+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65059 return;
65060 rdtp->dynticks_nmi_nesting++;
65061 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65062- atomic_inc(&rdtp->dynticks);
65063+ atomic_inc_unchecked(&rdtp->dynticks);
65064 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65065 smp_mb__after_atomic_inc(); /* See above. */
65066- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65067+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65068 }
65069
65070 /**
65071@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65072 return;
65073 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65074 smp_mb__before_atomic_inc(); /* See above. */
65075- atomic_inc(&rdtp->dynticks);
65076+ atomic_inc_unchecked(&rdtp->dynticks);
65077 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65078- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65079+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65080 }
65081
65082 /**
65083@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65084 */
65085 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65086 {
65087- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65088+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65089 return 0;
65090 }
65091
65092@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65093 unsigned long curr;
65094 unsigned long snap;
65095
65096- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65097+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65098 snap = (unsigned long)rdp->dynticks_snap;
65099
65100 /*
65101@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65102 /*
65103 * Do softirq processing for the current CPU.
65104 */
65105-static void rcu_process_callbacks(struct softirq_action *unused)
65106+static void rcu_process_callbacks(void)
65107 {
65108 __rcu_process_callbacks(&rcu_sched_state,
65109 &__get_cpu_var(rcu_sched_data));
65110diff -urNp linux-3.0.7/kernel/rcutree.h linux-3.0.7/kernel/rcutree.h
65111--- linux-3.0.7/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
65112+++ linux-3.0.7/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
65113@@ -86,7 +86,7 @@
65114 struct rcu_dynticks {
65115 int dynticks_nesting; /* Track irq/process nesting level. */
65116 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65117- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65118+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65119 };
65120
65121 /* RCU's kthread states for tracing. */
65122diff -urNp linux-3.0.7/kernel/rcutree_plugin.h linux-3.0.7/kernel/rcutree_plugin.h
65123--- linux-3.0.7/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
65124+++ linux-3.0.7/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
65125@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65126
65127 /* Clean up and exit. */
65128 smp_mb(); /* ensure expedited GP seen before counter increment. */
65129- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65130+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65131 unlock_mb_ret:
65132 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65133 mb_ret:
65134@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65135
65136 #else /* #ifndef CONFIG_SMP */
65137
65138-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65139-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65140+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65141+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65142
65143 static int synchronize_sched_expedited_cpu_stop(void *data)
65144 {
65145@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65146 int firstsnap, s, snap, trycount = 0;
65147
65148 /* Note that atomic_inc_return() implies full memory barrier. */
65149- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65150+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65151 get_online_cpus();
65152
65153 /*
65154@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65155 }
65156
65157 /* Check to see if someone else did our work for us. */
65158- s = atomic_read(&sync_sched_expedited_done);
65159+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65160 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65161 smp_mb(); /* ensure test happens before caller kfree */
65162 return;
65163@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65164 * grace period works for us.
65165 */
65166 get_online_cpus();
65167- snap = atomic_read(&sync_sched_expedited_started) - 1;
65168+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65169 smp_mb(); /* ensure read is before try_stop_cpus(). */
65170 }
65171
65172@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65173 * than we did beat us to the punch.
65174 */
65175 do {
65176- s = atomic_read(&sync_sched_expedited_done);
65177+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65178 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65179 smp_mb(); /* ensure test happens before caller kfree */
65180 break;
65181 }
65182- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65183+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65184
65185 put_online_cpus();
65186 }
65187diff -urNp linux-3.0.7/kernel/relay.c linux-3.0.7/kernel/relay.c
65188--- linux-3.0.7/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
65189+++ linux-3.0.7/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
65190@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65191 };
65192 ssize_t ret;
65193
65194+ pax_track_stack();
65195+
65196 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65197 return 0;
65198 if (splice_grow_spd(pipe, &spd))
65199diff -urNp linux-3.0.7/kernel/resource.c linux-3.0.7/kernel/resource.c
65200--- linux-3.0.7/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
65201+++ linux-3.0.7/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
65202@@ -141,8 +141,18 @@ static const struct file_operations proc
65203
65204 static int __init ioresources_init(void)
65205 {
65206+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65207+#ifdef CONFIG_GRKERNSEC_PROC_USER
65208+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65209+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65210+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65211+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65212+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65213+#endif
65214+#else
65215 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65216 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65217+#endif
65218 return 0;
65219 }
65220 __initcall(ioresources_init);
65221diff -urNp linux-3.0.7/kernel/rtmutex-tester.c linux-3.0.7/kernel/rtmutex-tester.c
65222--- linux-3.0.7/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
65223+++ linux-3.0.7/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
65224@@ -20,7 +20,7 @@
65225 #define MAX_RT_TEST_MUTEXES 8
65226
65227 static spinlock_t rttest_lock;
65228-static atomic_t rttest_event;
65229+static atomic_unchecked_t rttest_event;
65230
65231 struct test_thread_data {
65232 int opcode;
65233@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65234
65235 case RTTEST_LOCKCONT:
65236 td->mutexes[td->opdata] = 1;
65237- td->event = atomic_add_return(1, &rttest_event);
65238+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65239 return 0;
65240
65241 case RTTEST_RESET:
65242@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65243 return 0;
65244
65245 case RTTEST_RESETEVENT:
65246- atomic_set(&rttest_event, 0);
65247+ atomic_set_unchecked(&rttest_event, 0);
65248 return 0;
65249
65250 default:
65251@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65252 return ret;
65253
65254 td->mutexes[id] = 1;
65255- td->event = atomic_add_return(1, &rttest_event);
65256+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65257 rt_mutex_lock(&mutexes[id]);
65258- td->event = atomic_add_return(1, &rttest_event);
65259+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65260 td->mutexes[id] = 4;
65261 return 0;
65262
65263@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65264 return ret;
65265
65266 td->mutexes[id] = 1;
65267- td->event = atomic_add_return(1, &rttest_event);
65268+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65269 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65270- td->event = atomic_add_return(1, &rttest_event);
65271+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65272 td->mutexes[id] = ret ? 0 : 4;
65273 return ret ? -EINTR : 0;
65274
65275@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65276 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65277 return ret;
65278
65279- td->event = atomic_add_return(1, &rttest_event);
65280+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65281 rt_mutex_unlock(&mutexes[id]);
65282- td->event = atomic_add_return(1, &rttest_event);
65283+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65284 td->mutexes[id] = 0;
65285 return 0;
65286
65287@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65288 break;
65289
65290 td->mutexes[dat] = 2;
65291- td->event = atomic_add_return(1, &rttest_event);
65292+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65293 break;
65294
65295 default:
65296@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65297 return;
65298
65299 td->mutexes[dat] = 3;
65300- td->event = atomic_add_return(1, &rttest_event);
65301+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65302 break;
65303
65304 case RTTEST_LOCKNOWAIT:
65305@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65306 return;
65307
65308 td->mutexes[dat] = 1;
65309- td->event = atomic_add_return(1, &rttest_event);
65310+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65311 return;
65312
65313 default:
65314diff -urNp linux-3.0.7/kernel/sched.c linux-3.0.7/kernel/sched.c
65315--- linux-3.0.7/kernel/sched.c 2011-10-17 23:17:09.000000000 -0400
65316+++ linux-3.0.7/kernel/sched.c 2011-10-17 23:17:19.000000000 -0400
65317@@ -4227,6 +4227,8 @@ static void __sched __schedule(void)
65318 struct rq *rq;
65319 int cpu;
65320
65321+ pax_track_stack();
65322+
65323 need_resched:
65324 preempt_disable();
65325 cpu = smp_processor_id();
65326@@ -4920,6 +4922,8 @@ int can_nice(const struct task_struct *p
65327 /* convert nice value [19,-20] to rlimit style value [1,40] */
65328 int nice_rlim = 20 - nice;
65329
65330+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65331+
65332 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65333 capable(CAP_SYS_NICE));
65334 }
65335@@ -4953,7 +4957,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65336 if (nice > 19)
65337 nice = 19;
65338
65339- if (increment < 0 && !can_nice(current, nice))
65340+ if (increment < 0 && (!can_nice(current, nice) ||
65341+ gr_handle_chroot_nice()))
65342 return -EPERM;
65343
65344 retval = security_task_setnice(current, nice);
65345@@ -5097,6 +5102,7 @@ recheck:
65346 unsigned long rlim_rtprio =
65347 task_rlimit(p, RLIMIT_RTPRIO);
65348
65349+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65350 /* can't set/change the rt policy */
65351 if (policy != p->policy && !rlim_rtprio)
65352 return -EPERM;
65353diff -urNp linux-3.0.7/kernel/sched_autogroup.c linux-3.0.7/kernel/sched_autogroup.c
65354--- linux-3.0.7/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
65355+++ linux-3.0.7/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
65356@@ -7,7 +7,7 @@
65357
65358 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65359 static struct autogroup autogroup_default;
65360-static atomic_t autogroup_seq_nr;
65361+static atomic_unchecked_t autogroup_seq_nr;
65362
65363 static void __init autogroup_init(struct task_struct *init_task)
65364 {
65365@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65366
65367 kref_init(&ag->kref);
65368 init_rwsem(&ag->lock);
65369- ag->id = atomic_inc_return(&autogroup_seq_nr);
65370+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65371 ag->tg = tg;
65372 #ifdef CONFIG_RT_GROUP_SCHED
65373 /*
65374diff -urNp linux-3.0.7/kernel/sched_fair.c linux-3.0.7/kernel/sched_fair.c
65375--- linux-3.0.7/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
65376+++ linux-3.0.7/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
65377@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
65378 * run_rebalance_domains is triggered when needed from the scheduler tick.
65379 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65380 */
65381-static void run_rebalance_domains(struct softirq_action *h)
65382+static void run_rebalance_domains(void)
65383 {
65384 int this_cpu = smp_processor_id();
65385 struct rq *this_rq = cpu_rq(this_cpu);
65386diff -urNp linux-3.0.7/kernel/signal.c linux-3.0.7/kernel/signal.c
65387--- linux-3.0.7/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
65388+++ linux-3.0.7/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
65389@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65390
65391 int print_fatal_signals __read_mostly;
65392
65393-static void __user *sig_handler(struct task_struct *t, int sig)
65394+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65395 {
65396 return t->sighand->action[sig - 1].sa.sa_handler;
65397 }
65398
65399-static int sig_handler_ignored(void __user *handler, int sig)
65400+static int sig_handler_ignored(__sighandler_t handler, int sig)
65401 {
65402 /* Is it explicitly or implicitly ignored? */
65403 return handler == SIG_IGN ||
65404@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65405 static int sig_task_ignored(struct task_struct *t, int sig,
65406 int from_ancestor_ns)
65407 {
65408- void __user *handler;
65409+ __sighandler_t handler;
65410
65411 handler = sig_handler(t, sig);
65412
65413@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
65414 atomic_inc(&user->sigpending);
65415 rcu_read_unlock();
65416
65417+ if (!override_rlimit)
65418+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65419+
65420 if (override_rlimit ||
65421 atomic_read(&user->sigpending) <=
65422 task_rlimit(t, RLIMIT_SIGPENDING)) {
65423@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
65424
65425 int unhandled_signal(struct task_struct *tsk, int sig)
65426 {
65427- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65428+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65429 if (is_global_init(tsk))
65430 return 1;
65431 if (handler != SIG_IGN && handler != SIG_DFL)
65432@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
65433 }
65434 }
65435
65436+ /* allow glibc communication via tgkill to other threads in our
65437+ thread group */
65438+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65439+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65440+ && gr_handle_signal(t, sig))
65441+ return -EPERM;
65442+
65443 return security_task_kill(t, info, sig, 0);
65444 }
65445
65446@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
65447 return send_signal(sig, info, p, 1);
65448 }
65449
65450-static int
65451+int
65452 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65453 {
65454 return send_signal(sig, info, t, 0);
65455@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
65456 unsigned long int flags;
65457 int ret, blocked, ignored;
65458 struct k_sigaction *action;
65459+ int is_unhandled = 0;
65460
65461 spin_lock_irqsave(&t->sighand->siglock, flags);
65462 action = &t->sighand->action[sig-1];
65463@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
65464 }
65465 if (action->sa.sa_handler == SIG_DFL)
65466 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65467+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65468+ is_unhandled = 1;
65469 ret = specific_send_sig_info(sig, info, t);
65470 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65471
65472+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65473+ normal operation */
65474+ if (is_unhandled) {
65475+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65476+ gr_handle_crash(t, sig);
65477+ }
65478+
65479 return ret;
65480 }
65481
65482@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
65483 ret = check_kill_permission(sig, info, p);
65484 rcu_read_unlock();
65485
65486- if (!ret && sig)
65487+ if (!ret && sig) {
65488 ret = do_send_sig_info(sig, info, p, true);
65489+ if (!ret)
65490+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65491+ }
65492
65493 return ret;
65494 }
65495@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
65496 {
65497 siginfo_t info;
65498
65499+ pax_track_stack();
65500+
65501 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65502
65503 memset(&info, 0, sizeof info);
65504@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65505 int error = -ESRCH;
65506
65507 rcu_read_lock();
65508- p = find_task_by_vpid(pid);
65509+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65510+ /* allow glibc communication via tgkill to other threads in our
65511+ thread group */
65512+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65513+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65514+ p = find_task_by_vpid_unrestricted(pid);
65515+ else
65516+#endif
65517+ p = find_task_by_vpid(pid);
65518 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65519 error = check_kill_permission(sig, info, p);
65520 /*
65521diff -urNp linux-3.0.7/kernel/smp.c linux-3.0.7/kernel/smp.c
65522--- linux-3.0.7/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
65523+++ linux-3.0.7/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
65524@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65525 }
65526 EXPORT_SYMBOL(smp_call_function);
65527
65528-void ipi_call_lock(void)
65529+void ipi_call_lock(void) __acquires(call_function.lock)
65530 {
65531 raw_spin_lock(&call_function.lock);
65532 }
65533
65534-void ipi_call_unlock(void)
65535+void ipi_call_unlock(void) __releases(call_function.lock)
65536 {
65537 raw_spin_unlock(&call_function.lock);
65538 }
65539
65540-void ipi_call_lock_irq(void)
65541+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65542 {
65543 raw_spin_lock_irq(&call_function.lock);
65544 }
65545
65546-void ipi_call_unlock_irq(void)
65547+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65548 {
65549 raw_spin_unlock_irq(&call_function.lock);
65550 }
65551diff -urNp linux-3.0.7/kernel/softirq.c linux-3.0.7/kernel/softirq.c
65552--- linux-3.0.7/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
65553+++ linux-3.0.7/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
65554@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65555
65556 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65557
65558-char *softirq_to_name[NR_SOFTIRQS] = {
65559+const char * const softirq_to_name[NR_SOFTIRQS] = {
65560 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65561 "TASKLET", "SCHED", "HRTIMER", "RCU"
65562 };
65563@@ -235,7 +235,7 @@ restart:
65564 kstat_incr_softirqs_this_cpu(vec_nr);
65565
65566 trace_softirq_entry(vec_nr);
65567- h->action(h);
65568+ h->action();
65569 trace_softirq_exit(vec_nr);
65570 if (unlikely(prev_count != preempt_count())) {
65571 printk(KERN_ERR "huh, entered softirq %u %s %p"
65572@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65573 local_irq_restore(flags);
65574 }
65575
65576-void open_softirq(int nr, void (*action)(struct softirq_action *))
65577+void open_softirq(int nr, void (*action)(void))
65578 {
65579- softirq_vec[nr].action = action;
65580+ pax_open_kernel();
65581+ *(void **)&softirq_vec[nr].action = action;
65582+ pax_close_kernel();
65583 }
65584
65585 /*
65586@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65587
65588 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65589
65590-static void tasklet_action(struct softirq_action *a)
65591+static void tasklet_action(void)
65592 {
65593 struct tasklet_struct *list;
65594
65595@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65596 }
65597 }
65598
65599-static void tasklet_hi_action(struct softirq_action *a)
65600+static void tasklet_hi_action(void)
65601 {
65602 struct tasklet_struct *list;
65603
65604diff -urNp linux-3.0.7/kernel/sys.c linux-3.0.7/kernel/sys.c
65605--- linux-3.0.7/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
65606+++ linux-3.0.7/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
65607@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
65608 error = -EACCES;
65609 goto out;
65610 }
65611+
65612+ if (gr_handle_chroot_setpriority(p, niceval)) {
65613+ error = -EACCES;
65614+ goto out;
65615+ }
65616+
65617 no_nice = security_task_setnice(p, niceval);
65618 if (no_nice) {
65619 error = no_nice;
65620@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65621 goto error;
65622 }
65623
65624+ if (gr_check_group_change(new->gid, new->egid, -1))
65625+ goto error;
65626+
65627 if (rgid != (gid_t) -1 ||
65628 (egid != (gid_t) -1 && egid != old->gid))
65629 new->sgid = new->egid;
65630@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65631 old = current_cred();
65632
65633 retval = -EPERM;
65634+
65635+ if (gr_check_group_change(gid, gid, gid))
65636+ goto error;
65637+
65638 if (nsown_capable(CAP_SETGID))
65639 new->gid = new->egid = new->sgid = new->fsgid = gid;
65640 else if (gid == old->gid || gid == old->sgid)
65641@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
65642 if (!new_user)
65643 return -EAGAIN;
65644
65645+ /*
65646+ * We don't fail in case of NPROC limit excess here because too many
65647+ * poorly written programs don't check set*uid() return code, assuming
65648+ * it never fails if called by root. We may still enforce NPROC limit
65649+ * for programs doing set*uid()+execve() by harmlessly deferring the
65650+ * failure to the execve() stage.
65651+ */
65652 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
65653- new_user != INIT_USER) {
65654- free_uid(new_user);
65655- return -EAGAIN;
65656- }
65657+ new_user != INIT_USER)
65658+ current->flags |= PF_NPROC_EXCEEDED;
65659+ else
65660+ current->flags &= ~PF_NPROC_EXCEEDED;
65661
65662 free_uid(new->user);
65663 new->user = new_user;
65664@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65665 goto error;
65666 }
65667
65668+ if (gr_check_user_change(new->uid, new->euid, -1))
65669+ goto error;
65670+
65671 if (new->uid != old->uid) {
65672 retval = set_user(new);
65673 if (retval < 0)
65674@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65675 old = current_cred();
65676
65677 retval = -EPERM;
65678+
65679+ if (gr_check_crash_uid(uid))
65680+ goto error;
65681+ if (gr_check_user_change(uid, uid, uid))
65682+ goto error;
65683+
65684 if (nsown_capable(CAP_SETUID)) {
65685 new->suid = new->uid = uid;
65686 if (uid != old->uid) {
65687@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65688 goto error;
65689 }
65690
65691+ if (gr_check_user_change(ruid, euid, -1))
65692+ goto error;
65693+
65694 if (ruid != (uid_t) -1) {
65695 new->uid = ruid;
65696 if (ruid != old->uid) {
65697@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65698 goto error;
65699 }
65700
65701+ if (gr_check_group_change(rgid, egid, -1))
65702+ goto error;
65703+
65704 if (rgid != (gid_t) -1)
65705 new->gid = rgid;
65706 if (egid != (gid_t) -1)
65707@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65708 old = current_cred();
65709 old_fsuid = old->fsuid;
65710
65711+ if (gr_check_user_change(-1, -1, uid))
65712+ goto error;
65713+
65714 if (uid == old->uid || uid == old->euid ||
65715 uid == old->suid || uid == old->fsuid ||
65716 nsown_capable(CAP_SETUID)) {
65717@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65718 }
65719 }
65720
65721+error:
65722 abort_creds(new);
65723 return old_fsuid;
65724
65725@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65726 if (gid == old->gid || gid == old->egid ||
65727 gid == old->sgid || gid == old->fsgid ||
65728 nsown_capable(CAP_SETGID)) {
65729+ if (gr_check_group_change(-1, -1, gid))
65730+ goto error;
65731+
65732 if (gid != old_fsgid) {
65733 new->fsgid = gid;
65734 goto change_okay;
65735 }
65736 }
65737
65738+error:
65739 abort_creds(new);
65740 return old_fsgid;
65741
65742@@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65743 return -EFAULT;
65744
65745 down_read(&uts_sem);
65746- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65747+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65748 __OLD_UTS_LEN);
65749 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65750- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65751+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65752 __OLD_UTS_LEN);
65753 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65754- error |= __copy_to_user(&name->release, &utsname()->release,
65755+ error |= __copy_to_user(name->release, &utsname()->release,
65756 __OLD_UTS_LEN);
65757 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65758- error |= __copy_to_user(&name->version, &utsname()->version,
65759+ error |= __copy_to_user(name->version, &utsname()->version,
65760 __OLD_UTS_LEN);
65761 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65762- error |= __copy_to_user(&name->machine, &utsname()->machine,
65763+ error |= __copy_to_user(name->machine, &utsname()->machine,
65764 __OLD_UTS_LEN);
65765 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65766 up_read(&uts_sem);
65767@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65768 error = get_dumpable(me->mm);
65769 break;
65770 case PR_SET_DUMPABLE:
65771- if (arg2 < 0 || arg2 > 1) {
65772+ if (arg2 > 1) {
65773 error = -EINVAL;
65774 break;
65775 }
65776diff -urNp linux-3.0.7/kernel/sysctl.c linux-3.0.7/kernel/sysctl.c
65777--- linux-3.0.7/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
65778+++ linux-3.0.7/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
65779@@ -85,6 +85,13 @@
65780
65781
65782 #if defined(CONFIG_SYSCTL)
65783+#include <linux/grsecurity.h>
65784+#include <linux/grinternal.h>
65785+
65786+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65787+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65788+ const int op);
65789+extern int gr_handle_chroot_sysctl(const int op);
65790
65791 /* External variables not in a header file. */
65792 extern int sysctl_overcommit_memory;
65793@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
65794 }
65795
65796 #endif
65797+extern struct ctl_table grsecurity_table[];
65798
65799 static struct ctl_table root_table[];
65800 static struct ctl_table_root sysctl_table_root;
65801@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
65802 int sysctl_legacy_va_layout;
65803 #endif
65804
65805+#ifdef CONFIG_PAX_SOFTMODE
65806+static ctl_table pax_table[] = {
65807+ {
65808+ .procname = "softmode",
65809+ .data = &pax_softmode,
65810+ .maxlen = sizeof(unsigned int),
65811+ .mode = 0600,
65812+ .proc_handler = &proc_dointvec,
65813+ },
65814+
65815+ { }
65816+};
65817+#endif
65818+
65819 /* The default sysctl tables: */
65820
65821 static struct ctl_table root_table[] = {
65822@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
65823 #endif
65824
65825 static struct ctl_table kern_table[] = {
65826+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65827+ {
65828+ .procname = "grsecurity",
65829+ .mode = 0500,
65830+ .child = grsecurity_table,
65831+ },
65832+#endif
65833+
65834+#ifdef CONFIG_PAX_SOFTMODE
65835+ {
65836+ .procname = "pax",
65837+ .mode = 0500,
65838+ .child = pax_table,
65839+ },
65840+#endif
65841+
65842 {
65843 .procname = "sched_child_runs_first",
65844 .data = &sysctl_sched_child_runs_first,
65845@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
65846 .data = &modprobe_path,
65847 .maxlen = KMOD_PATH_LEN,
65848 .mode = 0644,
65849- .proc_handler = proc_dostring,
65850+ .proc_handler = proc_dostring_modpriv,
65851 },
65852 {
65853 .procname = "modules_disabled",
65854@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
65855 .extra1 = &zero,
65856 .extra2 = &one,
65857 },
65858+#endif
65859 {
65860 .procname = "kptr_restrict",
65861 .data = &kptr_restrict,
65862 .maxlen = sizeof(int),
65863 .mode = 0644,
65864 .proc_handler = proc_dmesg_restrict,
65865+#ifdef CONFIG_GRKERNSEC_HIDESYM
65866+ .extra1 = &two,
65867+#else
65868 .extra1 = &zero,
65869+#endif
65870 .extra2 = &two,
65871 },
65872-#endif
65873 {
65874 .procname = "ngroups_max",
65875 .data = &ngroups_max,
65876@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
65877 .proc_handler = proc_dointvec_minmax,
65878 .extra1 = &zero,
65879 },
65880+ {
65881+ .procname = "heap_stack_gap",
65882+ .data = &sysctl_heap_stack_gap,
65883+ .maxlen = sizeof(sysctl_heap_stack_gap),
65884+ .mode = 0644,
65885+ .proc_handler = proc_doulongvec_minmax,
65886+ },
65887 #else
65888 {
65889 .procname = "nr_trim_pages",
65890@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
65891 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
65892 {
65893 int mode;
65894+ int error;
65895+
65896+ if (table->parent != NULL && table->parent->procname != NULL &&
65897+ table->procname != NULL &&
65898+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65899+ return -EACCES;
65900+ if (gr_handle_chroot_sysctl(op))
65901+ return -EACCES;
65902+ error = gr_handle_sysctl(table, op);
65903+ if (error)
65904+ return error;
65905
65906 if (root->permissions)
65907 mode = root->permissions(root, current->nsproxy, table);
65908@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
65909 buffer, lenp, ppos);
65910 }
65911
65912+int proc_dostring_modpriv(struct ctl_table *table, int write,
65913+ void __user *buffer, size_t *lenp, loff_t *ppos)
65914+{
65915+ if (write && !capable(CAP_SYS_MODULE))
65916+ return -EPERM;
65917+
65918+ return _proc_do_string(table->data, table->maxlen, write,
65919+ buffer, lenp, ppos);
65920+}
65921+
65922 static size_t proc_skip_spaces(char **buf)
65923 {
65924 size_t ret;
65925@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
65926 len = strlen(tmp);
65927 if (len > *size)
65928 len = *size;
65929+ if (len > sizeof(tmp))
65930+ len = sizeof(tmp);
65931 if (copy_to_user(*buf, tmp, len))
65932 return -EFAULT;
65933 *size -= len;
65934@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
65935 *i = val;
65936 } else {
65937 val = convdiv * (*i) / convmul;
65938- if (!first)
65939+ if (!first) {
65940 err = proc_put_char(&buffer, &left, '\t');
65941+ if (err)
65942+ break;
65943+ }
65944 err = proc_put_long(&buffer, &left, val, false);
65945 if (err)
65946 break;
65947@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
65948 return -ENOSYS;
65949 }
65950
65951+int proc_dostring_modpriv(struct ctl_table *table, int write,
65952+ void __user *buffer, size_t *lenp, loff_t *ppos)
65953+{
65954+ return -ENOSYS;
65955+}
65956+
65957 int proc_dointvec(struct ctl_table *table, int write,
65958 void __user *buffer, size_t *lenp, loff_t *ppos)
65959 {
65960@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65961 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65962 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65963 EXPORT_SYMBOL(proc_dostring);
65964+EXPORT_SYMBOL(proc_dostring_modpriv);
65965 EXPORT_SYMBOL(proc_doulongvec_minmax);
65966 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65967 EXPORT_SYMBOL(register_sysctl_table);
65968diff -urNp linux-3.0.7/kernel/sysctl_binary.c linux-3.0.7/kernel/sysctl_binary.c
65969--- linux-3.0.7/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
65970+++ linux-3.0.7/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
65971@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65972 int i;
65973
65974 set_fs(KERNEL_DS);
65975- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65976+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65977 set_fs(old_fs);
65978 if (result < 0)
65979 goto out_kfree;
65980@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65981 }
65982
65983 set_fs(KERNEL_DS);
65984- result = vfs_write(file, buffer, str - buffer, &pos);
65985+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65986 set_fs(old_fs);
65987 if (result < 0)
65988 goto out_kfree;
65989@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65990 int i;
65991
65992 set_fs(KERNEL_DS);
65993- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65994+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65995 set_fs(old_fs);
65996 if (result < 0)
65997 goto out_kfree;
65998@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65999 }
66000
66001 set_fs(KERNEL_DS);
66002- result = vfs_write(file, buffer, str - buffer, &pos);
66003+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66004 set_fs(old_fs);
66005 if (result < 0)
66006 goto out_kfree;
66007@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
66008 int i;
66009
66010 set_fs(KERNEL_DS);
66011- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66012+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66013 set_fs(old_fs);
66014 if (result < 0)
66015 goto out;
66016@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
66017 __le16 dnaddr;
66018
66019 set_fs(KERNEL_DS);
66020- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66021+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66022 set_fs(old_fs);
66023 if (result < 0)
66024 goto out;
66025@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
66026 le16_to_cpu(dnaddr) & 0x3ff);
66027
66028 set_fs(KERNEL_DS);
66029- result = vfs_write(file, buf, len, &pos);
66030+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66031 set_fs(old_fs);
66032 if (result < 0)
66033 goto out;
66034diff -urNp linux-3.0.7/kernel/sysctl_check.c linux-3.0.7/kernel/sysctl_check.c
66035--- linux-3.0.7/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
66036+++ linux-3.0.7/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
66037@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66038 set_fail(&fail, table, "Directory with extra2");
66039 } else {
66040 if ((table->proc_handler == proc_dostring) ||
66041+ (table->proc_handler == proc_dostring_modpriv) ||
66042 (table->proc_handler == proc_dointvec) ||
66043 (table->proc_handler == proc_dointvec_minmax) ||
66044 (table->proc_handler == proc_dointvec_jiffies) ||
66045diff -urNp linux-3.0.7/kernel/taskstats.c linux-3.0.7/kernel/taskstats.c
66046--- linux-3.0.7/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
66047+++ linux-3.0.7/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
66048@@ -27,9 +27,12 @@
66049 #include <linux/cgroup.h>
66050 #include <linux/fs.h>
66051 #include <linux/file.h>
66052+#include <linux/grsecurity.h>
66053 #include <net/genetlink.h>
66054 #include <asm/atomic.h>
66055
66056+extern int gr_is_taskstats_denied(int pid);
66057+
66058 /*
66059 * Maximum length of a cpumask that can be specified in
66060 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66061@@ -558,6 +561,9 @@ err:
66062
66063 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66064 {
66065+ if (gr_is_taskstats_denied(current->pid))
66066+ return -EACCES;
66067+
66068 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66069 return cmd_attr_register_cpumask(info);
66070 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66071diff -urNp linux-3.0.7/kernel/time/alarmtimer.c linux-3.0.7/kernel/time/alarmtimer.c
66072--- linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:54:54.000000000 -0400
66073+++ linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:55:28.000000000 -0400
66074@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66075 {
66076 int error = 0;
66077 int i;
66078- struct k_clock alarm_clock = {
66079+ static struct k_clock alarm_clock = {
66080 .clock_getres = alarm_clock_getres,
66081 .clock_get = alarm_clock_get,
66082 .timer_create = alarm_timer_create,
66083diff -urNp linux-3.0.7/kernel/time/tick-broadcast.c linux-3.0.7/kernel/time/tick-broadcast.c
66084--- linux-3.0.7/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
66085+++ linux-3.0.7/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
66086@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66087 * then clear the broadcast bit.
66088 */
66089 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66090- int cpu = smp_processor_id();
66091+ cpu = smp_processor_id();
66092
66093 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66094 tick_broadcast_clear_oneshot(cpu);
66095diff -urNp linux-3.0.7/kernel/time/timekeeping.c linux-3.0.7/kernel/time/timekeeping.c
66096--- linux-3.0.7/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
66097+++ linux-3.0.7/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
66098@@ -14,6 +14,7 @@
66099 #include <linux/init.h>
66100 #include <linux/mm.h>
66101 #include <linux/sched.h>
66102+#include <linux/grsecurity.h>
66103 #include <linux/syscore_ops.h>
66104 #include <linux/clocksource.h>
66105 #include <linux/jiffies.h>
66106@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66107 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66108 return -EINVAL;
66109
66110+ gr_log_timechange();
66111+
66112 write_seqlock_irqsave(&xtime_lock, flags);
66113
66114 timekeeping_forward_now();
66115diff -urNp linux-3.0.7/kernel/time/timer_list.c linux-3.0.7/kernel/time/timer_list.c
66116--- linux-3.0.7/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
66117+++ linux-3.0.7/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
66118@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66119
66120 static void print_name_offset(struct seq_file *m, void *sym)
66121 {
66122+#ifdef CONFIG_GRKERNSEC_HIDESYM
66123+ SEQ_printf(m, "<%p>", NULL);
66124+#else
66125 char symname[KSYM_NAME_LEN];
66126
66127 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66128 SEQ_printf(m, "<%pK>", sym);
66129 else
66130 SEQ_printf(m, "%s", symname);
66131+#endif
66132 }
66133
66134 static void
66135@@ -112,7 +116,11 @@ next_one:
66136 static void
66137 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66138 {
66139+#ifdef CONFIG_GRKERNSEC_HIDESYM
66140+ SEQ_printf(m, " .base: %p\n", NULL);
66141+#else
66142 SEQ_printf(m, " .base: %pK\n", base);
66143+#endif
66144 SEQ_printf(m, " .index: %d\n",
66145 base->index);
66146 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66147@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66148 {
66149 struct proc_dir_entry *pe;
66150
66151+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66152+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66153+#else
66154 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66155+#endif
66156 if (!pe)
66157 return -ENOMEM;
66158 return 0;
66159diff -urNp linux-3.0.7/kernel/time/timer_stats.c linux-3.0.7/kernel/time/timer_stats.c
66160--- linux-3.0.7/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
66161+++ linux-3.0.7/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
66162@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66163 static unsigned long nr_entries;
66164 static struct entry entries[MAX_ENTRIES];
66165
66166-static atomic_t overflow_count;
66167+static atomic_unchecked_t overflow_count;
66168
66169 /*
66170 * The entries are in a hash-table, for fast lookup:
66171@@ -140,7 +140,7 @@ static void reset_entries(void)
66172 nr_entries = 0;
66173 memset(entries, 0, sizeof(entries));
66174 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66175- atomic_set(&overflow_count, 0);
66176+ atomic_set_unchecked(&overflow_count, 0);
66177 }
66178
66179 static struct entry *alloc_entry(void)
66180@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66181 if (likely(entry))
66182 entry->count++;
66183 else
66184- atomic_inc(&overflow_count);
66185+ atomic_inc_unchecked(&overflow_count);
66186
66187 out_unlock:
66188 raw_spin_unlock_irqrestore(lock, flags);
66189@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66190
66191 static void print_name_offset(struct seq_file *m, unsigned long addr)
66192 {
66193+#ifdef CONFIG_GRKERNSEC_HIDESYM
66194+ seq_printf(m, "<%p>", NULL);
66195+#else
66196 char symname[KSYM_NAME_LEN];
66197
66198 if (lookup_symbol_name(addr, symname) < 0)
66199 seq_printf(m, "<%p>", (void *)addr);
66200 else
66201 seq_printf(m, "%s", symname);
66202+#endif
66203 }
66204
66205 static int tstats_show(struct seq_file *m, void *v)
66206@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66207
66208 seq_puts(m, "Timer Stats Version: v0.2\n");
66209 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66210- if (atomic_read(&overflow_count))
66211+ if (atomic_read_unchecked(&overflow_count))
66212 seq_printf(m, "Overflow: %d entries\n",
66213- atomic_read(&overflow_count));
66214+ atomic_read_unchecked(&overflow_count));
66215
66216 for (i = 0; i < nr_entries; i++) {
66217 entry = entries + i;
66218@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66219 {
66220 struct proc_dir_entry *pe;
66221
66222+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66223+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66224+#else
66225 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66226+#endif
66227 if (!pe)
66228 return -ENOMEM;
66229 return 0;
66230diff -urNp linux-3.0.7/kernel/time.c linux-3.0.7/kernel/time.c
66231--- linux-3.0.7/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
66232+++ linux-3.0.7/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
66233@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66234 return error;
66235
66236 if (tz) {
66237+ /* we log in do_settimeofday called below, so don't log twice
66238+ */
66239+ if (!tv)
66240+ gr_log_timechange();
66241+
66242 /* SMP safe, global irq locking makes it work. */
66243 sys_tz = *tz;
66244 update_vsyscall_tz();
66245diff -urNp linux-3.0.7/kernel/timer.c linux-3.0.7/kernel/timer.c
66246--- linux-3.0.7/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
66247+++ linux-3.0.7/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
66248@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66249 /*
66250 * This function runs timers and the timer-tq in bottom half context.
66251 */
66252-static void run_timer_softirq(struct softirq_action *h)
66253+static void run_timer_softirq(void)
66254 {
66255 struct tvec_base *base = __this_cpu_read(tvec_bases);
66256
66257diff -urNp linux-3.0.7/kernel/trace/blktrace.c linux-3.0.7/kernel/trace/blktrace.c
66258--- linux-3.0.7/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
66259+++ linux-3.0.7/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
66260@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
66261 struct blk_trace *bt = filp->private_data;
66262 char buf[16];
66263
66264- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66265+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66266
66267 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66268 }
66269@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
66270 return 1;
66271
66272 bt = buf->chan->private_data;
66273- atomic_inc(&bt->dropped);
66274+ atomic_inc_unchecked(&bt->dropped);
66275 return 0;
66276 }
66277
66278@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
66279
66280 bt->dir = dir;
66281 bt->dev = dev;
66282- atomic_set(&bt->dropped, 0);
66283+ atomic_set_unchecked(&bt->dropped, 0);
66284
66285 ret = -EIO;
66286 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66287diff -urNp linux-3.0.7/kernel/trace/ftrace.c linux-3.0.7/kernel/trace/ftrace.c
66288--- linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:09.000000000 -0400
66289+++ linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:19.000000000 -0400
66290@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
66291 if (unlikely(ftrace_disabled))
66292 return 0;
66293
66294+ ret = ftrace_arch_code_modify_prepare();
66295+ FTRACE_WARN_ON(ret);
66296+ if (ret)
66297+ return 0;
66298+
66299 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66300+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66301 if (ret) {
66302 ftrace_bug(ret, ip);
66303- return 0;
66304 }
66305- return 1;
66306+ return ret ? 0 : 1;
66307 }
66308
66309 /*
66310@@ -2570,7 +2575,7 @@ static void ftrace_free_entry_rcu(struct
66311
66312 int
66313 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66314- void *data)
66315+ void *data)
66316 {
66317 struct ftrace_func_probe *entry;
66318 struct ftrace_page *pg;
66319diff -urNp linux-3.0.7/kernel/trace/trace.c linux-3.0.7/kernel/trace/trace.c
66320--- linux-3.0.7/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
66321+++ linux-3.0.7/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
66322@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
66323 size_t rem;
66324 unsigned int i;
66325
66326+ pax_track_stack();
66327+
66328 if (splice_grow_spd(pipe, &spd))
66329 return -ENOMEM;
66330
66331@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
66332 int entries, size, i;
66333 size_t ret;
66334
66335+ pax_track_stack();
66336+
66337 if (splice_grow_spd(pipe, &spd))
66338 return -ENOMEM;
66339
66340@@ -3990,10 +3994,9 @@ static const struct file_operations trac
66341 };
66342 #endif
66343
66344-static struct dentry *d_tracer;
66345-
66346 struct dentry *tracing_init_dentry(void)
66347 {
66348+ static struct dentry *d_tracer;
66349 static int once;
66350
66351 if (d_tracer)
66352@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
66353 return d_tracer;
66354 }
66355
66356-static struct dentry *d_percpu;
66357-
66358 struct dentry *tracing_dentry_percpu(void)
66359 {
66360+ static struct dentry *d_percpu;
66361 static int once;
66362 struct dentry *d_tracer;
66363
66364diff -urNp linux-3.0.7/kernel/trace/trace_events.c linux-3.0.7/kernel/trace/trace_events.c
66365--- linux-3.0.7/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
66366+++ linux-3.0.7/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
66367@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
66368 struct ftrace_module_file_ops {
66369 struct list_head list;
66370 struct module *mod;
66371- struct file_operations id;
66372- struct file_operations enable;
66373- struct file_operations format;
66374- struct file_operations filter;
66375 };
66376
66377 static struct ftrace_module_file_ops *
66378@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
66379
66380 file_ops->mod = mod;
66381
66382- file_ops->id = ftrace_event_id_fops;
66383- file_ops->id.owner = mod;
66384-
66385- file_ops->enable = ftrace_enable_fops;
66386- file_ops->enable.owner = mod;
66387-
66388- file_ops->filter = ftrace_event_filter_fops;
66389- file_ops->filter.owner = mod;
66390-
66391- file_ops->format = ftrace_event_format_fops;
66392- file_ops->format.owner = mod;
66393+ pax_open_kernel();
66394+ *(void **)&mod->trace_id.owner = mod;
66395+ *(void **)&mod->trace_enable.owner = mod;
66396+ *(void **)&mod->trace_filter.owner = mod;
66397+ *(void **)&mod->trace_format.owner = mod;
66398+ pax_close_kernel();
66399
66400 list_add(&file_ops->list, &ftrace_module_file_list);
66401
66402@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
66403
66404 for_each_event(call, start, end) {
66405 __trace_add_event_call(*call, mod,
66406- &file_ops->id, &file_ops->enable,
66407- &file_ops->filter, &file_ops->format);
66408+ &mod->trace_id, &mod->trace_enable,
66409+ &mod->trace_filter, &mod->trace_format);
66410 }
66411 }
66412
66413diff -urNp linux-3.0.7/kernel/trace/trace_kprobe.c linux-3.0.7/kernel/trace/trace_kprobe.c
66414--- linux-3.0.7/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
66415+++ linux-3.0.7/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
66416@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66417 long ret;
66418 int maxlen = get_rloc_len(*(u32 *)dest);
66419 u8 *dst = get_rloc_data(dest);
66420- u8 *src = addr;
66421+ const u8 __user *src = (const u8 __force_user *)addr;
66422 mm_segment_t old_fs = get_fs();
66423 if (!maxlen)
66424 return;
66425@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66426 pagefault_disable();
66427 do
66428 ret = __copy_from_user_inatomic(dst++, src++, 1);
66429- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66430+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66431 dst[-1] = '\0';
66432 pagefault_enable();
66433 set_fs(old_fs);
66434@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66435 ((u8 *)get_rloc_data(dest))[0] = '\0';
66436 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66437 } else
66438- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66439+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66440 get_rloc_offs(*(u32 *)dest));
66441 }
66442 /* Return the length of string -- including null terminal byte */
66443@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66444 set_fs(KERNEL_DS);
66445 pagefault_disable();
66446 do {
66447- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66448+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66449 len++;
66450 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66451 pagefault_enable();
66452diff -urNp linux-3.0.7/kernel/trace/trace_mmiotrace.c linux-3.0.7/kernel/trace/trace_mmiotrace.c
66453--- linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
66454+++ linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
66455@@ -24,7 +24,7 @@ struct header_iter {
66456 static struct trace_array *mmio_trace_array;
66457 static bool overrun_detected;
66458 static unsigned long prev_overruns;
66459-static atomic_t dropped_count;
66460+static atomic_unchecked_t dropped_count;
66461
66462 static void mmio_reset_data(struct trace_array *tr)
66463 {
66464@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66465
66466 static unsigned long count_overruns(struct trace_iterator *iter)
66467 {
66468- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66469+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66470 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66471
66472 if (over > prev_overruns)
66473@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66474 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66475 sizeof(*entry), 0, pc);
66476 if (!event) {
66477- atomic_inc(&dropped_count);
66478+ atomic_inc_unchecked(&dropped_count);
66479 return;
66480 }
66481 entry = ring_buffer_event_data(event);
66482@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66483 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66484 sizeof(*entry), 0, pc);
66485 if (!event) {
66486- atomic_inc(&dropped_count);
66487+ atomic_inc_unchecked(&dropped_count);
66488 return;
66489 }
66490 entry = ring_buffer_event_data(event);
66491diff -urNp linux-3.0.7/kernel/trace/trace_output.c linux-3.0.7/kernel/trace/trace_output.c
66492--- linux-3.0.7/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
66493+++ linux-3.0.7/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
66494@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66495
66496 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66497 if (!IS_ERR(p)) {
66498- p = mangle_path(s->buffer + s->len, p, "\n");
66499+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66500 if (p) {
66501 s->len = p - s->buffer;
66502 return 1;
66503diff -urNp linux-3.0.7/kernel/trace/trace_stack.c linux-3.0.7/kernel/trace/trace_stack.c
66504--- linux-3.0.7/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
66505+++ linux-3.0.7/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
66506@@ -50,7 +50,7 @@ static inline void check_stack(void)
66507 return;
66508
66509 /* we do not handle interrupt stacks yet */
66510- if (!object_is_on_stack(&this_size))
66511+ if (!object_starts_on_stack(&this_size))
66512 return;
66513
66514 local_irq_save(flags);
66515diff -urNp linux-3.0.7/kernel/trace/trace_workqueue.c linux-3.0.7/kernel/trace/trace_workqueue.c
66516--- linux-3.0.7/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
66517+++ linux-3.0.7/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
66518@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66519 int cpu;
66520 pid_t pid;
66521 /* Can be inserted from interrupt or user context, need to be atomic */
66522- atomic_t inserted;
66523+ atomic_unchecked_t inserted;
66524 /*
66525 * Don't need to be atomic, works are serialized in a single workqueue thread
66526 * on a single CPU.
66527@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66528 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66529 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66530 if (node->pid == wq_thread->pid) {
66531- atomic_inc(&node->inserted);
66532+ atomic_inc_unchecked(&node->inserted);
66533 goto found;
66534 }
66535 }
66536@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66537 tsk = get_pid_task(pid, PIDTYPE_PID);
66538 if (tsk) {
66539 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66540- atomic_read(&cws->inserted), cws->executed,
66541+ atomic_read_unchecked(&cws->inserted), cws->executed,
66542 tsk->comm);
66543 put_task_struct(tsk);
66544 }
66545diff -urNp linux-3.0.7/lib/Kconfig.debug linux-3.0.7/lib/Kconfig.debug
66546--- linux-3.0.7/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
66547+++ linux-3.0.7/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
66548@@ -1088,6 +1088,7 @@ config LATENCYTOP
66549 depends on DEBUG_KERNEL
66550 depends on STACKTRACE_SUPPORT
66551 depends on PROC_FS
66552+ depends on !GRKERNSEC_HIDESYM
66553 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66554 select KALLSYMS
66555 select KALLSYMS_ALL
66556diff -urNp linux-3.0.7/lib/bitmap.c linux-3.0.7/lib/bitmap.c
66557--- linux-3.0.7/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
66558+++ linux-3.0.7/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
66559@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
66560 {
66561 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66562 u32 chunk;
66563- const char __user *ubuf = buf;
66564+ const char __user *ubuf = (const char __force_user *)buf;
66565
66566 bitmap_zero(maskp, nmaskbits);
66567
66568@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
66569 {
66570 if (!access_ok(VERIFY_READ, ubuf, ulen))
66571 return -EFAULT;
66572- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66573+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66574 }
66575 EXPORT_SYMBOL(bitmap_parse_user);
66576
66577@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
66578 {
66579 unsigned a, b;
66580 int c, old_c, totaldigits;
66581- const char __user *ubuf = buf;
66582+ const char __user *ubuf = (const char __force_user *)buf;
66583 int exp_digit, in_range;
66584
66585 totaldigits = c = 0;
66586@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
66587 {
66588 if (!access_ok(VERIFY_READ, ubuf, ulen))
66589 return -EFAULT;
66590- return __bitmap_parselist((const char *)ubuf,
66591+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66592 ulen, 1, maskp, nmaskbits);
66593 }
66594 EXPORT_SYMBOL(bitmap_parselist_user);
66595diff -urNp linux-3.0.7/lib/bug.c linux-3.0.7/lib/bug.c
66596--- linux-3.0.7/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
66597+++ linux-3.0.7/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
66598@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66599 return BUG_TRAP_TYPE_NONE;
66600
66601 bug = find_bug(bugaddr);
66602+ if (!bug)
66603+ return BUG_TRAP_TYPE_NONE;
66604
66605 file = NULL;
66606 line = 0;
66607diff -urNp linux-3.0.7/lib/debugobjects.c linux-3.0.7/lib/debugobjects.c
66608--- linux-3.0.7/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
66609+++ linux-3.0.7/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
66610@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66611 if (limit > 4)
66612 return;
66613
66614- is_on_stack = object_is_on_stack(addr);
66615+ is_on_stack = object_starts_on_stack(addr);
66616 if (is_on_stack == onstack)
66617 return;
66618
66619diff -urNp linux-3.0.7/lib/devres.c linux-3.0.7/lib/devres.c
66620--- linux-3.0.7/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
66621+++ linux-3.0.7/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
66622@@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
66623 {
66624 iounmap(addr);
66625 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66626- (void *)addr));
66627+ (void __force *)addr));
66628 }
66629 EXPORT_SYMBOL(devm_iounmap);
66630
66631@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66632 {
66633 ioport_unmap(addr);
66634 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66635- devm_ioport_map_match, (void *)addr));
66636+ devm_ioport_map_match, (void __force *)addr));
66637 }
66638 EXPORT_SYMBOL(devm_ioport_unmap);
66639
66640diff -urNp linux-3.0.7/lib/dma-debug.c linux-3.0.7/lib/dma-debug.c
66641--- linux-3.0.7/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
66642+++ linux-3.0.7/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
66643@@ -870,7 +870,7 @@ out:
66644
66645 static void check_for_stack(struct device *dev, void *addr)
66646 {
66647- if (object_is_on_stack(addr))
66648+ if (object_starts_on_stack(addr))
66649 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66650 "stack [addr=%p]\n", addr);
66651 }
66652diff -urNp linux-3.0.7/lib/extable.c linux-3.0.7/lib/extable.c
66653--- linux-3.0.7/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
66654+++ linux-3.0.7/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
66655@@ -13,6 +13,7 @@
66656 #include <linux/init.h>
66657 #include <linux/sort.h>
66658 #include <asm/uaccess.h>
66659+#include <asm/pgtable.h>
66660
66661 #ifndef ARCH_HAS_SORT_EXTABLE
66662 /*
66663@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66664 void sort_extable(struct exception_table_entry *start,
66665 struct exception_table_entry *finish)
66666 {
66667+ pax_open_kernel();
66668 sort(start, finish - start, sizeof(struct exception_table_entry),
66669 cmp_ex, NULL);
66670+ pax_close_kernel();
66671 }
66672
66673 #ifdef CONFIG_MODULES
66674diff -urNp linux-3.0.7/lib/inflate.c linux-3.0.7/lib/inflate.c
66675--- linux-3.0.7/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
66676+++ linux-3.0.7/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
66677@@ -269,7 +269,7 @@ static void free(void *where)
66678 malloc_ptr = free_mem_ptr;
66679 }
66680 #else
66681-#define malloc(a) kmalloc(a, GFP_KERNEL)
66682+#define malloc(a) kmalloc((a), GFP_KERNEL)
66683 #define free(a) kfree(a)
66684 #endif
66685
66686diff -urNp linux-3.0.7/lib/kref.c linux-3.0.7/lib/kref.c
66687--- linux-3.0.7/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
66688+++ linux-3.0.7/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
66689@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66690 */
66691 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66692 {
66693- WARN_ON(release == NULL);
66694+ BUG_ON(release == NULL);
66695 WARN_ON(release == (void (*)(struct kref *))kfree);
66696
66697 if (atomic_dec_and_test(&kref->refcount)) {
66698diff -urNp linux-3.0.7/lib/radix-tree.c linux-3.0.7/lib/radix-tree.c
66699--- linux-3.0.7/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
66700+++ linux-3.0.7/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
66701@@ -80,7 +80,7 @@ struct radix_tree_preload {
66702 int nr;
66703 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66704 };
66705-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66706+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66707
66708 static inline void *ptr_to_indirect(void *ptr)
66709 {
66710diff -urNp linux-3.0.7/lib/vsprintf.c linux-3.0.7/lib/vsprintf.c
66711--- linux-3.0.7/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
66712+++ linux-3.0.7/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
66713@@ -16,6 +16,9 @@
66714 * - scnprintf and vscnprintf
66715 */
66716
66717+#ifdef CONFIG_GRKERNSEC_HIDESYM
66718+#define __INCLUDED_BY_HIDESYM 1
66719+#endif
66720 #include <stdarg.h>
66721 #include <linux/module.h>
66722 #include <linux/types.h>
66723@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
66724 char sym[KSYM_SYMBOL_LEN];
66725 if (ext == 'B')
66726 sprint_backtrace(sym, value);
66727- else if (ext != 'f' && ext != 's')
66728+ else if (ext != 'f' && ext != 's' && ext != 'a')
66729 sprint_symbol(sym, value);
66730 else
66731 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66732@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
66733 return string(buf, end, uuid, spec);
66734 }
66735
66736+#ifdef CONFIG_GRKERNSEC_HIDESYM
66737+int kptr_restrict __read_mostly = 2;
66738+#else
66739 int kptr_restrict __read_mostly;
66740+#endif
66741
66742 /*
66743 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66744@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
66745 * - 'S' For symbolic direct pointers with offset
66746 * - 's' For symbolic direct pointers without offset
66747 * - 'B' For backtraced symbolic direct pointers with offset
66748+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66749+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66750 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66751 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66752 * - 'M' For a 6-byte MAC address, it prints the address in the
66753@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
66754 {
66755 if (!ptr && *fmt != 'K') {
66756 /*
66757- * Print (null) with the same width as a pointer so it makes
66758+ * Print (nil) with the same width as a pointer so it makes
66759 * tabular output look nice.
66760 */
66761 if (spec.field_width == -1)
66762 spec.field_width = 2 * sizeof(void *);
66763- return string(buf, end, "(null)", spec);
66764+ return string(buf, end, "(nil)", spec);
66765 }
66766
66767 switch (*fmt) {
66768@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
66769 /* Fallthrough */
66770 case 'S':
66771 case 's':
66772+#ifdef CONFIG_GRKERNSEC_HIDESYM
66773+ break;
66774+#else
66775+ return symbol_string(buf, end, ptr, spec, *fmt);
66776+#endif
66777+ case 'A':
66778+ case 'a':
66779 case 'B':
66780 return symbol_string(buf, end, ptr, spec, *fmt);
66781 case 'R':
66782@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
66783 typeof(type) value; \
66784 if (sizeof(type) == 8) { \
66785 args = PTR_ALIGN(args, sizeof(u32)); \
66786- *(u32 *)&value = *(u32 *)args; \
66787- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66788+ *(u32 *)&value = *(const u32 *)args; \
66789+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66790 } else { \
66791 args = PTR_ALIGN(args, sizeof(type)); \
66792- value = *(typeof(type) *)args; \
66793+ value = *(const typeof(type) *)args; \
66794 } \
66795 args += sizeof(type); \
66796 value; \
66797@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
66798 case FORMAT_TYPE_STR: {
66799 const char *str_arg = args;
66800 args += strlen(str_arg) + 1;
66801- str = string(str, end, (char *)str_arg, spec);
66802+ str = string(str, end, str_arg, spec);
66803 break;
66804 }
66805
66806diff -urNp linux-3.0.7/localversion-grsec linux-3.0.7/localversion-grsec
66807--- linux-3.0.7/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66808+++ linux-3.0.7/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
66809@@ -0,0 +1 @@
66810+-grsec
66811diff -urNp linux-3.0.7/mm/Kconfig linux-3.0.7/mm/Kconfig
66812--- linux-3.0.7/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
66813+++ linux-3.0.7/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
66814@@ -240,7 +240,7 @@ config KSM
66815 config DEFAULT_MMAP_MIN_ADDR
66816 int "Low address space to protect from user allocation"
66817 depends on MMU
66818- default 4096
66819+ default 65536
66820 help
66821 This is the portion of low virtual memory which should be protected
66822 from userspace allocation. Keeping a user from writing to low pages
66823diff -urNp linux-3.0.7/mm/filemap.c linux-3.0.7/mm/filemap.c
66824--- linux-3.0.7/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
66825+++ linux-3.0.7/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
66826@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
66827 struct address_space *mapping = file->f_mapping;
66828
66829 if (!mapping->a_ops->readpage)
66830- return -ENOEXEC;
66831+ return -ENODEV;
66832 file_accessed(file);
66833 vma->vm_ops = &generic_file_vm_ops;
66834 vma->vm_flags |= VM_CAN_NONLINEAR;
66835@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
66836 *pos = i_size_read(inode);
66837
66838 if (limit != RLIM_INFINITY) {
66839+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66840 if (*pos >= limit) {
66841 send_sig(SIGXFSZ, current, 0);
66842 return -EFBIG;
66843diff -urNp linux-3.0.7/mm/fremap.c linux-3.0.7/mm/fremap.c
66844--- linux-3.0.7/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
66845+++ linux-3.0.7/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
66846@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66847 retry:
66848 vma = find_vma(mm, start);
66849
66850+#ifdef CONFIG_PAX_SEGMEXEC
66851+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66852+ goto out;
66853+#endif
66854+
66855 /*
66856 * Make sure the vma is shared, that it supports prefaulting,
66857 * and that the remapped range is valid and fully within
66858diff -urNp linux-3.0.7/mm/highmem.c linux-3.0.7/mm/highmem.c
66859--- linux-3.0.7/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
66860+++ linux-3.0.7/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
66861@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
66862 * So no dangers, even with speculative execution.
66863 */
66864 page = pte_page(pkmap_page_table[i]);
66865+ pax_open_kernel();
66866 pte_clear(&init_mm, (unsigned long)page_address(page),
66867 &pkmap_page_table[i]);
66868-
66869+ pax_close_kernel();
66870 set_page_address(page, NULL);
66871 need_flush = 1;
66872 }
66873@@ -186,9 +187,11 @@ start:
66874 }
66875 }
66876 vaddr = PKMAP_ADDR(last_pkmap_nr);
66877+
66878+ pax_open_kernel();
66879 set_pte_at(&init_mm, vaddr,
66880 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66881-
66882+ pax_close_kernel();
66883 pkmap_count[last_pkmap_nr] = 1;
66884 set_page_address(page, (void *)vaddr);
66885
66886diff -urNp linux-3.0.7/mm/huge_memory.c linux-3.0.7/mm/huge_memory.c
66887--- linux-3.0.7/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
66888+++ linux-3.0.7/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
66889@@ -702,7 +702,7 @@ out:
66890 * run pte_offset_map on the pmd, if an huge pmd could
66891 * materialize from under us from a different thread.
66892 */
66893- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
66894+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66895 return VM_FAULT_OOM;
66896 /* if an huge pmd materialized from under us just retry later */
66897 if (unlikely(pmd_trans_huge(*pmd)))
66898diff -urNp linux-3.0.7/mm/hugetlb.c linux-3.0.7/mm/hugetlb.c
66899--- linux-3.0.7/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
66900+++ linux-3.0.7/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
66901@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
66902 return 1;
66903 }
66904
66905+#ifdef CONFIG_PAX_SEGMEXEC
66906+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66907+{
66908+ struct mm_struct *mm = vma->vm_mm;
66909+ struct vm_area_struct *vma_m;
66910+ unsigned long address_m;
66911+ pte_t *ptep_m;
66912+
66913+ vma_m = pax_find_mirror_vma(vma);
66914+ if (!vma_m)
66915+ return;
66916+
66917+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66918+ address_m = address + SEGMEXEC_TASK_SIZE;
66919+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66920+ get_page(page_m);
66921+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
66922+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66923+}
66924+#endif
66925+
66926 /*
66927 * Hugetlb_cow() should be called with page lock of the original hugepage held.
66928 */
66929@@ -2440,6 +2461,11 @@ retry_avoidcopy:
66930 make_huge_pte(vma, new_page, 1));
66931 page_remove_rmap(old_page);
66932 hugepage_add_new_anon_rmap(new_page, vma, address);
66933+
66934+#ifdef CONFIG_PAX_SEGMEXEC
66935+ pax_mirror_huge_pte(vma, address, new_page);
66936+#endif
66937+
66938 /* Make the old page be freed below */
66939 new_page = old_page;
66940 mmu_notifier_invalidate_range_end(mm,
66941@@ -2591,6 +2617,10 @@ retry:
66942 && (vma->vm_flags & VM_SHARED)));
66943 set_huge_pte_at(mm, address, ptep, new_pte);
66944
66945+#ifdef CONFIG_PAX_SEGMEXEC
66946+ pax_mirror_huge_pte(vma, address, page);
66947+#endif
66948+
66949 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
66950 /* Optimization, do the COW without a second fault */
66951 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
66952@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
66953 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
66954 struct hstate *h = hstate_vma(vma);
66955
66956+#ifdef CONFIG_PAX_SEGMEXEC
66957+ struct vm_area_struct *vma_m;
66958+#endif
66959+
66960 ptep = huge_pte_offset(mm, address);
66961 if (ptep) {
66962 entry = huge_ptep_get(ptep);
66963@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
66964 VM_FAULT_SET_HINDEX(h - hstates);
66965 }
66966
66967+#ifdef CONFIG_PAX_SEGMEXEC
66968+ vma_m = pax_find_mirror_vma(vma);
66969+ if (vma_m) {
66970+ unsigned long address_m;
66971+
66972+ if (vma->vm_start > vma_m->vm_start) {
66973+ address_m = address;
66974+ address -= SEGMEXEC_TASK_SIZE;
66975+ vma = vma_m;
66976+ h = hstate_vma(vma);
66977+ } else
66978+ address_m = address + SEGMEXEC_TASK_SIZE;
66979+
66980+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
66981+ return VM_FAULT_OOM;
66982+ address_m &= HPAGE_MASK;
66983+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
66984+ }
66985+#endif
66986+
66987 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
66988 if (!ptep)
66989 return VM_FAULT_OOM;
66990diff -urNp linux-3.0.7/mm/internal.h linux-3.0.7/mm/internal.h
66991--- linux-3.0.7/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
66992+++ linux-3.0.7/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
66993@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
66994 * in mm/page_alloc.c
66995 */
66996 extern void __free_pages_bootmem(struct page *page, unsigned int order);
66997+extern void free_compound_page(struct page *page);
66998 extern void prep_compound_page(struct page *page, unsigned long order);
66999 #ifdef CONFIG_MEMORY_FAILURE
67000 extern bool is_free_buddy_page(struct page *page);
67001diff -urNp linux-3.0.7/mm/kmemleak.c linux-3.0.7/mm/kmemleak.c
67002--- linux-3.0.7/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
67003+++ linux-3.0.7/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
67004@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67005
67006 for (i = 0; i < object->trace_len; i++) {
67007 void *ptr = (void *)object->trace[i];
67008- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67009+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67010 }
67011 }
67012
67013diff -urNp linux-3.0.7/mm/maccess.c linux-3.0.7/mm/maccess.c
67014--- linux-3.0.7/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
67015+++ linux-3.0.7/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
67016@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67017 set_fs(KERNEL_DS);
67018 pagefault_disable();
67019 ret = __copy_from_user_inatomic(dst,
67020- (__force const void __user *)src, size);
67021+ (const void __force_user *)src, size);
67022 pagefault_enable();
67023 set_fs(old_fs);
67024
67025@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67026
67027 set_fs(KERNEL_DS);
67028 pagefault_disable();
67029- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67030+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67031 pagefault_enable();
67032 set_fs(old_fs);
67033
67034diff -urNp linux-3.0.7/mm/madvise.c linux-3.0.7/mm/madvise.c
67035--- linux-3.0.7/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
67036+++ linux-3.0.7/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
67037@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67038 pgoff_t pgoff;
67039 unsigned long new_flags = vma->vm_flags;
67040
67041+#ifdef CONFIG_PAX_SEGMEXEC
67042+ struct vm_area_struct *vma_m;
67043+#endif
67044+
67045 switch (behavior) {
67046 case MADV_NORMAL:
67047 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67048@@ -110,6 +114,13 @@ success:
67049 /*
67050 * vm_flags is protected by the mmap_sem held in write mode.
67051 */
67052+
67053+#ifdef CONFIG_PAX_SEGMEXEC
67054+ vma_m = pax_find_mirror_vma(vma);
67055+ if (vma_m)
67056+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67057+#endif
67058+
67059 vma->vm_flags = new_flags;
67060
67061 out:
67062@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67063 struct vm_area_struct ** prev,
67064 unsigned long start, unsigned long end)
67065 {
67066+
67067+#ifdef CONFIG_PAX_SEGMEXEC
67068+ struct vm_area_struct *vma_m;
67069+#endif
67070+
67071 *prev = vma;
67072 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67073 return -EINVAL;
67074@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67075 zap_page_range(vma, start, end - start, &details);
67076 } else
67077 zap_page_range(vma, start, end - start, NULL);
67078+
67079+#ifdef CONFIG_PAX_SEGMEXEC
67080+ vma_m = pax_find_mirror_vma(vma);
67081+ if (vma_m) {
67082+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67083+ struct zap_details details = {
67084+ .nonlinear_vma = vma_m,
67085+ .last_index = ULONG_MAX,
67086+ };
67087+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67088+ } else
67089+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67090+ }
67091+#endif
67092+
67093 return 0;
67094 }
67095
67096@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67097 if (end < start)
67098 goto out;
67099
67100+#ifdef CONFIG_PAX_SEGMEXEC
67101+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67102+ if (end > SEGMEXEC_TASK_SIZE)
67103+ goto out;
67104+ } else
67105+#endif
67106+
67107+ if (end > TASK_SIZE)
67108+ goto out;
67109+
67110 error = 0;
67111 if (end == start)
67112 goto out;
67113diff -urNp linux-3.0.7/mm/memory-failure.c linux-3.0.7/mm/memory-failure.c
67114--- linux-3.0.7/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
67115+++ linux-3.0.7/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
67116@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
67117
67118 int sysctl_memory_failure_recovery __read_mostly = 1;
67119
67120-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67121+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67122
67123 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67124
67125@@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
67126 si.si_signo = SIGBUS;
67127 si.si_errno = 0;
67128 si.si_code = BUS_MCEERR_AO;
67129- si.si_addr = (void *)addr;
67130+ si.si_addr = (void __user *)addr;
67131 #ifdef __ARCH_SI_TRAPNO
67132 si.si_trapno = trapno;
67133 #endif
67134@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
67135 }
67136
67137 nr_pages = 1 << compound_trans_order(hpage);
67138- atomic_long_add(nr_pages, &mce_bad_pages);
67139+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67140
67141 /*
67142 * We need/can do nothing about count=0 pages.
67143@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
67144 if (!PageHWPoison(hpage)
67145 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67146 || (p != hpage && TestSetPageHWPoison(hpage))) {
67147- atomic_long_sub(nr_pages, &mce_bad_pages);
67148+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67149 return 0;
67150 }
67151 set_page_hwpoison_huge_page(hpage);
67152@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
67153 }
67154 if (hwpoison_filter(p)) {
67155 if (TestClearPageHWPoison(p))
67156- atomic_long_sub(nr_pages, &mce_bad_pages);
67157+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67158 unlock_page(hpage);
67159 put_page(hpage);
67160 return 0;
67161@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
67162 return 0;
67163 }
67164 if (TestClearPageHWPoison(p))
67165- atomic_long_sub(nr_pages, &mce_bad_pages);
67166+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67167 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67168 return 0;
67169 }
67170@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
67171 */
67172 if (TestClearPageHWPoison(page)) {
67173 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67174- atomic_long_sub(nr_pages, &mce_bad_pages);
67175+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67176 freeit = 1;
67177 if (PageHuge(page))
67178 clear_page_hwpoison_huge_page(page);
67179@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
67180 }
67181 done:
67182 if (!PageHWPoison(hpage))
67183- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67184+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67185 set_page_hwpoison_huge_page(hpage);
67186 dequeue_hwpoisoned_huge_page(hpage);
67187 /* keep elevated page count for bad page */
67188@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
67189 return ret;
67190
67191 done:
67192- atomic_long_add(1, &mce_bad_pages);
67193+ atomic_long_add_unchecked(1, &mce_bad_pages);
67194 SetPageHWPoison(page);
67195 /* keep elevated page count for bad page */
67196 return ret;
67197diff -urNp linux-3.0.7/mm/memory.c linux-3.0.7/mm/memory.c
67198--- linux-3.0.7/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
67199+++ linux-3.0.7/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
67200@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67201 return;
67202
67203 pmd = pmd_offset(pud, start);
67204+
67205+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67206 pud_clear(pud);
67207 pmd_free_tlb(tlb, pmd, start);
67208+#endif
67209+
67210 }
67211
67212 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67213@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67214 if (end - 1 > ceiling - 1)
67215 return;
67216
67217+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67218 pud = pud_offset(pgd, start);
67219 pgd_clear(pgd);
67220 pud_free_tlb(tlb, pud, start);
67221+#endif
67222+
67223 }
67224
67225 /*
67226@@ -1577,12 +1584,6 @@ no_page_table:
67227 return page;
67228 }
67229
67230-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67231-{
67232- return stack_guard_page_start(vma, addr) ||
67233- stack_guard_page_end(vma, addr+PAGE_SIZE);
67234-}
67235-
67236 /**
67237 * __get_user_pages() - pin user pages in memory
67238 * @tsk: task_struct of target task
67239@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
67240 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67241 i = 0;
67242
67243- do {
67244+ while (nr_pages) {
67245 struct vm_area_struct *vma;
67246
67247- vma = find_extend_vma(mm, start);
67248+ vma = find_vma(mm, start);
67249 if (!vma && in_gate_area(mm, start)) {
67250 unsigned long pg = start & PAGE_MASK;
67251 pgd_t *pgd;
67252@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
67253 goto next_page;
67254 }
67255
67256- if (!vma ||
67257+ if (!vma || start < vma->vm_start ||
67258 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67259 !(vm_flags & vma->vm_flags))
67260 return i ? : -EFAULT;
67261@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
67262 int ret;
67263 unsigned int fault_flags = 0;
67264
67265- /* For mlock, just skip the stack guard page. */
67266- if (foll_flags & FOLL_MLOCK) {
67267- if (stack_guard_page(vma, start))
67268- goto next_page;
67269- }
67270 if (foll_flags & FOLL_WRITE)
67271 fault_flags |= FAULT_FLAG_WRITE;
67272 if (nonblocking)
67273@@ -1811,7 +1807,7 @@ next_page:
67274 start += PAGE_SIZE;
67275 nr_pages--;
67276 } while (nr_pages && start < vma->vm_end);
67277- } while (nr_pages);
67278+ }
67279 return i;
67280 }
67281 EXPORT_SYMBOL(__get_user_pages);
67282@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
67283 page_add_file_rmap(page);
67284 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67285
67286+#ifdef CONFIG_PAX_SEGMEXEC
67287+ pax_mirror_file_pte(vma, addr, page, ptl);
67288+#endif
67289+
67290 retval = 0;
67291 pte_unmap_unlock(pte, ptl);
67292 return retval;
67293@@ -2052,10 +2052,22 @@ out:
67294 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67295 struct page *page)
67296 {
67297+
67298+#ifdef CONFIG_PAX_SEGMEXEC
67299+ struct vm_area_struct *vma_m;
67300+#endif
67301+
67302 if (addr < vma->vm_start || addr >= vma->vm_end)
67303 return -EFAULT;
67304 if (!page_count(page))
67305 return -EINVAL;
67306+
67307+#ifdef CONFIG_PAX_SEGMEXEC
67308+ vma_m = pax_find_mirror_vma(vma);
67309+ if (vma_m)
67310+ vma_m->vm_flags |= VM_INSERTPAGE;
67311+#endif
67312+
67313 vma->vm_flags |= VM_INSERTPAGE;
67314 return insert_page(vma, addr, page, vma->vm_page_prot);
67315 }
67316@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
67317 unsigned long pfn)
67318 {
67319 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67320+ BUG_ON(vma->vm_mirror);
67321
67322 if (addr < vma->vm_start || addr >= vma->vm_end)
67323 return -EFAULT;
67324@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
67325 copy_user_highpage(dst, src, va, vma);
67326 }
67327
67328+#ifdef CONFIG_PAX_SEGMEXEC
67329+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67330+{
67331+ struct mm_struct *mm = vma->vm_mm;
67332+ spinlock_t *ptl;
67333+ pte_t *pte, entry;
67334+
67335+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67336+ entry = *pte;
67337+ if (!pte_present(entry)) {
67338+ if (!pte_none(entry)) {
67339+ BUG_ON(pte_file(entry));
67340+ free_swap_and_cache(pte_to_swp_entry(entry));
67341+ pte_clear_not_present_full(mm, address, pte, 0);
67342+ }
67343+ } else {
67344+ struct page *page;
67345+
67346+ flush_cache_page(vma, address, pte_pfn(entry));
67347+ entry = ptep_clear_flush(vma, address, pte);
67348+ BUG_ON(pte_dirty(entry));
67349+ page = vm_normal_page(vma, address, entry);
67350+ if (page) {
67351+ update_hiwater_rss(mm);
67352+ if (PageAnon(page))
67353+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67354+ else
67355+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67356+ page_remove_rmap(page);
67357+ page_cache_release(page);
67358+ }
67359+ }
67360+ pte_unmap_unlock(pte, ptl);
67361+}
67362+
67363+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67364+ *
67365+ * the ptl of the lower mapped page is held on entry and is not released on exit
67366+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67367+ */
67368+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67369+{
67370+ struct mm_struct *mm = vma->vm_mm;
67371+ unsigned long address_m;
67372+ spinlock_t *ptl_m;
67373+ struct vm_area_struct *vma_m;
67374+ pmd_t *pmd_m;
67375+ pte_t *pte_m, entry_m;
67376+
67377+ BUG_ON(!page_m || !PageAnon(page_m));
67378+
67379+ vma_m = pax_find_mirror_vma(vma);
67380+ if (!vma_m)
67381+ return;
67382+
67383+ BUG_ON(!PageLocked(page_m));
67384+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67385+ address_m = address + SEGMEXEC_TASK_SIZE;
67386+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67387+ pte_m = pte_offset_map(pmd_m, address_m);
67388+ ptl_m = pte_lockptr(mm, pmd_m);
67389+ if (ptl != ptl_m) {
67390+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67391+ if (!pte_none(*pte_m))
67392+ goto out;
67393+ }
67394+
67395+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67396+ page_cache_get(page_m);
67397+ page_add_anon_rmap(page_m, vma_m, address_m);
67398+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67399+ set_pte_at(mm, address_m, pte_m, entry_m);
67400+ update_mmu_cache(vma_m, address_m, entry_m);
67401+out:
67402+ if (ptl != ptl_m)
67403+ spin_unlock(ptl_m);
67404+ pte_unmap(pte_m);
67405+ unlock_page(page_m);
67406+}
67407+
67408+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67409+{
67410+ struct mm_struct *mm = vma->vm_mm;
67411+ unsigned long address_m;
67412+ spinlock_t *ptl_m;
67413+ struct vm_area_struct *vma_m;
67414+ pmd_t *pmd_m;
67415+ pte_t *pte_m, entry_m;
67416+
67417+ BUG_ON(!page_m || PageAnon(page_m));
67418+
67419+ vma_m = pax_find_mirror_vma(vma);
67420+ if (!vma_m)
67421+ return;
67422+
67423+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67424+ address_m = address + SEGMEXEC_TASK_SIZE;
67425+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67426+ pte_m = pte_offset_map(pmd_m, address_m);
67427+ ptl_m = pte_lockptr(mm, pmd_m);
67428+ if (ptl != ptl_m) {
67429+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67430+ if (!pte_none(*pte_m))
67431+ goto out;
67432+ }
67433+
67434+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67435+ page_cache_get(page_m);
67436+ page_add_file_rmap(page_m);
67437+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67438+ set_pte_at(mm, address_m, pte_m, entry_m);
67439+ update_mmu_cache(vma_m, address_m, entry_m);
67440+out:
67441+ if (ptl != ptl_m)
67442+ spin_unlock(ptl_m);
67443+ pte_unmap(pte_m);
67444+}
67445+
67446+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67447+{
67448+ struct mm_struct *mm = vma->vm_mm;
67449+ unsigned long address_m;
67450+ spinlock_t *ptl_m;
67451+ struct vm_area_struct *vma_m;
67452+ pmd_t *pmd_m;
67453+ pte_t *pte_m, entry_m;
67454+
67455+ vma_m = pax_find_mirror_vma(vma);
67456+ if (!vma_m)
67457+ return;
67458+
67459+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67460+ address_m = address + SEGMEXEC_TASK_SIZE;
67461+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67462+ pte_m = pte_offset_map(pmd_m, address_m);
67463+ ptl_m = pte_lockptr(mm, pmd_m);
67464+ if (ptl != ptl_m) {
67465+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67466+ if (!pte_none(*pte_m))
67467+ goto out;
67468+ }
67469+
67470+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67471+ set_pte_at(mm, address_m, pte_m, entry_m);
67472+out:
67473+ if (ptl != ptl_m)
67474+ spin_unlock(ptl_m);
67475+ pte_unmap(pte_m);
67476+}
67477+
67478+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67479+{
67480+ struct page *page_m;
67481+ pte_t entry;
67482+
67483+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67484+ goto out;
67485+
67486+ entry = *pte;
67487+ page_m = vm_normal_page(vma, address, entry);
67488+ if (!page_m)
67489+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67490+ else if (PageAnon(page_m)) {
67491+ if (pax_find_mirror_vma(vma)) {
67492+ pte_unmap_unlock(pte, ptl);
67493+ lock_page(page_m);
67494+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67495+ if (pte_same(entry, *pte))
67496+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67497+ else
67498+ unlock_page(page_m);
67499+ }
67500+ } else
67501+ pax_mirror_file_pte(vma, address, page_m, ptl);
67502+
67503+out:
67504+ pte_unmap_unlock(pte, ptl);
67505+}
67506+#endif
67507+
67508 /*
67509 * This routine handles present pages, when users try to write
67510 * to a shared page. It is done by copying the page to a new address
67511@@ -2667,6 +2860,12 @@ gotten:
67512 */
67513 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67514 if (likely(pte_same(*page_table, orig_pte))) {
67515+
67516+#ifdef CONFIG_PAX_SEGMEXEC
67517+ if (pax_find_mirror_vma(vma))
67518+ BUG_ON(!trylock_page(new_page));
67519+#endif
67520+
67521 if (old_page) {
67522 if (!PageAnon(old_page)) {
67523 dec_mm_counter_fast(mm, MM_FILEPAGES);
67524@@ -2718,6 +2917,10 @@ gotten:
67525 page_remove_rmap(old_page);
67526 }
67527
67528+#ifdef CONFIG_PAX_SEGMEXEC
67529+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67530+#endif
67531+
67532 /* Free the old page.. */
67533 new_page = old_page;
67534 ret |= VM_FAULT_WRITE;
67535@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
67536 swap_free(entry);
67537 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67538 try_to_free_swap(page);
67539+
67540+#ifdef CONFIG_PAX_SEGMEXEC
67541+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67542+#endif
67543+
67544 unlock_page(page);
67545 if (swapcache) {
67546 /*
67547@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
67548
67549 /* No need to invalidate - it was non-present before */
67550 update_mmu_cache(vma, address, page_table);
67551+
67552+#ifdef CONFIG_PAX_SEGMEXEC
67553+ pax_mirror_anon_pte(vma, address, page, ptl);
67554+#endif
67555+
67556 unlock:
67557 pte_unmap_unlock(page_table, ptl);
67558 out:
67559@@ -3039,40 +3252,6 @@ out_release:
67560 }
67561
67562 /*
67563- * This is like a special single-page "expand_{down|up}wards()",
67564- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67565- * doesn't hit another vma.
67566- */
67567-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67568-{
67569- address &= PAGE_MASK;
67570- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67571- struct vm_area_struct *prev = vma->vm_prev;
67572-
67573- /*
67574- * Is there a mapping abutting this one below?
67575- *
67576- * That's only ok if it's the same stack mapping
67577- * that has gotten split..
67578- */
67579- if (prev && prev->vm_end == address)
67580- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67581-
67582- expand_downwards(vma, address - PAGE_SIZE);
67583- }
67584- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67585- struct vm_area_struct *next = vma->vm_next;
67586-
67587- /* As VM_GROWSDOWN but s/below/above/ */
67588- if (next && next->vm_start == address + PAGE_SIZE)
67589- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67590-
67591- expand_upwards(vma, address + PAGE_SIZE);
67592- }
67593- return 0;
67594-}
67595-
67596-/*
67597 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67598 * but allow concurrent faults), and pte mapped but not yet locked.
67599 * We return with mmap_sem still held, but pte unmapped and unlocked.
67600@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
67601 unsigned long address, pte_t *page_table, pmd_t *pmd,
67602 unsigned int flags)
67603 {
67604- struct page *page;
67605+ struct page *page = NULL;
67606 spinlock_t *ptl;
67607 pte_t entry;
67608
67609- pte_unmap(page_table);
67610-
67611- /* Check if we need to add a guard page to the stack */
67612- if (check_stack_guard_page(vma, address) < 0)
67613- return VM_FAULT_SIGBUS;
67614-
67615- /* Use the zero-page for reads */
67616 if (!(flags & FAULT_FLAG_WRITE)) {
67617 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67618 vma->vm_page_prot));
67619- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67620+ ptl = pte_lockptr(mm, pmd);
67621+ spin_lock(ptl);
67622 if (!pte_none(*page_table))
67623 goto unlock;
67624 goto setpte;
67625 }
67626
67627 /* Allocate our own private page. */
67628+ pte_unmap(page_table);
67629+
67630 if (unlikely(anon_vma_prepare(vma)))
67631 goto oom;
67632 page = alloc_zeroed_user_highpage_movable(vma, address);
67633@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
67634 if (!pte_none(*page_table))
67635 goto release;
67636
67637+#ifdef CONFIG_PAX_SEGMEXEC
67638+ if (pax_find_mirror_vma(vma))
67639+ BUG_ON(!trylock_page(page));
67640+#endif
67641+
67642 inc_mm_counter_fast(mm, MM_ANONPAGES);
67643 page_add_new_anon_rmap(page, vma, address);
67644 setpte:
67645@@ -3127,6 +3307,12 @@ setpte:
67646
67647 /* No need to invalidate - it was non-present before */
67648 update_mmu_cache(vma, address, page_table);
67649+
67650+#ifdef CONFIG_PAX_SEGMEXEC
67651+ if (page)
67652+ pax_mirror_anon_pte(vma, address, page, ptl);
67653+#endif
67654+
67655 unlock:
67656 pte_unmap_unlock(page_table, ptl);
67657 return 0;
67658@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
67659 */
67660 /* Only go through if we didn't race with anybody else... */
67661 if (likely(pte_same(*page_table, orig_pte))) {
67662+
67663+#ifdef CONFIG_PAX_SEGMEXEC
67664+ if (anon && pax_find_mirror_vma(vma))
67665+ BUG_ON(!trylock_page(page));
67666+#endif
67667+
67668 flush_icache_page(vma, page);
67669 entry = mk_pte(page, vma->vm_page_prot);
67670 if (flags & FAULT_FLAG_WRITE)
67671@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
67672
67673 /* no need to invalidate: a not-present page won't be cached */
67674 update_mmu_cache(vma, address, page_table);
67675+
67676+#ifdef CONFIG_PAX_SEGMEXEC
67677+ if (anon)
67678+ pax_mirror_anon_pte(vma, address, page, ptl);
67679+ else
67680+ pax_mirror_file_pte(vma, address, page, ptl);
67681+#endif
67682+
67683 } else {
67684 if (charged)
67685 mem_cgroup_uncharge_page(page);
67686@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
67687 if (flags & FAULT_FLAG_WRITE)
67688 flush_tlb_fix_spurious_fault(vma, address);
67689 }
67690+
67691+#ifdef CONFIG_PAX_SEGMEXEC
67692+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67693+ return 0;
67694+#endif
67695+
67696 unlock:
67697 pte_unmap_unlock(pte, ptl);
67698 return 0;
67699@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
67700 pmd_t *pmd;
67701 pte_t *pte;
67702
67703+#ifdef CONFIG_PAX_SEGMEXEC
67704+ struct vm_area_struct *vma_m;
67705+#endif
67706+
67707 __set_current_state(TASK_RUNNING);
67708
67709 count_vm_event(PGFAULT);
67710@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
67711 if (unlikely(is_vm_hugetlb_page(vma)))
67712 return hugetlb_fault(mm, vma, address, flags);
67713
67714+#ifdef CONFIG_PAX_SEGMEXEC
67715+ vma_m = pax_find_mirror_vma(vma);
67716+ if (vma_m) {
67717+ unsigned long address_m;
67718+ pgd_t *pgd_m;
67719+ pud_t *pud_m;
67720+ pmd_t *pmd_m;
67721+
67722+ if (vma->vm_start > vma_m->vm_start) {
67723+ address_m = address;
67724+ address -= SEGMEXEC_TASK_SIZE;
67725+ vma = vma_m;
67726+ } else
67727+ address_m = address + SEGMEXEC_TASK_SIZE;
67728+
67729+ pgd_m = pgd_offset(mm, address_m);
67730+ pud_m = pud_alloc(mm, pgd_m, address_m);
67731+ if (!pud_m)
67732+ return VM_FAULT_OOM;
67733+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67734+ if (!pmd_m)
67735+ return VM_FAULT_OOM;
67736+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67737+ return VM_FAULT_OOM;
67738+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67739+ }
67740+#endif
67741+
67742 pgd = pgd_offset(mm, address);
67743 pud = pud_alloc(mm, pgd, address);
67744 if (!pud)
67745@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
67746 * run pte_offset_map on the pmd, if an huge pmd could
67747 * materialize from under us from a different thread.
67748 */
67749- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
67750+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67751 return VM_FAULT_OOM;
67752 /* if an huge pmd materialized from under us just retry later */
67753 if (unlikely(pmd_trans_huge(*pmd)))
67754@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
67755 gate_vma.vm_start = FIXADDR_USER_START;
67756 gate_vma.vm_end = FIXADDR_USER_END;
67757 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67758- gate_vma.vm_page_prot = __P101;
67759+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67760 /*
67761 * Make sure the vDSO gets into every core dump.
67762 * Dumping its contents makes post-mortem fully interpretable later
67763diff -urNp linux-3.0.7/mm/mempolicy.c linux-3.0.7/mm/mempolicy.c
67764--- linux-3.0.7/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
67765+++ linux-3.0.7/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
67766@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
67767 unsigned long vmstart;
67768 unsigned long vmend;
67769
67770+#ifdef CONFIG_PAX_SEGMEXEC
67771+ struct vm_area_struct *vma_m;
67772+#endif
67773+
67774 vma = find_vma_prev(mm, start, &prev);
67775 if (!vma || vma->vm_start > start)
67776 return -EFAULT;
67777@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
67778 err = policy_vma(vma, new_pol);
67779 if (err)
67780 goto out;
67781+
67782+#ifdef CONFIG_PAX_SEGMEXEC
67783+ vma_m = pax_find_mirror_vma(vma);
67784+ if (vma_m) {
67785+ err = policy_vma(vma_m, new_pol);
67786+ if (err)
67787+ goto out;
67788+ }
67789+#endif
67790+
67791 }
67792
67793 out:
67794@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
67795
67796 if (end < start)
67797 return -EINVAL;
67798+
67799+#ifdef CONFIG_PAX_SEGMEXEC
67800+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67801+ if (end > SEGMEXEC_TASK_SIZE)
67802+ return -EINVAL;
67803+ } else
67804+#endif
67805+
67806+ if (end > TASK_SIZE)
67807+ return -EINVAL;
67808+
67809 if (end == start)
67810 return 0;
67811
67812@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67813 if (!mm)
67814 goto out;
67815
67816+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67817+ if (mm != current->mm &&
67818+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67819+ err = -EPERM;
67820+ goto out;
67821+ }
67822+#endif
67823+
67824 /*
67825 * Check if this process has the right to modify the specified
67826 * process. The right exists if the process has administrative
67827@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67828 rcu_read_lock();
67829 tcred = __task_cred(task);
67830 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67831- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67832- !capable(CAP_SYS_NICE)) {
67833+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67834 rcu_read_unlock();
67835 err = -EPERM;
67836 goto out;
67837diff -urNp linux-3.0.7/mm/migrate.c linux-3.0.7/mm/migrate.c
67838--- linux-3.0.7/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
67839+++ linux-3.0.7/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
67840@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
67841 unsigned long chunk_start;
67842 int err;
67843
67844+ pax_track_stack();
67845+
67846 task_nodes = cpuset_mems_allowed(task);
67847
67848 err = -ENOMEM;
67849@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67850 if (!mm)
67851 return -EINVAL;
67852
67853+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67854+ if (mm != current->mm &&
67855+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67856+ err = -EPERM;
67857+ goto out;
67858+ }
67859+#endif
67860+
67861 /*
67862 * Check if this process has the right to modify the specified
67863 * process. The right exists if the process has administrative
67864@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67865 rcu_read_lock();
67866 tcred = __task_cred(task);
67867 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67868- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67869- !capable(CAP_SYS_NICE)) {
67870+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67871 rcu_read_unlock();
67872 err = -EPERM;
67873 goto out;
67874diff -urNp linux-3.0.7/mm/mlock.c linux-3.0.7/mm/mlock.c
67875--- linux-3.0.7/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
67876+++ linux-3.0.7/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
67877@@ -13,6 +13,7 @@
67878 #include <linux/pagemap.h>
67879 #include <linux/mempolicy.h>
67880 #include <linux/syscalls.h>
67881+#include <linux/security.h>
67882 #include <linux/sched.h>
67883 #include <linux/module.h>
67884 #include <linux/rmap.h>
67885@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
67886 return -EINVAL;
67887 if (end == start)
67888 return 0;
67889+ if (end > TASK_SIZE)
67890+ return -EINVAL;
67891+
67892 vma = find_vma_prev(current->mm, start, &prev);
67893 if (!vma || vma->vm_start > start)
67894 return -ENOMEM;
67895@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
67896 for (nstart = start ; ; ) {
67897 vm_flags_t newflags;
67898
67899+#ifdef CONFIG_PAX_SEGMEXEC
67900+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67901+ break;
67902+#endif
67903+
67904 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67905
67906 newflags = vma->vm_flags | VM_LOCKED;
67907@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67908 lock_limit >>= PAGE_SHIFT;
67909
67910 /* check against resource limits */
67911+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67912 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67913 error = do_mlock(start, len, 1);
67914 up_write(&current->mm->mmap_sem);
67915@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67916 static int do_mlockall(int flags)
67917 {
67918 struct vm_area_struct * vma, * prev = NULL;
67919- unsigned int def_flags = 0;
67920
67921 if (flags & MCL_FUTURE)
67922- def_flags = VM_LOCKED;
67923- current->mm->def_flags = def_flags;
67924+ current->mm->def_flags |= VM_LOCKED;
67925+ else
67926+ current->mm->def_flags &= ~VM_LOCKED;
67927 if (flags == MCL_FUTURE)
67928 goto out;
67929
67930 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67931 vm_flags_t newflags;
67932
67933+#ifdef CONFIG_PAX_SEGMEXEC
67934+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67935+ break;
67936+#endif
67937+
67938+ BUG_ON(vma->vm_end > TASK_SIZE);
67939 newflags = vma->vm_flags | VM_LOCKED;
67940 if (!(flags & MCL_CURRENT))
67941 newflags &= ~VM_LOCKED;
67942@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67943 lock_limit >>= PAGE_SHIFT;
67944
67945 ret = -ENOMEM;
67946+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67947 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67948 capable(CAP_IPC_LOCK))
67949 ret = do_mlockall(flags);
67950diff -urNp linux-3.0.7/mm/mmap.c linux-3.0.7/mm/mmap.c
67951--- linux-3.0.7/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
67952+++ linux-3.0.7/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
67953@@ -46,6 +46,16 @@
67954 #define arch_rebalance_pgtables(addr, len) (addr)
67955 #endif
67956
67957+static inline void verify_mm_writelocked(struct mm_struct *mm)
67958+{
67959+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67960+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67961+ up_read(&mm->mmap_sem);
67962+ BUG();
67963+ }
67964+#endif
67965+}
67966+
67967 static void unmap_region(struct mm_struct *mm,
67968 struct vm_area_struct *vma, struct vm_area_struct *prev,
67969 unsigned long start, unsigned long end);
67970@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
67971 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67972 *
67973 */
67974-pgprot_t protection_map[16] = {
67975+pgprot_t protection_map[16] __read_only = {
67976 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67977 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67978 };
67979
67980-pgprot_t vm_get_page_prot(unsigned long vm_flags)
67981+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
67982 {
67983- return __pgprot(pgprot_val(protection_map[vm_flags &
67984+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67985 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67986 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67987+
67988+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67989+ if (!(__supported_pte_mask & _PAGE_NX) &&
67990+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
67991+ (vm_flags & (VM_READ | VM_WRITE)))
67992+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
67993+#endif
67994+
67995+ return prot;
67996 }
67997 EXPORT_SYMBOL(vm_get_page_prot);
67998
67999 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68000 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68001 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68002+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68003 /*
68004 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68005 * other variables. It can be updated by several CPUs frequently.
68006@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
68007 struct vm_area_struct *next = vma->vm_next;
68008
68009 might_sleep();
68010+ BUG_ON(vma->vm_mirror);
68011 if (vma->vm_ops && vma->vm_ops->close)
68012 vma->vm_ops->close(vma);
68013 if (vma->vm_file) {
68014@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68015 * not page aligned -Ram Gupta
68016 */
68017 rlim = rlimit(RLIMIT_DATA);
68018+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68019 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68020 (mm->end_data - mm->start_data) > rlim)
68021 goto out;
68022@@ -697,6 +719,12 @@ static int
68023 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68024 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68025 {
68026+
68027+#ifdef CONFIG_PAX_SEGMEXEC
68028+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68029+ return 0;
68030+#endif
68031+
68032 if (is_mergeable_vma(vma, file, vm_flags) &&
68033 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68034 if (vma->vm_pgoff == vm_pgoff)
68035@@ -716,6 +744,12 @@ static int
68036 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68037 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68038 {
68039+
68040+#ifdef CONFIG_PAX_SEGMEXEC
68041+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68042+ return 0;
68043+#endif
68044+
68045 if (is_mergeable_vma(vma, file, vm_flags) &&
68046 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68047 pgoff_t vm_pglen;
68048@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
68049 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68050 struct vm_area_struct *prev, unsigned long addr,
68051 unsigned long end, unsigned long vm_flags,
68052- struct anon_vma *anon_vma, struct file *file,
68053+ struct anon_vma *anon_vma, struct file *file,
68054 pgoff_t pgoff, struct mempolicy *policy)
68055 {
68056 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68057 struct vm_area_struct *area, *next;
68058 int err;
68059
68060+#ifdef CONFIG_PAX_SEGMEXEC
68061+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68062+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68063+
68064+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68065+#endif
68066+
68067 /*
68068 * We later require that vma->vm_flags == vm_flags,
68069 * so this tests vma->vm_flags & VM_SPECIAL, too.
68070@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
68071 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68072 next = next->vm_next;
68073
68074+#ifdef CONFIG_PAX_SEGMEXEC
68075+ if (prev)
68076+ prev_m = pax_find_mirror_vma(prev);
68077+ if (area)
68078+ area_m = pax_find_mirror_vma(area);
68079+ if (next)
68080+ next_m = pax_find_mirror_vma(next);
68081+#endif
68082+
68083 /*
68084 * Can it merge with the predecessor?
68085 */
68086@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
68087 /* cases 1, 6 */
68088 err = vma_adjust(prev, prev->vm_start,
68089 next->vm_end, prev->vm_pgoff, NULL);
68090- } else /* cases 2, 5, 7 */
68091+
68092+#ifdef CONFIG_PAX_SEGMEXEC
68093+ if (!err && prev_m)
68094+ err = vma_adjust(prev_m, prev_m->vm_start,
68095+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68096+#endif
68097+
68098+ } else { /* cases 2, 5, 7 */
68099 err = vma_adjust(prev, prev->vm_start,
68100 end, prev->vm_pgoff, NULL);
68101+
68102+#ifdef CONFIG_PAX_SEGMEXEC
68103+ if (!err && prev_m)
68104+ err = vma_adjust(prev_m, prev_m->vm_start,
68105+ end_m, prev_m->vm_pgoff, NULL);
68106+#endif
68107+
68108+ }
68109 if (err)
68110 return NULL;
68111 khugepaged_enter_vma_merge(prev);
68112@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
68113 mpol_equal(policy, vma_policy(next)) &&
68114 can_vma_merge_before(next, vm_flags,
68115 anon_vma, file, pgoff+pglen)) {
68116- if (prev && addr < prev->vm_end) /* case 4 */
68117+ if (prev && addr < prev->vm_end) { /* case 4 */
68118 err = vma_adjust(prev, prev->vm_start,
68119 addr, prev->vm_pgoff, NULL);
68120- else /* cases 3, 8 */
68121+
68122+#ifdef CONFIG_PAX_SEGMEXEC
68123+ if (!err && prev_m)
68124+ err = vma_adjust(prev_m, prev_m->vm_start,
68125+ addr_m, prev_m->vm_pgoff, NULL);
68126+#endif
68127+
68128+ } else { /* cases 3, 8 */
68129 err = vma_adjust(area, addr, next->vm_end,
68130 next->vm_pgoff - pglen, NULL);
68131+
68132+#ifdef CONFIG_PAX_SEGMEXEC
68133+ if (!err && area_m)
68134+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68135+ next_m->vm_pgoff - pglen, NULL);
68136+#endif
68137+
68138+ }
68139 if (err)
68140 return NULL;
68141 khugepaged_enter_vma_merge(area);
68142@@ -929,14 +1009,11 @@ none:
68143 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68144 struct file *file, long pages)
68145 {
68146- const unsigned long stack_flags
68147- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68148-
68149 if (file) {
68150 mm->shared_vm += pages;
68151 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68152 mm->exec_vm += pages;
68153- } else if (flags & stack_flags)
68154+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68155 mm->stack_vm += pages;
68156 if (flags & (VM_RESERVED|VM_IO))
68157 mm->reserved_vm += pages;
68158@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
68159 * (the exception is when the underlying filesystem is noexec
68160 * mounted, in which case we dont add PROT_EXEC.)
68161 */
68162- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68163+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68164 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68165 prot |= PROT_EXEC;
68166
68167@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
68168 /* Obtain the address to map to. we verify (or select) it and ensure
68169 * that it represents a valid section of the address space.
68170 */
68171- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68172+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68173 if (addr & ~PAGE_MASK)
68174 return addr;
68175
68176@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
68177 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68178 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68179
68180+#ifdef CONFIG_PAX_MPROTECT
68181+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68182+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68183+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68184+ gr_log_rwxmmap(file);
68185+
68186+#ifdef CONFIG_PAX_EMUPLT
68187+ vm_flags &= ~VM_EXEC;
68188+#else
68189+ return -EPERM;
68190+#endif
68191+
68192+ }
68193+
68194+ if (!(vm_flags & VM_EXEC))
68195+ vm_flags &= ~VM_MAYEXEC;
68196+#else
68197+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68198+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68199+#endif
68200+ else
68201+ vm_flags &= ~VM_MAYWRITE;
68202+ }
68203+#endif
68204+
68205+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68206+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68207+ vm_flags &= ~VM_PAGEEXEC;
68208+#endif
68209+
68210 if (flags & MAP_LOCKED)
68211 if (!can_do_mlock())
68212 return -EPERM;
68213@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
68214 locked += mm->locked_vm;
68215 lock_limit = rlimit(RLIMIT_MEMLOCK);
68216 lock_limit >>= PAGE_SHIFT;
68217+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68218 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68219 return -EAGAIN;
68220 }
68221@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
68222 if (error)
68223 return error;
68224
68225+ if (!gr_acl_handle_mmap(file, prot))
68226+ return -EACCES;
68227+
68228 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68229 }
68230 EXPORT_SYMBOL(do_mmap_pgoff);
68231@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
68232 vm_flags_t vm_flags = vma->vm_flags;
68233
68234 /* If it was private or non-writable, the write bit is already clear */
68235- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68236+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68237 return 0;
68238
68239 /* The backer wishes to know when pages are first written to? */
68240@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
68241 unsigned long charged = 0;
68242 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68243
68244+#ifdef CONFIG_PAX_SEGMEXEC
68245+ struct vm_area_struct *vma_m = NULL;
68246+#endif
68247+
68248+ /*
68249+ * mm->mmap_sem is required to protect against another thread
68250+ * changing the mappings in case we sleep.
68251+ */
68252+ verify_mm_writelocked(mm);
68253+
68254 /* Clear old maps */
68255 error = -ENOMEM;
68256-munmap_back:
68257 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68258 if (vma && vma->vm_start < addr + len) {
68259 if (do_munmap(mm, addr, len))
68260 return -ENOMEM;
68261- goto munmap_back;
68262+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68263+ BUG_ON(vma && vma->vm_start < addr + len);
68264 }
68265
68266 /* Check against address space limit. */
68267@@ -1266,6 +1387,16 @@ munmap_back:
68268 goto unacct_error;
68269 }
68270
68271+#ifdef CONFIG_PAX_SEGMEXEC
68272+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68273+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68274+ if (!vma_m) {
68275+ error = -ENOMEM;
68276+ goto free_vma;
68277+ }
68278+ }
68279+#endif
68280+
68281 vma->vm_mm = mm;
68282 vma->vm_start = addr;
68283 vma->vm_end = addr + len;
68284@@ -1289,6 +1420,19 @@ munmap_back:
68285 error = file->f_op->mmap(file, vma);
68286 if (error)
68287 goto unmap_and_free_vma;
68288+
68289+#ifdef CONFIG_PAX_SEGMEXEC
68290+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68291+ added_exe_file_vma(mm);
68292+#endif
68293+
68294+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68295+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68296+ vma->vm_flags |= VM_PAGEEXEC;
68297+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68298+ }
68299+#endif
68300+
68301 if (vm_flags & VM_EXECUTABLE)
68302 added_exe_file_vma(mm);
68303
68304@@ -1324,6 +1468,11 @@ munmap_back:
68305 vma_link(mm, vma, prev, rb_link, rb_parent);
68306 file = vma->vm_file;
68307
68308+#ifdef CONFIG_PAX_SEGMEXEC
68309+ if (vma_m)
68310+ BUG_ON(pax_mirror_vma(vma_m, vma));
68311+#endif
68312+
68313 /* Once vma denies write, undo our temporary denial count */
68314 if (correct_wcount)
68315 atomic_inc(&inode->i_writecount);
68316@@ -1332,6 +1481,7 @@ out:
68317
68318 mm->total_vm += len >> PAGE_SHIFT;
68319 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68320+ track_exec_limit(mm, addr, addr + len, vm_flags);
68321 if (vm_flags & VM_LOCKED) {
68322 if (!mlock_vma_pages_range(vma, addr, addr + len))
68323 mm->locked_vm += (len >> PAGE_SHIFT);
68324@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
68325 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68326 charged = 0;
68327 free_vma:
68328+
68329+#ifdef CONFIG_PAX_SEGMEXEC
68330+ if (vma_m)
68331+ kmem_cache_free(vm_area_cachep, vma_m);
68332+#endif
68333+
68334 kmem_cache_free(vm_area_cachep, vma);
68335 unacct_error:
68336 if (charged)
68337@@ -1356,6 +1512,44 @@ unacct_error:
68338 return error;
68339 }
68340
68341+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68342+{
68343+ if (!vma) {
68344+#ifdef CONFIG_STACK_GROWSUP
68345+ if (addr > sysctl_heap_stack_gap)
68346+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68347+ else
68348+ vma = find_vma(current->mm, 0);
68349+ if (vma && (vma->vm_flags & VM_GROWSUP))
68350+ return false;
68351+#endif
68352+ return true;
68353+ }
68354+
68355+ if (addr + len > vma->vm_start)
68356+ return false;
68357+
68358+ if (vma->vm_flags & VM_GROWSDOWN)
68359+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68360+#ifdef CONFIG_STACK_GROWSUP
68361+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68362+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68363+#endif
68364+
68365+ return true;
68366+}
68367+
68368+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68369+{
68370+ if (vma->vm_start < len)
68371+ return -ENOMEM;
68372+ if (!(vma->vm_flags & VM_GROWSDOWN))
68373+ return vma->vm_start - len;
68374+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68375+ return vma->vm_start - len - sysctl_heap_stack_gap;
68376+ return -ENOMEM;
68377+}
68378+
68379 /* Get an address range which is currently unmapped.
68380 * For shmat() with addr=0.
68381 *
68382@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
68383 if (flags & MAP_FIXED)
68384 return addr;
68385
68386+#ifdef CONFIG_PAX_RANDMMAP
68387+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68388+#endif
68389+
68390 if (addr) {
68391 addr = PAGE_ALIGN(addr);
68392- vma = find_vma(mm, addr);
68393- if (TASK_SIZE - len >= addr &&
68394- (!vma || addr + len <= vma->vm_start))
68395- return addr;
68396+ if (TASK_SIZE - len >= addr) {
68397+ vma = find_vma(mm, addr);
68398+ if (check_heap_stack_gap(vma, addr, len))
68399+ return addr;
68400+ }
68401 }
68402 if (len > mm->cached_hole_size) {
68403- start_addr = addr = mm->free_area_cache;
68404+ start_addr = addr = mm->free_area_cache;
68405 } else {
68406- start_addr = addr = TASK_UNMAPPED_BASE;
68407- mm->cached_hole_size = 0;
68408+ start_addr = addr = mm->mmap_base;
68409+ mm->cached_hole_size = 0;
68410 }
68411
68412 full_search:
68413@@ -1404,34 +1603,40 @@ full_search:
68414 * Start a new search - just in case we missed
68415 * some holes.
68416 */
68417- if (start_addr != TASK_UNMAPPED_BASE) {
68418- addr = TASK_UNMAPPED_BASE;
68419- start_addr = addr;
68420+ if (start_addr != mm->mmap_base) {
68421+ start_addr = addr = mm->mmap_base;
68422 mm->cached_hole_size = 0;
68423 goto full_search;
68424 }
68425 return -ENOMEM;
68426 }
68427- if (!vma || addr + len <= vma->vm_start) {
68428- /*
68429- * Remember the place where we stopped the search:
68430- */
68431- mm->free_area_cache = addr + len;
68432- return addr;
68433- }
68434+ if (check_heap_stack_gap(vma, addr, len))
68435+ break;
68436 if (addr + mm->cached_hole_size < vma->vm_start)
68437 mm->cached_hole_size = vma->vm_start - addr;
68438 addr = vma->vm_end;
68439 }
68440+
68441+ /*
68442+ * Remember the place where we stopped the search:
68443+ */
68444+ mm->free_area_cache = addr + len;
68445+ return addr;
68446 }
68447 #endif
68448
68449 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68450 {
68451+
68452+#ifdef CONFIG_PAX_SEGMEXEC
68453+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68454+ return;
68455+#endif
68456+
68457 /*
68458 * Is this a new hole at the lowest possible address?
68459 */
68460- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68461+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68462 mm->free_area_cache = addr;
68463 mm->cached_hole_size = ~0UL;
68464 }
68465@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
68466 {
68467 struct vm_area_struct *vma;
68468 struct mm_struct *mm = current->mm;
68469- unsigned long addr = addr0;
68470+ unsigned long base = mm->mmap_base, addr = addr0;
68471
68472 /* requested length too big for entire address space */
68473 if (len > TASK_SIZE)
68474@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
68475 if (flags & MAP_FIXED)
68476 return addr;
68477
68478+#ifdef CONFIG_PAX_RANDMMAP
68479+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68480+#endif
68481+
68482 /* requesting a specific address */
68483 if (addr) {
68484 addr = PAGE_ALIGN(addr);
68485- vma = find_vma(mm, addr);
68486- if (TASK_SIZE - len >= addr &&
68487- (!vma || addr + len <= vma->vm_start))
68488- return addr;
68489+ if (TASK_SIZE - len >= addr) {
68490+ vma = find_vma(mm, addr);
68491+ if (check_heap_stack_gap(vma, addr, len))
68492+ return addr;
68493+ }
68494 }
68495
68496 /* check if free_area_cache is useful for us */
68497@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
68498 /* make sure it can fit in the remaining address space */
68499 if (addr > len) {
68500 vma = find_vma(mm, addr-len);
68501- if (!vma || addr <= vma->vm_start)
68502+ if (check_heap_stack_gap(vma, addr - len, len))
68503 /* remember the address as a hint for next time */
68504 return (mm->free_area_cache = addr-len);
68505 }
68506@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
68507 * return with success:
68508 */
68509 vma = find_vma(mm, addr);
68510- if (!vma || addr+len <= vma->vm_start)
68511+ if (check_heap_stack_gap(vma, addr, len))
68512 /* remember the address as a hint for next time */
68513 return (mm->free_area_cache = addr);
68514
68515@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
68516 mm->cached_hole_size = vma->vm_start - addr;
68517
68518 /* try just below the current vma->vm_start */
68519- addr = vma->vm_start-len;
68520- } while (len < vma->vm_start);
68521+ addr = skip_heap_stack_gap(vma, len);
68522+ } while (!IS_ERR_VALUE(addr));
68523
68524 bottomup:
68525 /*
68526@@ -1515,13 +1725,21 @@ bottomup:
68527 * can happen with large stack limits and large mmap()
68528 * allocations.
68529 */
68530+ mm->mmap_base = TASK_UNMAPPED_BASE;
68531+
68532+#ifdef CONFIG_PAX_RANDMMAP
68533+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68534+ mm->mmap_base += mm->delta_mmap;
68535+#endif
68536+
68537+ mm->free_area_cache = mm->mmap_base;
68538 mm->cached_hole_size = ~0UL;
68539- mm->free_area_cache = TASK_UNMAPPED_BASE;
68540 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68541 /*
68542 * Restore the topdown base:
68543 */
68544- mm->free_area_cache = mm->mmap_base;
68545+ mm->mmap_base = base;
68546+ mm->free_area_cache = base;
68547 mm->cached_hole_size = ~0UL;
68548
68549 return addr;
68550@@ -1530,6 +1748,12 @@ bottomup:
68551
68552 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68553 {
68554+
68555+#ifdef CONFIG_PAX_SEGMEXEC
68556+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68557+ return;
68558+#endif
68559+
68560 /*
68561 * Is this a new hole at the highest possible address?
68562 */
68563@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
68564 mm->free_area_cache = addr;
68565
68566 /* dont allow allocations above current base */
68567- if (mm->free_area_cache > mm->mmap_base)
68568+ if (mm->free_area_cache > mm->mmap_base) {
68569 mm->free_area_cache = mm->mmap_base;
68570+ mm->cached_hole_size = ~0UL;
68571+ }
68572 }
68573
68574 unsigned long
68575@@ -1646,6 +1872,28 @@ out:
68576 return prev ? prev->vm_next : vma;
68577 }
68578
68579+#ifdef CONFIG_PAX_SEGMEXEC
68580+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68581+{
68582+ struct vm_area_struct *vma_m;
68583+
68584+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68585+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68586+ BUG_ON(vma->vm_mirror);
68587+ return NULL;
68588+ }
68589+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68590+ vma_m = vma->vm_mirror;
68591+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68592+ BUG_ON(vma->vm_file != vma_m->vm_file);
68593+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68594+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68595+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68596+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68597+ return vma_m;
68598+}
68599+#endif
68600+
68601 /*
68602 * Verify that the stack growth is acceptable and
68603 * update accounting. This is shared with both the
68604@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
68605 return -ENOMEM;
68606
68607 /* Stack limit test */
68608+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68609 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68610 return -ENOMEM;
68611
68612@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
68613 locked = mm->locked_vm + grow;
68614 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68615 limit >>= PAGE_SHIFT;
68616+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68617 if (locked > limit && !capable(CAP_IPC_LOCK))
68618 return -ENOMEM;
68619 }
68620@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
68621 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68622 * vma is the last one with address > vma->vm_end. Have to extend vma.
68623 */
68624+#ifndef CONFIG_IA64
68625+static
68626+#endif
68627 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68628 {
68629 int error;
68630+ bool locknext;
68631
68632 if (!(vma->vm_flags & VM_GROWSUP))
68633 return -EFAULT;
68634
68635+ /* Also guard against wrapping around to address 0. */
68636+ if (address < PAGE_ALIGN(address+1))
68637+ address = PAGE_ALIGN(address+1);
68638+ else
68639+ return -ENOMEM;
68640+
68641 /*
68642 * We must make sure the anon_vma is allocated
68643 * so that the anon_vma locking is not a noop.
68644 */
68645 if (unlikely(anon_vma_prepare(vma)))
68646 return -ENOMEM;
68647+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68648+ if (locknext && anon_vma_prepare(vma->vm_next))
68649+ return -ENOMEM;
68650 vma_lock_anon_vma(vma);
68651+ if (locknext)
68652+ vma_lock_anon_vma(vma->vm_next);
68653
68654 /*
68655 * vma->vm_start/vm_end cannot change under us because the caller
68656 * is required to hold the mmap_sem in read mode. We need the
68657- * anon_vma lock to serialize against concurrent expand_stacks.
68658- * Also guard against wrapping around to address 0.
68659+ * anon_vma locks to serialize against concurrent expand_stacks
68660+ * and expand_upwards.
68661 */
68662- if (address < PAGE_ALIGN(address+4))
68663- address = PAGE_ALIGN(address+4);
68664- else {
68665- vma_unlock_anon_vma(vma);
68666- return -ENOMEM;
68667- }
68668 error = 0;
68669
68670 /* Somebody else might have raced and expanded it already */
68671- if (address > vma->vm_end) {
68672+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68673+ error = -ENOMEM;
68674+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68675 unsigned long size, grow;
68676
68677 size = address - vma->vm_start;
68678@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
68679 }
68680 }
68681 }
68682+ if (locknext)
68683+ vma_unlock_anon_vma(vma->vm_next);
68684 vma_unlock_anon_vma(vma);
68685 khugepaged_enter_vma_merge(vma);
68686 return error;
68687@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
68688 unsigned long address)
68689 {
68690 int error;
68691+ bool lockprev = false;
68692+ struct vm_area_struct *prev;
68693
68694 /*
68695 * We must make sure the anon_vma is allocated
68696@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
68697 if (error)
68698 return error;
68699
68700+ prev = vma->vm_prev;
68701+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68702+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68703+#endif
68704+ if (lockprev && anon_vma_prepare(prev))
68705+ return -ENOMEM;
68706+ if (lockprev)
68707+ vma_lock_anon_vma(prev);
68708+
68709 vma_lock_anon_vma(vma);
68710
68711 /*
68712@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
68713 */
68714
68715 /* Somebody else might have raced and expanded it already */
68716- if (address < vma->vm_start) {
68717+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68718+ error = -ENOMEM;
68719+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68720 unsigned long size, grow;
68721
68722+#ifdef CONFIG_PAX_SEGMEXEC
68723+ struct vm_area_struct *vma_m;
68724+
68725+ vma_m = pax_find_mirror_vma(vma);
68726+#endif
68727+
68728 size = vma->vm_end - address;
68729 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68730
68731@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
68732 if (!error) {
68733 vma->vm_start = address;
68734 vma->vm_pgoff -= grow;
68735+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68736+
68737+#ifdef CONFIG_PAX_SEGMEXEC
68738+ if (vma_m) {
68739+ vma_m->vm_start -= grow << PAGE_SHIFT;
68740+ vma_m->vm_pgoff -= grow;
68741+ }
68742+#endif
68743+
68744 perf_event_mmap(vma);
68745 }
68746 }
68747 }
68748 vma_unlock_anon_vma(vma);
68749+ if (lockprev)
68750+ vma_unlock_anon_vma(prev);
68751 khugepaged_enter_vma_merge(vma);
68752 return error;
68753 }
68754@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
68755 do {
68756 long nrpages = vma_pages(vma);
68757
68758+#ifdef CONFIG_PAX_SEGMEXEC
68759+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68760+ vma = remove_vma(vma);
68761+ continue;
68762+ }
68763+#endif
68764+
68765 mm->total_vm -= nrpages;
68766 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68767 vma = remove_vma(vma);
68768@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68769 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68770 vma->vm_prev = NULL;
68771 do {
68772+
68773+#ifdef CONFIG_PAX_SEGMEXEC
68774+ if (vma->vm_mirror) {
68775+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68776+ vma->vm_mirror->vm_mirror = NULL;
68777+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68778+ vma->vm_mirror = NULL;
68779+ }
68780+#endif
68781+
68782 rb_erase(&vma->vm_rb, &mm->mm_rb);
68783 mm->map_count--;
68784 tail_vma = vma;
68785@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
68786 struct vm_area_struct *new;
68787 int err = -ENOMEM;
68788
68789+#ifdef CONFIG_PAX_SEGMEXEC
68790+ struct vm_area_struct *vma_m, *new_m = NULL;
68791+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68792+#endif
68793+
68794 if (is_vm_hugetlb_page(vma) && (addr &
68795 ~(huge_page_mask(hstate_vma(vma)))))
68796 return -EINVAL;
68797
68798+#ifdef CONFIG_PAX_SEGMEXEC
68799+ vma_m = pax_find_mirror_vma(vma);
68800+#endif
68801+
68802 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68803 if (!new)
68804 goto out_err;
68805
68806+#ifdef CONFIG_PAX_SEGMEXEC
68807+ if (vma_m) {
68808+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68809+ if (!new_m) {
68810+ kmem_cache_free(vm_area_cachep, new);
68811+ goto out_err;
68812+ }
68813+ }
68814+#endif
68815+
68816 /* most fields are the same, copy all, and then fixup */
68817 *new = *vma;
68818
68819@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
68820 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68821 }
68822
68823+#ifdef CONFIG_PAX_SEGMEXEC
68824+ if (vma_m) {
68825+ *new_m = *vma_m;
68826+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
68827+ new_m->vm_mirror = new;
68828+ new->vm_mirror = new_m;
68829+
68830+ if (new_below)
68831+ new_m->vm_end = addr_m;
68832+ else {
68833+ new_m->vm_start = addr_m;
68834+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68835+ }
68836+ }
68837+#endif
68838+
68839 pol = mpol_dup(vma_policy(vma));
68840 if (IS_ERR(pol)) {
68841 err = PTR_ERR(pol);
68842@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
68843 else
68844 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68845
68846+#ifdef CONFIG_PAX_SEGMEXEC
68847+ if (!err && vma_m) {
68848+ if (anon_vma_clone(new_m, vma_m))
68849+ goto out_free_mpol;
68850+
68851+ mpol_get(pol);
68852+ vma_set_policy(new_m, pol);
68853+
68854+ if (new_m->vm_file) {
68855+ get_file(new_m->vm_file);
68856+ if (vma_m->vm_flags & VM_EXECUTABLE)
68857+ added_exe_file_vma(mm);
68858+ }
68859+
68860+ if (new_m->vm_ops && new_m->vm_ops->open)
68861+ new_m->vm_ops->open(new_m);
68862+
68863+ if (new_below)
68864+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68865+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68866+ else
68867+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68868+
68869+ if (err) {
68870+ if (new_m->vm_ops && new_m->vm_ops->close)
68871+ new_m->vm_ops->close(new_m);
68872+ if (new_m->vm_file) {
68873+ if (vma_m->vm_flags & VM_EXECUTABLE)
68874+ removed_exe_file_vma(mm);
68875+ fput(new_m->vm_file);
68876+ }
68877+ mpol_put(pol);
68878+ }
68879+ }
68880+#endif
68881+
68882 /* Success. */
68883 if (!err)
68884 return 0;
68885@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
68886 removed_exe_file_vma(mm);
68887 fput(new->vm_file);
68888 }
68889- unlink_anon_vmas(new);
68890 out_free_mpol:
68891 mpol_put(pol);
68892 out_free_vma:
68893+
68894+#ifdef CONFIG_PAX_SEGMEXEC
68895+ if (new_m) {
68896+ unlink_anon_vmas(new_m);
68897+ kmem_cache_free(vm_area_cachep, new_m);
68898+ }
68899+#endif
68900+
68901+ unlink_anon_vmas(new);
68902 kmem_cache_free(vm_area_cachep, new);
68903 out_err:
68904 return err;
68905@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
68906 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
68907 unsigned long addr, int new_below)
68908 {
68909+
68910+#ifdef CONFIG_PAX_SEGMEXEC
68911+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68912+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68913+ if (mm->map_count >= sysctl_max_map_count-1)
68914+ return -ENOMEM;
68915+ } else
68916+#endif
68917+
68918 if (mm->map_count >= sysctl_max_map_count)
68919 return -ENOMEM;
68920
68921@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
68922 * work. This now handles partial unmappings.
68923 * Jeremy Fitzhardinge <jeremy@goop.org>
68924 */
68925+#ifdef CONFIG_PAX_SEGMEXEC
68926 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68927 {
68928+ int ret = __do_munmap(mm, start, len);
68929+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68930+ return ret;
68931+
68932+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68933+}
68934+
68935+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68936+#else
68937+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68938+#endif
68939+{
68940 unsigned long end;
68941 struct vm_area_struct *vma, *prev, *last;
68942
68943+ /*
68944+ * mm->mmap_sem is required to protect against another thread
68945+ * changing the mappings in case we sleep.
68946+ */
68947+ verify_mm_writelocked(mm);
68948+
68949 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68950 return -EINVAL;
68951
68952@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
68953 /* Fix up all other VM information */
68954 remove_vma_list(mm, vma);
68955
68956+ track_exec_limit(mm, start, end, 0UL);
68957+
68958 return 0;
68959 }
68960
68961@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68962
68963 profile_munmap(addr);
68964
68965+#ifdef CONFIG_PAX_SEGMEXEC
68966+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68967+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68968+ return -EINVAL;
68969+#endif
68970+
68971 down_write(&mm->mmap_sem);
68972 ret = do_munmap(mm, addr, len);
68973 up_write(&mm->mmap_sem);
68974 return ret;
68975 }
68976
68977-static inline void verify_mm_writelocked(struct mm_struct *mm)
68978-{
68979-#ifdef CONFIG_DEBUG_VM
68980- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68981- WARN_ON(1);
68982- up_read(&mm->mmap_sem);
68983- }
68984-#endif
68985-}
68986-
68987 /*
68988 * this is really a simplified "do_mmap". it only handles
68989 * anonymous maps. eventually we may be able to do some
68990@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
68991 struct rb_node ** rb_link, * rb_parent;
68992 pgoff_t pgoff = addr >> PAGE_SHIFT;
68993 int error;
68994+ unsigned long charged;
68995
68996 len = PAGE_ALIGN(len);
68997 if (!len)
68998@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
68999
69000 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69001
69002+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69003+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69004+ flags &= ~VM_EXEC;
69005+
69006+#ifdef CONFIG_PAX_MPROTECT
69007+ if (mm->pax_flags & MF_PAX_MPROTECT)
69008+ flags &= ~VM_MAYEXEC;
69009+#endif
69010+
69011+ }
69012+#endif
69013+
69014 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69015 if (error & ~PAGE_MASK)
69016 return error;
69017
69018+ charged = len >> PAGE_SHIFT;
69019+
69020 /*
69021 * mlock MCL_FUTURE?
69022 */
69023 if (mm->def_flags & VM_LOCKED) {
69024 unsigned long locked, lock_limit;
69025- locked = len >> PAGE_SHIFT;
69026+ locked = charged;
69027 locked += mm->locked_vm;
69028 lock_limit = rlimit(RLIMIT_MEMLOCK);
69029 lock_limit >>= PAGE_SHIFT;
69030@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
69031 /*
69032 * Clear old maps. this also does some error checking for us
69033 */
69034- munmap_back:
69035 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69036 if (vma && vma->vm_start < addr + len) {
69037 if (do_munmap(mm, addr, len))
69038 return -ENOMEM;
69039- goto munmap_back;
69040+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69041+ BUG_ON(vma && vma->vm_start < addr + len);
69042 }
69043
69044 /* Check against address space limits *after* clearing old maps... */
69045- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69046+ if (!may_expand_vm(mm, charged))
69047 return -ENOMEM;
69048
69049 if (mm->map_count > sysctl_max_map_count)
69050 return -ENOMEM;
69051
69052- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69053+ if (security_vm_enough_memory(charged))
69054 return -ENOMEM;
69055
69056 /* Can we just expand an old private anonymous mapping? */
69057@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
69058 */
69059 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69060 if (!vma) {
69061- vm_unacct_memory(len >> PAGE_SHIFT);
69062+ vm_unacct_memory(charged);
69063 return -ENOMEM;
69064 }
69065
69066@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
69067 vma_link(mm, vma, prev, rb_link, rb_parent);
69068 out:
69069 perf_event_mmap(vma);
69070- mm->total_vm += len >> PAGE_SHIFT;
69071+ mm->total_vm += charged;
69072 if (flags & VM_LOCKED) {
69073 if (!mlock_vma_pages_range(vma, addr, addr + len))
69074- mm->locked_vm += (len >> PAGE_SHIFT);
69075+ mm->locked_vm += charged;
69076 }
69077+ track_exec_limit(mm, addr, addr + len, flags);
69078 return addr;
69079 }
69080
69081@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
69082 * Walk the list again, actually closing and freeing it,
69083 * with preemption enabled, without holding any MM locks.
69084 */
69085- while (vma)
69086+ while (vma) {
69087+ vma->vm_mirror = NULL;
69088 vma = remove_vma(vma);
69089+ }
69090
69091 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69092 }
69093@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
69094 struct vm_area_struct * __vma, * prev;
69095 struct rb_node ** rb_link, * rb_parent;
69096
69097+#ifdef CONFIG_PAX_SEGMEXEC
69098+ struct vm_area_struct *vma_m = NULL;
69099+#endif
69100+
69101+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69102+ return -EPERM;
69103+
69104 /*
69105 * The vm_pgoff of a purely anonymous vma should be irrelevant
69106 * until its first write fault, when page's anon_vma and index
69107@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
69108 if ((vma->vm_flags & VM_ACCOUNT) &&
69109 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69110 return -ENOMEM;
69111+
69112+#ifdef CONFIG_PAX_SEGMEXEC
69113+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69114+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69115+ if (!vma_m)
69116+ return -ENOMEM;
69117+ }
69118+#endif
69119+
69120 vma_link(mm, vma, prev, rb_link, rb_parent);
69121+
69122+#ifdef CONFIG_PAX_SEGMEXEC
69123+ if (vma_m)
69124+ BUG_ON(pax_mirror_vma(vma_m, vma));
69125+#endif
69126+
69127 return 0;
69128 }
69129
69130@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
69131 struct rb_node **rb_link, *rb_parent;
69132 struct mempolicy *pol;
69133
69134+ BUG_ON(vma->vm_mirror);
69135+
69136 /*
69137 * If anonymous vma has not yet been faulted, update new pgoff
69138 * to match new location, to increase its chance of merging.
69139@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
69140 return NULL;
69141 }
69142
69143+#ifdef CONFIG_PAX_SEGMEXEC
69144+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69145+{
69146+ struct vm_area_struct *prev_m;
69147+ struct rb_node **rb_link_m, *rb_parent_m;
69148+ struct mempolicy *pol_m;
69149+
69150+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69151+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69152+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69153+ *vma_m = *vma;
69154+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69155+ if (anon_vma_clone(vma_m, vma))
69156+ return -ENOMEM;
69157+ pol_m = vma_policy(vma_m);
69158+ mpol_get(pol_m);
69159+ vma_set_policy(vma_m, pol_m);
69160+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69161+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69162+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69163+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69164+ if (vma_m->vm_file)
69165+ get_file(vma_m->vm_file);
69166+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69167+ vma_m->vm_ops->open(vma_m);
69168+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69169+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69170+ vma_m->vm_mirror = vma;
69171+ vma->vm_mirror = vma_m;
69172+ return 0;
69173+}
69174+#endif
69175+
69176 /*
69177 * Return true if the calling process may expand its vm space by the passed
69178 * number of pages
69179@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
69180 unsigned long lim;
69181
69182 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69183-
69184+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69185 if (cur + npages > lim)
69186 return 0;
69187 return 1;
69188@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
69189 vma->vm_start = addr;
69190 vma->vm_end = addr + len;
69191
69192+#ifdef CONFIG_PAX_MPROTECT
69193+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69194+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69195+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69196+ return -EPERM;
69197+ if (!(vm_flags & VM_EXEC))
69198+ vm_flags &= ~VM_MAYEXEC;
69199+#else
69200+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69201+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69202+#endif
69203+ else
69204+ vm_flags &= ~VM_MAYWRITE;
69205+ }
69206+#endif
69207+
69208 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69209 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69210
69211diff -urNp linux-3.0.7/mm/mprotect.c linux-3.0.7/mm/mprotect.c
69212--- linux-3.0.7/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
69213+++ linux-3.0.7/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
69214@@ -23,10 +23,16 @@
69215 #include <linux/mmu_notifier.h>
69216 #include <linux/migrate.h>
69217 #include <linux/perf_event.h>
69218+
69219+#ifdef CONFIG_PAX_MPROTECT
69220+#include <linux/elf.h>
69221+#endif
69222+
69223 #include <asm/uaccess.h>
69224 #include <asm/pgtable.h>
69225 #include <asm/cacheflush.h>
69226 #include <asm/tlbflush.h>
69227+#include <asm/mmu_context.h>
69228
69229 #ifndef pgprot_modify
69230 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69231@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69232 flush_tlb_range(vma, start, end);
69233 }
69234
69235+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69236+/* called while holding the mmap semaphor for writing except stack expansion */
69237+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69238+{
69239+ unsigned long oldlimit, newlimit = 0UL;
69240+
69241+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69242+ return;
69243+
69244+ spin_lock(&mm->page_table_lock);
69245+ oldlimit = mm->context.user_cs_limit;
69246+ if ((prot & VM_EXEC) && oldlimit < end)
69247+ /* USER_CS limit moved up */
69248+ newlimit = end;
69249+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69250+ /* USER_CS limit moved down */
69251+ newlimit = start;
69252+
69253+ if (newlimit) {
69254+ mm->context.user_cs_limit = newlimit;
69255+
69256+#ifdef CONFIG_SMP
69257+ wmb();
69258+ cpus_clear(mm->context.cpu_user_cs_mask);
69259+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69260+#endif
69261+
69262+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69263+ }
69264+ spin_unlock(&mm->page_table_lock);
69265+ if (newlimit == end) {
69266+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69267+
69268+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69269+ if (is_vm_hugetlb_page(vma))
69270+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69271+ else
69272+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69273+ }
69274+}
69275+#endif
69276+
69277 int
69278 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69279 unsigned long start, unsigned long end, unsigned long newflags)
69280@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69281 int error;
69282 int dirty_accountable = 0;
69283
69284+#ifdef CONFIG_PAX_SEGMEXEC
69285+ struct vm_area_struct *vma_m = NULL;
69286+ unsigned long start_m, end_m;
69287+
69288+ start_m = start + SEGMEXEC_TASK_SIZE;
69289+ end_m = end + SEGMEXEC_TASK_SIZE;
69290+#endif
69291+
69292 if (newflags == oldflags) {
69293 *pprev = vma;
69294 return 0;
69295 }
69296
69297+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69298+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69299+
69300+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69301+ return -ENOMEM;
69302+
69303+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69304+ return -ENOMEM;
69305+ }
69306+
69307 /*
69308 * If we make a private mapping writable we increase our commit;
69309 * but (without finer accounting) cannot reduce our commit if we
69310@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69311 }
69312 }
69313
69314+#ifdef CONFIG_PAX_SEGMEXEC
69315+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69316+ if (start != vma->vm_start) {
69317+ error = split_vma(mm, vma, start, 1);
69318+ if (error)
69319+ goto fail;
69320+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69321+ *pprev = (*pprev)->vm_next;
69322+ }
69323+
69324+ if (end != vma->vm_end) {
69325+ error = split_vma(mm, vma, end, 0);
69326+ if (error)
69327+ goto fail;
69328+ }
69329+
69330+ if (pax_find_mirror_vma(vma)) {
69331+ error = __do_munmap(mm, start_m, end_m - start_m);
69332+ if (error)
69333+ goto fail;
69334+ } else {
69335+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69336+ if (!vma_m) {
69337+ error = -ENOMEM;
69338+ goto fail;
69339+ }
69340+ vma->vm_flags = newflags;
69341+ error = pax_mirror_vma(vma_m, vma);
69342+ if (error) {
69343+ vma->vm_flags = oldflags;
69344+ goto fail;
69345+ }
69346+ }
69347+ }
69348+#endif
69349+
69350 /*
69351 * First try to merge with previous and/or next vma.
69352 */
69353@@ -204,9 +306,21 @@ success:
69354 * vm_flags and vm_page_prot are protected by the mmap_sem
69355 * held in write mode.
69356 */
69357+
69358+#ifdef CONFIG_PAX_SEGMEXEC
69359+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69360+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69361+#endif
69362+
69363 vma->vm_flags = newflags;
69364+
69365+#ifdef CONFIG_PAX_MPROTECT
69366+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69367+ mm->binfmt->handle_mprotect(vma, newflags);
69368+#endif
69369+
69370 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69371- vm_get_page_prot(newflags));
69372+ vm_get_page_prot(vma->vm_flags));
69373
69374 if (vma_wants_writenotify(vma)) {
69375 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69376@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69377 end = start + len;
69378 if (end <= start)
69379 return -ENOMEM;
69380+
69381+#ifdef CONFIG_PAX_SEGMEXEC
69382+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69383+ if (end > SEGMEXEC_TASK_SIZE)
69384+ return -EINVAL;
69385+ } else
69386+#endif
69387+
69388+ if (end > TASK_SIZE)
69389+ return -EINVAL;
69390+
69391 if (!arch_validate_prot(prot))
69392 return -EINVAL;
69393
69394@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69395 /*
69396 * Does the application expect PROT_READ to imply PROT_EXEC:
69397 */
69398- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69399+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69400 prot |= PROT_EXEC;
69401
69402 vm_flags = calc_vm_prot_bits(prot);
69403@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69404 if (start > vma->vm_start)
69405 prev = vma;
69406
69407+#ifdef CONFIG_PAX_MPROTECT
69408+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69409+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69410+#endif
69411+
69412 for (nstart = start ; ; ) {
69413 unsigned long newflags;
69414
69415@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69416
69417 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69418 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69419+ if (prot & (PROT_WRITE | PROT_EXEC))
69420+ gr_log_rwxmprotect(vma->vm_file);
69421+
69422+ error = -EACCES;
69423+ goto out;
69424+ }
69425+
69426+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69427 error = -EACCES;
69428 goto out;
69429 }
69430@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69431 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69432 if (error)
69433 goto out;
69434+
69435+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69436+
69437 nstart = tmp;
69438
69439 if (nstart < prev->vm_end)
69440diff -urNp linux-3.0.7/mm/mremap.c linux-3.0.7/mm/mremap.c
69441--- linux-3.0.7/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
69442+++ linux-3.0.7/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
69443@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69444 continue;
69445 pte = ptep_clear_flush(vma, old_addr, old_pte);
69446 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69447+
69448+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69449+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69450+ pte = pte_exprotect(pte);
69451+#endif
69452+
69453 set_pte_at(mm, new_addr, new_pte, pte);
69454 }
69455
69456@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69457 if (is_vm_hugetlb_page(vma))
69458 goto Einval;
69459
69460+#ifdef CONFIG_PAX_SEGMEXEC
69461+ if (pax_find_mirror_vma(vma))
69462+ goto Einval;
69463+#endif
69464+
69465 /* We can't remap across vm area boundaries */
69466 if (old_len > vma->vm_end - addr)
69467 goto Efault;
69468@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69469 unsigned long ret = -EINVAL;
69470 unsigned long charged = 0;
69471 unsigned long map_flags;
69472+ unsigned long pax_task_size = TASK_SIZE;
69473
69474 if (new_addr & ~PAGE_MASK)
69475 goto out;
69476
69477- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69478+#ifdef CONFIG_PAX_SEGMEXEC
69479+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69480+ pax_task_size = SEGMEXEC_TASK_SIZE;
69481+#endif
69482+
69483+ pax_task_size -= PAGE_SIZE;
69484+
69485+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69486 goto out;
69487
69488 /* Check if the location we're moving into overlaps the
69489 * old location at all, and fail if it does.
69490 */
69491- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69492- goto out;
69493-
69494- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69495+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69496 goto out;
69497
69498 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69499@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69500 struct vm_area_struct *vma;
69501 unsigned long ret = -EINVAL;
69502 unsigned long charged = 0;
69503+ unsigned long pax_task_size = TASK_SIZE;
69504
69505 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69506 goto out;
69507@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69508 if (!new_len)
69509 goto out;
69510
69511+#ifdef CONFIG_PAX_SEGMEXEC
69512+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69513+ pax_task_size = SEGMEXEC_TASK_SIZE;
69514+#endif
69515+
69516+ pax_task_size -= PAGE_SIZE;
69517+
69518+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69519+ old_len > pax_task_size || addr > pax_task_size-old_len)
69520+ goto out;
69521+
69522 if (flags & MREMAP_FIXED) {
69523 if (flags & MREMAP_MAYMOVE)
69524 ret = mremap_to(addr, old_len, new_addr, new_len);
69525@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69526 addr + new_len);
69527 }
69528 ret = addr;
69529+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69530 goto out;
69531 }
69532 }
69533@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69534 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69535 if (ret)
69536 goto out;
69537+
69538+ map_flags = vma->vm_flags;
69539 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69540+ if (!(ret & ~PAGE_MASK)) {
69541+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69542+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69543+ }
69544 }
69545 out:
69546 if (ret & ~PAGE_MASK)
69547diff -urNp linux-3.0.7/mm/nobootmem.c linux-3.0.7/mm/nobootmem.c
69548--- linux-3.0.7/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
69549+++ linux-3.0.7/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
69550@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69551 unsigned long __init free_all_memory_core_early(int nodeid)
69552 {
69553 int i;
69554- u64 start, end;
69555+ u64 start, end, startrange, endrange;
69556 unsigned long count = 0;
69557- struct range *range = NULL;
69558+ struct range *range = NULL, rangerange = { 0, 0 };
69559 int nr_range;
69560
69561 nr_range = get_free_all_memory_range(&range, nodeid);
69562+ startrange = __pa(range) >> PAGE_SHIFT;
69563+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69564
69565 for (i = 0; i < nr_range; i++) {
69566 start = range[i].start;
69567 end = range[i].end;
69568+ if (start <= endrange && startrange < end) {
69569+ BUG_ON(rangerange.start | rangerange.end);
69570+ rangerange = range[i];
69571+ continue;
69572+ }
69573 count += end - start;
69574 __free_pages_memory(start, end);
69575 }
69576+ start = rangerange.start;
69577+ end = rangerange.end;
69578+ count += end - start;
69579+ __free_pages_memory(start, end);
69580
69581 return count;
69582 }
69583diff -urNp linux-3.0.7/mm/nommu.c linux-3.0.7/mm/nommu.c
69584--- linux-3.0.7/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
69585+++ linux-3.0.7/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
69586@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69587 int sysctl_overcommit_ratio = 50; /* default is 50% */
69588 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69589 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69590-int heap_stack_gap = 0;
69591
69592 atomic_long_t mmap_pages_allocated;
69593
69594@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
69595 EXPORT_SYMBOL(find_vma);
69596
69597 /*
69598- * find a VMA
69599- * - we don't extend stack VMAs under NOMMU conditions
69600- */
69601-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69602-{
69603- return find_vma(mm, addr);
69604-}
69605-
69606-/*
69607 * expand a stack to a given address
69608 * - not supported under NOMMU conditions
69609 */
69610@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
69611
69612 /* most fields are the same, copy all, and then fixup */
69613 *new = *vma;
69614+ INIT_LIST_HEAD(&new->anon_vma_chain);
69615 *region = *vma->vm_region;
69616 new->vm_region = region;
69617
69618diff -urNp linux-3.0.7/mm/page_alloc.c linux-3.0.7/mm/page_alloc.c
69619--- linux-3.0.7/mm/page_alloc.c 2011-10-16 21:54:54.000000000 -0400
69620+++ linux-3.0.7/mm/page_alloc.c 2011-10-16 21:55:28.000000000 -0400
69621@@ -340,7 +340,7 @@ out:
69622 * This usage means that zero-order pages may not be compound.
69623 */
69624
69625-static void free_compound_page(struct page *page)
69626+void free_compound_page(struct page *page)
69627 {
69628 __free_pages_ok(page, compound_order(page));
69629 }
69630@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69631 int i;
69632 int bad = 0;
69633
69634+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69635+ unsigned long index = 1UL << order;
69636+#endif
69637+
69638 trace_mm_page_free_direct(page, order);
69639 kmemcheck_free_shadow(page, order);
69640
69641@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69642 debug_check_no_obj_freed(page_address(page),
69643 PAGE_SIZE << order);
69644 }
69645+
69646+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69647+ for (; index; --index)
69648+ sanitize_highpage(page + index - 1);
69649+#endif
69650+
69651 arch_free_page(page, order);
69652 kernel_map_pages(page, 1 << order, 0);
69653
69654@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69655 arch_alloc_page(page, order);
69656 kernel_map_pages(page, 1 << order, 1);
69657
69658+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69659 if (gfp_flags & __GFP_ZERO)
69660 prep_zero_page(page, order, gfp_flags);
69661+#endif
69662
69663 if (order && (gfp_flags & __GFP_COMP))
69664 prep_compound_page(page, order);
69665@@ -2557,6 +2569,8 @@ void show_free_areas(unsigned int filter
69666 int cpu;
69667 struct zone *zone;
69668
69669+ pax_track_stack();
69670+
69671 for_each_populated_zone(zone) {
69672 if (skip_free_areas_node(filter, zone_to_nid(zone)))
69673 continue;
69674diff -urNp linux-3.0.7/mm/percpu.c linux-3.0.7/mm/percpu.c
69675--- linux-3.0.7/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
69676+++ linux-3.0.7/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
69677@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
69678 static unsigned int pcpu_last_unit_cpu __read_mostly;
69679
69680 /* the address of the first chunk which starts with the kernel static area */
69681-void *pcpu_base_addr __read_mostly;
69682+void *pcpu_base_addr __read_only;
69683 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69684
69685 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69686diff -urNp linux-3.0.7/mm/rmap.c linux-3.0.7/mm/rmap.c
69687--- linux-3.0.7/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
69688+++ linux-3.0.7/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
69689@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
69690 struct anon_vma *anon_vma = vma->anon_vma;
69691 struct anon_vma_chain *avc;
69692
69693+#ifdef CONFIG_PAX_SEGMEXEC
69694+ struct anon_vma_chain *avc_m = NULL;
69695+#endif
69696+
69697 might_sleep();
69698 if (unlikely(!anon_vma)) {
69699 struct mm_struct *mm = vma->vm_mm;
69700@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
69701 if (!avc)
69702 goto out_enomem;
69703
69704+#ifdef CONFIG_PAX_SEGMEXEC
69705+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
69706+ if (!avc_m)
69707+ goto out_enomem_free_avc;
69708+#endif
69709+
69710 anon_vma = find_mergeable_anon_vma(vma);
69711 allocated = NULL;
69712 if (!anon_vma) {
69713@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
69714 /* page_table_lock to protect against threads */
69715 spin_lock(&mm->page_table_lock);
69716 if (likely(!vma->anon_vma)) {
69717+
69718+#ifdef CONFIG_PAX_SEGMEXEC
69719+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69720+
69721+ if (vma_m) {
69722+ BUG_ON(vma_m->anon_vma);
69723+ vma_m->anon_vma = anon_vma;
69724+ avc_m->anon_vma = anon_vma;
69725+ avc_m->vma = vma;
69726+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
69727+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
69728+ avc_m = NULL;
69729+ }
69730+#endif
69731+
69732 vma->anon_vma = anon_vma;
69733 avc->anon_vma = anon_vma;
69734 avc->vma = vma;
69735@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
69736
69737 if (unlikely(allocated))
69738 put_anon_vma(allocated);
69739+
69740+#ifdef CONFIG_PAX_SEGMEXEC
69741+ if (unlikely(avc_m))
69742+ anon_vma_chain_free(avc_m);
69743+#endif
69744+
69745 if (unlikely(avc))
69746 anon_vma_chain_free(avc);
69747 }
69748 return 0;
69749
69750 out_enomem_free_avc:
69751+
69752+#ifdef CONFIG_PAX_SEGMEXEC
69753+ if (avc_m)
69754+ anon_vma_chain_free(avc_m);
69755+#endif
69756+
69757 anon_vma_chain_free(avc);
69758 out_enomem:
69759 return -ENOMEM;
69760@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
69761 * Attach the anon_vmas from src to dst.
69762 * Returns 0 on success, -ENOMEM on failure.
69763 */
69764-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
69765+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
69766 {
69767 struct anon_vma_chain *avc, *pavc;
69768 struct anon_vma *root = NULL;
69769@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
69770 * the corresponding VMA in the parent process is attached to.
69771 * Returns 0 on success, non-zero on failure.
69772 */
69773-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
69774+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
69775 {
69776 struct anon_vma_chain *avc;
69777 struct anon_vma *anon_vma;
69778diff -urNp linux-3.0.7/mm/shmem.c linux-3.0.7/mm/shmem.c
69779--- linux-3.0.7/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
69780+++ linux-3.0.7/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
69781@@ -31,7 +31,7 @@
69782 #include <linux/percpu_counter.h>
69783 #include <linux/swap.h>
69784
69785-static struct vfsmount *shm_mnt;
69786+struct vfsmount *shm_mnt;
69787
69788 #ifdef CONFIG_SHMEM
69789 /*
69790@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
69791 goto unlock;
69792 }
69793 entry = shmem_swp_entry(info, index, NULL);
69794+ if (!entry)
69795+ goto unlock;
69796 if (entry->val) {
69797 /*
69798 * The more uptodate page coming down from a stacked
69799@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
69800 struct vm_area_struct pvma;
69801 struct page *page;
69802
69803+ pax_track_stack();
69804+
69805 spol = mpol_cond_copy(&mpol,
69806 mpol_shared_policy_lookup(&info->policy, idx));
69807
69808@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
69809 int err = -ENOMEM;
69810
69811 /* Round up to L1_CACHE_BYTES to resist false sharing */
69812- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69813- L1_CACHE_BYTES), GFP_KERNEL);
69814+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69815 if (!sbinfo)
69816 return -ENOMEM;
69817
69818diff -urNp linux-3.0.7/mm/slab.c linux-3.0.7/mm/slab.c
69819--- linux-3.0.7/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
69820+++ linux-3.0.7/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
69821@@ -151,7 +151,7 @@
69822
69823 /* Legal flag mask for kmem_cache_create(). */
69824 #if DEBUG
69825-# define CREATE_MASK (SLAB_RED_ZONE | \
69826+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69827 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69828 SLAB_CACHE_DMA | \
69829 SLAB_STORE_USER | \
69830@@ -159,7 +159,7 @@
69831 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69832 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69833 #else
69834-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69835+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69836 SLAB_CACHE_DMA | \
69837 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69838 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69839@@ -288,7 +288,7 @@ struct kmem_list3 {
69840 * Need this for bootstrapping a per node allocator.
69841 */
69842 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69843-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69844+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69845 #define CACHE_CACHE 0
69846 #define SIZE_AC MAX_NUMNODES
69847 #define SIZE_L3 (2 * MAX_NUMNODES)
69848@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
69849 if ((x)->max_freeable < i) \
69850 (x)->max_freeable = i; \
69851 } while (0)
69852-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69853-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69854-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69855-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69856+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69857+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69858+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69859+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69860 #else
69861 #define STATS_INC_ACTIVE(x) do { } while (0)
69862 #define STATS_DEC_ACTIVE(x) do { } while (0)
69863@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
69864 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69865 */
69866 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69867- const struct slab *slab, void *obj)
69868+ const struct slab *slab, const void *obj)
69869 {
69870 u32 offset = (obj - slab->s_mem);
69871 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69872@@ -564,7 +564,7 @@ struct cache_names {
69873 static struct cache_names __initdata cache_names[] = {
69874 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
69875 #include <linux/kmalloc_sizes.h>
69876- {NULL,}
69877+ {NULL}
69878 #undef CACHE
69879 };
69880
69881@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
69882 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69883 sizes[INDEX_AC].cs_size,
69884 ARCH_KMALLOC_MINALIGN,
69885- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69886+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69887 NULL);
69888
69889 if (INDEX_AC != INDEX_L3) {
69890@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
69891 kmem_cache_create(names[INDEX_L3].name,
69892 sizes[INDEX_L3].cs_size,
69893 ARCH_KMALLOC_MINALIGN,
69894- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69895+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69896 NULL);
69897 }
69898
69899@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
69900 sizes->cs_cachep = kmem_cache_create(names->name,
69901 sizes->cs_size,
69902 ARCH_KMALLOC_MINALIGN,
69903- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69904+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69905 NULL);
69906 }
69907 #ifdef CONFIG_ZONE_DMA
69908@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
69909 }
69910 /* cpu stats */
69911 {
69912- unsigned long allochit = atomic_read(&cachep->allochit);
69913- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69914- unsigned long freehit = atomic_read(&cachep->freehit);
69915- unsigned long freemiss = atomic_read(&cachep->freemiss);
69916+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69917+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69918+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69919+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69920
69921 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69922 allochit, allocmiss, freehit, freemiss);
69923@@ -4532,15 +4532,66 @@ static const struct file_operations proc
69924
69925 static int __init slab_proc_init(void)
69926 {
69927- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69928+ mode_t gr_mode = S_IRUGO;
69929+
69930+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69931+ gr_mode = S_IRUSR;
69932+#endif
69933+
69934+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69935 #ifdef CONFIG_DEBUG_SLAB_LEAK
69936- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69937+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69938 #endif
69939 return 0;
69940 }
69941 module_init(slab_proc_init);
69942 #endif
69943
69944+void check_object_size(const void *ptr, unsigned long n, bool to)
69945+{
69946+
69947+#ifdef CONFIG_PAX_USERCOPY
69948+ struct page *page;
69949+ struct kmem_cache *cachep = NULL;
69950+ struct slab *slabp;
69951+ unsigned int objnr;
69952+ unsigned long offset;
69953+
69954+ if (!n)
69955+ return;
69956+
69957+ if (ZERO_OR_NULL_PTR(ptr))
69958+ goto report;
69959+
69960+ if (!virt_addr_valid(ptr))
69961+ return;
69962+
69963+ page = virt_to_head_page(ptr);
69964+
69965+ if (!PageSlab(page)) {
69966+ if (object_is_on_stack(ptr, n) == -1)
69967+ goto report;
69968+ return;
69969+ }
69970+
69971+ cachep = page_get_cache(page);
69972+ if (!(cachep->flags & SLAB_USERCOPY))
69973+ goto report;
69974+
69975+ slabp = page_get_slab(page);
69976+ objnr = obj_to_index(cachep, slabp, ptr);
69977+ BUG_ON(objnr >= cachep->num);
69978+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69979+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69980+ return;
69981+
69982+report:
69983+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69984+#endif
69985+
69986+}
69987+EXPORT_SYMBOL(check_object_size);
69988+
69989 /**
69990 * ksize - get the actual amount of memory allocated for a given object
69991 * @objp: Pointer to the object
69992diff -urNp linux-3.0.7/mm/slob.c linux-3.0.7/mm/slob.c
69993--- linux-3.0.7/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
69994+++ linux-3.0.7/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
69995@@ -29,7 +29,7 @@
69996 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69997 * alloc_pages() directly, allocating compound pages so the page order
69998 * does not have to be separately tracked, and also stores the exact
69999- * allocation size in page->private so that it can be used to accurately
70000+ * allocation size in slob_page->size so that it can be used to accurately
70001 * provide ksize(). These objects are detected in kfree() because slob_page()
70002 * is false for them.
70003 *
70004@@ -58,6 +58,7 @@
70005 */
70006
70007 #include <linux/kernel.h>
70008+#include <linux/sched.h>
70009 #include <linux/slab.h>
70010 #include <linux/mm.h>
70011 #include <linux/swap.h> /* struct reclaim_state */
70012@@ -102,7 +103,8 @@ struct slob_page {
70013 unsigned long flags; /* mandatory */
70014 atomic_t _count; /* mandatory */
70015 slobidx_t units; /* free units left in page */
70016- unsigned long pad[2];
70017+ unsigned long pad[1];
70018+ unsigned long size; /* size when >=PAGE_SIZE */
70019 slob_t *free; /* first free slob_t in page */
70020 struct list_head list; /* linked list of free pages */
70021 };
70022@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70023 */
70024 static inline int is_slob_page(struct slob_page *sp)
70025 {
70026- return PageSlab((struct page *)sp);
70027+ return PageSlab((struct page *)sp) && !sp->size;
70028 }
70029
70030 static inline void set_slob_page(struct slob_page *sp)
70031@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70032
70033 static inline struct slob_page *slob_page(const void *addr)
70034 {
70035- return (struct slob_page *)virt_to_page(addr);
70036+ return (struct slob_page *)virt_to_head_page(addr);
70037 }
70038
70039 /*
70040@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70041 /*
70042 * Return the size of a slob block.
70043 */
70044-static slobidx_t slob_units(slob_t *s)
70045+static slobidx_t slob_units(const slob_t *s)
70046 {
70047 if (s->units > 0)
70048 return s->units;
70049@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70050 /*
70051 * Return the next free slob block pointer after this one.
70052 */
70053-static slob_t *slob_next(slob_t *s)
70054+static slob_t *slob_next(const slob_t *s)
70055 {
70056 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70057 slobidx_t next;
70058@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70059 /*
70060 * Returns true if s is the last free block in its page.
70061 */
70062-static int slob_last(slob_t *s)
70063+static int slob_last(const slob_t *s)
70064 {
70065 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70066 }
70067@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70068 if (!page)
70069 return NULL;
70070
70071+ set_slob_page(page);
70072 return page_address(page);
70073 }
70074
70075@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70076 if (!b)
70077 return NULL;
70078 sp = slob_page(b);
70079- set_slob_page(sp);
70080
70081 spin_lock_irqsave(&slob_lock, flags);
70082 sp->units = SLOB_UNITS(PAGE_SIZE);
70083 sp->free = b;
70084+ sp->size = 0;
70085 INIT_LIST_HEAD(&sp->list);
70086 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70087 set_slob_page_free(sp, slob_list);
70088@@ -476,10 +479,9 @@ out:
70089 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70090 */
70091
70092-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70093+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70094 {
70095- unsigned int *m;
70096- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70097+ slob_t *m;
70098 void *ret;
70099
70100 lockdep_trace_alloc(gfp);
70101@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
70102
70103 if (!m)
70104 return NULL;
70105- *m = size;
70106+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70107+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70108+ m[0].units = size;
70109+ m[1].units = align;
70110 ret = (void *)m + align;
70111
70112 trace_kmalloc_node(_RET_IP_, ret,
70113@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
70114 gfp |= __GFP_COMP;
70115 ret = slob_new_pages(gfp, order, node);
70116 if (ret) {
70117- struct page *page;
70118- page = virt_to_page(ret);
70119- page->private = size;
70120+ struct slob_page *sp;
70121+ sp = slob_page(ret);
70122+ sp->size = size;
70123 }
70124
70125 trace_kmalloc_node(_RET_IP_, ret,
70126 size, PAGE_SIZE << order, gfp, node);
70127 }
70128
70129- kmemleak_alloc(ret, size, 1, gfp);
70130+ return ret;
70131+}
70132+
70133+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70134+{
70135+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70136+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70137+
70138+ if (!ZERO_OR_NULL_PTR(ret))
70139+ kmemleak_alloc(ret, size, 1, gfp);
70140 return ret;
70141 }
70142 EXPORT_SYMBOL(__kmalloc_node);
70143@@ -531,13 +545,88 @@ void kfree(const void *block)
70144 sp = slob_page(block);
70145 if (is_slob_page(sp)) {
70146 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70147- unsigned int *m = (unsigned int *)(block - align);
70148- slob_free(m, *m + align);
70149- } else
70150+ slob_t *m = (slob_t *)(block - align);
70151+ slob_free(m, m[0].units + align);
70152+ } else {
70153+ clear_slob_page(sp);
70154+ free_slob_page(sp);
70155+ sp->size = 0;
70156 put_page(&sp->page);
70157+ }
70158 }
70159 EXPORT_SYMBOL(kfree);
70160
70161+void check_object_size(const void *ptr, unsigned long n, bool to)
70162+{
70163+
70164+#ifdef CONFIG_PAX_USERCOPY
70165+ struct slob_page *sp;
70166+ const slob_t *free;
70167+ const void *base;
70168+ unsigned long flags;
70169+
70170+ if (!n)
70171+ return;
70172+
70173+ if (ZERO_OR_NULL_PTR(ptr))
70174+ goto report;
70175+
70176+ if (!virt_addr_valid(ptr))
70177+ return;
70178+
70179+ sp = slob_page(ptr);
70180+ if (!PageSlab((struct page*)sp)) {
70181+ if (object_is_on_stack(ptr, n) == -1)
70182+ goto report;
70183+ return;
70184+ }
70185+
70186+ if (sp->size) {
70187+ base = page_address(&sp->page);
70188+ if (base <= ptr && n <= sp->size - (ptr - base))
70189+ return;
70190+ goto report;
70191+ }
70192+
70193+ /* some tricky double walking to find the chunk */
70194+ spin_lock_irqsave(&slob_lock, flags);
70195+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70196+ free = sp->free;
70197+
70198+ while (!slob_last(free) && (void *)free <= ptr) {
70199+ base = free + slob_units(free);
70200+ free = slob_next(free);
70201+ }
70202+
70203+ while (base < (void *)free) {
70204+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70205+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70206+ int offset;
70207+
70208+ if (ptr < base + align)
70209+ break;
70210+
70211+ offset = ptr - base - align;
70212+ if (offset >= m) {
70213+ base += size;
70214+ continue;
70215+ }
70216+
70217+ if (n > m - offset)
70218+ break;
70219+
70220+ spin_unlock_irqrestore(&slob_lock, flags);
70221+ return;
70222+ }
70223+
70224+ spin_unlock_irqrestore(&slob_lock, flags);
70225+report:
70226+ pax_report_usercopy(ptr, n, to, NULL);
70227+#endif
70228+
70229+}
70230+EXPORT_SYMBOL(check_object_size);
70231+
70232 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70233 size_t ksize(const void *block)
70234 {
70235@@ -550,10 +639,10 @@ size_t ksize(const void *block)
70236 sp = slob_page(block);
70237 if (is_slob_page(sp)) {
70238 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70239- unsigned int *m = (unsigned int *)(block - align);
70240- return SLOB_UNITS(*m) * SLOB_UNIT;
70241+ slob_t *m = (slob_t *)(block - align);
70242+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70243 } else
70244- return sp->page.private;
70245+ return sp->size;
70246 }
70247 EXPORT_SYMBOL(ksize);
70248
70249@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
70250 {
70251 struct kmem_cache *c;
70252
70253+#ifdef CONFIG_PAX_USERCOPY
70254+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70255+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70256+#else
70257 c = slob_alloc(sizeof(struct kmem_cache),
70258 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70259+#endif
70260
70261 if (c) {
70262 c->name = name;
70263@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
70264 {
70265 void *b;
70266
70267+#ifdef CONFIG_PAX_USERCOPY
70268+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70269+#else
70270 if (c->size < PAGE_SIZE) {
70271 b = slob_alloc(c->size, flags, c->align, node);
70272 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70273 SLOB_UNITS(c->size) * SLOB_UNIT,
70274 flags, node);
70275 } else {
70276+ struct slob_page *sp;
70277+
70278 b = slob_new_pages(flags, get_order(c->size), node);
70279+ sp = slob_page(b);
70280+ sp->size = c->size;
70281 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70282 PAGE_SIZE << get_order(c->size),
70283 flags, node);
70284 }
70285+#endif
70286
70287 if (c->ctor)
70288 c->ctor(b);
70289@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70290
70291 static void __kmem_cache_free(void *b, int size)
70292 {
70293- if (size < PAGE_SIZE)
70294+ struct slob_page *sp = slob_page(b);
70295+
70296+ if (is_slob_page(sp))
70297 slob_free(b, size);
70298- else
70299+ else {
70300+ clear_slob_page(sp);
70301+ free_slob_page(sp);
70302+ sp->size = 0;
70303 slob_free_pages(b, get_order(size));
70304+ }
70305 }
70306
70307 static void kmem_rcu_free(struct rcu_head *head)
70308@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
70309
70310 void kmem_cache_free(struct kmem_cache *c, void *b)
70311 {
70312+ int size = c->size;
70313+
70314+#ifdef CONFIG_PAX_USERCOPY
70315+ if (size + c->align < PAGE_SIZE) {
70316+ size += c->align;
70317+ b -= c->align;
70318+ }
70319+#endif
70320+
70321 kmemleak_free_recursive(b, c->flags);
70322 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70323 struct slob_rcu *slob_rcu;
70324- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70325- slob_rcu->size = c->size;
70326+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70327+ slob_rcu->size = size;
70328 call_rcu(&slob_rcu->head, kmem_rcu_free);
70329 } else {
70330- __kmem_cache_free(b, c->size);
70331+ __kmem_cache_free(b, size);
70332 }
70333
70334+#ifdef CONFIG_PAX_USERCOPY
70335+ trace_kfree(_RET_IP_, b);
70336+#else
70337 trace_kmem_cache_free(_RET_IP_, b);
70338+#endif
70339+
70340 }
70341 EXPORT_SYMBOL(kmem_cache_free);
70342
70343diff -urNp linux-3.0.7/mm/slub.c linux-3.0.7/mm/slub.c
70344--- linux-3.0.7/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
70345+++ linux-3.0.7/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
70346@@ -200,7 +200,7 @@ struct track {
70347
70348 enum track_item { TRACK_ALLOC, TRACK_FREE };
70349
70350-#ifdef CONFIG_SYSFS
70351+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70352 static int sysfs_slab_add(struct kmem_cache *);
70353 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70354 static void sysfs_slab_remove(struct kmem_cache *);
70355@@ -442,7 +442,7 @@ static void print_track(const char *s, s
70356 if (!t->addr)
70357 return;
70358
70359- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70360+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70361 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70362 }
70363
70364@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
70365
70366 page = virt_to_head_page(x);
70367
70368+ BUG_ON(!PageSlab(page));
70369+
70370 slab_free(s, page, x, _RET_IP_);
70371
70372 trace_kmem_cache_free(_RET_IP_, x);
70373@@ -2170,7 +2172,7 @@ static int slub_min_objects;
70374 * Merge control. If this is set then no merging of slab caches will occur.
70375 * (Could be removed. This was introduced to pacify the merge skeptics.)
70376 */
70377-static int slub_nomerge;
70378+static int slub_nomerge = 1;
70379
70380 /*
70381 * Calculate the order of allocation given an slab object size.
70382@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
70383 * list to avoid pounding the page allocator excessively.
70384 */
70385 set_min_partial(s, ilog2(s->size));
70386- s->refcount = 1;
70387+ atomic_set(&s->refcount, 1);
70388 #ifdef CONFIG_NUMA
70389 s->remote_node_defrag_ratio = 1000;
70390 #endif
70391@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
70392 void kmem_cache_destroy(struct kmem_cache *s)
70393 {
70394 down_write(&slub_lock);
70395- s->refcount--;
70396- if (!s->refcount) {
70397+ if (atomic_dec_and_test(&s->refcount)) {
70398 list_del(&s->list);
70399 if (kmem_cache_close(s)) {
70400 printk(KERN_ERR "SLUB %s: %s called for cache that "
70401@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
70402 EXPORT_SYMBOL(__kmalloc_node);
70403 #endif
70404
70405+void check_object_size(const void *ptr, unsigned long n, bool to)
70406+{
70407+
70408+#ifdef CONFIG_PAX_USERCOPY
70409+ struct page *page;
70410+ struct kmem_cache *s = NULL;
70411+ unsigned long offset;
70412+
70413+ if (!n)
70414+ return;
70415+
70416+ if (ZERO_OR_NULL_PTR(ptr))
70417+ goto report;
70418+
70419+ if (!virt_addr_valid(ptr))
70420+ return;
70421+
70422+ page = virt_to_head_page(ptr);
70423+
70424+ if (!PageSlab(page)) {
70425+ if (object_is_on_stack(ptr, n) == -1)
70426+ goto report;
70427+ return;
70428+ }
70429+
70430+ s = page->slab;
70431+ if (!(s->flags & SLAB_USERCOPY))
70432+ goto report;
70433+
70434+ offset = (ptr - page_address(page)) % s->size;
70435+ if (offset <= s->objsize && n <= s->objsize - offset)
70436+ return;
70437+
70438+report:
70439+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70440+#endif
70441+
70442+}
70443+EXPORT_SYMBOL(check_object_size);
70444+
70445 size_t ksize(const void *object)
70446 {
70447 struct page *page;
70448@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
70449 int node;
70450
70451 list_add(&s->list, &slab_caches);
70452- s->refcount = -1;
70453+ atomic_set(&s->refcount, -1);
70454
70455 for_each_node_state(node, N_NORMAL_MEMORY) {
70456 struct kmem_cache_node *n = get_node(s, node);
70457@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
70458
70459 /* Caches that are not of the two-to-the-power-of size */
70460 if (KMALLOC_MIN_SIZE <= 32) {
70461- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70462+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70463 caches++;
70464 }
70465
70466 if (KMALLOC_MIN_SIZE <= 64) {
70467- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70468+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70469 caches++;
70470 }
70471
70472 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70473- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70474+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70475 caches++;
70476 }
70477
70478@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
70479 /*
70480 * We may have set a slab to be unmergeable during bootstrap.
70481 */
70482- if (s->refcount < 0)
70483+ if (atomic_read(&s->refcount) < 0)
70484 return 1;
70485
70486 return 0;
70487@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
70488 down_write(&slub_lock);
70489 s = find_mergeable(size, align, flags, name, ctor);
70490 if (s) {
70491- s->refcount++;
70492+ atomic_inc(&s->refcount);
70493 /*
70494 * Adjust the object sizes so that we clear
70495 * the complete object on kzalloc.
70496@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
70497 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70498
70499 if (sysfs_slab_alias(s, name)) {
70500- s->refcount--;
70501+ atomic_dec(&s->refcount);
70502 goto err;
70503 }
70504 up_write(&slub_lock);
70505@@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
70506 }
70507 #endif
70508
70509-#ifdef CONFIG_SYSFS
70510+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70511 static int count_inuse(struct page *page)
70512 {
70513 return page->inuse;
70514@@ -3935,12 +3976,12 @@ static void resiliency_test(void)
70515 validate_slab_cache(kmalloc_caches[9]);
70516 }
70517 #else
70518-#ifdef CONFIG_SYSFS
70519+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70520 static void resiliency_test(void) {};
70521 #endif
70522 #endif
70523
70524-#ifdef CONFIG_SYSFS
70525+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70526 enum slab_stat_type {
70527 SL_ALL, /* All slabs */
70528 SL_PARTIAL, /* Only partially allocated slabs */
70529@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
70530
70531 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70532 {
70533- return sprintf(buf, "%d\n", s->refcount - 1);
70534+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70535 }
70536 SLAB_ATTR_RO(aliases);
70537
70538@@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
70539 return name;
70540 }
70541
70542+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70543 static int sysfs_slab_add(struct kmem_cache *s)
70544 {
70545 int err;
70546@@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
70547 kobject_del(&s->kobj);
70548 kobject_put(&s->kobj);
70549 }
70550+#endif
70551
70552 /*
70553 * Need to buffer aliases during bootup until sysfs becomes
70554@@ -4737,6 +4780,7 @@ struct saved_alias {
70555
70556 static struct saved_alias *alias_list;
70557
70558+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70559 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70560 {
70561 struct saved_alias *al;
70562@@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
70563 alias_list = al;
70564 return 0;
70565 }
70566+#endif
70567
70568 static int __init slab_sysfs_init(void)
70569 {
70570@@ -4894,7 +4939,13 @@ static const struct file_operations proc
70571
70572 static int __init slab_proc_init(void)
70573 {
70574- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70575+ mode_t gr_mode = S_IRUGO;
70576+
70577+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70578+ gr_mode = S_IRUSR;
70579+#endif
70580+
70581+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70582 return 0;
70583 }
70584 module_init(slab_proc_init);
70585diff -urNp linux-3.0.7/mm/swap.c linux-3.0.7/mm/swap.c
70586--- linux-3.0.7/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
70587+++ linux-3.0.7/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
70588@@ -31,6 +31,7 @@
70589 #include <linux/backing-dev.h>
70590 #include <linux/memcontrol.h>
70591 #include <linux/gfp.h>
70592+#include <linux/hugetlb.h>
70593
70594 #include "internal.h"
70595
70596@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70597
70598 __page_cache_release(page);
70599 dtor = get_compound_page_dtor(page);
70600+ if (!PageHuge(page))
70601+ BUG_ON(dtor != free_compound_page);
70602 (*dtor)(page);
70603 }
70604
70605diff -urNp linux-3.0.7/mm/swapfile.c linux-3.0.7/mm/swapfile.c
70606--- linux-3.0.7/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
70607+++ linux-3.0.7/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
70608@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70609
70610 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70611 /* Activity counter to indicate that a swapon or swapoff has occurred */
70612-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70613+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70614
70615 static inline unsigned char swap_count(unsigned char ent)
70616 {
70617@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70618 }
70619 filp_close(swap_file, NULL);
70620 err = 0;
70621- atomic_inc(&proc_poll_event);
70622+ atomic_inc_unchecked(&proc_poll_event);
70623 wake_up_interruptible(&proc_poll_wait);
70624
70625 out_dput:
70626@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
70627
70628 poll_wait(file, &proc_poll_wait, wait);
70629
70630- if (s->event != atomic_read(&proc_poll_event)) {
70631- s->event = atomic_read(&proc_poll_event);
70632+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
70633+ s->event = atomic_read_unchecked(&proc_poll_event);
70634 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70635 }
70636
70637@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
70638 }
70639
70640 s->seq.private = s;
70641- s->event = atomic_read(&proc_poll_event);
70642+ s->event = atomic_read_unchecked(&proc_poll_event);
70643 return ret;
70644 }
70645
70646@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
70647 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70648
70649 mutex_unlock(&swapon_mutex);
70650- atomic_inc(&proc_poll_event);
70651+ atomic_inc_unchecked(&proc_poll_event);
70652 wake_up_interruptible(&proc_poll_wait);
70653
70654 if (S_ISREG(inode->i_mode))
70655diff -urNp linux-3.0.7/mm/util.c linux-3.0.7/mm/util.c
70656--- linux-3.0.7/mm/util.c 2011-07-21 22:17:23.000000000 -0400
70657+++ linux-3.0.7/mm/util.c 2011-08-23 21:47:56.000000000 -0400
70658@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70659 * allocated buffer. Use this if you don't want to free the buffer immediately
70660 * like, for example, with RCU.
70661 */
70662+#undef __krealloc
70663 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70664 {
70665 void *ret;
70666@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70667 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70668 * %NULL pointer, the object pointed to is freed.
70669 */
70670+#undef krealloc
70671 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70672 {
70673 void *ret;
70674@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
70675 void arch_pick_mmap_layout(struct mm_struct *mm)
70676 {
70677 mm->mmap_base = TASK_UNMAPPED_BASE;
70678+
70679+#ifdef CONFIG_PAX_RANDMMAP
70680+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70681+ mm->mmap_base += mm->delta_mmap;
70682+#endif
70683+
70684 mm->get_unmapped_area = arch_get_unmapped_area;
70685 mm->unmap_area = arch_unmap_area;
70686 }
70687diff -urNp linux-3.0.7/mm/vmalloc.c linux-3.0.7/mm/vmalloc.c
70688--- linux-3.0.7/mm/vmalloc.c 2011-10-16 21:54:54.000000000 -0400
70689+++ linux-3.0.7/mm/vmalloc.c 2011-10-16 21:55:28.000000000 -0400
70690@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70691
70692 pte = pte_offset_kernel(pmd, addr);
70693 do {
70694- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70695- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70696+
70697+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70698+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70699+ BUG_ON(!pte_exec(*pte));
70700+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70701+ continue;
70702+ }
70703+#endif
70704+
70705+ {
70706+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70707+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70708+ }
70709 } while (pte++, addr += PAGE_SIZE, addr != end);
70710 }
70711
70712@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70713 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70714 {
70715 pte_t *pte;
70716+ int ret = -ENOMEM;
70717
70718 /*
70719 * nr is a running index into the array which helps higher level
70720@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
70721 pte = pte_alloc_kernel(pmd, addr);
70722 if (!pte)
70723 return -ENOMEM;
70724+
70725+ pax_open_kernel();
70726 do {
70727 struct page *page = pages[*nr];
70728
70729- if (WARN_ON(!pte_none(*pte)))
70730- return -EBUSY;
70731- if (WARN_ON(!page))
70732- return -ENOMEM;
70733+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70734+ if (pgprot_val(prot) & _PAGE_NX)
70735+#endif
70736+
70737+ if (WARN_ON(!pte_none(*pte))) {
70738+ ret = -EBUSY;
70739+ goto out;
70740+ }
70741+ if (WARN_ON(!page)) {
70742+ ret = -ENOMEM;
70743+ goto out;
70744+ }
70745 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70746 (*nr)++;
70747 } while (pte++, addr += PAGE_SIZE, addr != end);
70748- return 0;
70749+ ret = 0;
70750+out:
70751+ pax_close_kernel();
70752+ return ret;
70753 }
70754
70755 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70756@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
70757 * and fall back on vmalloc() if that fails. Others
70758 * just put it in the vmalloc space.
70759 */
70760-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70761+#ifdef CONFIG_MODULES
70762+#ifdef MODULES_VADDR
70763 unsigned long addr = (unsigned long)x;
70764 if (addr >= MODULES_VADDR && addr < MODULES_END)
70765 return 1;
70766 #endif
70767+
70768+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70769+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70770+ return 1;
70771+#endif
70772+
70773+#endif
70774+
70775 return is_vmalloc_addr(x);
70776 }
70777
70778@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
70779
70780 if (!pgd_none(*pgd)) {
70781 pud_t *pud = pud_offset(pgd, addr);
70782+#ifdef CONFIG_X86
70783+ if (!pud_large(*pud))
70784+#endif
70785 if (!pud_none(*pud)) {
70786 pmd_t *pmd = pmd_offset(pud, addr);
70787+#ifdef CONFIG_X86
70788+ if (!pmd_large(*pmd))
70789+#endif
70790 if (!pmd_none(*pmd)) {
70791 pte_t *ptep, pte;
70792
70793@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
70794 struct vm_struct *area;
70795
70796 BUG_ON(in_interrupt());
70797+
70798+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70799+ if (flags & VM_KERNEXEC) {
70800+ if (start != VMALLOC_START || end != VMALLOC_END)
70801+ return NULL;
70802+ start = (unsigned long)MODULES_EXEC_VADDR;
70803+ end = (unsigned long)MODULES_EXEC_END;
70804+ }
70805+#endif
70806+
70807 if (flags & VM_IOREMAP) {
70808 int bit = fls(size);
70809
70810@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
70811 if (count > totalram_pages)
70812 return NULL;
70813
70814+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70815+ if (!(pgprot_val(prot) & _PAGE_NX))
70816+ flags |= VM_KERNEXEC;
70817+#endif
70818+
70819 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70820 __builtin_return_address(0));
70821 if (!area)
70822@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
70823 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70824 return NULL;
70825
70826+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70827+ if (!(pgprot_val(prot) & _PAGE_NX))
70828+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70829+ node, gfp_mask, caller);
70830+ else
70831+#endif
70832+
70833 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
70834 gfp_mask, caller);
70835
70836@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
70837 gfp_mask, prot, node, caller);
70838 }
70839
70840+#undef __vmalloc
70841 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70842 {
70843 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70844@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
70845 * For tight control over page level allocator and protection flags
70846 * use __vmalloc() instead.
70847 */
70848+#undef vmalloc
70849 void *vmalloc(unsigned long size)
70850 {
70851 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
70852@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
70853 * For tight control over page level allocator and protection flags
70854 * use __vmalloc() instead.
70855 */
70856+#undef vzalloc
70857 void *vzalloc(unsigned long size)
70858 {
70859 return __vmalloc_node_flags(size, -1,
70860@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
70861 * The resulting memory area is zeroed so it can be mapped to userspace
70862 * without leaking data.
70863 */
70864+#undef vmalloc_user
70865 void *vmalloc_user(unsigned long size)
70866 {
70867 struct vm_struct *area;
70868@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
70869 * For tight control over page level allocator and protection flags
70870 * use __vmalloc() instead.
70871 */
70872+#undef vmalloc_node
70873 void *vmalloc_node(unsigned long size, int node)
70874 {
70875 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70876@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
70877 * For tight control over page level allocator and protection flags
70878 * use __vmalloc_node() instead.
70879 */
70880+#undef vzalloc_node
70881 void *vzalloc_node(unsigned long size, int node)
70882 {
70883 return __vmalloc_node_flags(size, node,
70884@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
70885 * For tight control over page level allocator and protection flags
70886 * use __vmalloc() instead.
70887 */
70888-
70889+#undef vmalloc_exec
70890 void *vmalloc_exec(unsigned long size)
70891 {
70892- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70893+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70894 -1, __builtin_return_address(0));
70895 }
70896
70897@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
70898 * Allocate enough 32bit PA addressable pages to cover @size from the
70899 * page level allocator and map them into contiguous kernel virtual space.
70900 */
70901+#undef vmalloc_32
70902 void *vmalloc_32(unsigned long size)
70903 {
70904 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70905@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
70906 * The resulting memory area is 32bit addressable and zeroed so it can be
70907 * mapped to userspace without leaking data.
70908 */
70909+#undef vmalloc_32_user
70910 void *vmalloc_32_user(unsigned long size)
70911 {
70912 struct vm_struct *area;
70913@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
70914 unsigned long uaddr = vma->vm_start;
70915 unsigned long usize = vma->vm_end - vma->vm_start;
70916
70917+ BUG_ON(vma->vm_mirror);
70918+
70919 if ((PAGE_SIZE-1) & (unsigned long)addr)
70920 return -EINVAL;
70921
70922diff -urNp linux-3.0.7/mm/vmstat.c linux-3.0.7/mm/vmstat.c
70923--- linux-3.0.7/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
70924+++ linux-3.0.7/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
70925@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
70926 *
70927 * vm_stat contains the global counters
70928 */
70929-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70930+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70931 EXPORT_SYMBOL(vm_stat);
70932
70933 #ifdef CONFIG_SMP
70934@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
70935 v = p->vm_stat_diff[i];
70936 p->vm_stat_diff[i] = 0;
70937 local_irq_restore(flags);
70938- atomic_long_add(v, &zone->vm_stat[i]);
70939+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70940 global_diff[i] += v;
70941 #ifdef CONFIG_NUMA
70942 /* 3 seconds idle till flush */
70943@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
70944
70945 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70946 if (global_diff[i])
70947- atomic_long_add(global_diff[i], &vm_stat[i]);
70948+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70949 }
70950
70951 #endif
70952@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
70953 start_cpu_timer(cpu);
70954 #endif
70955 #ifdef CONFIG_PROC_FS
70956- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70957- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70958- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70959- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70960+ {
70961+ mode_t gr_mode = S_IRUGO;
70962+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70963+ gr_mode = S_IRUSR;
70964+#endif
70965+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70966+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70967+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70968+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70969+#else
70970+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70971+#endif
70972+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70973+ }
70974 #endif
70975 return 0;
70976 }
70977diff -urNp linux-3.0.7/net/8021q/vlan.c linux-3.0.7/net/8021q/vlan.c
70978--- linux-3.0.7/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
70979+++ linux-3.0.7/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
70980@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
70981 err = -EPERM;
70982 if (!capable(CAP_NET_ADMIN))
70983 break;
70984- if ((args.u.name_type >= 0) &&
70985- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70986+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70987 struct vlan_net *vn;
70988
70989 vn = net_generic(net, vlan_net_id);
70990diff -urNp linux-3.0.7/net/9p/trans_fd.c linux-3.0.7/net/9p/trans_fd.c
70991--- linux-3.0.7/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
70992+++ linux-3.0.7/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
70993@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
70994 oldfs = get_fs();
70995 set_fs(get_ds());
70996 /* The cast to a user pointer is valid due to the set_fs() */
70997- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
70998+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
70999 set_fs(oldfs);
71000
71001 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71002diff -urNp linux-3.0.7/net/9p/trans_virtio.c linux-3.0.7/net/9p/trans_virtio.c
71003--- linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:54:54.000000000 -0400
71004+++ linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:55:28.000000000 -0400
71005@@ -327,7 +327,7 @@ req_retry_pinned:
71006 } else {
71007 char *pbuf;
71008 if (req->tc->pubuf)
71009- pbuf = (__force char *) req->tc->pubuf;
71010+ pbuf = (char __force_kernel *) req->tc->pubuf;
71011 else
71012 pbuf = req->tc->pkbuf;
71013 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71014@@ -357,7 +357,7 @@ req_retry_pinned:
71015 } else {
71016 char *pbuf;
71017 if (req->tc->pubuf)
71018- pbuf = (__force char *) req->tc->pubuf;
71019+ pbuf = (char __force_kernel *) req->tc->pubuf;
71020 else
71021 pbuf = req->tc->pkbuf;
71022
71023diff -urNp linux-3.0.7/net/atm/atm_misc.c linux-3.0.7/net/atm/atm_misc.c
71024--- linux-3.0.7/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
71025+++ linux-3.0.7/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
71026@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71027 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71028 return 1;
71029 atm_return(vcc, truesize);
71030- atomic_inc(&vcc->stats->rx_drop);
71031+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71032 return 0;
71033 }
71034 EXPORT_SYMBOL(atm_charge);
71035@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71036 }
71037 }
71038 atm_return(vcc, guess);
71039- atomic_inc(&vcc->stats->rx_drop);
71040+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71041 return NULL;
71042 }
71043 EXPORT_SYMBOL(atm_alloc_charge);
71044@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71045
71046 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71047 {
71048-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71049+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71050 __SONET_ITEMS
71051 #undef __HANDLE_ITEM
71052 }
71053@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71054
71055 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71056 {
71057-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71058+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71059 __SONET_ITEMS
71060 #undef __HANDLE_ITEM
71061 }
71062diff -urNp linux-3.0.7/net/atm/lec.h linux-3.0.7/net/atm/lec.h
71063--- linux-3.0.7/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
71064+++ linux-3.0.7/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
71065@@ -48,7 +48,7 @@ struct lane2_ops {
71066 const u8 *tlvs, u32 sizeoftlvs);
71067 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71068 const u8 *tlvs, u32 sizeoftlvs);
71069-};
71070+} __no_const;
71071
71072 /*
71073 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71074diff -urNp linux-3.0.7/net/atm/mpc.h linux-3.0.7/net/atm/mpc.h
71075--- linux-3.0.7/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
71076+++ linux-3.0.7/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
71077@@ -33,7 +33,7 @@ struct mpoa_client {
71078 struct mpc_parameters parameters; /* parameters for this client */
71079
71080 const struct net_device_ops *old_ops;
71081- struct net_device_ops new_ops;
71082+ net_device_ops_no_const new_ops;
71083 };
71084
71085
71086diff -urNp linux-3.0.7/net/atm/mpoa_caches.c linux-3.0.7/net/atm/mpoa_caches.c
71087--- linux-3.0.7/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
71088+++ linux-3.0.7/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
71089@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71090 struct timeval now;
71091 struct k_message msg;
71092
71093+ pax_track_stack();
71094+
71095 do_gettimeofday(&now);
71096
71097 read_lock_bh(&client->ingress_lock);
71098diff -urNp linux-3.0.7/net/atm/proc.c linux-3.0.7/net/atm/proc.c
71099--- linux-3.0.7/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
71100+++ linux-3.0.7/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
71101@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71102 const struct k_atm_aal_stats *stats)
71103 {
71104 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71105- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71106- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71107- atomic_read(&stats->rx_drop));
71108+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71109+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71110+ atomic_read_unchecked(&stats->rx_drop));
71111 }
71112
71113 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71114diff -urNp linux-3.0.7/net/atm/resources.c linux-3.0.7/net/atm/resources.c
71115--- linux-3.0.7/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
71116+++ linux-3.0.7/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
71117@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71118 static void copy_aal_stats(struct k_atm_aal_stats *from,
71119 struct atm_aal_stats *to)
71120 {
71121-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71122+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71123 __AAL_STAT_ITEMS
71124 #undef __HANDLE_ITEM
71125 }
71126@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71127 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71128 struct atm_aal_stats *to)
71129 {
71130-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71131+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71132 __AAL_STAT_ITEMS
71133 #undef __HANDLE_ITEM
71134 }
71135diff -urNp linux-3.0.7/net/batman-adv/hard-interface.c linux-3.0.7/net/batman-adv/hard-interface.c
71136--- linux-3.0.7/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
71137+++ linux-3.0.7/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
71138@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
71139 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71140 dev_add_pack(&hard_iface->batman_adv_ptype);
71141
71142- atomic_set(&hard_iface->seqno, 1);
71143- atomic_set(&hard_iface->frag_seqno, 1);
71144+ atomic_set_unchecked(&hard_iface->seqno, 1);
71145+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71146 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71147 hard_iface->net_dev->name);
71148
71149diff -urNp linux-3.0.7/net/batman-adv/routing.c linux-3.0.7/net/batman-adv/routing.c
71150--- linux-3.0.7/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
71151+++ linux-3.0.7/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
71152@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
71153 return;
71154
71155 /* could be changed by schedule_own_packet() */
71156- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71157+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71158
71159 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71160
71161diff -urNp linux-3.0.7/net/batman-adv/send.c linux-3.0.7/net/batman-adv/send.c
71162--- linux-3.0.7/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
71163+++ linux-3.0.7/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
71164@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
71165
71166 /* change sequence number to network order */
71167 batman_packet->seqno =
71168- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71169+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71170
71171 if (vis_server == VIS_TYPE_SERVER_SYNC)
71172 batman_packet->flags |= VIS_SERVER;
71173@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
71174 else
71175 batman_packet->gw_flags = 0;
71176
71177- atomic_inc(&hard_iface->seqno);
71178+ atomic_inc_unchecked(&hard_iface->seqno);
71179
71180 slide_own_bcast_window(hard_iface);
71181 send_time = own_send_time(bat_priv);
71182diff -urNp linux-3.0.7/net/batman-adv/soft-interface.c linux-3.0.7/net/batman-adv/soft-interface.c
71183--- linux-3.0.7/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
71184+++ linux-3.0.7/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
71185@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
71186
71187 /* set broadcast sequence number */
71188 bcast_packet->seqno =
71189- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71190+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71191
71192 add_bcast_packet_to_list(bat_priv, skb);
71193
71194@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
71195 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71196
71197 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71198- atomic_set(&bat_priv->bcast_seqno, 1);
71199+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71200 atomic_set(&bat_priv->tt_local_changed, 0);
71201
71202 bat_priv->primary_if = NULL;
71203diff -urNp linux-3.0.7/net/batman-adv/types.h linux-3.0.7/net/batman-adv/types.h
71204--- linux-3.0.7/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
71205+++ linux-3.0.7/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
71206@@ -38,8 +38,8 @@ struct hard_iface {
71207 int16_t if_num;
71208 char if_status;
71209 struct net_device *net_dev;
71210- atomic_t seqno;
71211- atomic_t frag_seqno;
71212+ atomic_unchecked_t seqno;
71213+ atomic_unchecked_t frag_seqno;
71214 unsigned char *packet_buff;
71215 int packet_len;
71216 struct kobject *hardif_obj;
71217@@ -142,7 +142,7 @@ struct bat_priv {
71218 atomic_t orig_interval; /* uint */
71219 atomic_t hop_penalty; /* uint */
71220 atomic_t log_level; /* uint */
71221- atomic_t bcast_seqno;
71222+ atomic_unchecked_t bcast_seqno;
71223 atomic_t bcast_queue_left;
71224 atomic_t batman_queue_left;
71225 char num_ifaces;
71226diff -urNp linux-3.0.7/net/batman-adv/unicast.c linux-3.0.7/net/batman-adv/unicast.c
71227--- linux-3.0.7/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
71228+++ linux-3.0.7/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
71229@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
71230 frag1->flags = UNI_FRAG_HEAD | large_tail;
71231 frag2->flags = large_tail;
71232
71233- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71234+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71235 frag1->seqno = htons(seqno - 1);
71236 frag2->seqno = htons(seqno);
71237
71238diff -urNp linux-3.0.7/net/bridge/br_multicast.c linux-3.0.7/net/bridge/br_multicast.c
71239--- linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:54:54.000000000 -0400
71240+++ linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:55:28.000000000 -0400
71241@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71242 nexthdr = ip6h->nexthdr;
71243 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71244
71245- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71246+ if (nexthdr != IPPROTO_ICMPV6)
71247 return 0;
71248
71249 /* Okay, we found ICMPv6 header */
71250diff -urNp linux-3.0.7/net/bridge/netfilter/ebtables.c linux-3.0.7/net/bridge/netfilter/ebtables.c
71251--- linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
71252+++ linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
71253@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
71254 tmp.valid_hooks = t->table->valid_hooks;
71255 }
71256 mutex_unlock(&ebt_mutex);
71257- if (copy_to_user(user, &tmp, *len) != 0){
71258+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71259 BUGPRINT("c2u Didn't work\n");
71260 ret = -EFAULT;
71261 break;
71262@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
71263 int ret;
71264 void __user *pos;
71265
71266+ pax_track_stack();
71267+
71268 memset(&tinfo, 0, sizeof(tinfo));
71269
71270 if (cmd == EBT_SO_GET_ENTRIES) {
71271diff -urNp linux-3.0.7/net/caif/caif_socket.c linux-3.0.7/net/caif/caif_socket.c
71272--- linux-3.0.7/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
71273+++ linux-3.0.7/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
71274@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71275 #ifdef CONFIG_DEBUG_FS
71276 struct debug_fs_counter {
71277 atomic_t caif_nr_socks;
71278- atomic_t caif_sock_create;
71279- atomic_t num_connect_req;
71280- atomic_t num_connect_resp;
71281- atomic_t num_connect_fail_resp;
71282- atomic_t num_disconnect;
71283- atomic_t num_remote_shutdown_ind;
71284- atomic_t num_tx_flow_off_ind;
71285- atomic_t num_tx_flow_on_ind;
71286- atomic_t num_rx_flow_off;
71287- atomic_t num_rx_flow_on;
71288+ atomic_unchecked_t caif_sock_create;
71289+ atomic_unchecked_t num_connect_req;
71290+ atomic_unchecked_t num_connect_resp;
71291+ atomic_unchecked_t num_connect_fail_resp;
71292+ atomic_unchecked_t num_disconnect;
71293+ atomic_unchecked_t num_remote_shutdown_ind;
71294+ atomic_unchecked_t num_tx_flow_off_ind;
71295+ atomic_unchecked_t num_tx_flow_on_ind;
71296+ atomic_unchecked_t num_rx_flow_off;
71297+ atomic_unchecked_t num_rx_flow_on;
71298 };
71299 static struct debug_fs_counter cnt;
71300 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71301+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71302 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71303 #else
71304 #define dbfs_atomic_inc(v) 0
71305@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71306 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71307 sk_rcvbuf_lowwater(cf_sk));
71308 set_rx_flow_off(cf_sk);
71309- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71310+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71311 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71312 }
71313
71314@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71315 set_rx_flow_off(cf_sk);
71316 if (net_ratelimit())
71317 pr_debug("sending flow OFF due to rmem_schedule\n");
71318- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71319+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71320 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71321 }
71322 skb->dev = NULL;
71323@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71324 switch (flow) {
71325 case CAIF_CTRLCMD_FLOW_ON_IND:
71326 /* OK from modem to start sending again */
71327- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71328+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71329 set_tx_flow_on(cf_sk);
71330 cf_sk->sk.sk_state_change(&cf_sk->sk);
71331 break;
71332
71333 case CAIF_CTRLCMD_FLOW_OFF_IND:
71334 /* Modem asks us to shut up */
71335- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71336+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71337 set_tx_flow_off(cf_sk);
71338 cf_sk->sk.sk_state_change(&cf_sk->sk);
71339 break;
71340@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71341 /* We're now connected */
71342 caif_client_register_refcnt(&cf_sk->layer,
71343 cfsk_hold, cfsk_put);
71344- dbfs_atomic_inc(&cnt.num_connect_resp);
71345+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71346 cf_sk->sk.sk_state = CAIF_CONNECTED;
71347 set_tx_flow_on(cf_sk);
71348 cf_sk->sk.sk_state_change(&cf_sk->sk);
71349@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71350
71351 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71352 /* Connect request failed */
71353- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71354+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71355 cf_sk->sk.sk_err = ECONNREFUSED;
71356 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71357 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71358@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71359
71360 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71361 /* Modem has closed this connection, or device is down. */
71362- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71363+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71364 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71365 cf_sk->sk.sk_err = ECONNRESET;
71366 set_rx_flow_on(cf_sk);
71367@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71368 return;
71369
71370 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71371- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71372+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71373 set_rx_flow_on(cf_sk);
71374 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71375 }
71376@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71377 /*ifindex = id of the interface.*/
71378 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71379
71380- dbfs_atomic_inc(&cnt.num_connect_req);
71381+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71382 cf_sk->layer.receive = caif_sktrecv_cb;
71383
71384 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71385@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71386 spin_unlock_bh(&sk->sk_receive_queue.lock);
71387 sock->sk = NULL;
71388
71389- dbfs_atomic_inc(&cnt.num_disconnect);
71390+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71391
71392 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71393 if (cf_sk->debugfs_socket_dir != NULL)
71394@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71395 cf_sk->conn_req.protocol = protocol;
71396 /* Increase the number of sockets created. */
71397 dbfs_atomic_inc(&cnt.caif_nr_socks);
71398- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71399+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71400 #ifdef CONFIG_DEBUG_FS
71401 if (!IS_ERR(debugfsdir)) {
71402
71403diff -urNp linux-3.0.7/net/caif/cfctrl.c linux-3.0.7/net/caif/cfctrl.c
71404--- linux-3.0.7/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
71405+++ linux-3.0.7/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
71406@@ -9,6 +9,7 @@
71407 #include <linux/stddef.h>
71408 #include <linux/spinlock.h>
71409 #include <linux/slab.h>
71410+#include <linux/sched.h>
71411 #include <net/caif/caif_layer.h>
71412 #include <net/caif/cfpkt.h>
71413 #include <net/caif/cfctrl.h>
71414@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71415 dev_info.id = 0xff;
71416 memset(this, 0, sizeof(*this));
71417 cfsrvl_init(&this->serv, 0, &dev_info, false);
71418- atomic_set(&this->req_seq_no, 1);
71419- atomic_set(&this->rsp_seq_no, 1);
71420+ atomic_set_unchecked(&this->req_seq_no, 1);
71421+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71422 this->serv.layer.receive = cfctrl_recv;
71423 sprintf(this->serv.layer.name, "ctrl");
71424 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71425@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71426 struct cfctrl_request_info *req)
71427 {
71428 spin_lock_bh(&ctrl->info_list_lock);
71429- atomic_inc(&ctrl->req_seq_no);
71430- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71431+ atomic_inc_unchecked(&ctrl->req_seq_no);
71432+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71433 list_add_tail(&req->list, &ctrl->list);
71434 spin_unlock_bh(&ctrl->info_list_lock);
71435 }
71436@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71437 if (p != first)
71438 pr_warn("Requests are not received in order\n");
71439
71440- atomic_set(&ctrl->rsp_seq_no,
71441+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71442 p->sequence_no);
71443 list_del(&p->list);
71444 goto out;
71445@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71446 struct cfctrl *cfctrl = container_obj(layer);
71447 struct cfctrl_request_info rsp, *req;
71448
71449+ pax_track_stack();
71450
71451 cfpkt_extr_head(pkt, &cmdrsp, 1);
71452 cmd = cmdrsp & CFCTRL_CMD_MASK;
71453diff -urNp linux-3.0.7/net/compat.c linux-3.0.7/net/compat.c
71454--- linux-3.0.7/net/compat.c 2011-07-21 22:17:23.000000000 -0400
71455+++ linux-3.0.7/net/compat.c 2011-10-06 04:17:55.000000000 -0400
71456@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71457 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71458 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71459 return -EFAULT;
71460- kmsg->msg_name = compat_ptr(tmp1);
71461- kmsg->msg_iov = compat_ptr(tmp2);
71462- kmsg->msg_control = compat_ptr(tmp3);
71463+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71464+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71465+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71466 return 0;
71467 }
71468
71469@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71470
71471 if (kern_msg->msg_namelen) {
71472 if (mode == VERIFY_READ) {
71473- int err = move_addr_to_kernel(kern_msg->msg_name,
71474+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71475 kern_msg->msg_namelen,
71476 kern_address);
71477 if (err < 0)
71478@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71479 kern_msg->msg_name = NULL;
71480
71481 tot_len = iov_from_user_compat_to_kern(kern_iov,
71482- (struct compat_iovec __user *)kern_msg->msg_iov,
71483+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71484 kern_msg->msg_iovlen);
71485 if (tot_len >= 0)
71486 kern_msg->msg_iov = kern_iov;
71487@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71488
71489 #define CMSG_COMPAT_FIRSTHDR(msg) \
71490 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71491- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71492+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71493 (struct compat_cmsghdr __user *)NULL)
71494
71495 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71496 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71497 (ucmlen) <= (unsigned long) \
71498 ((mhdr)->msg_controllen - \
71499- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71500+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71501
71502 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71503 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71504 {
71505 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71506- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71507+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71508 msg->msg_controllen)
71509 return NULL;
71510 return (struct compat_cmsghdr __user *)ptr;
71511@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71512 {
71513 struct compat_timeval ctv;
71514 struct compat_timespec cts[3];
71515- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71516+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71517 struct compat_cmsghdr cmhdr;
71518 int cmlen;
71519
71520@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71521
71522 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71523 {
71524- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71525+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71526 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71527 int fdnum = scm->fp->count;
71528 struct file **fp = scm->fp->fp;
71529@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71530 return -EFAULT;
71531 old_fs = get_fs();
71532 set_fs(KERNEL_DS);
71533- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71534+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71535 set_fs(old_fs);
71536
71537 return err;
71538@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71539 len = sizeof(ktime);
71540 old_fs = get_fs();
71541 set_fs(KERNEL_DS);
71542- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71543+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71544 set_fs(old_fs);
71545
71546 if (!err) {
71547@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71548 case MCAST_JOIN_GROUP:
71549 case MCAST_LEAVE_GROUP:
71550 {
71551- struct compat_group_req __user *gr32 = (void *)optval;
71552+ struct compat_group_req __user *gr32 = (void __user *)optval;
71553 struct group_req __user *kgr =
71554 compat_alloc_user_space(sizeof(struct group_req));
71555 u32 interface;
71556@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71557 case MCAST_BLOCK_SOURCE:
71558 case MCAST_UNBLOCK_SOURCE:
71559 {
71560- struct compat_group_source_req __user *gsr32 = (void *)optval;
71561+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71562 struct group_source_req __user *kgsr = compat_alloc_user_space(
71563 sizeof(struct group_source_req));
71564 u32 interface;
71565@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71566 }
71567 case MCAST_MSFILTER:
71568 {
71569- struct compat_group_filter __user *gf32 = (void *)optval;
71570+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71571 struct group_filter __user *kgf;
71572 u32 interface, fmode, numsrc;
71573
71574@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71575 char __user *optval, int __user *optlen,
71576 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71577 {
71578- struct compat_group_filter __user *gf32 = (void *)optval;
71579+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71580 struct group_filter __user *kgf;
71581 int __user *koptlen;
71582 u32 interface, fmode, numsrc;
71583diff -urNp linux-3.0.7/net/core/datagram.c linux-3.0.7/net/core/datagram.c
71584--- linux-3.0.7/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
71585+++ linux-3.0.7/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
71586@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71587 }
71588
71589 kfree_skb(skb);
71590- atomic_inc(&sk->sk_drops);
71591+ atomic_inc_unchecked(&sk->sk_drops);
71592 sk_mem_reclaim_partial(sk);
71593
71594 return err;
71595diff -urNp linux-3.0.7/net/core/dev.c linux-3.0.7/net/core/dev.c
71596--- linux-3.0.7/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
71597+++ linux-3.0.7/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
71598@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
71599 if (no_module && capable(CAP_NET_ADMIN))
71600 no_module = request_module("netdev-%s", name);
71601 if (no_module && capable(CAP_SYS_MODULE)) {
71602+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71603+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71604+#else
71605 if (!request_module("%s", name))
71606 pr_err("Loading kernel module for a network device "
71607 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71608 "instead\n", name);
71609+#endif
71610 }
71611 }
71612 EXPORT_SYMBOL(dev_load);
71613@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
71614
71615 struct dev_gso_cb {
71616 void (*destructor)(struct sk_buff *skb);
71617-};
71618+} __no_const;
71619
71620 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71621
71622@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
71623 }
71624 EXPORT_SYMBOL(netif_rx_ni);
71625
71626-static void net_tx_action(struct softirq_action *h)
71627+static void net_tx_action(void)
71628 {
71629 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71630
71631@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
71632 }
71633 EXPORT_SYMBOL(netif_napi_del);
71634
71635-static void net_rx_action(struct softirq_action *h)
71636+static void net_rx_action(void)
71637 {
71638 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71639 unsigned long time_limit = jiffies + 2;
71640diff -urNp linux-3.0.7/net/core/flow.c linux-3.0.7/net/core/flow.c
71641--- linux-3.0.7/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
71642+++ linux-3.0.7/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
71643@@ -60,7 +60,7 @@ struct flow_cache {
71644 struct timer_list rnd_timer;
71645 };
71646
71647-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71648+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71649 EXPORT_SYMBOL(flow_cache_genid);
71650 static struct flow_cache flow_cache_global;
71651 static struct kmem_cache *flow_cachep __read_mostly;
71652@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
71653
71654 static int flow_entry_valid(struct flow_cache_entry *fle)
71655 {
71656- if (atomic_read(&flow_cache_genid) != fle->genid)
71657+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71658 return 0;
71659 if (fle->object && !fle->object->ops->check(fle->object))
71660 return 0;
71661@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
71662 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71663 fcp->hash_count++;
71664 }
71665- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71666+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
71667 flo = fle->object;
71668 if (!flo)
71669 goto ret_object;
71670@@ -274,7 +274,7 @@ nocache:
71671 }
71672 flo = resolver(net, key, family, dir, flo, ctx);
71673 if (fle) {
71674- fle->genid = atomic_read(&flow_cache_genid);
71675+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71676 if (!IS_ERR(flo))
71677 fle->object = flo;
71678 else
71679diff -urNp linux-3.0.7/net/core/iovec.c linux-3.0.7/net/core/iovec.c
71680--- linux-3.0.7/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
71681+++ linux-3.0.7/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
71682@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
71683 if (m->msg_namelen) {
71684 if (mode == VERIFY_READ) {
71685 void __user *namep;
71686- namep = (void __user __force *) m->msg_name;
71687+ namep = (void __force_user *) m->msg_name;
71688 err = move_addr_to_kernel(namep, m->msg_namelen,
71689 address);
71690 if (err < 0)
71691@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
71692 }
71693
71694 size = m->msg_iovlen * sizeof(struct iovec);
71695- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
71696+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
71697 return -EFAULT;
71698
71699 m->msg_iov = iov;
71700diff -urNp linux-3.0.7/net/core/rtnetlink.c linux-3.0.7/net/core/rtnetlink.c
71701--- linux-3.0.7/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
71702+++ linux-3.0.7/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
71703@@ -56,7 +56,7 @@
71704 struct rtnl_link {
71705 rtnl_doit_func doit;
71706 rtnl_dumpit_func dumpit;
71707-};
71708+} __no_const;
71709
71710 static DEFINE_MUTEX(rtnl_mutex);
71711
71712diff -urNp linux-3.0.7/net/core/scm.c linux-3.0.7/net/core/scm.c
71713--- linux-3.0.7/net/core/scm.c 2011-10-16 21:54:54.000000000 -0400
71714+++ linux-3.0.7/net/core/scm.c 2011-10-16 21:55:28.000000000 -0400
71715@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
71716 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
71717 {
71718 struct cmsghdr __user *cm
71719- = (__force struct cmsghdr __user *)msg->msg_control;
71720+ = (struct cmsghdr __force_user *)msg->msg_control;
71721 struct cmsghdr cmhdr;
71722 int cmlen = CMSG_LEN(len);
71723 int err;
71724@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
71725 err = -EFAULT;
71726 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
71727 goto out;
71728- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
71729+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
71730 goto out;
71731 cmlen = CMSG_SPACE(len);
71732 if (msg->msg_controllen < cmlen)
71733@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
71734 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
71735 {
71736 struct cmsghdr __user *cm
71737- = (__force struct cmsghdr __user*)msg->msg_control;
71738+ = (struct cmsghdr __force_user *)msg->msg_control;
71739
71740 int fdmax = 0;
71741 int fdnum = scm->fp->count;
71742@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
71743 if (fdnum < fdmax)
71744 fdmax = fdnum;
71745
71746- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
71747+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
71748 i++, cmfptr++)
71749 {
71750 int new_fd;
71751diff -urNp linux-3.0.7/net/core/skbuff.c linux-3.0.7/net/core/skbuff.c
71752--- linux-3.0.7/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
71753+++ linux-3.0.7/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
71754@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
71755 struct sock *sk = skb->sk;
71756 int ret = 0;
71757
71758+ pax_track_stack();
71759+
71760 if (splice_grow_spd(pipe, &spd))
71761 return -ENOMEM;
71762
71763diff -urNp linux-3.0.7/net/core/sock.c linux-3.0.7/net/core/sock.c
71764--- linux-3.0.7/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
71765+++ linux-3.0.7/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
71766@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71767 */
71768 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
71769 (unsigned)sk->sk_rcvbuf) {
71770- atomic_inc(&sk->sk_drops);
71771+ atomic_inc_unchecked(&sk->sk_drops);
71772 return -ENOMEM;
71773 }
71774
71775@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71776 return err;
71777
71778 if (!sk_rmem_schedule(sk, skb->truesize)) {
71779- atomic_inc(&sk->sk_drops);
71780+ atomic_inc_unchecked(&sk->sk_drops);
71781 return -ENOBUFS;
71782 }
71783
71784@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71785 skb_dst_force(skb);
71786
71787 spin_lock_irqsave(&list->lock, flags);
71788- skb->dropcount = atomic_read(&sk->sk_drops);
71789+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
71790 __skb_queue_tail(list, skb);
71791 spin_unlock_irqrestore(&list->lock, flags);
71792
71793@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
71794 skb->dev = NULL;
71795
71796 if (sk_rcvqueues_full(sk, skb)) {
71797- atomic_inc(&sk->sk_drops);
71798+ atomic_inc_unchecked(&sk->sk_drops);
71799 goto discard_and_relse;
71800 }
71801 if (nested)
71802@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
71803 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
71804 } else if (sk_add_backlog(sk, skb)) {
71805 bh_unlock_sock(sk);
71806- atomic_inc(&sk->sk_drops);
71807+ atomic_inc_unchecked(&sk->sk_drops);
71808 goto discard_and_relse;
71809 }
71810
71811@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
71812 if (len > sizeof(peercred))
71813 len = sizeof(peercred);
71814 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
71815- if (copy_to_user(optval, &peercred, len))
71816+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
71817 return -EFAULT;
71818 goto lenout;
71819 }
71820@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
71821 return -ENOTCONN;
71822 if (lv < len)
71823 return -EINVAL;
71824- if (copy_to_user(optval, address, len))
71825+ if (len > sizeof(address) || copy_to_user(optval, address, len))
71826 return -EFAULT;
71827 goto lenout;
71828 }
71829@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
71830
71831 if (len > lv)
71832 len = lv;
71833- if (copy_to_user(optval, &v, len))
71834+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
71835 return -EFAULT;
71836 lenout:
71837 if (put_user(len, optlen))
71838@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
71839 */
71840 smp_wmb();
71841 atomic_set(&sk->sk_refcnt, 1);
71842- atomic_set(&sk->sk_drops, 0);
71843+ atomic_set_unchecked(&sk->sk_drops, 0);
71844 }
71845 EXPORT_SYMBOL(sock_init_data);
71846
71847diff -urNp linux-3.0.7/net/decnet/sysctl_net_decnet.c linux-3.0.7/net/decnet/sysctl_net_decnet.c
71848--- linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
71849+++ linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
71850@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
71851
71852 if (len > *lenp) len = *lenp;
71853
71854- if (copy_to_user(buffer, addr, len))
71855+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71856 return -EFAULT;
71857
71858 *lenp = len;
71859@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
71860
71861 if (len > *lenp) len = *lenp;
71862
71863- if (copy_to_user(buffer, devname, len))
71864+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71865 return -EFAULT;
71866
71867 *lenp = len;
71868diff -urNp linux-3.0.7/net/econet/Kconfig linux-3.0.7/net/econet/Kconfig
71869--- linux-3.0.7/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
71870+++ linux-3.0.7/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
71871@@ -4,7 +4,7 @@
71872
71873 config ECONET
71874 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71875- depends on EXPERIMENTAL && INET
71876+ depends on EXPERIMENTAL && INET && BROKEN
71877 ---help---
71878 Econet is a fairly old and slow networking protocol mainly used by
71879 Acorn computers to access file and print servers. It uses native
71880diff -urNp linux-3.0.7/net/ipv4/fib_frontend.c linux-3.0.7/net/ipv4/fib_frontend.c
71881--- linux-3.0.7/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
71882+++ linux-3.0.7/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
71883@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
71884 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71885 fib_sync_up(dev);
71886 #endif
71887- atomic_inc(&net->ipv4.dev_addr_genid);
71888+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71889 rt_cache_flush(dev_net(dev), -1);
71890 break;
71891 case NETDEV_DOWN:
71892 fib_del_ifaddr(ifa, NULL);
71893- atomic_inc(&net->ipv4.dev_addr_genid);
71894+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71895 if (ifa->ifa_dev->ifa_list == NULL) {
71896 /* Last address was deleted from this interface.
71897 * Disable IP.
71898@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
71899 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71900 fib_sync_up(dev);
71901 #endif
71902- atomic_inc(&net->ipv4.dev_addr_genid);
71903+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71904 rt_cache_flush(dev_net(dev), -1);
71905 break;
71906 case NETDEV_DOWN:
71907diff -urNp linux-3.0.7/net/ipv4/fib_semantics.c linux-3.0.7/net/ipv4/fib_semantics.c
71908--- linux-3.0.7/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
71909+++ linux-3.0.7/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
71910@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
71911 nh->nh_saddr = inet_select_addr(nh->nh_dev,
71912 nh->nh_gw,
71913 nh->nh_parent->fib_scope);
71914- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
71915+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
71916
71917 return nh->nh_saddr;
71918 }
71919diff -urNp linux-3.0.7/net/ipv4/inet_diag.c linux-3.0.7/net/ipv4/inet_diag.c
71920--- linux-3.0.7/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
71921+++ linux-3.0.7/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
71922@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
71923 r->idiag_retrans = 0;
71924
71925 r->id.idiag_if = sk->sk_bound_dev_if;
71926+
71927+#ifdef CONFIG_GRKERNSEC_HIDESYM
71928+ r->id.idiag_cookie[0] = 0;
71929+ r->id.idiag_cookie[1] = 0;
71930+#else
71931 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71932 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71933+#endif
71934
71935 r->id.idiag_sport = inet->inet_sport;
71936 r->id.idiag_dport = inet->inet_dport;
71937@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
71938 r->idiag_family = tw->tw_family;
71939 r->idiag_retrans = 0;
71940 r->id.idiag_if = tw->tw_bound_dev_if;
71941+
71942+#ifdef CONFIG_GRKERNSEC_HIDESYM
71943+ r->id.idiag_cookie[0] = 0;
71944+ r->id.idiag_cookie[1] = 0;
71945+#else
71946 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71947 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71948+#endif
71949+
71950 r->id.idiag_sport = tw->tw_sport;
71951 r->id.idiag_dport = tw->tw_dport;
71952 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71953@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
71954 if (sk == NULL)
71955 goto unlock;
71956
71957+#ifndef CONFIG_GRKERNSEC_HIDESYM
71958 err = -ESTALE;
71959 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71960 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71961 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71962 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71963 goto out;
71964+#endif
71965
71966 err = -ENOMEM;
71967 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71968@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
71969 r->idiag_retrans = req->retrans;
71970
71971 r->id.idiag_if = sk->sk_bound_dev_if;
71972+
71973+#ifdef CONFIG_GRKERNSEC_HIDESYM
71974+ r->id.idiag_cookie[0] = 0;
71975+ r->id.idiag_cookie[1] = 0;
71976+#else
71977 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71978 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71979+#endif
71980
71981 tmo = req->expires - jiffies;
71982 if (tmo < 0)
71983diff -urNp linux-3.0.7/net/ipv4/inet_hashtables.c linux-3.0.7/net/ipv4/inet_hashtables.c
71984--- linux-3.0.7/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
71985+++ linux-3.0.7/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
71986@@ -18,12 +18,15 @@
71987 #include <linux/sched.h>
71988 #include <linux/slab.h>
71989 #include <linux/wait.h>
71990+#include <linux/security.h>
71991
71992 #include <net/inet_connection_sock.h>
71993 #include <net/inet_hashtables.h>
71994 #include <net/secure_seq.h>
71995 #include <net/ip.h>
71996
71997+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71998+
71999 /*
72000 * Allocate and initialize a new local port bind bucket.
72001 * The bindhash mutex for snum's hash chain must be held here.
72002@@ -530,6 +533,8 @@ ok:
72003 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72004 spin_unlock(&head->lock);
72005
72006+ gr_update_task_in_ip_table(current, inet_sk(sk));
72007+
72008 if (tw) {
72009 inet_twsk_deschedule(tw, death_row);
72010 while (twrefcnt) {
72011diff -urNp linux-3.0.7/net/ipv4/inetpeer.c linux-3.0.7/net/ipv4/inetpeer.c
72012--- linux-3.0.7/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
72013+++ linux-3.0.7/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
72014@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
72015 unsigned int sequence;
72016 int invalidated, newrefcnt = 0;
72017
72018+ pax_track_stack();
72019+
72020 /* Look up for the address quickly, lockless.
72021 * Because of a concurrent writer, we might not find an existing entry.
72022 */
72023@@ -517,8 +519,8 @@ found: /* The existing node has been fo
72024 if (p) {
72025 p->daddr = *daddr;
72026 atomic_set(&p->refcnt, 1);
72027- atomic_set(&p->rid, 0);
72028- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72029+ atomic_set_unchecked(&p->rid, 0);
72030+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72031 p->tcp_ts_stamp = 0;
72032 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
72033 p->rate_tokens = 0;
72034diff -urNp linux-3.0.7/net/ipv4/ip_fragment.c linux-3.0.7/net/ipv4/ip_fragment.c
72035--- linux-3.0.7/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
72036+++ linux-3.0.7/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
72037@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
72038 return 0;
72039
72040 start = qp->rid;
72041- end = atomic_inc_return(&peer->rid);
72042+ end = atomic_inc_return_unchecked(&peer->rid);
72043 qp->rid = end;
72044
72045 rc = qp->q.fragments && (end - start) > max;
72046diff -urNp linux-3.0.7/net/ipv4/ip_sockglue.c linux-3.0.7/net/ipv4/ip_sockglue.c
72047--- linux-3.0.7/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
72048+++ linux-3.0.7/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
72049@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72050 int val;
72051 int len;
72052
72053+ pax_track_stack();
72054+
72055 if (level != SOL_IP)
72056 return -EOPNOTSUPP;
72057
72058@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72059 len = min_t(unsigned int, len, opt->optlen);
72060 if (put_user(len, optlen))
72061 return -EFAULT;
72062- if (copy_to_user(optval, opt->__data, len))
72063+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72064+ copy_to_user(optval, opt->__data, len))
72065 return -EFAULT;
72066 return 0;
72067 }
72068@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72069 if (sk->sk_type != SOCK_STREAM)
72070 return -ENOPROTOOPT;
72071
72072- msg.msg_control = optval;
72073+ msg.msg_control = (void __force_kernel *)optval;
72074 msg.msg_controllen = len;
72075 msg.msg_flags = 0;
72076
72077diff -urNp linux-3.0.7/net/ipv4/ipconfig.c linux-3.0.7/net/ipv4/ipconfig.c
72078--- linux-3.0.7/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
72079+++ linux-3.0.7/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
72080@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72081
72082 mm_segment_t oldfs = get_fs();
72083 set_fs(get_ds());
72084- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72085+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72086 set_fs(oldfs);
72087 return res;
72088 }
72089@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72090
72091 mm_segment_t oldfs = get_fs();
72092 set_fs(get_ds());
72093- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72094+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72095 set_fs(oldfs);
72096 return res;
72097 }
72098@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72099
72100 mm_segment_t oldfs = get_fs();
72101 set_fs(get_ds());
72102- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72103+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72104 set_fs(oldfs);
72105 return res;
72106 }
72107diff -urNp linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c
72108--- linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
72109+++ linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
72110@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72111
72112 *len = 0;
72113
72114- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72115+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72116 if (*octets == NULL) {
72117 if (net_ratelimit())
72118 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72119diff -urNp linux-3.0.7/net/ipv4/ping.c linux-3.0.7/net/ipv4/ping.c
72120--- linux-3.0.7/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
72121+++ linux-3.0.7/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
72122@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72123 sk_rmem_alloc_get(sp),
72124 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72125 atomic_read(&sp->sk_refcnt), sp,
72126- atomic_read(&sp->sk_drops), len);
72127+ atomic_read_unchecked(&sp->sk_drops), len);
72128 }
72129
72130 static int ping_seq_show(struct seq_file *seq, void *v)
72131diff -urNp linux-3.0.7/net/ipv4/raw.c linux-3.0.7/net/ipv4/raw.c
72132--- linux-3.0.7/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
72133+++ linux-3.0.7/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
72134@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72135 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72136 {
72137 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72138- atomic_inc(&sk->sk_drops);
72139+ atomic_inc_unchecked(&sk->sk_drops);
72140 kfree_skb(skb);
72141 return NET_RX_DROP;
72142 }
72143@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
72144
72145 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72146 {
72147+ struct icmp_filter filter;
72148+
72149 if (optlen > sizeof(struct icmp_filter))
72150 optlen = sizeof(struct icmp_filter);
72151- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72152+ if (copy_from_user(&filter, optval, optlen))
72153 return -EFAULT;
72154+ raw_sk(sk)->filter = filter;
72155 return 0;
72156 }
72157
72158 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72159 {
72160 int len, ret = -EFAULT;
72161+ struct icmp_filter filter;
72162
72163 if (get_user(len, optlen))
72164 goto out;
72165@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
72166 if (len > sizeof(struct icmp_filter))
72167 len = sizeof(struct icmp_filter);
72168 ret = -EFAULT;
72169- if (put_user(len, optlen) ||
72170- copy_to_user(optval, &raw_sk(sk)->filter, len))
72171+ filter = raw_sk(sk)->filter;
72172+ if (put_user(len, optlen) || len > sizeof filter ||
72173+ copy_to_user(optval, &filter, len))
72174 goto out;
72175 ret = 0;
72176 out: return ret;
72177@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72178 sk_wmem_alloc_get(sp),
72179 sk_rmem_alloc_get(sp),
72180 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72181- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72182+ atomic_read(&sp->sk_refcnt),
72183+#ifdef CONFIG_GRKERNSEC_HIDESYM
72184+ NULL,
72185+#else
72186+ sp,
72187+#endif
72188+ atomic_read_unchecked(&sp->sk_drops));
72189 }
72190
72191 static int raw_seq_show(struct seq_file *seq, void *v)
72192diff -urNp linux-3.0.7/net/ipv4/route.c linux-3.0.7/net/ipv4/route.c
72193--- linux-3.0.7/net/ipv4/route.c 2011-10-16 21:54:54.000000000 -0400
72194+++ linux-3.0.7/net/ipv4/route.c 2011-10-16 21:55:28.000000000 -0400
72195@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
72196
72197 static inline int rt_genid(struct net *net)
72198 {
72199- return atomic_read(&net->ipv4.rt_genid);
72200+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72201 }
72202
72203 #ifdef CONFIG_PROC_FS
72204@@ -832,7 +832,7 @@ static void rt_cache_invalidate(struct n
72205 unsigned char shuffle;
72206
72207 get_random_bytes(&shuffle, sizeof(shuffle));
72208- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72209+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72210 }
72211
72212 /*
72213@@ -2832,7 +2832,7 @@ static int rt_fill_info(struct net *net,
72214 error = rt->dst.error;
72215 if (peer) {
72216 inet_peer_refcheck(rt->peer);
72217- id = atomic_read(&peer->ip_id_count) & 0xffff;
72218+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72219 if (peer->tcp_ts_stamp) {
72220 ts = peer->tcp_ts;
72221 tsage = get_seconds() - peer->tcp_ts_stamp;
72222diff -urNp linux-3.0.7/net/ipv4/tcp.c linux-3.0.7/net/ipv4/tcp.c
72223--- linux-3.0.7/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
72224+++ linux-3.0.7/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
72225@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72226 int val;
72227 int err = 0;
72228
72229+ pax_track_stack();
72230+
72231 /* These are data/string values, all the others are ints */
72232 switch (optname) {
72233 case TCP_CONGESTION: {
72234@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72235 struct tcp_sock *tp = tcp_sk(sk);
72236 int val, len;
72237
72238+ pax_track_stack();
72239+
72240 if (get_user(len, optlen))
72241 return -EFAULT;
72242
72243diff -urNp linux-3.0.7/net/ipv4/tcp_ipv4.c linux-3.0.7/net/ipv4/tcp_ipv4.c
72244--- linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
72245+++ linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
72246@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72247 int sysctl_tcp_low_latency __read_mostly;
72248 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72249
72250+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72251+extern int grsec_enable_blackhole;
72252+#endif
72253
72254 #ifdef CONFIG_TCP_MD5SIG
72255 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72256@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72257 return 0;
72258
72259 reset:
72260+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72261+ if (!grsec_enable_blackhole)
72262+#endif
72263 tcp_v4_send_reset(rsk, skb);
72264 discard:
72265 kfree_skb(skb);
72266@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72267 TCP_SKB_CB(skb)->sacked = 0;
72268
72269 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72270- if (!sk)
72271+ if (!sk) {
72272+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72273+ ret = 1;
72274+#endif
72275 goto no_tcp_socket;
72276-
72277+ }
72278 process:
72279- if (sk->sk_state == TCP_TIME_WAIT)
72280+ if (sk->sk_state == TCP_TIME_WAIT) {
72281+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72282+ ret = 2;
72283+#endif
72284 goto do_time_wait;
72285+ }
72286
72287 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72288 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72289@@ -1724,6 +1737,10 @@ no_tcp_socket:
72290 bad_packet:
72291 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72292 } else {
72293+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72294+ if (!grsec_enable_blackhole || (ret == 1 &&
72295+ (skb->dev->flags & IFF_LOOPBACK)))
72296+#endif
72297 tcp_v4_send_reset(NULL, skb);
72298 }
72299
72300@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
72301 0, /* non standard timer */
72302 0, /* open_requests have no inode */
72303 atomic_read(&sk->sk_refcnt),
72304+#ifdef CONFIG_GRKERNSEC_HIDESYM
72305+ NULL,
72306+#else
72307 req,
72308+#endif
72309 len);
72310 }
72311
72312@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
72313 sock_i_uid(sk),
72314 icsk->icsk_probes_out,
72315 sock_i_ino(sk),
72316- atomic_read(&sk->sk_refcnt), sk,
72317+ atomic_read(&sk->sk_refcnt),
72318+#ifdef CONFIG_GRKERNSEC_HIDESYM
72319+ NULL,
72320+#else
72321+ sk,
72322+#endif
72323 jiffies_to_clock_t(icsk->icsk_rto),
72324 jiffies_to_clock_t(icsk->icsk_ack.ato),
72325 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72326@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
72327 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72328 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72329 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72330- atomic_read(&tw->tw_refcnt), tw, len);
72331+ atomic_read(&tw->tw_refcnt),
72332+#ifdef CONFIG_GRKERNSEC_HIDESYM
72333+ NULL,
72334+#else
72335+ tw,
72336+#endif
72337+ len);
72338 }
72339
72340 #define TMPSZ 150
72341diff -urNp linux-3.0.7/net/ipv4/tcp_minisocks.c linux-3.0.7/net/ipv4/tcp_minisocks.c
72342--- linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
72343+++ linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
72344@@ -27,6 +27,10 @@
72345 #include <net/inet_common.h>
72346 #include <net/xfrm.h>
72347
72348+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72349+extern int grsec_enable_blackhole;
72350+#endif
72351+
72352 int sysctl_tcp_syncookies __read_mostly = 1;
72353 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72354
72355@@ -745,6 +749,10 @@ listen_overflow:
72356
72357 embryonic_reset:
72358 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72359+
72360+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72361+ if (!grsec_enable_blackhole)
72362+#endif
72363 if (!(flg & TCP_FLAG_RST))
72364 req->rsk_ops->send_reset(sk, skb);
72365
72366diff -urNp linux-3.0.7/net/ipv4/tcp_output.c linux-3.0.7/net/ipv4/tcp_output.c
72367--- linux-3.0.7/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
72368+++ linux-3.0.7/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
72369@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72370 int mss;
72371 int s_data_desired = 0;
72372
72373+ pax_track_stack();
72374+
72375 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72376 s_data_desired = cvp->s_data_desired;
72377 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72378diff -urNp linux-3.0.7/net/ipv4/tcp_probe.c linux-3.0.7/net/ipv4/tcp_probe.c
72379--- linux-3.0.7/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
72380+++ linux-3.0.7/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
72381@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72382 if (cnt + width >= len)
72383 break;
72384
72385- if (copy_to_user(buf + cnt, tbuf, width))
72386+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72387 return -EFAULT;
72388 cnt += width;
72389 }
72390diff -urNp linux-3.0.7/net/ipv4/tcp_timer.c linux-3.0.7/net/ipv4/tcp_timer.c
72391--- linux-3.0.7/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
72392+++ linux-3.0.7/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
72393@@ -22,6 +22,10 @@
72394 #include <linux/gfp.h>
72395 #include <net/tcp.h>
72396
72397+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72398+extern int grsec_lastack_retries;
72399+#endif
72400+
72401 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72402 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72403 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72404@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72405 }
72406 }
72407
72408+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72409+ if ((sk->sk_state == TCP_LAST_ACK) &&
72410+ (grsec_lastack_retries > 0) &&
72411+ (grsec_lastack_retries < retry_until))
72412+ retry_until = grsec_lastack_retries;
72413+#endif
72414+
72415 if (retransmits_timed_out(sk, retry_until,
72416 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72417 /* Has it gone just too far? */
72418diff -urNp linux-3.0.7/net/ipv4/udp.c linux-3.0.7/net/ipv4/udp.c
72419--- linux-3.0.7/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
72420+++ linux-3.0.7/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
72421@@ -86,6 +86,7 @@
72422 #include <linux/types.h>
72423 #include <linux/fcntl.h>
72424 #include <linux/module.h>
72425+#include <linux/security.h>
72426 #include <linux/socket.h>
72427 #include <linux/sockios.h>
72428 #include <linux/igmp.h>
72429@@ -107,6 +108,10 @@
72430 #include <net/xfrm.h>
72431 #include "udp_impl.h"
72432
72433+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72434+extern int grsec_enable_blackhole;
72435+#endif
72436+
72437 struct udp_table udp_table __read_mostly;
72438 EXPORT_SYMBOL(udp_table);
72439
72440@@ -564,6 +569,9 @@ found:
72441 return s;
72442 }
72443
72444+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72445+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72446+
72447 /*
72448 * This routine is called by the ICMP module when it gets some
72449 * sort of error condition. If err < 0 then the socket should
72450@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72451 dport = usin->sin_port;
72452 if (dport == 0)
72453 return -EINVAL;
72454+
72455+ err = gr_search_udp_sendmsg(sk, usin);
72456+ if (err)
72457+ return err;
72458 } else {
72459 if (sk->sk_state != TCP_ESTABLISHED)
72460 return -EDESTADDRREQ;
72461+
72462+ err = gr_search_udp_sendmsg(sk, NULL);
72463+ if (err)
72464+ return err;
72465+
72466 daddr = inet->inet_daddr;
72467 dport = inet->inet_dport;
72468 /* Open fast path for connected socket.
72469@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
72470 udp_lib_checksum_complete(skb)) {
72471 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72472 IS_UDPLITE(sk));
72473- atomic_inc(&sk->sk_drops);
72474+ atomic_inc_unchecked(&sk->sk_drops);
72475 __skb_unlink(skb, rcvq);
72476 __skb_queue_tail(&list_kill, skb);
72477 }
72478@@ -1184,6 +1201,10 @@ try_again:
72479 if (!skb)
72480 goto out;
72481
72482+ err = gr_search_udp_recvmsg(sk, skb);
72483+ if (err)
72484+ goto out_free;
72485+
72486 ulen = skb->len - sizeof(struct udphdr);
72487 if (len > ulen)
72488 len = ulen;
72489@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72490
72491 drop:
72492 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72493- atomic_inc(&sk->sk_drops);
72494+ atomic_inc_unchecked(&sk->sk_drops);
72495 kfree_skb(skb);
72496 return -1;
72497 }
72498@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
72499 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72500
72501 if (!skb1) {
72502- atomic_inc(&sk->sk_drops);
72503+ atomic_inc_unchecked(&sk->sk_drops);
72504 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72505 IS_UDPLITE(sk));
72506 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72507@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72508 goto csum_error;
72509
72510 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72511+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72512+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72513+#endif
72514 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72515
72516 /*
72517@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
72518 sk_wmem_alloc_get(sp),
72519 sk_rmem_alloc_get(sp),
72520 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72521- atomic_read(&sp->sk_refcnt), sp,
72522- atomic_read(&sp->sk_drops), len);
72523+ atomic_read(&sp->sk_refcnt),
72524+#ifdef CONFIG_GRKERNSEC_HIDESYM
72525+ NULL,
72526+#else
72527+ sp,
72528+#endif
72529+ atomic_read_unchecked(&sp->sk_drops), len);
72530 }
72531
72532 int udp4_seq_show(struct seq_file *seq, void *v)
72533diff -urNp linux-3.0.7/net/ipv6/addrconf.c linux-3.0.7/net/ipv6/addrconf.c
72534--- linux-3.0.7/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
72535+++ linux-3.0.7/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
72536@@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
72537 p.iph.ihl = 5;
72538 p.iph.protocol = IPPROTO_IPV6;
72539 p.iph.ttl = 64;
72540- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72541+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72542
72543 if (ops->ndo_do_ioctl) {
72544 mm_segment_t oldfs = get_fs();
72545diff -urNp linux-3.0.7/net/ipv6/inet6_connection_sock.c linux-3.0.7/net/ipv6/inet6_connection_sock.c
72546--- linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
72547+++ linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
72548@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72549 #ifdef CONFIG_XFRM
72550 {
72551 struct rt6_info *rt = (struct rt6_info *)dst;
72552- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72553+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72554 }
72555 #endif
72556 }
72557@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72558 #ifdef CONFIG_XFRM
72559 if (dst) {
72560 struct rt6_info *rt = (struct rt6_info *)dst;
72561- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72562+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72563 __sk_dst_reset(sk);
72564 dst = NULL;
72565 }
72566diff -urNp linux-3.0.7/net/ipv6/ipv6_sockglue.c linux-3.0.7/net/ipv6/ipv6_sockglue.c
72567--- linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:54:54.000000000 -0400
72568+++ linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:55:28.000000000 -0400
72569@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72570 int val, valbool;
72571 int retv = -ENOPROTOOPT;
72572
72573+ pax_track_stack();
72574+
72575 if (optval == NULL)
72576 val=0;
72577 else {
72578@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72579 int len;
72580 int val;
72581
72582+ pax_track_stack();
72583+
72584 if (ip6_mroute_opt(optname))
72585 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72586
72587@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72588 if (sk->sk_type != SOCK_STREAM)
72589 return -ENOPROTOOPT;
72590
72591- msg.msg_control = optval;
72592+ msg.msg_control = (void __force_kernel *)optval;
72593 msg.msg_controllen = len;
72594 msg.msg_flags = flags;
72595
72596diff -urNp linux-3.0.7/net/ipv6/raw.c linux-3.0.7/net/ipv6/raw.c
72597--- linux-3.0.7/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
72598+++ linux-3.0.7/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
72599@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72600 {
72601 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72602 skb_checksum_complete(skb)) {
72603- atomic_inc(&sk->sk_drops);
72604+ atomic_inc_unchecked(&sk->sk_drops);
72605 kfree_skb(skb);
72606 return NET_RX_DROP;
72607 }
72608@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72609 struct raw6_sock *rp = raw6_sk(sk);
72610
72611 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72612- atomic_inc(&sk->sk_drops);
72613+ atomic_inc_unchecked(&sk->sk_drops);
72614 kfree_skb(skb);
72615 return NET_RX_DROP;
72616 }
72617@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72618
72619 if (inet->hdrincl) {
72620 if (skb_checksum_complete(skb)) {
72621- atomic_inc(&sk->sk_drops);
72622+ atomic_inc_unchecked(&sk->sk_drops);
72623 kfree_skb(skb);
72624 return NET_RX_DROP;
72625 }
72626@@ -601,7 +601,7 @@ out:
72627 return err;
72628 }
72629
72630-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72631+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72632 struct flowi6 *fl6, struct dst_entry **dstp,
72633 unsigned int flags)
72634 {
72635@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
72636 u16 proto;
72637 int err;
72638
72639+ pax_track_stack();
72640+
72641 /* Rough check on arithmetic overflow,
72642 better check is made in ip6_append_data().
72643 */
72644@@ -909,12 +911,15 @@ do_confirm:
72645 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72646 char __user *optval, int optlen)
72647 {
72648+ struct icmp6_filter filter;
72649+
72650 switch (optname) {
72651 case ICMPV6_FILTER:
72652 if (optlen > sizeof(struct icmp6_filter))
72653 optlen = sizeof(struct icmp6_filter);
72654- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72655+ if (copy_from_user(&filter, optval, optlen))
72656 return -EFAULT;
72657+ raw6_sk(sk)->filter = filter;
72658 return 0;
72659 default:
72660 return -ENOPROTOOPT;
72661@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
72662 char __user *optval, int __user *optlen)
72663 {
72664 int len;
72665+ struct icmp6_filter filter;
72666
72667 switch (optname) {
72668 case ICMPV6_FILTER:
72669@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
72670 len = sizeof(struct icmp6_filter);
72671 if (put_user(len, optlen))
72672 return -EFAULT;
72673- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72674+ filter = raw6_sk(sk)->filter;
72675+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72676 return -EFAULT;
72677 return 0;
72678 default:
72679@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
72680 0, 0L, 0,
72681 sock_i_uid(sp), 0,
72682 sock_i_ino(sp),
72683- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72684+ atomic_read(&sp->sk_refcnt),
72685+#ifdef CONFIG_GRKERNSEC_HIDESYM
72686+ NULL,
72687+#else
72688+ sp,
72689+#endif
72690+ atomic_read_unchecked(&sp->sk_drops));
72691 }
72692
72693 static int raw6_seq_show(struct seq_file *seq, void *v)
72694diff -urNp linux-3.0.7/net/ipv6/tcp_ipv6.c linux-3.0.7/net/ipv6/tcp_ipv6.c
72695--- linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
72696+++ linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
72697@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72698 }
72699 #endif
72700
72701+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72702+extern int grsec_enable_blackhole;
72703+#endif
72704+
72705 static void tcp_v6_hash(struct sock *sk)
72706 {
72707 if (sk->sk_state != TCP_CLOSE) {
72708@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72709 return 0;
72710
72711 reset:
72712+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72713+ if (!grsec_enable_blackhole)
72714+#endif
72715 tcp_v6_send_reset(sk, skb);
72716 discard:
72717 if (opt_skb)
72718@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72719 TCP_SKB_CB(skb)->sacked = 0;
72720
72721 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72722- if (!sk)
72723+ if (!sk) {
72724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72725+ ret = 1;
72726+#endif
72727 goto no_tcp_socket;
72728+ }
72729
72730 process:
72731- if (sk->sk_state == TCP_TIME_WAIT)
72732+ if (sk->sk_state == TCP_TIME_WAIT) {
72733+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72734+ ret = 2;
72735+#endif
72736 goto do_time_wait;
72737+ }
72738
72739 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
72740 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72741@@ -1794,6 +1809,10 @@ no_tcp_socket:
72742 bad_packet:
72743 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72744 } else {
72745+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72746+ if (!grsec_enable_blackhole || (ret == 1 &&
72747+ (skb->dev->flags & IFF_LOOPBACK)))
72748+#endif
72749 tcp_v6_send_reset(NULL, skb);
72750 }
72751
72752@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
72753 uid,
72754 0, /* non standard timer */
72755 0, /* open_requests have no inode */
72756- 0, req);
72757+ 0,
72758+#ifdef CONFIG_GRKERNSEC_HIDESYM
72759+ NULL
72760+#else
72761+ req
72762+#endif
72763+ );
72764 }
72765
72766 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72767@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
72768 sock_i_uid(sp),
72769 icsk->icsk_probes_out,
72770 sock_i_ino(sp),
72771- atomic_read(&sp->sk_refcnt), sp,
72772+ atomic_read(&sp->sk_refcnt),
72773+#ifdef CONFIG_GRKERNSEC_HIDESYM
72774+ NULL,
72775+#else
72776+ sp,
72777+#endif
72778 jiffies_to_clock_t(icsk->icsk_rto),
72779 jiffies_to_clock_t(icsk->icsk_ack.ato),
72780 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72781@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
72782 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72783 tw->tw_substate, 0, 0,
72784 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72785- atomic_read(&tw->tw_refcnt), tw);
72786+ atomic_read(&tw->tw_refcnt),
72787+#ifdef CONFIG_GRKERNSEC_HIDESYM
72788+ NULL
72789+#else
72790+ tw
72791+#endif
72792+ );
72793 }
72794
72795 static int tcp6_seq_show(struct seq_file *seq, void *v)
72796diff -urNp linux-3.0.7/net/ipv6/udp.c linux-3.0.7/net/ipv6/udp.c
72797--- linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:09.000000000 -0400
72798+++ linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:19.000000000 -0400
72799@@ -50,6 +50,10 @@
72800 #include <linux/seq_file.h>
72801 #include "udp_impl.h"
72802
72803+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72804+extern int grsec_enable_blackhole;
72805+#endif
72806+
72807 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72808 {
72809 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72810@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72811
72812 return 0;
72813 drop:
72814- atomic_inc(&sk->sk_drops);
72815+ atomic_inc_unchecked(&sk->sk_drops);
72816 drop_no_sk_drops_inc:
72817 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72818 kfree_skb(skb);
72819@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
72820 continue;
72821 }
72822 drop:
72823- atomic_inc(&sk->sk_drops);
72824+ atomic_inc_unchecked(&sk->sk_drops);
72825 UDP6_INC_STATS_BH(sock_net(sk),
72826 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
72827 UDP6_INC_STATS_BH(sock_net(sk),
72828@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72829 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72830 proto == IPPROTO_UDPLITE);
72831
72832+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72833+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72834+#endif
72835 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
72836
72837 kfree_skb(skb);
72838@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72839 if (!sock_owned_by_user(sk))
72840 udpv6_queue_rcv_skb(sk, skb);
72841 else if (sk_add_backlog(sk, skb)) {
72842- atomic_inc(&sk->sk_drops);
72843+ atomic_inc_unchecked(&sk->sk_drops);
72844 bh_unlock_sock(sk);
72845 sock_put(sk);
72846 goto discard;
72847@@ -1408,8 +1415,13 @@ static void udp6_sock_seq_show(struct se
72848 0, 0L, 0,
72849 sock_i_uid(sp), 0,
72850 sock_i_ino(sp),
72851- atomic_read(&sp->sk_refcnt), sp,
72852- atomic_read(&sp->sk_drops));
72853+ atomic_read(&sp->sk_refcnt),
72854+#ifdef CONFIG_GRKERNSEC_HIDESYM
72855+ NULL,
72856+#else
72857+ sp,
72858+#endif
72859+ atomic_read_unchecked(&sp->sk_drops));
72860 }
72861
72862 int udp6_seq_show(struct seq_file *seq, void *v)
72863diff -urNp linux-3.0.7/net/irda/ircomm/ircomm_tty.c linux-3.0.7/net/irda/ircomm/ircomm_tty.c
72864--- linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
72865+++ linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
72866@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
72867 add_wait_queue(&self->open_wait, &wait);
72868
72869 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72870- __FILE__,__LINE__, tty->driver->name, self->open_count );
72871+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72872
72873 /* As far as I can see, we protect open_count - Jean II */
72874 spin_lock_irqsave(&self->spinlock, flags);
72875 if (!tty_hung_up_p(filp)) {
72876 extra_count = 1;
72877- self->open_count--;
72878+ local_dec(&self->open_count);
72879 }
72880 spin_unlock_irqrestore(&self->spinlock, flags);
72881- self->blocked_open++;
72882+ local_inc(&self->blocked_open);
72883
72884 while (1) {
72885 if (tty->termios->c_cflag & CBAUD) {
72886@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
72887 }
72888
72889 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72890- __FILE__,__LINE__, tty->driver->name, self->open_count );
72891+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72892
72893 schedule();
72894 }
72895@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
72896 if (extra_count) {
72897 /* ++ is not atomic, so this should be protected - Jean II */
72898 spin_lock_irqsave(&self->spinlock, flags);
72899- self->open_count++;
72900+ local_inc(&self->open_count);
72901 spin_unlock_irqrestore(&self->spinlock, flags);
72902 }
72903- self->blocked_open--;
72904+ local_dec(&self->blocked_open);
72905
72906 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72907- __FILE__,__LINE__, tty->driver->name, self->open_count);
72908+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72909
72910 if (!retval)
72911 self->flags |= ASYNC_NORMAL_ACTIVE;
72912@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
72913 }
72914 /* ++ is not atomic, so this should be protected - Jean II */
72915 spin_lock_irqsave(&self->spinlock, flags);
72916- self->open_count++;
72917+ local_inc(&self->open_count);
72918
72919 tty->driver_data = self;
72920 self->tty = tty;
72921 spin_unlock_irqrestore(&self->spinlock, flags);
72922
72923 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72924- self->line, self->open_count);
72925+ self->line, local_read(&self->open_count));
72926
72927 /* Not really used by us, but lets do it anyway */
72928 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72929@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
72930 return;
72931 }
72932
72933- if ((tty->count == 1) && (self->open_count != 1)) {
72934+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72935 /*
72936 * Uh, oh. tty->count is 1, which means that the tty
72937 * structure will be freed. state->count should always
72938@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
72939 */
72940 IRDA_DEBUG(0, "%s(), bad serial port count; "
72941 "tty->count is 1, state->count is %d\n", __func__ ,
72942- self->open_count);
72943- self->open_count = 1;
72944+ local_read(&self->open_count));
72945+ local_set(&self->open_count, 1);
72946 }
72947
72948- if (--self->open_count < 0) {
72949+ if (local_dec_return(&self->open_count) < 0) {
72950 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72951- __func__, self->line, self->open_count);
72952- self->open_count = 0;
72953+ __func__, self->line, local_read(&self->open_count));
72954+ local_set(&self->open_count, 0);
72955 }
72956- if (self->open_count) {
72957+ if (local_read(&self->open_count)) {
72958 spin_unlock_irqrestore(&self->spinlock, flags);
72959
72960 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72961@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
72962 tty->closing = 0;
72963 self->tty = NULL;
72964
72965- if (self->blocked_open) {
72966+ if (local_read(&self->blocked_open)) {
72967 if (self->close_delay)
72968 schedule_timeout_interruptible(self->close_delay);
72969 wake_up_interruptible(&self->open_wait);
72970@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
72971 spin_lock_irqsave(&self->spinlock, flags);
72972 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72973 self->tty = NULL;
72974- self->open_count = 0;
72975+ local_set(&self->open_count, 0);
72976 spin_unlock_irqrestore(&self->spinlock, flags);
72977
72978 wake_up_interruptible(&self->open_wait);
72979@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
72980 seq_putc(m, '\n');
72981
72982 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72983- seq_printf(m, "Open count: %d\n", self->open_count);
72984+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72985 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72986 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72987
72988diff -urNp linux-3.0.7/net/iucv/af_iucv.c linux-3.0.7/net/iucv/af_iucv.c
72989--- linux-3.0.7/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
72990+++ linux-3.0.7/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
72991@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
72992
72993 write_lock_bh(&iucv_sk_list.lock);
72994
72995- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72996+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72997 while (__iucv_get_sock_by_name(name)) {
72998 sprintf(name, "%08x",
72999- atomic_inc_return(&iucv_sk_list.autobind_name));
73000+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73001 }
73002
73003 write_unlock_bh(&iucv_sk_list.lock);
73004diff -urNp linux-3.0.7/net/key/af_key.c linux-3.0.7/net/key/af_key.c
73005--- linux-3.0.7/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
73006+++ linux-3.0.7/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
73007@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73008 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73009 struct xfrm_kmaddress k;
73010
73011+ pax_track_stack();
73012+
73013 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73014 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73015 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73016@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73017 static u32 get_acqseq(void)
73018 {
73019 u32 res;
73020- static atomic_t acqseq;
73021+ static atomic_unchecked_t acqseq;
73022
73023 do {
73024- res = atomic_inc_return(&acqseq);
73025+ res = atomic_inc_return_unchecked(&acqseq);
73026 } while (!res);
73027 return res;
73028 }
73029diff -urNp linux-3.0.7/net/lapb/lapb_iface.c linux-3.0.7/net/lapb/lapb_iface.c
73030--- linux-3.0.7/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
73031+++ linux-3.0.7/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
73032@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73033 goto out;
73034
73035 lapb->dev = dev;
73036- lapb->callbacks = *callbacks;
73037+ lapb->callbacks = callbacks;
73038
73039 __lapb_insert_cb(lapb);
73040
73041@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73042
73043 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73044 {
73045- if (lapb->callbacks.connect_confirmation)
73046- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73047+ if (lapb->callbacks->connect_confirmation)
73048+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73049 }
73050
73051 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73052 {
73053- if (lapb->callbacks.connect_indication)
73054- lapb->callbacks.connect_indication(lapb->dev, reason);
73055+ if (lapb->callbacks->connect_indication)
73056+ lapb->callbacks->connect_indication(lapb->dev, reason);
73057 }
73058
73059 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73060 {
73061- if (lapb->callbacks.disconnect_confirmation)
73062- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73063+ if (lapb->callbacks->disconnect_confirmation)
73064+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73065 }
73066
73067 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73068 {
73069- if (lapb->callbacks.disconnect_indication)
73070- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73071+ if (lapb->callbacks->disconnect_indication)
73072+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73073 }
73074
73075 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73076 {
73077- if (lapb->callbacks.data_indication)
73078- return lapb->callbacks.data_indication(lapb->dev, skb);
73079+ if (lapb->callbacks->data_indication)
73080+ return lapb->callbacks->data_indication(lapb->dev, skb);
73081
73082 kfree_skb(skb);
73083 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73084@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73085 {
73086 int used = 0;
73087
73088- if (lapb->callbacks.data_transmit) {
73089- lapb->callbacks.data_transmit(lapb->dev, skb);
73090+ if (lapb->callbacks->data_transmit) {
73091+ lapb->callbacks->data_transmit(lapb->dev, skb);
73092 used = 1;
73093 }
73094
73095diff -urNp linux-3.0.7/net/mac80211/debugfs_sta.c linux-3.0.7/net/mac80211/debugfs_sta.c
73096--- linux-3.0.7/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
73097+++ linux-3.0.7/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
73098@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73099 struct tid_ampdu_rx *tid_rx;
73100 struct tid_ampdu_tx *tid_tx;
73101
73102+ pax_track_stack();
73103+
73104 rcu_read_lock();
73105
73106 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73107@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73108 struct sta_info *sta = file->private_data;
73109 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73110
73111+ pax_track_stack();
73112+
73113 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73114 htc->ht_supported ? "" : "not ");
73115 if (htc->ht_supported) {
73116diff -urNp linux-3.0.7/net/mac80211/ieee80211_i.h linux-3.0.7/net/mac80211/ieee80211_i.h
73117--- linux-3.0.7/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
73118+++ linux-3.0.7/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
73119@@ -27,6 +27,7 @@
73120 #include <net/ieee80211_radiotap.h>
73121 #include <net/cfg80211.h>
73122 #include <net/mac80211.h>
73123+#include <asm/local.h>
73124 #include "key.h"
73125 #include "sta_info.h"
73126
73127@@ -721,7 +722,7 @@ struct ieee80211_local {
73128 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73129 spinlock_t queue_stop_reason_lock;
73130
73131- int open_count;
73132+ local_t open_count;
73133 int monitors, cooked_mntrs;
73134 /* number of interfaces with corresponding FIF_ flags */
73135 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73136diff -urNp linux-3.0.7/net/mac80211/iface.c linux-3.0.7/net/mac80211/iface.c
73137--- linux-3.0.7/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
73138+++ linux-3.0.7/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
73139@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73140 break;
73141 }
73142
73143- if (local->open_count == 0) {
73144+ if (local_read(&local->open_count) == 0) {
73145 res = drv_start(local);
73146 if (res)
73147 goto err_del_bss;
73148@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73149 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73150
73151 if (!is_valid_ether_addr(dev->dev_addr)) {
73152- if (!local->open_count)
73153+ if (!local_read(&local->open_count))
73154 drv_stop(local);
73155 return -EADDRNOTAVAIL;
73156 }
73157@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73158 mutex_unlock(&local->mtx);
73159
73160 if (coming_up)
73161- local->open_count++;
73162+ local_inc(&local->open_count);
73163
73164 if (hw_reconf_flags) {
73165 ieee80211_hw_config(local, hw_reconf_flags);
73166@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73167 err_del_interface:
73168 drv_remove_interface(local, &sdata->vif);
73169 err_stop:
73170- if (!local->open_count)
73171+ if (!local_read(&local->open_count))
73172 drv_stop(local);
73173 err_del_bss:
73174 sdata->bss = NULL;
73175@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
73176 }
73177
73178 if (going_down)
73179- local->open_count--;
73180+ local_dec(&local->open_count);
73181
73182 switch (sdata->vif.type) {
73183 case NL80211_IFTYPE_AP_VLAN:
73184@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
73185
73186 ieee80211_recalc_ps(local, -1);
73187
73188- if (local->open_count == 0) {
73189+ if (local_read(&local->open_count) == 0) {
73190 if (local->ops->napi_poll)
73191 napi_disable(&local->napi);
73192 ieee80211_clear_tx_pending(local);
73193diff -urNp linux-3.0.7/net/mac80211/main.c linux-3.0.7/net/mac80211/main.c
73194--- linux-3.0.7/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
73195+++ linux-3.0.7/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
73196@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73197 local->hw.conf.power_level = power;
73198 }
73199
73200- if (changed && local->open_count) {
73201+ if (changed && local_read(&local->open_count)) {
73202 ret = drv_config(local, changed);
73203 /*
73204 * Goal:
73205diff -urNp linux-3.0.7/net/mac80211/mlme.c linux-3.0.7/net/mac80211/mlme.c
73206--- linux-3.0.7/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
73207+++ linux-3.0.7/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
73208@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
73209 bool have_higher_than_11mbit = false;
73210 u16 ap_ht_cap_flags;
73211
73212+ pax_track_stack();
73213+
73214 /* AssocResp and ReassocResp have identical structure */
73215
73216 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73217diff -urNp linux-3.0.7/net/mac80211/pm.c linux-3.0.7/net/mac80211/pm.c
73218--- linux-3.0.7/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
73219+++ linux-3.0.7/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
73220@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
73221 cancel_work_sync(&local->dynamic_ps_enable_work);
73222 del_timer_sync(&local->dynamic_ps_timer);
73223
73224- local->wowlan = wowlan && local->open_count;
73225+ local->wowlan = wowlan && local_read(&local->open_count);
73226 if (local->wowlan) {
73227 int err = drv_suspend(local, wowlan);
73228 if (err) {
73229@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
73230 }
73231
73232 /* stop hardware - this must stop RX */
73233- if (local->open_count)
73234+ if (local_read(&local->open_count))
73235 ieee80211_stop_device(local);
73236
73237 suspend:
73238diff -urNp linux-3.0.7/net/mac80211/rate.c linux-3.0.7/net/mac80211/rate.c
73239--- linux-3.0.7/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
73240+++ linux-3.0.7/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
73241@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73242
73243 ASSERT_RTNL();
73244
73245- if (local->open_count)
73246+ if (local_read(&local->open_count))
73247 return -EBUSY;
73248
73249 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73250diff -urNp linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c
73251--- linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
73252+++ linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
73253@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73254
73255 spin_unlock_irqrestore(&events->lock, status);
73256
73257- if (copy_to_user(buf, pb, p))
73258+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73259 return -EFAULT;
73260
73261 return p;
73262diff -urNp linux-3.0.7/net/mac80211/util.c linux-3.0.7/net/mac80211/util.c
73263--- linux-3.0.7/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
73264+++ linux-3.0.7/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
73265@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
73266 #endif
73267
73268 /* restart hardware */
73269- if (local->open_count) {
73270+ if (local_read(&local->open_count)) {
73271 /*
73272 * Upon resume hardware can sometimes be goofy due to
73273 * various platform / driver / bus issues, so restarting
73274diff -urNp linux-3.0.7/net/netfilter/Kconfig linux-3.0.7/net/netfilter/Kconfig
73275--- linux-3.0.7/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
73276+++ linux-3.0.7/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
73277@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73278
73279 To compile it as a module, choose M here. If unsure, say N.
73280
73281+config NETFILTER_XT_MATCH_GRADM
73282+ tristate '"gradm" match support'
73283+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73284+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73285+ ---help---
73286+ The gradm match allows to match on grsecurity RBAC being enabled.
73287+ It is useful when iptables rules are applied early on bootup to
73288+ prevent connections to the machine (except from a trusted host)
73289+ while the RBAC system is disabled.
73290+
73291 config NETFILTER_XT_MATCH_HASHLIMIT
73292 tristate '"hashlimit" match support'
73293 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73294diff -urNp linux-3.0.7/net/netfilter/Makefile linux-3.0.7/net/netfilter/Makefile
73295--- linux-3.0.7/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
73296+++ linux-3.0.7/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
73297@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73298 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73299 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73300 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73301+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73302 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73303 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73304 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73305diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c
73306--- linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
73307+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
73308@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73309 /* Increase the refcnt counter of the dest */
73310 atomic_inc(&dest->refcnt);
73311
73312- conn_flags = atomic_read(&dest->conn_flags);
73313+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73314 if (cp->protocol != IPPROTO_UDP)
73315 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73316 /* Bind with the destination and its corresponding transmitter */
73317@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73318 atomic_set(&cp->refcnt, 1);
73319
73320 atomic_set(&cp->n_control, 0);
73321- atomic_set(&cp->in_pkts, 0);
73322+ atomic_set_unchecked(&cp->in_pkts, 0);
73323
73324 atomic_inc(&ipvs->conn_count);
73325 if (flags & IP_VS_CONN_F_NO_CPORT)
73326@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73327
73328 /* Don't drop the entry if its number of incoming packets is not
73329 located in [0, 8] */
73330- i = atomic_read(&cp->in_pkts);
73331+ i = atomic_read_unchecked(&cp->in_pkts);
73332 if (i > 8 || i < 0) return 0;
73333
73334 if (!todrop_rate[i]) return 0;
73335diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c
73336--- linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
73337+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
73338@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73339 ret = cp->packet_xmit(skb, cp, pd->pp);
73340 /* do not touch skb anymore */
73341
73342- atomic_inc(&cp->in_pkts);
73343+ atomic_inc_unchecked(&cp->in_pkts);
73344 ip_vs_conn_put(cp);
73345 return ret;
73346 }
73347@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73348 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73349 pkts = sysctl_sync_threshold(ipvs);
73350 else
73351- pkts = atomic_add_return(1, &cp->in_pkts);
73352+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73353
73354 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73355 cp->protocol == IPPROTO_SCTP) {
73356diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c
73357--- linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
73358+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
73359@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73360 ip_vs_rs_hash(ipvs, dest);
73361 write_unlock_bh(&ipvs->rs_lock);
73362 }
73363- atomic_set(&dest->conn_flags, conn_flags);
73364+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73365
73366 /* bind the service */
73367 if (!dest->svc) {
73368@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73369 " %-7s %-6d %-10d %-10d\n",
73370 &dest->addr.in6,
73371 ntohs(dest->port),
73372- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73373+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73374 atomic_read(&dest->weight),
73375 atomic_read(&dest->activeconns),
73376 atomic_read(&dest->inactconns));
73377@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73378 "%-7s %-6d %-10d %-10d\n",
73379 ntohl(dest->addr.ip),
73380 ntohs(dest->port),
73381- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73382+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73383 atomic_read(&dest->weight),
73384 atomic_read(&dest->activeconns),
73385 atomic_read(&dest->inactconns));
73386@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73387 struct ip_vs_dest_user *udest_compat;
73388 struct ip_vs_dest_user_kern udest;
73389
73390+ pax_track_stack();
73391+
73392 if (!capable(CAP_NET_ADMIN))
73393 return -EPERM;
73394
73395@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
73396
73397 entry.addr = dest->addr.ip;
73398 entry.port = dest->port;
73399- entry.conn_flags = atomic_read(&dest->conn_flags);
73400+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73401 entry.weight = atomic_read(&dest->weight);
73402 entry.u_threshold = dest->u_threshold;
73403 entry.l_threshold = dest->l_threshold;
73404@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
73405 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73406
73407 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73408- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73409+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73410 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73411 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73412 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73413diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c
73414--- linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
73415+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
73416@@ -648,7 +648,7 @@ control:
73417 * i.e only increment in_pkts for Templates.
73418 */
73419 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73420- int pkts = atomic_add_return(1, &cp->in_pkts);
73421+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73422
73423 if (pkts % sysctl_sync_period(ipvs) != 1)
73424 return;
73425@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
73426
73427 if (opt)
73428 memcpy(&cp->in_seq, opt, sizeof(*opt));
73429- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73430+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73431 cp->state = state;
73432 cp->old_state = cp->state;
73433 /*
73434diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c
73435--- linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
73436+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
73437@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73438 else
73439 rc = NF_ACCEPT;
73440 /* do not touch skb anymore */
73441- atomic_inc(&cp->in_pkts);
73442+ atomic_inc_unchecked(&cp->in_pkts);
73443 goto out;
73444 }
73445
73446@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73447 else
73448 rc = NF_ACCEPT;
73449 /* do not touch skb anymore */
73450- atomic_inc(&cp->in_pkts);
73451+ atomic_inc_unchecked(&cp->in_pkts);
73452 goto out;
73453 }
73454
73455diff -urNp linux-3.0.7/net/netfilter/nfnetlink_log.c linux-3.0.7/net/netfilter/nfnetlink_log.c
73456--- linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
73457+++ linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
73458@@ -70,7 +70,7 @@ struct nfulnl_instance {
73459 };
73460
73461 static DEFINE_SPINLOCK(instances_lock);
73462-static atomic_t global_seq;
73463+static atomic_unchecked_t global_seq;
73464
73465 #define INSTANCE_BUCKETS 16
73466 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73467@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73468 /* global sequence number */
73469 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73470 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73471- htonl(atomic_inc_return(&global_seq)));
73472+ htonl(atomic_inc_return_unchecked(&global_seq)));
73473
73474 if (data_len) {
73475 struct nlattr *nla;
73476diff -urNp linux-3.0.7/net/netfilter/nfnetlink_queue.c linux-3.0.7/net/netfilter/nfnetlink_queue.c
73477--- linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
73478+++ linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
73479@@ -58,7 +58,7 @@ struct nfqnl_instance {
73480 */
73481 spinlock_t lock;
73482 unsigned int queue_total;
73483- atomic_t id_sequence; /* 'sequence' of pkt ids */
73484+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
73485 struct list_head queue_list; /* packets in queue */
73486 };
73487
73488@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
73489 nfmsg->version = NFNETLINK_V0;
73490 nfmsg->res_id = htons(queue->queue_num);
73491
73492- entry->id = atomic_inc_return(&queue->id_sequence);
73493+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
73494 pmsg.packet_id = htonl(entry->id);
73495 pmsg.hw_protocol = entskb->protocol;
73496 pmsg.hook = entry->hook;
73497@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
73498 inst->peer_pid, inst->queue_total,
73499 inst->copy_mode, inst->copy_range,
73500 inst->queue_dropped, inst->queue_user_dropped,
73501- atomic_read(&inst->id_sequence), 1);
73502+ atomic_read_unchecked(&inst->id_sequence), 1);
73503 }
73504
73505 static const struct seq_operations nfqnl_seq_ops = {
73506diff -urNp linux-3.0.7/net/netfilter/xt_gradm.c linux-3.0.7/net/netfilter/xt_gradm.c
73507--- linux-3.0.7/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73508+++ linux-3.0.7/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
73509@@ -0,0 +1,51 @@
73510+/*
73511+ * gradm match for netfilter
73512