]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.32.50-201112101848.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.50-201112101848.patch
CommitLineData
2d294565
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index f38986c..46a251b 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,42 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+gcc-plugins:
251+ $(Q)$(MAKE) $(build)=tools/gcc
252+else
253+gcc-plugins:
254+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
255+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
256+else
257+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
258+endif
259+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
260+endif
261+endif
262+
263 include $(srctree)/arch/$(SRCARCH)/Makefile
264
265 ifneq ($(CONFIG_FRAME_WARN),0)
266@@ -647,7 +684,7 @@ export mod_strip_cmd
267
268
269 ifeq ($(KBUILD_EXTMOD),)
270-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
271+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
272
273 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
274 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
275@@ -868,6 +905,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
276
277 # The actual objects are generated when descending,
278 # make sure no implicit rule kicks in
279+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
280 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
281
282 # Handle descending into subdirectories listed in $(vmlinux-dirs)
283@@ -877,7 +915,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
284 # Error messages still appears in the original language
285
286 PHONY += $(vmlinux-dirs)
287-$(vmlinux-dirs): prepare scripts
288+$(vmlinux-dirs): gcc-plugins prepare scripts
289 $(Q)$(MAKE) $(build)=$@
290
291 # Build the kernel release string
292@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
293 $(Q)$(MAKE) $(build)=. missing-syscalls
294
295 # All the preparing..
296+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
297 prepare: prepare0
298
299 # The asm symlink changes when $(ARCH) changes.
300@@ -1127,6 +1166,7 @@ all: modules
301 # using awk while concatenating to the final file.
302
303 PHONY += modules
304+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308@@ -1136,7 +1176,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312-modules_prepare: prepare scripts
313+modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317@@ -1201,7 +1241,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
318 include/linux/autoconf.h include/linux/version.h \
319 include/linux/utsrelease.h \
320 include/linux/bounds.h include/asm*/asm-offsets.h \
321- Module.symvers Module.markers tags TAGS cscope*
322+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
323
324 # clean - Delete most, but leave enough to build external modules
325 #
326@@ -1245,7 +1285,7 @@ distclean: mrproper
327 @find $(srctree) $(RCS_FIND_IGNORE) \
328 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
329 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
330- -o -name '.*.rej' -o -size 0 \
331+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
332 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
333 -type f -print | xargs rm -f
334
335@@ -1292,6 +1332,7 @@ help:
336 @echo ' modules_prepare - Set up for building external modules'
337 @echo ' tags/TAGS - Generate tags file for editors'
338 @echo ' cscope - Generate cscope index'
339+ @echo ' gtags - Generate GNU GLOBAL index'
340 @echo ' kernelrelease - Output the release version string'
341 @echo ' kernelversion - Output the version stored in Makefile'
342 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
343@@ -1393,6 +1434,7 @@ PHONY += $(module-dirs) modules
344 $(module-dirs): crmodverdir $(objtree)/Module.symvers
345 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
346
347+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
348 modules: $(module-dirs)
349 @$(kecho) ' Building modules, stage 2.';
350 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
351@@ -1448,7 +1490,7 @@ endif # KBUILD_EXTMOD
352 quiet_cmd_tags = GEN $@
353 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
354
355-tags TAGS cscope: FORCE
356+tags TAGS cscope gtags: FORCE
357 $(call cmd,tags)
358
359 # Scripts to check various things for consistency
360@@ -1513,17 +1555,19 @@ else
361 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
362 endif
363
364-%.s: %.c prepare scripts FORCE
365+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
366+%.s: %.c gcc-plugins prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368 %.i: %.c prepare scripts FORCE
369 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
370-%.o: %.c prepare scripts FORCE
371+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
372+%.o: %.c gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.lst: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376-%.s: %.S prepare scripts FORCE
377+%.s: %.S gcc-plugins prepare scripts FORCE
378 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
379-%.o: %.S prepare scripts FORCE
380+%.o: %.S gcc-plugins prepare scripts FORCE
381 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
382 %.symtypes: %.c prepare scripts FORCE
383 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
384@@ -1533,11 +1577,13 @@ endif
385 $(cmd_crmodverdir)
386 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
387 $(build)=$(build-dir)
388-%/: prepare scripts FORCE
389+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
390+%/: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir)
394-%.ko: prepare scripts FORCE
395+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
396+%.ko: gcc-plugins prepare scripts FORCE
397 $(cmd_crmodverdir)
398 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
399 $(build)=$(build-dir) $(@:.ko=.o)
400diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
401index 5c75c1b..c82f878 100644
402--- a/arch/alpha/include/asm/elf.h
403+++ b/arch/alpha/include/asm/elf.h
404@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
405
406 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
407
408+#ifdef CONFIG_PAX_ASLR
409+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
410+
411+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
412+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
413+#endif
414+
415 /* $0 is set by ld.so to a pointer to a function which might be
416 registered using atexit. This provides a mean for the dynamic
417 linker to call DT_FINI functions for shared libraries that have
418diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
419index 3f0c59f..cf1e100 100644
420--- a/arch/alpha/include/asm/pgtable.h
421+++ b/arch/alpha/include/asm/pgtable.h
422@@ -101,6 +101,17 @@ struct vm_area_struct;
423 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
424 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
426+
427+#ifdef CONFIG_PAX_PAGEEXEC
428+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
429+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
431+#else
432+# define PAGE_SHARED_NOEXEC PAGE_SHARED
433+# define PAGE_COPY_NOEXEC PAGE_COPY
434+# define PAGE_READONLY_NOEXEC PAGE_READONLY
435+#endif
436+
437 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
438
439 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
440diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
441index ebc3c89..20cfa63 100644
442--- a/arch/alpha/kernel/module.c
443+++ b/arch/alpha/kernel/module.c
444@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
445
446 /* The small sections were sorted to the end of the segment.
447 The following should definitely cover them. */
448- gp = (u64)me->module_core + me->core_size - 0x8000;
449+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
450 got = sechdrs[me->arch.gotsecindex].sh_addr;
451
452 for (i = 0; i < n; i++) {
453diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
454index a94e49c..d71dd44 100644
455--- a/arch/alpha/kernel/osf_sys.c
456+++ b/arch/alpha/kernel/osf_sys.c
457@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
458 /* At this point: (!vma || addr < vma->vm_end). */
459 if (limit - len < addr)
460 return -ENOMEM;
461- if (!vma || addr + len <= vma->vm_start)
462+ if (check_heap_stack_gap(vma, addr, len))
463 return addr;
464 addr = vma->vm_end;
465 vma = vma->vm_next;
466@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
467 merely specific addresses, but regions of memory -- perhaps
468 this feature should be incorporated into all ports? */
469
470+#ifdef CONFIG_PAX_RANDMMAP
471+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
472+#endif
473+
474 if (addr) {
475 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
476 if (addr != (unsigned long) -ENOMEM)
477@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
478 }
479
480 /* Next, try allocating at TASK_UNMAPPED_BASE. */
481- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
482- len, limit);
483+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
484+
485 if (addr != (unsigned long) -ENOMEM)
486 return addr;
487
488diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
489index 00a31de..2ded0f2 100644
490--- a/arch/alpha/mm/fault.c
491+++ b/arch/alpha/mm/fault.c
492@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
493 __reload_thread(pcb);
494 }
495
496+#ifdef CONFIG_PAX_PAGEEXEC
497+/*
498+ * PaX: decide what to do with offenders (regs->pc = fault address)
499+ *
500+ * returns 1 when task should be killed
501+ * 2 when patched PLT trampoline was detected
502+ * 3 when unpatched PLT trampoline was detected
503+ */
504+static int pax_handle_fetch_fault(struct pt_regs *regs)
505+{
506+
507+#ifdef CONFIG_PAX_EMUPLT
508+ int err;
509+
510+ do { /* PaX: patched PLT emulation #1 */
511+ unsigned int ldah, ldq, jmp;
512+
513+ err = get_user(ldah, (unsigned int *)regs->pc);
514+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
515+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
516+
517+ if (err)
518+ break;
519+
520+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
521+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
522+ jmp == 0x6BFB0000U)
523+ {
524+ unsigned long r27, addr;
525+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
526+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
527+
528+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
529+ err = get_user(r27, (unsigned long *)addr);
530+ if (err)
531+ break;
532+
533+ regs->r27 = r27;
534+ regs->pc = r27;
535+ return 2;
536+ }
537+ } while (0);
538+
539+ do { /* PaX: patched PLT emulation #2 */
540+ unsigned int ldah, lda, br;
541+
542+ err = get_user(ldah, (unsigned int *)regs->pc);
543+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
544+ err |= get_user(br, (unsigned int *)(regs->pc+8));
545+
546+ if (err)
547+ break;
548+
549+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
550+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
551+ (br & 0xFFE00000U) == 0xC3E00000U)
552+ {
553+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
554+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
555+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
556+
557+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
558+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
559+ return 2;
560+ }
561+ } while (0);
562+
563+ do { /* PaX: unpatched PLT emulation */
564+ unsigned int br;
565+
566+ err = get_user(br, (unsigned int *)regs->pc);
567+
568+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
569+ unsigned int br2, ldq, nop, jmp;
570+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
571+
572+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
573+ err = get_user(br2, (unsigned int *)addr);
574+ err |= get_user(ldq, (unsigned int *)(addr+4));
575+ err |= get_user(nop, (unsigned int *)(addr+8));
576+ err |= get_user(jmp, (unsigned int *)(addr+12));
577+ err |= get_user(resolver, (unsigned long *)(addr+16));
578+
579+ if (err)
580+ break;
581+
582+ if (br2 == 0xC3600000U &&
583+ ldq == 0xA77B000CU &&
584+ nop == 0x47FF041FU &&
585+ jmp == 0x6B7B0000U)
586+ {
587+ regs->r28 = regs->pc+4;
588+ regs->r27 = addr+16;
589+ regs->pc = resolver;
590+ return 3;
591+ }
592+ }
593+ } while (0);
594+#endif
595+
596+ return 1;
597+}
598+
599+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
600+{
601+ unsigned long i;
602+
603+ printk(KERN_ERR "PAX: bytes at PC: ");
604+ for (i = 0; i < 5; i++) {
605+ unsigned int c;
606+ if (get_user(c, (unsigned int *)pc+i))
607+ printk(KERN_CONT "???????? ");
608+ else
609+ printk(KERN_CONT "%08x ", c);
610+ }
611+ printk("\n");
612+}
613+#endif
614
615 /*
616 * This routine handles page faults. It determines the address,
617@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
618 good_area:
619 si_code = SEGV_ACCERR;
620 if (cause < 0) {
621- if (!(vma->vm_flags & VM_EXEC))
622+ if (!(vma->vm_flags & VM_EXEC)) {
623+
624+#ifdef CONFIG_PAX_PAGEEXEC
625+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
626+ goto bad_area;
627+
628+ up_read(&mm->mmap_sem);
629+ switch (pax_handle_fetch_fault(regs)) {
630+
631+#ifdef CONFIG_PAX_EMUPLT
632+ case 2:
633+ case 3:
634+ return;
635+#endif
636+
637+ }
638+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
639+ do_group_exit(SIGKILL);
640+#else
641 goto bad_area;
642+#endif
643+
644+ }
645 } else if (!cause) {
646 /* Allow reads even for write-only mappings */
647 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
648diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
649index 6aac3f5..265536b 100644
650--- a/arch/arm/include/asm/elf.h
651+++ b/arch/arm/include/asm/elf.h
652@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
653 the loader. We need to make sure that it is out of the way of the program
654 that it will "exec", and that there is sufficient room for the brk. */
655
656-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
657+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
658+
659+#ifdef CONFIG_PAX_ASLR
660+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
661+
662+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
663+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
664+#endif
665
666 /* When the program starts, a1 contains a pointer to a function to be
667 registered with atexit, as per the SVR4 ABI. A value of 0 means we
668diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
669index c019949..388fdd1 100644
670--- a/arch/arm/include/asm/kmap_types.h
671+++ b/arch/arm/include/asm/kmap_types.h
672@@ -19,6 +19,7 @@ enum km_type {
673 KM_SOFTIRQ0,
674 KM_SOFTIRQ1,
675 KM_L2_CACHE,
676+ KM_CLEARPAGE,
677 KM_TYPE_NR
678 };
679
680diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
681index 1d6bd40..fba0cb9 100644
682--- a/arch/arm/include/asm/uaccess.h
683+++ b/arch/arm/include/asm/uaccess.h
684@@ -22,6 +22,8 @@
685 #define VERIFY_READ 0
686 #define VERIFY_WRITE 1
687
688+extern void check_object_size(const void *ptr, unsigned long n, bool to);
689+
690 /*
691 * The exception table consists of pairs of addresses: the first is the
692 * address of an instruction that is allowed to fault, and the second is
693@@ -387,8 +389,23 @@ do { \
694
695
696 #ifdef CONFIG_MMU
697-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
698-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
699+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
700+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
701+
702+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
703+{
704+ if (!__builtin_constant_p(n))
705+ check_object_size(to, n, false);
706+ return ___copy_from_user(to, from, n);
707+}
708+
709+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
710+{
711+ if (!__builtin_constant_p(n))
712+ check_object_size(from, n, true);
713+ return ___copy_to_user(to, from, n);
714+}
715+
716 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
717 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
718 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
719@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
720
721 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
722 {
723+ if ((long)n < 0)
724+ return n;
725+
726 if (access_ok(VERIFY_READ, from, n))
727 n = __copy_from_user(to, from, n);
728 else /* security hole - plug it */
729@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
730
731 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
732 {
733+ if ((long)n < 0)
734+ return n;
735+
736 if (access_ok(VERIFY_WRITE, to, n))
737 n = __copy_to_user(to, from, n);
738 return n;
739diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
740index 0e62770..e2c2cd6 100644
741--- a/arch/arm/kernel/armksyms.c
742+++ b/arch/arm/kernel/armksyms.c
743@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
744 #ifdef CONFIG_MMU
745 EXPORT_SYMBOL(copy_page);
746
747-EXPORT_SYMBOL(__copy_from_user);
748-EXPORT_SYMBOL(__copy_to_user);
749+EXPORT_SYMBOL(___copy_from_user);
750+EXPORT_SYMBOL(___copy_to_user);
751 EXPORT_SYMBOL(__clear_user);
752
753 EXPORT_SYMBOL(__get_user_1);
754diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
755index ba8ccfe..2dc34dc 100644
756--- a/arch/arm/kernel/kgdb.c
757+++ b/arch/arm/kernel/kgdb.c
758@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
759 * and we handle the normal undef case within the do_undefinstr
760 * handler.
761 */
762-struct kgdb_arch arch_kgdb_ops = {
763+const struct kgdb_arch arch_kgdb_ops = {
764 #ifndef __ARMEB__
765 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
766 #else /* ! __ARMEB__ */
767diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
768index 3f361a7..6e806e1 100644
769--- a/arch/arm/kernel/traps.c
770+++ b/arch/arm/kernel/traps.c
771@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
772
773 DEFINE_SPINLOCK(die_lock);
774
775+extern void gr_handle_kernel_exploit(void);
776+
777 /*
778 * This function is protected against re-entrancy.
779 */
780@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
781 if (panic_on_oops)
782 panic("Fatal exception");
783
784+ gr_handle_kernel_exploit();
785+
786 do_exit(SIGSEGV);
787 }
788
789diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
790index e4fe124..0fc246b 100644
791--- a/arch/arm/lib/copy_from_user.S
792+++ b/arch/arm/lib/copy_from_user.S
793@@ -16,7 +16,7 @@
794 /*
795 * Prototype:
796 *
797- * size_t __copy_from_user(void *to, const void *from, size_t n)
798+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
799 *
800 * Purpose:
801 *
802@@ -84,11 +84,11 @@
803
804 .text
805
806-ENTRY(__copy_from_user)
807+ENTRY(___copy_from_user)
808
809 #include "copy_template.S"
810
811-ENDPROC(__copy_from_user)
812+ENDPROC(___copy_from_user)
813
814 .section .fixup,"ax"
815 .align 0
816diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
817index 1a71e15..ac7b258 100644
818--- a/arch/arm/lib/copy_to_user.S
819+++ b/arch/arm/lib/copy_to_user.S
820@@ -16,7 +16,7 @@
821 /*
822 * Prototype:
823 *
824- * size_t __copy_to_user(void *to, const void *from, size_t n)
825+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
826 *
827 * Purpose:
828 *
829@@ -88,11 +88,11 @@
830 .text
831
832 ENTRY(__copy_to_user_std)
833-WEAK(__copy_to_user)
834+WEAK(___copy_to_user)
835
836 #include "copy_template.S"
837
838-ENDPROC(__copy_to_user)
839+ENDPROC(___copy_to_user)
840
841 .section .fixup,"ax"
842 .align 0
843diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
844index ffdd274..91017b6 100644
845--- a/arch/arm/lib/uaccess.S
846+++ b/arch/arm/lib/uaccess.S
847@@ -19,7 +19,7 @@
848
849 #define PAGE_SHIFT 12
850
851-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
852+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
853 * Purpose : copy a block to user memory from kernel memory
854 * Params : to - user memory
855 * : from - kernel memory
856@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
857 sub r2, r2, ip
858 b .Lc2u_dest_aligned
859
860-ENTRY(__copy_to_user)
861+ENTRY(___copy_to_user)
862 stmfd sp!, {r2, r4 - r7, lr}
863 cmp r2, #4
864 blt .Lc2u_not_enough
865@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
866 ldrgtb r3, [r1], #0
867 USER( strgtbt r3, [r0], #1) @ May fault
868 b .Lc2u_finished
869-ENDPROC(__copy_to_user)
870+ENDPROC(___copy_to_user)
871
872 .section .fixup,"ax"
873 .align 0
874 9001: ldmfd sp!, {r0, r4 - r7, pc}
875 .previous
876
877-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
878+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
879 * Purpose : copy a block from user memory to kernel memory
880 * Params : to - kernel memory
881 * : from - user memory
882@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
883 sub r2, r2, ip
884 b .Lcfu_dest_aligned
885
886-ENTRY(__copy_from_user)
887+ENTRY(___copy_from_user)
888 stmfd sp!, {r0, r2, r4 - r7, lr}
889 cmp r2, #4
890 blt .Lcfu_not_enough
891@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
892 USER( ldrgtbt r3, [r1], #1) @ May fault
893 strgtb r3, [r0], #1
894 b .Lcfu_finished
895-ENDPROC(__copy_from_user)
896+ENDPROC(___copy_from_user)
897
898 .section .fixup,"ax"
899 .align 0
900diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
901index 6b967ff..67d5b2b 100644
902--- a/arch/arm/lib/uaccess_with_memcpy.c
903+++ b/arch/arm/lib/uaccess_with_memcpy.c
904@@ -97,7 +97,7 @@ out:
905 }
906
907 unsigned long
908-__copy_to_user(void __user *to, const void *from, unsigned long n)
909+___copy_to_user(void __user *to, const void *from, unsigned long n)
910 {
911 /*
912 * This test is stubbed out of the main function above to keep
913diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
914index 4028724..beec230 100644
915--- a/arch/arm/mach-at91/pm.c
916+++ b/arch/arm/mach-at91/pm.c
917@@ -348,7 +348,7 @@ static void at91_pm_end(void)
918 }
919
920
921-static struct platform_suspend_ops at91_pm_ops ={
922+static const struct platform_suspend_ops at91_pm_ops ={
923 .valid = at91_pm_valid_state,
924 .begin = at91_pm_begin,
925 .enter = at91_pm_enter,
926diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
927index 5218943..0a34552 100644
928--- a/arch/arm/mach-omap1/pm.c
929+++ b/arch/arm/mach-omap1/pm.c
930@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
931
932
933
934-static struct platform_suspend_ops omap_pm_ops ={
935+static const struct platform_suspend_ops omap_pm_ops ={
936 .prepare = omap_pm_prepare,
937 .enter = omap_pm_enter,
938 .finish = omap_pm_finish,
939diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
940index bff5c4e..d4c649b 100644
941--- a/arch/arm/mach-omap2/pm24xx.c
942+++ b/arch/arm/mach-omap2/pm24xx.c
943@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
944 enable_hlt();
945 }
946
947-static struct platform_suspend_ops omap_pm_ops = {
948+static const struct platform_suspend_ops omap_pm_ops = {
949 .prepare = omap2_pm_prepare,
950 .enter = omap2_pm_enter,
951 .finish = omap2_pm_finish,
952diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
953index 8946319..7d3e661 100644
954--- a/arch/arm/mach-omap2/pm34xx.c
955+++ b/arch/arm/mach-omap2/pm34xx.c
956@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
957 return;
958 }
959
960-static struct platform_suspend_ops omap_pm_ops = {
961+static const struct platform_suspend_ops omap_pm_ops = {
962 .begin = omap3_pm_begin,
963 .end = omap3_pm_end,
964 .prepare = omap3_pm_prepare,
965diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
966index b3d8d53..6e68ebc 100644
967--- a/arch/arm/mach-pnx4008/pm.c
968+++ b/arch/arm/mach-pnx4008/pm.c
969@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
970 (state == PM_SUSPEND_MEM);
971 }
972
973-static struct platform_suspend_ops pnx4008_pm_ops = {
974+static const struct platform_suspend_ops pnx4008_pm_ops = {
975 .enter = pnx4008_pm_enter,
976 .valid = pnx4008_pm_valid,
977 };
978diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
979index 7693355..9beb00a 100644
980--- a/arch/arm/mach-pxa/pm.c
981+++ b/arch/arm/mach-pxa/pm.c
982@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
983 pxa_cpu_pm_fns->finish();
984 }
985
986-static struct platform_suspend_ops pxa_pm_ops = {
987+static const struct platform_suspend_ops pxa_pm_ops = {
988 .valid = pxa_pm_valid,
989 .enter = pxa_pm_enter,
990 .prepare = pxa_pm_prepare,
991diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
992index 629e05d..06be589 100644
993--- a/arch/arm/mach-pxa/sharpsl_pm.c
994+++ b/arch/arm/mach-pxa/sharpsl_pm.c
995@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
996 }
997
998 #ifdef CONFIG_PM
999-static struct platform_suspend_ops sharpsl_pm_ops = {
1000+static const struct platform_suspend_ops sharpsl_pm_ops = {
1001 .prepare = pxa_pm_prepare,
1002 .finish = pxa_pm_finish,
1003 .enter = corgi_pxa_pm_enter,
1004diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1005index c83fdc8..ab9fc44 100644
1006--- a/arch/arm/mach-sa1100/pm.c
1007+++ b/arch/arm/mach-sa1100/pm.c
1008@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1009 return virt_to_phys(sp);
1010 }
1011
1012-static struct platform_suspend_ops sa11x0_pm_ops = {
1013+static const struct platform_suspend_ops sa11x0_pm_ops = {
1014 .enter = sa11x0_pm_enter,
1015 .valid = suspend_valid_only_mem,
1016 };
1017diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1018index 3191cd6..c0739db 100644
1019--- a/arch/arm/mm/fault.c
1020+++ b/arch/arm/mm/fault.c
1021@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1022 }
1023 #endif
1024
1025+#ifdef CONFIG_PAX_PAGEEXEC
1026+ if (fsr & FSR_LNX_PF) {
1027+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1028+ do_group_exit(SIGKILL);
1029+ }
1030+#endif
1031+
1032 tsk->thread.address = addr;
1033 tsk->thread.error_code = fsr;
1034 tsk->thread.trap_no = 14;
1035@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1036 }
1037 #endif /* CONFIG_MMU */
1038
1039+#ifdef CONFIG_PAX_PAGEEXEC
1040+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1041+{
1042+ long i;
1043+
1044+ printk(KERN_ERR "PAX: bytes at PC: ");
1045+ for (i = 0; i < 20; i++) {
1046+ unsigned char c;
1047+ if (get_user(c, (__force unsigned char __user *)pc+i))
1048+ printk(KERN_CONT "?? ");
1049+ else
1050+ printk(KERN_CONT "%02x ", c);
1051+ }
1052+ printk("\n");
1053+
1054+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1055+ for (i = -1; i < 20; i++) {
1056+ unsigned long c;
1057+ if (get_user(c, (__force unsigned long __user *)sp+i))
1058+ printk(KERN_CONT "???????? ");
1059+ else
1060+ printk(KERN_CONT "%08lx ", c);
1061+ }
1062+ printk("\n");
1063+}
1064+#endif
1065+
1066 /*
1067 * First Level Translation Fault Handler
1068 *
1069diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1070index f5abc51..7ec524c 100644
1071--- a/arch/arm/mm/mmap.c
1072+++ b/arch/arm/mm/mmap.c
1073@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1074 if (len > TASK_SIZE)
1075 return -ENOMEM;
1076
1077+#ifdef CONFIG_PAX_RANDMMAP
1078+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1079+#endif
1080+
1081 if (addr) {
1082 if (do_align)
1083 addr = COLOUR_ALIGN(addr, pgoff);
1084@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1085 addr = PAGE_ALIGN(addr);
1086
1087 vma = find_vma(mm, addr);
1088- if (TASK_SIZE - len >= addr &&
1089- (!vma || addr + len <= vma->vm_start))
1090+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1091 return addr;
1092 }
1093 if (len > mm->cached_hole_size) {
1094- start_addr = addr = mm->free_area_cache;
1095+ start_addr = addr = mm->free_area_cache;
1096 } else {
1097- start_addr = addr = TASK_UNMAPPED_BASE;
1098- mm->cached_hole_size = 0;
1099+ start_addr = addr = mm->mmap_base;
1100+ mm->cached_hole_size = 0;
1101 }
1102
1103 full_search:
1104@@ -94,14 +97,14 @@ full_search:
1105 * Start a new search - just in case we missed
1106 * some holes.
1107 */
1108- if (start_addr != TASK_UNMAPPED_BASE) {
1109- start_addr = addr = TASK_UNMAPPED_BASE;
1110+ if (start_addr != mm->mmap_base) {
1111+ start_addr = addr = mm->mmap_base;
1112 mm->cached_hole_size = 0;
1113 goto full_search;
1114 }
1115 return -ENOMEM;
1116 }
1117- if (!vma || addr + len <= vma->vm_start) {
1118+ if (check_heap_stack_gap(vma, addr, len)) {
1119 /*
1120 * Remember the place where we stopped the search:
1121 */
1122diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1123index 8d97db2..b66cfa5 100644
1124--- a/arch/arm/plat-s3c/pm.c
1125+++ b/arch/arm/plat-s3c/pm.c
1126@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1127 s3c_pm_check_cleanup();
1128 }
1129
1130-static struct platform_suspend_ops s3c_pm_ops = {
1131+static const struct platform_suspend_ops s3c_pm_ops = {
1132 .enter = s3c_pm_enter,
1133 .prepare = s3c_pm_prepare,
1134 .finish = s3c_pm_finish,
1135diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1136index d5d1d41..856e2ed 100644
1137--- a/arch/avr32/include/asm/elf.h
1138+++ b/arch/avr32/include/asm/elf.h
1139@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1140 the loader. We need to make sure that it is out of the way of the program
1141 that it will "exec", and that there is sufficient room for the brk. */
1142
1143-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1144+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1145
1146+#ifdef CONFIG_PAX_ASLR
1147+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1148+
1149+#define PAX_DELTA_MMAP_LEN 15
1150+#define PAX_DELTA_STACK_LEN 15
1151+#endif
1152
1153 /* This yields a mask that user programs can use to figure out what
1154 instruction set this CPU supports. This could be done in user space,
1155diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1156index b7f5c68..556135c 100644
1157--- a/arch/avr32/include/asm/kmap_types.h
1158+++ b/arch/avr32/include/asm/kmap_types.h
1159@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1160 D(11) KM_IRQ1,
1161 D(12) KM_SOFTIRQ0,
1162 D(13) KM_SOFTIRQ1,
1163-D(14) KM_TYPE_NR
1164+D(14) KM_CLEARPAGE,
1165+D(15) KM_TYPE_NR
1166 };
1167
1168 #undef D
1169diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1170index f021edf..32d680e 100644
1171--- a/arch/avr32/mach-at32ap/pm.c
1172+++ b/arch/avr32/mach-at32ap/pm.c
1173@@ -176,7 +176,7 @@ out:
1174 return 0;
1175 }
1176
1177-static struct platform_suspend_ops avr32_pm_ops = {
1178+static const struct platform_suspend_ops avr32_pm_ops = {
1179 .valid = avr32_pm_valid_state,
1180 .enter = avr32_pm_enter,
1181 };
1182diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1183index b61d86d..e292c7f 100644
1184--- a/arch/avr32/mm/fault.c
1185+++ b/arch/avr32/mm/fault.c
1186@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1187
1188 int exception_trace = 1;
1189
1190+#ifdef CONFIG_PAX_PAGEEXEC
1191+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1192+{
1193+ unsigned long i;
1194+
1195+ printk(KERN_ERR "PAX: bytes at PC: ");
1196+ for (i = 0; i < 20; i++) {
1197+ unsigned char c;
1198+ if (get_user(c, (unsigned char *)pc+i))
1199+ printk(KERN_CONT "???????? ");
1200+ else
1201+ printk(KERN_CONT "%02x ", c);
1202+ }
1203+ printk("\n");
1204+}
1205+#endif
1206+
1207 /*
1208 * This routine handles page faults. It determines the address and the
1209 * problem, and then passes it off to one of the appropriate routines.
1210@@ -157,6 +174,16 @@ bad_area:
1211 up_read(&mm->mmap_sem);
1212
1213 if (user_mode(regs)) {
1214+
1215+#ifdef CONFIG_PAX_PAGEEXEC
1216+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1217+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1218+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1219+ do_group_exit(SIGKILL);
1220+ }
1221+ }
1222+#endif
1223+
1224 if (exception_trace && printk_ratelimit())
1225 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1226 "sp %08lx ecr %lu\n",
1227diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1228index cce79d0..c406c85 100644
1229--- a/arch/blackfin/kernel/kgdb.c
1230+++ b/arch/blackfin/kernel/kgdb.c
1231@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1232 return -1; /* this means that we do not want to exit from the handler */
1233 }
1234
1235-struct kgdb_arch arch_kgdb_ops = {
1236+const struct kgdb_arch arch_kgdb_ops = {
1237 .gdb_bpt_instr = {0xa1},
1238 #ifdef CONFIG_SMP
1239 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1240diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1241index 8837be4..b2fb413 100644
1242--- a/arch/blackfin/mach-common/pm.c
1243+++ b/arch/blackfin/mach-common/pm.c
1244@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1245 return 0;
1246 }
1247
1248-struct platform_suspend_ops bfin_pm_ops = {
1249+const struct platform_suspend_ops bfin_pm_ops = {
1250 .enter = bfin_pm_enter,
1251 .valid = bfin_pm_valid,
1252 };
1253diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1254index f8e16b2..c73ff79 100644
1255--- a/arch/frv/include/asm/kmap_types.h
1256+++ b/arch/frv/include/asm/kmap_types.h
1257@@ -23,6 +23,7 @@ enum km_type {
1258 KM_IRQ1,
1259 KM_SOFTIRQ0,
1260 KM_SOFTIRQ1,
1261+ KM_CLEARPAGE,
1262 KM_TYPE_NR
1263 };
1264
1265diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1266index 385fd30..6c3d97e 100644
1267--- a/arch/frv/mm/elf-fdpic.c
1268+++ b/arch/frv/mm/elf-fdpic.c
1269@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1270 if (addr) {
1271 addr = PAGE_ALIGN(addr);
1272 vma = find_vma(current->mm, addr);
1273- if (TASK_SIZE - len >= addr &&
1274- (!vma || addr + len <= vma->vm_start))
1275+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1276 goto success;
1277 }
1278
1279@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1280 for (; vma; vma = vma->vm_next) {
1281 if (addr > limit)
1282 break;
1283- if (addr + len <= vma->vm_start)
1284+ if (check_heap_stack_gap(vma, addr, len))
1285 goto success;
1286 addr = vma->vm_end;
1287 }
1288@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1289 for (; vma; vma = vma->vm_next) {
1290 if (addr > limit)
1291 break;
1292- if (addr + len <= vma->vm_start)
1293+ if (check_heap_stack_gap(vma, addr, len))
1294 goto success;
1295 addr = vma->vm_end;
1296 }
1297diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1298index e4a80d8..11a7ea1 100644
1299--- a/arch/ia64/hp/common/hwsw_iommu.c
1300+++ b/arch/ia64/hp/common/hwsw_iommu.c
1301@@ -17,7 +17,7 @@
1302 #include <linux/swiotlb.h>
1303 #include <asm/machvec.h>
1304
1305-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1306+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1307
1308 /* swiotlb declarations & definitions: */
1309 extern int swiotlb_late_init_with_default_size (size_t size);
1310@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1311 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1312 }
1313
1314-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1315+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1316 {
1317 if (use_swiotlb(dev))
1318 return &swiotlb_dma_ops;
1319diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1320index 01ae69b..35752fd 100644
1321--- a/arch/ia64/hp/common/sba_iommu.c
1322+++ b/arch/ia64/hp/common/sba_iommu.c
1323@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1324 },
1325 };
1326
1327-extern struct dma_map_ops swiotlb_dma_ops;
1328+extern const struct dma_map_ops swiotlb_dma_ops;
1329
1330 static int __init
1331 sba_init(void)
1332@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1333
1334 __setup("sbapagesize=",sba_page_override);
1335
1336-struct dma_map_ops sba_dma_ops = {
1337+const struct dma_map_ops sba_dma_ops = {
1338 .alloc_coherent = sba_alloc_coherent,
1339 .free_coherent = sba_free_coherent,
1340 .map_page = sba_map_page,
1341diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1342index c69552b..c7122f4 100644
1343--- a/arch/ia64/ia32/binfmt_elf32.c
1344+++ b/arch/ia64/ia32/binfmt_elf32.c
1345@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1346
1347 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1348
1349+#ifdef CONFIG_PAX_ASLR
1350+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1351+
1352+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1353+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1354+#endif
1355+
1356 /* Ugly but avoids duplication */
1357 #include "../../../fs/binfmt_elf.c"
1358
1359diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1360index 0f15349..26b3429 100644
1361--- a/arch/ia64/ia32/ia32priv.h
1362+++ b/arch/ia64/ia32/ia32priv.h
1363@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1364 #define ELF_DATA ELFDATA2LSB
1365 #define ELF_ARCH EM_386
1366
1367-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1368+#ifdef CONFIG_PAX_RANDUSTACK
1369+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1370+#else
1371+#define __IA32_DELTA_STACK 0UL
1372+#endif
1373+
1374+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1375+
1376 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1377 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1378
1379diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1380index 8d3c79c..71b3af6 100644
1381--- a/arch/ia64/include/asm/dma-mapping.h
1382+++ b/arch/ia64/include/asm/dma-mapping.h
1383@@ -12,7 +12,7 @@
1384
1385 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1386
1387-extern struct dma_map_ops *dma_ops;
1388+extern const struct dma_map_ops *dma_ops;
1389 extern struct ia64_machine_vector ia64_mv;
1390 extern void set_iommu_machvec(void);
1391
1392@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1393 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1394 dma_addr_t *daddr, gfp_t gfp)
1395 {
1396- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1397+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1398 void *caddr;
1399
1400 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1401@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1402 static inline void dma_free_coherent(struct device *dev, size_t size,
1403 void *caddr, dma_addr_t daddr)
1404 {
1405- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1406+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1407 debug_dma_free_coherent(dev, size, caddr, daddr);
1408 ops->free_coherent(dev, size, caddr, daddr);
1409 }
1410@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1411
1412 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1413 {
1414- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1415+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1416 return ops->mapping_error(dev, daddr);
1417 }
1418
1419 static inline int dma_supported(struct device *dev, u64 mask)
1420 {
1421- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1422+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1423 return ops->dma_supported(dev, mask);
1424 }
1425
1426diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1427index 86eddee..b116bb4 100644
1428--- a/arch/ia64/include/asm/elf.h
1429+++ b/arch/ia64/include/asm/elf.h
1430@@ -43,6 +43,13 @@
1431 */
1432 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1433
1434+#ifdef CONFIG_PAX_ASLR
1435+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1436+
1437+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1438+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1439+#endif
1440+
1441 #define PT_IA_64_UNWIND 0x70000001
1442
1443 /* IA-64 relocations: */
1444diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1445index 367d299..9ad4279 100644
1446--- a/arch/ia64/include/asm/machvec.h
1447+++ b/arch/ia64/include/asm/machvec.h
1448@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1449 /* DMA-mapping interface: */
1450 typedef void ia64_mv_dma_init (void);
1451 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1452-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1453+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1454
1455 /*
1456 * WARNING: The legacy I/O space is _architected_. Platforms are
1457@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1458 # endif /* CONFIG_IA64_GENERIC */
1459
1460 extern void swiotlb_dma_init(void);
1461-extern struct dma_map_ops *dma_get_ops(struct device *);
1462+extern const struct dma_map_ops *dma_get_ops(struct device *);
1463
1464 /*
1465 * Define default versions so we can extend machvec for new platforms without having
1466diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1467index 8840a69..cdb63d9 100644
1468--- a/arch/ia64/include/asm/pgtable.h
1469+++ b/arch/ia64/include/asm/pgtable.h
1470@@ -12,7 +12,7 @@
1471 * David Mosberger-Tang <davidm@hpl.hp.com>
1472 */
1473
1474-
1475+#include <linux/const.h>
1476 #include <asm/mman.h>
1477 #include <asm/page.h>
1478 #include <asm/processor.h>
1479@@ -143,6 +143,17 @@
1480 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1481 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1482 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1483+
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1486+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1487+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1488+#else
1489+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1490+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1491+# define PAGE_COPY_NOEXEC PAGE_COPY
1492+#endif
1493+
1494 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1495 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1496 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1497diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1498index 239ecdc..f94170e 100644
1499--- a/arch/ia64/include/asm/spinlock.h
1500+++ b/arch/ia64/include/asm/spinlock.h
1501@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1502 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1503
1504 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1505- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1506+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1507 }
1508
1509 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1510diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1511index 449c8c0..432a3d2 100644
1512--- a/arch/ia64/include/asm/uaccess.h
1513+++ b/arch/ia64/include/asm/uaccess.h
1514@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1515 const void *__cu_from = (from); \
1516 long __cu_len = (n); \
1517 \
1518- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1519+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1520 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1521 __cu_len; \
1522 })
1523@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1524 long __cu_len = (n); \
1525 \
1526 __chk_user_ptr(__cu_from); \
1527- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1528+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1529 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1530 __cu_len; \
1531 })
1532diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1533index f2c1600..969398a 100644
1534--- a/arch/ia64/kernel/dma-mapping.c
1535+++ b/arch/ia64/kernel/dma-mapping.c
1536@@ -3,7 +3,7 @@
1537 /* Set this to 1 if there is a HW IOMMU in the system */
1538 int iommu_detected __read_mostly;
1539
1540-struct dma_map_ops *dma_ops;
1541+const struct dma_map_ops *dma_ops;
1542 EXPORT_SYMBOL(dma_ops);
1543
1544 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1545@@ -16,7 +16,7 @@ static int __init dma_init(void)
1546 }
1547 fs_initcall(dma_init);
1548
1549-struct dma_map_ops *dma_get_ops(struct device *dev)
1550+const struct dma_map_ops *dma_get_ops(struct device *dev)
1551 {
1552 return dma_ops;
1553 }
1554diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1555index 1481b0a..e7d38ff 100644
1556--- a/arch/ia64/kernel/module.c
1557+++ b/arch/ia64/kernel/module.c
1558@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1559 void
1560 module_free (struct module *mod, void *module_region)
1561 {
1562- if (mod && mod->arch.init_unw_table &&
1563- module_region == mod->module_init) {
1564+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1565 unw_remove_unwind_table(mod->arch.init_unw_table);
1566 mod->arch.init_unw_table = NULL;
1567 }
1568@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1569 }
1570
1571 static inline int
1572+in_init_rx (const struct module *mod, uint64_t addr)
1573+{
1574+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1575+}
1576+
1577+static inline int
1578+in_init_rw (const struct module *mod, uint64_t addr)
1579+{
1580+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1581+}
1582+
1583+static inline int
1584 in_init (const struct module *mod, uint64_t addr)
1585 {
1586- return addr - (uint64_t) mod->module_init < mod->init_size;
1587+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1588+}
1589+
1590+static inline int
1591+in_core_rx (const struct module *mod, uint64_t addr)
1592+{
1593+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1594+}
1595+
1596+static inline int
1597+in_core_rw (const struct module *mod, uint64_t addr)
1598+{
1599+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1600 }
1601
1602 static inline int
1603 in_core (const struct module *mod, uint64_t addr)
1604 {
1605- return addr - (uint64_t) mod->module_core < mod->core_size;
1606+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1607 }
1608
1609 static inline int
1610@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1611 break;
1612
1613 case RV_BDREL:
1614- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1615+ if (in_init_rx(mod, val))
1616+ val -= (uint64_t) mod->module_init_rx;
1617+ else if (in_init_rw(mod, val))
1618+ val -= (uint64_t) mod->module_init_rw;
1619+ else if (in_core_rx(mod, val))
1620+ val -= (uint64_t) mod->module_core_rx;
1621+ else if (in_core_rw(mod, val))
1622+ val -= (uint64_t) mod->module_core_rw;
1623 break;
1624
1625 case RV_LTV:
1626@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1627 * addresses have been selected...
1628 */
1629 uint64_t gp;
1630- if (mod->core_size > MAX_LTOFF)
1631+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1632 /*
1633 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1634 * at the end of the module.
1635 */
1636- gp = mod->core_size - MAX_LTOFF / 2;
1637+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1638 else
1639- gp = mod->core_size / 2;
1640- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1641+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1642+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1643 mod->arch.gp = gp;
1644 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1645 }
1646diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1647index f6b1ff0..de773fb 100644
1648--- a/arch/ia64/kernel/pci-dma.c
1649+++ b/arch/ia64/kernel/pci-dma.c
1650@@ -43,7 +43,7 @@ struct device fallback_dev = {
1651 .dma_mask = &fallback_dev.coherent_dma_mask,
1652 };
1653
1654-extern struct dma_map_ops intel_dma_ops;
1655+extern const struct dma_map_ops intel_dma_ops;
1656
1657 static int __init pci_iommu_init(void)
1658 {
1659@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1660 }
1661 EXPORT_SYMBOL(iommu_dma_supported);
1662
1663+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1664+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1665+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1666+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1667+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1668+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1669+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1670+
1671+static const struct dma_map_ops intel_iommu_dma_ops = {
1672+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1673+ .alloc_coherent = intel_alloc_coherent,
1674+ .free_coherent = intel_free_coherent,
1675+ .map_sg = intel_map_sg,
1676+ .unmap_sg = intel_unmap_sg,
1677+ .map_page = intel_map_page,
1678+ .unmap_page = intel_unmap_page,
1679+ .mapping_error = intel_mapping_error,
1680+
1681+ .sync_single_for_cpu = machvec_dma_sync_single,
1682+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1683+ .sync_single_for_device = machvec_dma_sync_single,
1684+ .sync_sg_for_device = machvec_dma_sync_sg,
1685+ .dma_supported = iommu_dma_supported,
1686+};
1687+
1688 void __init pci_iommu_alloc(void)
1689 {
1690- dma_ops = &intel_dma_ops;
1691-
1692- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1693- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1694- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1695- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1696- dma_ops->dma_supported = iommu_dma_supported;
1697+ dma_ops = &intel_iommu_dma_ops;
1698
1699 /*
1700 * The order of these functions is important for
1701diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1702index 285aae8..61dbab6 100644
1703--- a/arch/ia64/kernel/pci-swiotlb.c
1704+++ b/arch/ia64/kernel/pci-swiotlb.c
1705@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1706 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1707 }
1708
1709-struct dma_map_ops swiotlb_dma_ops = {
1710+const struct dma_map_ops swiotlb_dma_ops = {
1711 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1712 .free_coherent = swiotlb_free_coherent,
1713 .map_page = swiotlb_map_page,
1714diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1715index 609d500..7dde2a8 100644
1716--- a/arch/ia64/kernel/sys_ia64.c
1717+++ b/arch/ia64/kernel/sys_ia64.c
1718@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1719 if (REGION_NUMBER(addr) == RGN_HPAGE)
1720 addr = 0;
1721 #endif
1722+
1723+#ifdef CONFIG_PAX_RANDMMAP
1724+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1725+ addr = mm->free_area_cache;
1726+ else
1727+#endif
1728+
1729 if (!addr)
1730 addr = mm->free_area_cache;
1731
1732@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1733 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1734 /* At this point: (!vma || addr < vma->vm_end). */
1735 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1736- if (start_addr != TASK_UNMAPPED_BASE) {
1737+ if (start_addr != mm->mmap_base) {
1738 /* Start a new search --- just in case we missed some holes. */
1739- addr = TASK_UNMAPPED_BASE;
1740+ addr = mm->mmap_base;
1741 goto full_search;
1742 }
1743 return -ENOMEM;
1744 }
1745- if (!vma || addr + len <= vma->vm_start) {
1746+ if (check_heap_stack_gap(vma, addr, len)) {
1747 /* Remember the address where we stopped this search: */
1748 mm->free_area_cache = addr + len;
1749 return addr;
1750diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1751index 8f06035..b3a5818 100644
1752--- a/arch/ia64/kernel/topology.c
1753+++ b/arch/ia64/kernel/topology.c
1754@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1755 return ret;
1756 }
1757
1758-static struct sysfs_ops cache_sysfs_ops = {
1759+static const struct sysfs_ops cache_sysfs_ops = {
1760 .show = cache_show
1761 };
1762
1763diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1764index 0a0c77b..8e55a81 100644
1765--- a/arch/ia64/kernel/vmlinux.lds.S
1766+++ b/arch/ia64/kernel/vmlinux.lds.S
1767@@ -190,7 +190,7 @@ SECTIONS
1768 /* Per-cpu data: */
1769 . = ALIGN(PERCPU_PAGE_SIZE);
1770 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1771- __phys_per_cpu_start = __per_cpu_load;
1772+ __phys_per_cpu_start = per_cpu_load;
1773 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1774 * into percpu page size
1775 */
1776diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1777index 19261a9..1611b7a 100644
1778--- a/arch/ia64/mm/fault.c
1779+++ b/arch/ia64/mm/fault.c
1780@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1781 return pte_present(pte);
1782 }
1783
1784+#ifdef CONFIG_PAX_PAGEEXEC
1785+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1786+{
1787+ unsigned long i;
1788+
1789+ printk(KERN_ERR "PAX: bytes at PC: ");
1790+ for (i = 0; i < 8; i++) {
1791+ unsigned int c;
1792+ if (get_user(c, (unsigned int *)pc+i))
1793+ printk(KERN_CONT "???????? ");
1794+ else
1795+ printk(KERN_CONT "%08x ", c);
1796+ }
1797+ printk("\n");
1798+}
1799+#endif
1800+
1801 void __kprobes
1802 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1803 {
1804@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1805 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1806 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1807
1808- if ((vma->vm_flags & mask) != mask)
1809+ if ((vma->vm_flags & mask) != mask) {
1810+
1811+#ifdef CONFIG_PAX_PAGEEXEC
1812+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1813+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1814+ goto bad_area;
1815+
1816+ up_read(&mm->mmap_sem);
1817+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1818+ do_group_exit(SIGKILL);
1819+ }
1820+#endif
1821+
1822 goto bad_area;
1823
1824+ }
1825+
1826 survive:
1827 /*
1828 * If for any reason at all we couldn't handle the fault, make
1829diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1830index b0f6157..a082bbc 100644
1831--- a/arch/ia64/mm/hugetlbpage.c
1832+++ b/arch/ia64/mm/hugetlbpage.c
1833@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1834 /* At this point: (!vmm || addr < vmm->vm_end). */
1835 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1836 return -ENOMEM;
1837- if (!vmm || (addr + len) <= vmm->vm_start)
1838+ if (check_heap_stack_gap(vmm, addr, len))
1839 return addr;
1840 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1841 }
1842diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1843index 1857766..05cc6a3 100644
1844--- a/arch/ia64/mm/init.c
1845+++ b/arch/ia64/mm/init.c
1846@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1847 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1848 vma->vm_end = vma->vm_start + PAGE_SIZE;
1849 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1850+
1851+#ifdef CONFIG_PAX_PAGEEXEC
1852+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1853+ vma->vm_flags &= ~VM_EXEC;
1854+
1855+#ifdef CONFIG_PAX_MPROTECT
1856+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1857+ vma->vm_flags &= ~VM_MAYEXEC;
1858+#endif
1859+
1860+ }
1861+#endif
1862+
1863 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1864 down_write(&current->mm->mmap_sem);
1865 if (insert_vm_struct(current->mm, vma)) {
1866diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1867index 98b6849..8046766 100644
1868--- a/arch/ia64/sn/pci/pci_dma.c
1869+++ b/arch/ia64/sn/pci/pci_dma.c
1870@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1871 return ret;
1872 }
1873
1874-static struct dma_map_ops sn_dma_ops = {
1875+static const struct dma_map_ops sn_dma_ops = {
1876 .alloc_coherent = sn_dma_alloc_coherent,
1877 .free_coherent = sn_dma_free_coherent,
1878 .map_page = sn_dma_map_page,
1879diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1880index 82abd15..d95ae5d 100644
1881--- a/arch/m32r/lib/usercopy.c
1882+++ b/arch/m32r/lib/usercopy.c
1883@@ -14,6 +14,9 @@
1884 unsigned long
1885 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1886 {
1887+ if ((long)n < 0)
1888+ return n;
1889+
1890 prefetch(from);
1891 if (access_ok(VERIFY_WRITE, to, n))
1892 __copy_user(to,from,n);
1893@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1894 unsigned long
1895 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1896 {
1897+ if ((long)n < 0)
1898+ return n;
1899+
1900 prefetchw(to);
1901 if (access_ok(VERIFY_READ, from, n))
1902 __copy_user_zeroing(to,from,n);
1903diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1904index 77f5021..2b1db8a 100644
1905--- a/arch/mips/Makefile
1906+++ b/arch/mips/Makefile
1907@@ -51,6 +51,8 @@ endif
1908 cflags-y := -ffunction-sections
1909 cflags-y += $(call cc-option, -mno-check-zero-division)
1910
1911+cflags-y += -Wno-sign-compare -Wno-extra
1912+
1913 ifdef CONFIG_32BIT
1914 ld-emul = $(32bit-emul)
1915 vmlinux-32 = vmlinux
1916diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1917index 632f986..fd0378d 100644
1918--- a/arch/mips/alchemy/devboards/pm.c
1919+++ b/arch/mips/alchemy/devboards/pm.c
1920@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1921
1922 }
1923
1924-static struct platform_suspend_ops db1x_pm_ops = {
1925+static const struct platform_suspend_ops db1x_pm_ops = {
1926 .valid = suspend_valid_only_mem,
1927 .begin = db1x_pm_begin,
1928 .enter = db1x_pm_enter,
1929diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1930index 7990694..4e93acf 100644
1931--- a/arch/mips/include/asm/elf.h
1932+++ b/arch/mips/include/asm/elf.h
1933@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1934 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1935 #endif
1936
1937+#ifdef CONFIG_PAX_ASLR
1938+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1939+
1940+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1941+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1942+#endif
1943+
1944 #endif /* _ASM_ELF_H */
1945diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1946index f266295..627cfff 100644
1947--- a/arch/mips/include/asm/page.h
1948+++ b/arch/mips/include/asm/page.h
1949@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1950 #ifdef CONFIG_CPU_MIPS32
1951 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1952 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1953- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1954+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1955 #else
1956 typedef struct { unsigned long long pte; } pte_t;
1957 #define pte_val(x) ((x).pte)
1958diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1959index e48c0bf..f3acf65 100644
1960--- a/arch/mips/include/asm/reboot.h
1961+++ b/arch/mips/include/asm/reboot.h
1962@@ -9,7 +9,7 @@
1963 #ifndef _ASM_REBOOT_H
1964 #define _ASM_REBOOT_H
1965
1966-extern void (*_machine_restart)(char *command);
1967-extern void (*_machine_halt)(void);
1968+extern void (*__noreturn _machine_restart)(char *command);
1969+extern void (*__noreturn _machine_halt)(void);
1970
1971 #endif /* _ASM_REBOOT_H */
1972diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1973index 83b5509..9fa24a23 100644
1974--- a/arch/mips/include/asm/system.h
1975+++ b/arch/mips/include/asm/system.h
1976@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1977 */
1978 #define __ARCH_WANT_UNLOCKED_CTXSW
1979
1980-extern unsigned long arch_align_stack(unsigned long sp);
1981+#define arch_align_stack(x) ((x) & ~0xfUL)
1982
1983 #endif /* _ASM_SYSTEM_H */
1984diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1985index 9fdd8bc..fcf9d68 100644
1986--- a/arch/mips/kernel/binfmt_elfn32.c
1987+++ b/arch/mips/kernel/binfmt_elfn32.c
1988@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1989 #undef ELF_ET_DYN_BASE
1990 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1991
1992+#ifdef CONFIG_PAX_ASLR
1993+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1994+
1995+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1996+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1997+#endif
1998+
1999 #include <asm/processor.h>
2000 #include <linux/module.h>
2001 #include <linux/elfcore.h>
2002diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2003index ff44823..cf0b48a 100644
2004--- a/arch/mips/kernel/binfmt_elfo32.c
2005+++ b/arch/mips/kernel/binfmt_elfo32.c
2006@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2007 #undef ELF_ET_DYN_BASE
2008 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2009
2010+#ifdef CONFIG_PAX_ASLR
2011+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2012+
2013+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2014+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2015+#endif
2016+
2017 #include <asm/processor.h>
2018
2019 /*
2020diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2021index 50c9bb8..efdd5f8 100644
2022--- a/arch/mips/kernel/kgdb.c
2023+++ b/arch/mips/kernel/kgdb.c
2024@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2025 return -1;
2026 }
2027
2028+/* cannot be const */
2029 struct kgdb_arch arch_kgdb_ops;
2030
2031 /*
2032diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2033index f3d73e1..bb3f57a 100644
2034--- a/arch/mips/kernel/process.c
2035+++ b/arch/mips/kernel/process.c
2036@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2037 out:
2038 return pc;
2039 }
2040-
2041-/*
2042- * Don't forget that the stack pointer must be aligned on a 8 bytes
2043- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2044- */
2045-unsigned long arch_align_stack(unsigned long sp)
2046-{
2047- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2048- sp -= get_random_int() & ~PAGE_MASK;
2049-
2050- return sp & ALMASK;
2051-}
2052diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2053index 060563a..7fbf310 100644
2054--- a/arch/mips/kernel/reset.c
2055+++ b/arch/mips/kernel/reset.c
2056@@ -19,8 +19,8 @@
2057 * So handle all using function pointers to machine specific
2058 * functions.
2059 */
2060-void (*_machine_restart)(char *command);
2061-void (*_machine_halt)(void);
2062+void (*__noreturn _machine_restart)(char *command);
2063+void (*__noreturn _machine_halt)(void);
2064 void (*pm_power_off)(void);
2065
2066 EXPORT_SYMBOL(pm_power_off);
2067@@ -29,16 +29,19 @@ void machine_restart(char *command)
2068 {
2069 if (_machine_restart)
2070 _machine_restart(command);
2071+ BUG();
2072 }
2073
2074 void machine_halt(void)
2075 {
2076 if (_machine_halt)
2077 _machine_halt();
2078+ BUG();
2079 }
2080
2081 void machine_power_off(void)
2082 {
2083 if (pm_power_off)
2084 pm_power_off();
2085+ BUG();
2086 }
2087diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2088index 3f7f466..3abe0b5 100644
2089--- a/arch/mips/kernel/syscall.c
2090+++ b/arch/mips/kernel/syscall.c
2091@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2092 do_color_align = 0;
2093 if (filp || (flags & MAP_SHARED))
2094 do_color_align = 1;
2095+
2096+#ifdef CONFIG_PAX_RANDMMAP
2097+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2098+#endif
2099+
2100 if (addr) {
2101 if (do_color_align)
2102 addr = COLOUR_ALIGN(addr, pgoff);
2103 else
2104 addr = PAGE_ALIGN(addr);
2105 vmm = find_vma(current->mm, addr);
2106- if (task_size - len >= addr &&
2107- (!vmm || addr + len <= vmm->vm_start))
2108+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2109 return addr;
2110 }
2111- addr = TASK_UNMAPPED_BASE;
2112+ addr = current->mm->mmap_base;
2113 if (do_color_align)
2114 addr = COLOUR_ALIGN(addr, pgoff);
2115 else
2116@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2117 /* At this point: (!vmm || addr < vmm->vm_end). */
2118 if (task_size - len < addr)
2119 return -ENOMEM;
2120- if (!vmm || addr + len <= vmm->vm_start)
2121+ if (check_heap_stack_gap(vmm, addr, len))
2122 return addr;
2123 addr = vmm->vm_end;
2124 if (do_color_align)
2125diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2126index e97a7a2..f18f5b0 100644
2127--- a/arch/mips/mm/fault.c
2128+++ b/arch/mips/mm/fault.c
2129@@ -26,6 +26,23 @@
2130 #include <asm/ptrace.h>
2131 #include <asm/highmem.h> /* For VMALLOC_END */
2132
2133+#ifdef CONFIG_PAX_PAGEEXEC
2134+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2135+{
2136+ unsigned long i;
2137+
2138+ printk(KERN_ERR "PAX: bytes at PC: ");
2139+ for (i = 0; i < 5; i++) {
2140+ unsigned int c;
2141+ if (get_user(c, (unsigned int *)pc+i))
2142+ printk(KERN_CONT "???????? ");
2143+ else
2144+ printk(KERN_CONT "%08x ", c);
2145+ }
2146+ printk("\n");
2147+}
2148+#endif
2149+
2150 /*
2151 * This routine handles page faults. It determines the address,
2152 * and the problem, and then passes it off to one of the appropriate
2153diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2154index 9c802eb..0592e41 100644
2155--- a/arch/parisc/include/asm/elf.h
2156+++ b/arch/parisc/include/asm/elf.h
2157@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2158
2159 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2160
2161+#ifdef CONFIG_PAX_ASLR
2162+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2163+
2164+#define PAX_DELTA_MMAP_LEN 16
2165+#define PAX_DELTA_STACK_LEN 16
2166+#endif
2167+
2168 /* This yields a mask that user programs can use to figure out what
2169 instruction set this CPU supports. This could be done in user space,
2170 but it's not easy, and we've already done it here. */
2171diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2172index a27d2e2..18fd845 100644
2173--- a/arch/parisc/include/asm/pgtable.h
2174+++ b/arch/parisc/include/asm/pgtable.h
2175@@ -207,6 +207,17 @@
2176 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2177 #define PAGE_COPY PAGE_EXECREAD
2178 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2179+
2180+#ifdef CONFIG_PAX_PAGEEXEC
2181+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2182+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2183+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2184+#else
2185+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2186+# define PAGE_COPY_NOEXEC PAGE_COPY
2187+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2188+#endif
2189+
2190 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2191 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2192 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2193diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2194index 2120746..8d70a5e 100644
2195--- a/arch/parisc/kernel/module.c
2196+++ b/arch/parisc/kernel/module.c
2197@@ -95,16 +95,38 @@
2198
2199 /* three functions to determine where in the module core
2200 * or init pieces the location is */
2201+static inline int in_init_rx(struct module *me, void *loc)
2202+{
2203+ return (loc >= me->module_init_rx &&
2204+ loc < (me->module_init_rx + me->init_size_rx));
2205+}
2206+
2207+static inline int in_init_rw(struct module *me, void *loc)
2208+{
2209+ return (loc >= me->module_init_rw &&
2210+ loc < (me->module_init_rw + me->init_size_rw));
2211+}
2212+
2213 static inline int in_init(struct module *me, void *loc)
2214 {
2215- return (loc >= me->module_init &&
2216- loc <= (me->module_init + me->init_size));
2217+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2218+}
2219+
2220+static inline int in_core_rx(struct module *me, void *loc)
2221+{
2222+ return (loc >= me->module_core_rx &&
2223+ loc < (me->module_core_rx + me->core_size_rx));
2224+}
2225+
2226+static inline int in_core_rw(struct module *me, void *loc)
2227+{
2228+ return (loc >= me->module_core_rw &&
2229+ loc < (me->module_core_rw + me->core_size_rw));
2230 }
2231
2232 static inline int in_core(struct module *me, void *loc)
2233 {
2234- return (loc >= me->module_core &&
2235- loc <= (me->module_core + me->core_size));
2236+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2237 }
2238
2239 static inline int in_local(struct module *me, void *loc)
2240@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2241 }
2242
2243 /* align things a bit */
2244- me->core_size = ALIGN(me->core_size, 16);
2245- me->arch.got_offset = me->core_size;
2246- me->core_size += gots * sizeof(struct got_entry);
2247+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2248+ me->arch.got_offset = me->core_size_rw;
2249+ me->core_size_rw += gots * sizeof(struct got_entry);
2250
2251- me->core_size = ALIGN(me->core_size, 16);
2252- me->arch.fdesc_offset = me->core_size;
2253- me->core_size += fdescs * sizeof(Elf_Fdesc);
2254+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2255+ me->arch.fdesc_offset = me->core_size_rw;
2256+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2257
2258 me->arch.got_max = gots;
2259 me->arch.fdesc_max = fdescs;
2260@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2261
2262 BUG_ON(value == 0);
2263
2264- got = me->module_core + me->arch.got_offset;
2265+ got = me->module_core_rw + me->arch.got_offset;
2266 for (i = 0; got[i].addr; i++)
2267 if (got[i].addr == value)
2268 goto out;
2269@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2270 #ifdef CONFIG_64BIT
2271 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2272 {
2273- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2274+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2275
2276 if (!value) {
2277 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2278@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2279
2280 /* Create new one */
2281 fdesc->addr = value;
2282- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2283+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2284 return (Elf_Addr)fdesc;
2285 }
2286 #endif /* CONFIG_64BIT */
2287@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2288
2289 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2290 end = table + sechdrs[me->arch.unwind_section].sh_size;
2291- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2292+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2293
2294 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2295 me->arch.unwind_section, table, end, gp);
2296diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2297index 9147391..f3d949a 100644
2298--- a/arch/parisc/kernel/sys_parisc.c
2299+++ b/arch/parisc/kernel/sys_parisc.c
2300@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2301 /* At this point: (!vma || addr < vma->vm_end). */
2302 if (TASK_SIZE - len < addr)
2303 return -ENOMEM;
2304- if (!vma || addr + len <= vma->vm_start)
2305+ if (check_heap_stack_gap(vma, addr, len))
2306 return addr;
2307 addr = vma->vm_end;
2308 }
2309@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2310 /* At this point: (!vma || addr < vma->vm_end). */
2311 if (TASK_SIZE - len < addr)
2312 return -ENOMEM;
2313- if (!vma || addr + len <= vma->vm_start)
2314+ if (check_heap_stack_gap(vma, addr, len))
2315 return addr;
2316 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2317 if (addr < vma->vm_end) /* handle wraparound */
2318@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2319 if (flags & MAP_FIXED)
2320 return addr;
2321 if (!addr)
2322- addr = TASK_UNMAPPED_BASE;
2323+ addr = current->mm->mmap_base;
2324
2325 if (filp) {
2326 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2327diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2328index 8b58bf0..7afff03 100644
2329--- a/arch/parisc/kernel/traps.c
2330+++ b/arch/parisc/kernel/traps.c
2331@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2332
2333 down_read(&current->mm->mmap_sem);
2334 vma = find_vma(current->mm,regs->iaoq[0]);
2335- if (vma && (regs->iaoq[0] >= vma->vm_start)
2336- && (vma->vm_flags & VM_EXEC)) {
2337-
2338+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2339 fault_address = regs->iaoq[0];
2340 fault_space = regs->iasq[0];
2341
2342diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2343index c6afbfc..c5839f6 100644
2344--- a/arch/parisc/mm/fault.c
2345+++ b/arch/parisc/mm/fault.c
2346@@ -15,6 +15,7 @@
2347 #include <linux/sched.h>
2348 #include <linux/interrupt.h>
2349 #include <linux/module.h>
2350+#include <linux/unistd.h>
2351
2352 #include <asm/uaccess.h>
2353 #include <asm/traps.h>
2354@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2355 static unsigned long
2356 parisc_acctyp(unsigned long code, unsigned int inst)
2357 {
2358- if (code == 6 || code == 16)
2359+ if (code == 6 || code == 7 || code == 16)
2360 return VM_EXEC;
2361
2362 switch (inst & 0xf0000000) {
2363@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2364 }
2365 #endif
2366
2367+#ifdef CONFIG_PAX_PAGEEXEC
2368+/*
2369+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2370+ *
2371+ * returns 1 when task should be killed
2372+ * 2 when rt_sigreturn trampoline was detected
2373+ * 3 when unpatched PLT trampoline was detected
2374+ */
2375+static int pax_handle_fetch_fault(struct pt_regs *regs)
2376+{
2377+
2378+#ifdef CONFIG_PAX_EMUPLT
2379+ int err;
2380+
2381+ do { /* PaX: unpatched PLT emulation */
2382+ unsigned int bl, depwi;
2383+
2384+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2385+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2386+
2387+ if (err)
2388+ break;
2389+
2390+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2391+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2392+
2393+ err = get_user(ldw, (unsigned int *)addr);
2394+ err |= get_user(bv, (unsigned int *)(addr+4));
2395+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2396+
2397+ if (err)
2398+ break;
2399+
2400+ if (ldw == 0x0E801096U &&
2401+ bv == 0xEAC0C000U &&
2402+ ldw2 == 0x0E881095U)
2403+ {
2404+ unsigned int resolver, map;
2405+
2406+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2407+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2408+ if (err)
2409+ break;
2410+
2411+ regs->gr[20] = instruction_pointer(regs)+8;
2412+ regs->gr[21] = map;
2413+ regs->gr[22] = resolver;
2414+ regs->iaoq[0] = resolver | 3UL;
2415+ regs->iaoq[1] = regs->iaoq[0] + 4;
2416+ return 3;
2417+ }
2418+ }
2419+ } while (0);
2420+#endif
2421+
2422+#ifdef CONFIG_PAX_EMUTRAMP
2423+
2424+#ifndef CONFIG_PAX_EMUSIGRT
2425+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2426+ return 1;
2427+#endif
2428+
2429+ do { /* PaX: rt_sigreturn emulation */
2430+ unsigned int ldi1, ldi2, bel, nop;
2431+
2432+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2433+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2434+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2435+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2436+
2437+ if (err)
2438+ break;
2439+
2440+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2441+ ldi2 == 0x3414015AU &&
2442+ bel == 0xE4008200U &&
2443+ nop == 0x08000240U)
2444+ {
2445+ regs->gr[25] = (ldi1 & 2) >> 1;
2446+ regs->gr[20] = __NR_rt_sigreturn;
2447+ regs->gr[31] = regs->iaoq[1] + 16;
2448+ regs->sr[0] = regs->iasq[1];
2449+ regs->iaoq[0] = 0x100UL;
2450+ regs->iaoq[1] = regs->iaoq[0] + 4;
2451+ regs->iasq[0] = regs->sr[2];
2452+ regs->iasq[1] = regs->sr[2];
2453+ return 2;
2454+ }
2455+ } while (0);
2456+#endif
2457+
2458+ return 1;
2459+}
2460+
2461+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2462+{
2463+ unsigned long i;
2464+
2465+ printk(KERN_ERR "PAX: bytes at PC: ");
2466+ for (i = 0; i < 5; i++) {
2467+ unsigned int c;
2468+ if (get_user(c, (unsigned int *)pc+i))
2469+ printk(KERN_CONT "???????? ");
2470+ else
2471+ printk(KERN_CONT "%08x ", c);
2472+ }
2473+ printk("\n");
2474+}
2475+#endif
2476+
2477 int fixup_exception(struct pt_regs *regs)
2478 {
2479 const struct exception_table_entry *fix;
2480@@ -192,8 +303,33 @@ good_area:
2481
2482 acc_type = parisc_acctyp(code,regs->iir);
2483
2484- if ((vma->vm_flags & acc_type) != acc_type)
2485+ if ((vma->vm_flags & acc_type) != acc_type) {
2486+
2487+#ifdef CONFIG_PAX_PAGEEXEC
2488+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2489+ (address & ~3UL) == instruction_pointer(regs))
2490+ {
2491+ up_read(&mm->mmap_sem);
2492+ switch (pax_handle_fetch_fault(regs)) {
2493+
2494+#ifdef CONFIG_PAX_EMUPLT
2495+ case 3:
2496+ return;
2497+#endif
2498+
2499+#ifdef CONFIG_PAX_EMUTRAMP
2500+ case 2:
2501+ return;
2502+#endif
2503+
2504+ }
2505+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2506+ do_group_exit(SIGKILL);
2507+ }
2508+#endif
2509+
2510 goto bad_area;
2511+ }
2512
2513 /*
2514 * If for any reason at all we couldn't handle the fault, make
2515diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2516index c107b74..409dc0f 100644
2517--- a/arch/powerpc/Makefile
2518+++ b/arch/powerpc/Makefile
2519@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2520 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2521 CPP = $(CC) -E $(KBUILD_CFLAGS)
2522
2523+cflags-y += -Wno-sign-compare -Wno-extra
2524+
2525 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2526
2527 ifeq ($(CONFIG_PPC64),y)
2528diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2529index 6d94d27..50d4cad 100644
2530--- a/arch/powerpc/include/asm/device.h
2531+++ b/arch/powerpc/include/asm/device.h
2532@@ -14,7 +14,7 @@ struct dev_archdata {
2533 struct device_node *of_node;
2534
2535 /* DMA operations on that device */
2536- struct dma_map_ops *dma_ops;
2537+ const struct dma_map_ops *dma_ops;
2538
2539 /*
2540 * When an iommu is in use, dma_data is used as a ptr to the base of the
2541diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2542index e281dae..2b8a784 100644
2543--- a/arch/powerpc/include/asm/dma-mapping.h
2544+++ b/arch/powerpc/include/asm/dma-mapping.h
2545@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2546 #ifdef CONFIG_PPC64
2547 extern struct dma_map_ops dma_iommu_ops;
2548 #endif
2549-extern struct dma_map_ops dma_direct_ops;
2550+extern const struct dma_map_ops dma_direct_ops;
2551
2552-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2553+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2554 {
2555 /* We don't handle the NULL dev case for ISA for now. We could
2556 * do it via an out of line call but it is not needed for now. The
2557@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2558 return dev->archdata.dma_ops;
2559 }
2560
2561-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2562+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2563 {
2564 dev->archdata.dma_ops = ops;
2565 }
2566@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2567
2568 static inline int dma_supported(struct device *dev, u64 mask)
2569 {
2570- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2571+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2572
2573 if (unlikely(dma_ops == NULL))
2574 return 0;
2575@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2576
2577 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2578 {
2579- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2580+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2581
2582 if (unlikely(dma_ops == NULL))
2583 return -EIO;
2584@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2585 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2586 dma_addr_t *dma_handle, gfp_t flag)
2587 {
2588- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2589+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2590 void *cpu_addr;
2591
2592 BUG_ON(!dma_ops);
2593@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2594 static inline void dma_free_coherent(struct device *dev, size_t size,
2595 void *cpu_addr, dma_addr_t dma_handle)
2596 {
2597- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2598+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2599
2600 BUG_ON(!dma_ops);
2601
2602@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2603
2604 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2605 {
2606- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2607+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2608
2609 if (dma_ops->mapping_error)
2610 return dma_ops->mapping_error(dev, dma_addr);
2611diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2612index 5698502..5db093c 100644
2613--- a/arch/powerpc/include/asm/elf.h
2614+++ b/arch/powerpc/include/asm/elf.h
2615@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2616 the loader. We need to make sure that it is out of the way of the program
2617 that it will "exec", and that there is sufficient room for the brk. */
2618
2619-extern unsigned long randomize_et_dyn(unsigned long base);
2620-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2621+#define ELF_ET_DYN_BASE (0x20000000)
2622+
2623+#ifdef CONFIG_PAX_ASLR
2624+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2625+
2626+#ifdef __powerpc64__
2627+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2628+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2629+#else
2630+#define PAX_DELTA_MMAP_LEN 15
2631+#define PAX_DELTA_STACK_LEN 15
2632+#endif
2633+#endif
2634
2635 /*
2636 * Our registers are always unsigned longs, whether we're a 32 bit
2637@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2638 (0x7ff >> (PAGE_SHIFT - 12)) : \
2639 (0x3ffff >> (PAGE_SHIFT - 12)))
2640
2641-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2642-#define arch_randomize_brk arch_randomize_brk
2643-
2644 #endif /* __KERNEL__ */
2645
2646 /*
2647diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2648index edfc980..1766f59 100644
2649--- a/arch/powerpc/include/asm/iommu.h
2650+++ b/arch/powerpc/include/asm/iommu.h
2651@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2652 extern void iommu_init_early_dart(void);
2653 extern void iommu_init_early_pasemi(void);
2654
2655+/* dma-iommu.c */
2656+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2657+
2658 #ifdef CONFIG_PCI
2659 extern void pci_iommu_init(void);
2660 extern void pci_direct_iommu_init(void);
2661diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2662index 9163695..5a00112 100644
2663--- a/arch/powerpc/include/asm/kmap_types.h
2664+++ b/arch/powerpc/include/asm/kmap_types.h
2665@@ -26,6 +26,7 @@ enum km_type {
2666 KM_SOFTIRQ1,
2667 KM_PPC_SYNC_PAGE,
2668 KM_PPC_SYNC_ICACHE,
2669+ KM_CLEARPAGE,
2670 KM_TYPE_NR
2671 };
2672
2673diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2674index ff24254..fe45b21 100644
2675--- a/arch/powerpc/include/asm/page.h
2676+++ b/arch/powerpc/include/asm/page.h
2677@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2678 * and needs to be executable. This means the whole heap ends
2679 * up being executable.
2680 */
2681-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2682- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2683+#define VM_DATA_DEFAULT_FLAGS32 \
2684+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2685+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2686
2687 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2688 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2689@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2690 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2691 #endif
2692
2693+#define ktla_ktva(addr) (addr)
2694+#define ktva_ktla(addr) (addr)
2695+
2696 #ifndef __ASSEMBLY__
2697
2698 #undef STRICT_MM_TYPECHECKS
2699diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2700index 3f17b83..1f9e766 100644
2701--- a/arch/powerpc/include/asm/page_64.h
2702+++ b/arch/powerpc/include/asm/page_64.h
2703@@ -180,15 +180,18 @@ do { \
2704 * stack by default, so in the absense of a PT_GNU_STACK program header
2705 * we turn execute permission off.
2706 */
2707-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2708- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2709+#define VM_STACK_DEFAULT_FLAGS32 \
2710+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2711+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2712
2713 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2714 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2715
2716+#ifndef CONFIG_PAX_PAGEEXEC
2717 #define VM_STACK_DEFAULT_FLAGS \
2718 (test_thread_flag(TIF_32BIT) ? \
2719 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2720+#endif
2721
2722 #include <asm-generic/getorder.h>
2723
2724diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2725index b5ea626..4030822 100644
2726--- a/arch/powerpc/include/asm/pci.h
2727+++ b/arch/powerpc/include/asm/pci.h
2728@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2729 }
2730
2731 #ifdef CONFIG_PCI
2732-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2733-extern struct dma_map_ops *get_pci_dma_ops(void);
2734+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2735+extern const struct dma_map_ops *get_pci_dma_ops(void);
2736 #else /* CONFIG_PCI */
2737 #define set_pci_dma_ops(d)
2738 #define get_pci_dma_ops() NULL
2739diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2740index 2a5da06..d65bea2 100644
2741--- a/arch/powerpc/include/asm/pgtable.h
2742+++ b/arch/powerpc/include/asm/pgtable.h
2743@@ -2,6 +2,7 @@
2744 #define _ASM_POWERPC_PGTABLE_H
2745 #ifdef __KERNEL__
2746
2747+#include <linux/const.h>
2748 #ifndef __ASSEMBLY__
2749 #include <asm/processor.h> /* For TASK_SIZE */
2750 #include <asm/mmu.h>
2751diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2752index 4aad413..85d86bf 100644
2753--- a/arch/powerpc/include/asm/pte-hash32.h
2754+++ b/arch/powerpc/include/asm/pte-hash32.h
2755@@ -21,6 +21,7 @@
2756 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2757 #define _PAGE_USER 0x004 /* usermode access allowed */
2758 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2759+#define _PAGE_EXEC _PAGE_GUARDED
2760 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2761 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2762 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2763diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2764index 8c34149..78f425a 100644
2765--- a/arch/powerpc/include/asm/ptrace.h
2766+++ b/arch/powerpc/include/asm/ptrace.h
2767@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2768 } while(0)
2769
2770 struct task_struct;
2771-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2772+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2773 extern int ptrace_put_reg(struct task_struct *task, int regno,
2774 unsigned long data);
2775
2776diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2777index 32a7c30..be3a8bb 100644
2778--- a/arch/powerpc/include/asm/reg.h
2779+++ b/arch/powerpc/include/asm/reg.h
2780@@ -191,6 +191,7 @@
2781 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2782 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2783 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2784+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2785 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2786 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2787 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2788diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2789index 8979d4c..d2fd0d3 100644
2790--- a/arch/powerpc/include/asm/swiotlb.h
2791+++ b/arch/powerpc/include/asm/swiotlb.h
2792@@ -13,7 +13,7 @@
2793
2794 #include <linux/swiotlb.h>
2795
2796-extern struct dma_map_ops swiotlb_dma_ops;
2797+extern const struct dma_map_ops swiotlb_dma_ops;
2798
2799 static inline void dma_mark_clean(void *addr, size_t size) {}
2800
2801diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2802index 094a12a..877a60a 100644
2803--- a/arch/powerpc/include/asm/system.h
2804+++ b/arch/powerpc/include/asm/system.h
2805@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2806 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2807 #endif
2808
2809-extern unsigned long arch_align_stack(unsigned long sp);
2810+#define arch_align_stack(x) ((x) & ~0xfUL)
2811
2812 /* Used in very early kernel initialization. */
2813 extern unsigned long reloc_offset(void);
2814diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2815index bd0fb84..a42a14b 100644
2816--- a/arch/powerpc/include/asm/uaccess.h
2817+++ b/arch/powerpc/include/asm/uaccess.h
2818@@ -13,6 +13,8 @@
2819 #define VERIFY_READ 0
2820 #define VERIFY_WRITE 1
2821
2822+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2823+
2824 /*
2825 * The fs value determines whether argument validity checking should be
2826 * performed or not. If get_fs() == USER_DS, checking is performed, with
2827@@ -327,52 +329,6 @@ do { \
2828 extern unsigned long __copy_tofrom_user(void __user *to,
2829 const void __user *from, unsigned long size);
2830
2831-#ifndef __powerpc64__
2832-
2833-static inline unsigned long copy_from_user(void *to,
2834- const void __user *from, unsigned long n)
2835-{
2836- unsigned long over;
2837-
2838- if (access_ok(VERIFY_READ, from, n))
2839- return __copy_tofrom_user((__force void __user *)to, from, n);
2840- if ((unsigned long)from < TASK_SIZE) {
2841- over = (unsigned long)from + n - TASK_SIZE;
2842- return __copy_tofrom_user((__force void __user *)to, from,
2843- n - over) + over;
2844- }
2845- return n;
2846-}
2847-
2848-static inline unsigned long copy_to_user(void __user *to,
2849- const void *from, unsigned long n)
2850-{
2851- unsigned long over;
2852-
2853- if (access_ok(VERIFY_WRITE, to, n))
2854- return __copy_tofrom_user(to, (__force void __user *)from, n);
2855- if ((unsigned long)to < TASK_SIZE) {
2856- over = (unsigned long)to + n - TASK_SIZE;
2857- return __copy_tofrom_user(to, (__force void __user *)from,
2858- n - over) + over;
2859- }
2860- return n;
2861-}
2862-
2863-#else /* __powerpc64__ */
2864-
2865-#define __copy_in_user(to, from, size) \
2866- __copy_tofrom_user((to), (from), (size))
2867-
2868-extern unsigned long copy_from_user(void *to, const void __user *from,
2869- unsigned long n);
2870-extern unsigned long copy_to_user(void __user *to, const void *from,
2871- unsigned long n);
2872-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2873- unsigned long n);
2874-
2875-#endif /* __powerpc64__ */
2876-
2877 static inline unsigned long __copy_from_user_inatomic(void *to,
2878 const void __user *from, unsigned long n)
2879 {
2880@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2881 if (ret == 0)
2882 return 0;
2883 }
2884+
2885+ if (!__builtin_constant_p(n))
2886+ check_object_size(to, n, false);
2887+
2888 return __copy_tofrom_user((__force void __user *)to, from, n);
2889 }
2890
2891@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2892 if (ret == 0)
2893 return 0;
2894 }
2895+
2896+ if (!__builtin_constant_p(n))
2897+ check_object_size(from, n, true);
2898+
2899 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2900 }
2901
2902@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2903 return __copy_to_user_inatomic(to, from, size);
2904 }
2905
2906+#ifndef __powerpc64__
2907+
2908+static inline unsigned long __must_check copy_from_user(void *to,
2909+ const void __user *from, unsigned long n)
2910+{
2911+ unsigned long over;
2912+
2913+ if ((long)n < 0)
2914+ return n;
2915+
2916+ if (access_ok(VERIFY_READ, from, n)) {
2917+ if (!__builtin_constant_p(n))
2918+ check_object_size(to, n, false);
2919+ return __copy_tofrom_user((__force void __user *)to, from, n);
2920+ }
2921+ if ((unsigned long)from < TASK_SIZE) {
2922+ over = (unsigned long)from + n - TASK_SIZE;
2923+ if (!__builtin_constant_p(n - over))
2924+ check_object_size(to, n - over, false);
2925+ return __copy_tofrom_user((__force void __user *)to, from,
2926+ n - over) + over;
2927+ }
2928+ return n;
2929+}
2930+
2931+static inline unsigned long __must_check copy_to_user(void __user *to,
2932+ const void *from, unsigned long n)
2933+{
2934+ unsigned long over;
2935+
2936+ if ((long)n < 0)
2937+ return n;
2938+
2939+ if (access_ok(VERIFY_WRITE, to, n)) {
2940+ if (!__builtin_constant_p(n))
2941+ check_object_size(from, n, true);
2942+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2943+ }
2944+ if ((unsigned long)to < TASK_SIZE) {
2945+ over = (unsigned long)to + n - TASK_SIZE;
2946+ if (!__builtin_constant_p(n))
2947+ check_object_size(from, n - over, true);
2948+ return __copy_tofrom_user(to, (__force void __user *)from,
2949+ n - over) + over;
2950+ }
2951+ return n;
2952+}
2953+
2954+#else /* __powerpc64__ */
2955+
2956+#define __copy_in_user(to, from, size) \
2957+ __copy_tofrom_user((to), (from), (size))
2958+
2959+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2960+{
2961+ if ((long)n < 0 || n > INT_MAX)
2962+ return n;
2963+
2964+ if (!__builtin_constant_p(n))
2965+ check_object_size(to, n, false);
2966+
2967+ if (likely(access_ok(VERIFY_READ, from, n)))
2968+ n = __copy_from_user(to, from, n);
2969+ else
2970+ memset(to, 0, n);
2971+ return n;
2972+}
2973+
2974+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2975+{
2976+ if ((long)n < 0 || n > INT_MAX)
2977+ return n;
2978+
2979+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2980+ if (!__builtin_constant_p(n))
2981+ check_object_size(from, n, true);
2982+ n = __copy_to_user(to, from, n);
2983+ }
2984+ return n;
2985+}
2986+
2987+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2988+ unsigned long n);
2989+
2990+#endif /* __powerpc64__ */
2991+
2992 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2993
2994 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2995diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
2996index bb37b1d..01fe9ce 100644
2997--- a/arch/powerpc/kernel/cacheinfo.c
2998+++ b/arch/powerpc/kernel/cacheinfo.c
2999@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3000 &cache_assoc_attr,
3001 };
3002
3003-static struct sysfs_ops cache_index_ops = {
3004+static const struct sysfs_ops cache_index_ops = {
3005 .show = cache_index_show,
3006 };
3007
3008diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3009index 37771a5..648530c 100644
3010--- a/arch/powerpc/kernel/dma-iommu.c
3011+++ b/arch/powerpc/kernel/dma-iommu.c
3012@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3013 }
3014
3015 /* We support DMA to/from any memory page via the iommu */
3016-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3017+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3018 {
3019 struct iommu_table *tbl = get_iommu_table_base(dev);
3020
3021diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3022index e96cbbd..bdd6d41 100644
3023--- a/arch/powerpc/kernel/dma-swiotlb.c
3024+++ b/arch/powerpc/kernel/dma-swiotlb.c
3025@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3026 * map_page, and unmap_page on highmem, use normal dma_ops
3027 * for everything else.
3028 */
3029-struct dma_map_ops swiotlb_dma_ops = {
3030+const struct dma_map_ops swiotlb_dma_ops = {
3031 .alloc_coherent = dma_direct_alloc_coherent,
3032 .free_coherent = dma_direct_free_coherent,
3033 .map_sg = swiotlb_map_sg_attrs,
3034diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3035index 6215062..ebea59c 100644
3036--- a/arch/powerpc/kernel/dma.c
3037+++ b/arch/powerpc/kernel/dma.c
3038@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3039 }
3040 #endif
3041
3042-struct dma_map_ops dma_direct_ops = {
3043+const struct dma_map_ops dma_direct_ops = {
3044 .alloc_coherent = dma_direct_alloc_coherent,
3045 .free_coherent = dma_direct_free_coherent,
3046 .map_sg = dma_direct_map_sg,
3047diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3048index 24dcc0e..a300455 100644
3049--- a/arch/powerpc/kernel/exceptions-64e.S
3050+++ b/arch/powerpc/kernel/exceptions-64e.S
3051@@ -455,6 +455,7 @@ storage_fault_common:
3052 std r14,_DAR(r1)
3053 std r15,_DSISR(r1)
3054 addi r3,r1,STACK_FRAME_OVERHEAD
3055+ bl .save_nvgprs
3056 mr r4,r14
3057 mr r5,r15
3058 ld r14,PACA_EXGEN+EX_R14(r13)
3059@@ -464,8 +465,7 @@ storage_fault_common:
3060 cmpdi r3,0
3061 bne- 1f
3062 b .ret_from_except_lite
3063-1: bl .save_nvgprs
3064- mr r5,r3
3065+1: mr r5,r3
3066 addi r3,r1,STACK_FRAME_OVERHEAD
3067 ld r4,_DAR(r1)
3068 bl .bad_page_fault
3069diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3070index 1808876..9fd206a 100644
3071--- a/arch/powerpc/kernel/exceptions-64s.S
3072+++ b/arch/powerpc/kernel/exceptions-64s.S
3073@@ -818,10 +818,10 @@ handle_page_fault:
3074 11: ld r4,_DAR(r1)
3075 ld r5,_DSISR(r1)
3076 addi r3,r1,STACK_FRAME_OVERHEAD
3077+ bl .save_nvgprs
3078 bl .do_page_fault
3079 cmpdi r3,0
3080 beq+ 13f
3081- bl .save_nvgprs
3082 mr r5,r3
3083 addi r3,r1,STACK_FRAME_OVERHEAD
3084 lwz r4,_DAR(r1)
3085diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3086index a4c8b38..1b09ad9 100644
3087--- a/arch/powerpc/kernel/ibmebus.c
3088+++ b/arch/powerpc/kernel/ibmebus.c
3089@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3090 return 1;
3091 }
3092
3093-static struct dma_map_ops ibmebus_dma_ops = {
3094+static const struct dma_map_ops ibmebus_dma_ops = {
3095 .alloc_coherent = ibmebus_alloc_coherent,
3096 .free_coherent = ibmebus_free_coherent,
3097 .map_sg = ibmebus_map_sg,
3098diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3099index 641c74b..8339ad7 100644
3100--- a/arch/powerpc/kernel/kgdb.c
3101+++ b/arch/powerpc/kernel/kgdb.c
3102@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3103 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3104 return 0;
3105
3106- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3107+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3108 regs->nip += 4;
3109
3110 return 1;
3111@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3112 /*
3113 * Global data
3114 */
3115-struct kgdb_arch arch_kgdb_ops = {
3116+const struct kgdb_arch arch_kgdb_ops = {
3117 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3118 };
3119
3120diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3121index 477c663..4f50234 100644
3122--- a/arch/powerpc/kernel/module.c
3123+++ b/arch/powerpc/kernel/module.c
3124@@ -31,11 +31,24 @@
3125
3126 LIST_HEAD(module_bug_list);
3127
3128+#ifdef CONFIG_PAX_KERNEXEC
3129 void *module_alloc(unsigned long size)
3130 {
3131 if (size == 0)
3132 return NULL;
3133
3134+ return vmalloc(size);
3135+}
3136+
3137+void *module_alloc_exec(unsigned long size)
3138+#else
3139+void *module_alloc(unsigned long size)
3140+#endif
3141+
3142+{
3143+ if (size == 0)
3144+ return NULL;
3145+
3146 return vmalloc_exec(size);
3147 }
3148
3149@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3150 vfree(module_region);
3151 }
3152
3153+#ifdef CONFIG_PAX_KERNEXEC
3154+void module_free_exec(struct module *mod, void *module_region)
3155+{
3156+ module_free(mod, module_region);
3157+}
3158+#endif
3159+
3160 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3161 const Elf_Shdr *sechdrs,
3162 const char *name)
3163diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3164index f832773..0507238 100644
3165--- a/arch/powerpc/kernel/module_32.c
3166+++ b/arch/powerpc/kernel/module_32.c
3167@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3168 me->arch.core_plt_section = i;
3169 }
3170 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3171- printk("Module doesn't contain .plt or .init.plt sections.\n");
3172+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3173 return -ENOEXEC;
3174 }
3175
3176@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3177
3178 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3179 /* Init, or core PLT? */
3180- if (location >= mod->module_core
3181- && location < mod->module_core + mod->core_size)
3182+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3183+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3184 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3185- else
3186+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3187+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3188 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3189+ else {
3190+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3191+ return ~0UL;
3192+ }
3193
3194 /* Find this entry, or if that fails, the next avail. entry */
3195 while (entry->jump[0]) {
3196diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3197index cadbed6..b9bbb00 100644
3198--- a/arch/powerpc/kernel/pci-common.c
3199+++ b/arch/powerpc/kernel/pci-common.c
3200@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3201 unsigned int ppc_pci_flags = 0;
3202
3203
3204-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3205+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3206
3207-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3208+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3209 {
3210 pci_dma_ops = dma_ops;
3211 }
3212
3213-struct dma_map_ops *get_pci_dma_ops(void)
3214+const struct dma_map_ops *get_pci_dma_ops(void)
3215 {
3216 return pci_dma_ops;
3217 }
3218diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3219index 7b816da..8d5c277 100644
3220--- a/arch/powerpc/kernel/process.c
3221+++ b/arch/powerpc/kernel/process.c
3222@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3223 * Lookup NIP late so we have the best change of getting the
3224 * above info out without failing
3225 */
3226- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3227- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3228+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3229+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3230 #endif
3231 show_stack(current, (unsigned long *) regs->gpr[1]);
3232 if (!user_mode(regs))
3233@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3234 newsp = stack[0];
3235 ip = stack[STACK_FRAME_LR_SAVE];
3236 if (!firstframe || ip != lr) {
3237- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3238+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3239 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3240 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3241- printk(" (%pS)",
3242+ printk(" (%pA)",
3243 (void *)current->ret_stack[curr_frame].ret);
3244 curr_frame--;
3245 }
3246@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3247 struct pt_regs *regs = (struct pt_regs *)
3248 (sp + STACK_FRAME_OVERHEAD);
3249 lr = regs->link;
3250- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3251+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3252 regs->trap, (void *)regs->nip, (void *)lr);
3253 firstframe = 1;
3254 }
3255@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3256 }
3257
3258 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3259-
3260-unsigned long arch_align_stack(unsigned long sp)
3261-{
3262- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3263- sp -= get_random_int() & ~PAGE_MASK;
3264- return sp & ~0xf;
3265-}
3266-
3267-static inline unsigned long brk_rnd(void)
3268-{
3269- unsigned long rnd = 0;
3270-
3271- /* 8MB for 32bit, 1GB for 64bit */
3272- if (is_32bit_task())
3273- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3274- else
3275- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3276-
3277- return rnd << PAGE_SHIFT;
3278-}
3279-
3280-unsigned long arch_randomize_brk(struct mm_struct *mm)
3281-{
3282- unsigned long base = mm->brk;
3283- unsigned long ret;
3284-
3285-#ifdef CONFIG_PPC_STD_MMU_64
3286- /*
3287- * If we are using 1TB segments and we are allowed to randomise
3288- * the heap, we can put it above 1TB so it is backed by a 1TB
3289- * segment. Otherwise the heap will be in the bottom 1TB
3290- * which always uses 256MB segments and this may result in a
3291- * performance penalty.
3292- */
3293- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3294- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3295-#endif
3296-
3297- ret = PAGE_ALIGN(base + brk_rnd());
3298-
3299- if (ret < mm->brk)
3300- return mm->brk;
3301-
3302- return ret;
3303-}
3304-
3305-unsigned long randomize_et_dyn(unsigned long base)
3306-{
3307- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3308-
3309- if (ret < base)
3310- return base;
3311-
3312- return ret;
3313-}
3314diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3315index ef14988..856c4bc 100644
3316--- a/arch/powerpc/kernel/ptrace.c
3317+++ b/arch/powerpc/kernel/ptrace.c
3318@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3319 /*
3320 * Get contents of register REGNO in task TASK.
3321 */
3322-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3323+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3324 {
3325 if (task->thread.regs == NULL)
3326 return -EIO;
3327@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3328
3329 CHECK_FULL_REGS(child->thread.regs);
3330 if (index < PT_FPR0) {
3331- tmp = ptrace_get_reg(child, (int) index);
3332+ tmp = ptrace_get_reg(child, index);
3333 } else {
3334 flush_fp_to_thread(child);
3335 tmp = ((unsigned long *)child->thread.fpr)
3336diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3337index d670429..2bc59b2 100644
3338--- a/arch/powerpc/kernel/signal_32.c
3339+++ b/arch/powerpc/kernel/signal_32.c
3340@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3341 /* Save user registers on the stack */
3342 frame = &rt_sf->uc.uc_mcontext;
3343 addr = frame;
3344- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3345+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3346 if (save_user_regs(regs, frame, 0, 1))
3347 goto badframe;
3348 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3349diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3350index 2fe6fc6..ada0d96 100644
3351--- a/arch/powerpc/kernel/signal_64.c
3352+++ b/arch/powerpc/kernel/signal_64.c
3353@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3354 current->thread.fpscr.val = 0;
3355
3356 /* Set up to return from userspace. */
3357- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3358+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3359 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3360 } else {
3361 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3362diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3363index b97c2d6..dd01a6a 100644
3364--- a/arch/powerpc/kernel/sys_ppc32.c
3365+++ b/arch/powerpc/kernel/sys_ppc32.c
3366@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3367 if (oldlenp) {
3368 if (!error) {
3369 if (get_user(oldlen, oldlenp) ||
3370- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3371+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3372+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3373 error = -EFAULT;
3374 }
3375- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3376 }
3377 return error;
3378 }
3379diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3380index 6f0ae1a..e4b6a56 100644
3381--- a/arch/powerpc/kernel/traps.c
3382+++ b/arch/powerpc/kernel/traps.c
3383@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3384 static inline void pmac_backlight_unblank(void) { }
3385 #endif
3386
3387+extern void gr_handle_kernel_exploit(void);
3388+
3389 int die(const char *str, struct pt_regs *regs, long err)
3390 {
3391 static struct {
3392@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3393 if (panic_on_oops)
3394 panic("Fatal exception");
3395
3396+ gr_handle_kernel_exploit();
3397+
3398 oops_exit();
3399 do_exit(err);
3400
3401diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3402index 137dc22..fe57a79 100644
3403--- a/arch/powerpc/kernel/vdso.c
3404+++ b/arch/powerpc/kernel/vdso.c
3405@@ -36,6 +36,7 @@
3406 #include <asm/firmware.h>
3407 #include <asm/vdso.h>
3408 #include <asm/vdso_datapage.h>
3409+#include <asm/mman.h>
3410
3411 #include "setup.h"
3412
3413@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3414 vdso_base = VDSO32_MBASE;
3415 #endif
3416
3417- current->mm->context.vdso_base = 0;
3418+ current->mm->context.vdso_base = ~0UL;
3419
3420 /* vDSO has a problem and was disabled, just don't "enable" it for the
3421 * process
3422@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3423 vdso_base = get_unmapped_area(NULL, vdso_base,
3424 (vdso_pages << PAGE_SHIFT) +
3425 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3426- 0, 0);
3427+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3428 if (IS_ERR_VALUE(vdso_base)) {
3429 rc = vdso_base;
3430 goto fail_mmapsem;
3431diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3432index 77f6421..829564a 100644
3433--- a/arch/powerpc/kernel/vio.c
3434+++ b/arch/powerpc/kernel/vio.c
3435@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3436 vio_cmo_dealloc(viodev, alloc_size);
3437 }
3438
3439-struct dma_map_ops vio_dma_mapping_ops = {
3440+static const struct dma_map_ops vio_dma_mapping_ops = {
3441 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3442 .free_coherent = vio_dma_iommu_free_coherent,
3443 .map_sg = vio_dma_iommu_map_sg,
3444 .unmap_sg = vio_dma_iommu_unmap_sg,
3445+ .dma_supported = dma_iommu_dma_supported,
3446 .map_page = vio_dma_iommu_map_page,
3447 .unmap_page = vio_dma_iommu_unmap_page,
3448
3449@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3450
3451 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3452 {
3453- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3454 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3455 }
3456
3457diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3458index 5eea6f3..5d10396 100644
3459--- a/arch/powerpc/lib/usercopy_64.c
3460+++ b/arch/powerpc/lib/usercopy_64.c
3461@@ -9,22 +9,6 @@
3462 #include <linux/module.h>
3463 #include <asm/uaccess.h>
3464
3465-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3466-{
3467- if (likely(access_ok(VERIFY_READ, from, n)))
3468- n = __copy_from_user(to, from, n);
3469- else
3470- memset(to, 0, n);
3471- return n;
3472-}
3473-
3474-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3475-{
3476- if (likely(access_ok(VERIFY_WRITE, to, n)))
3477- n = __copy_to_user(to, from, n);
3478- return n;
3479-}
3480-
3481 unsigned long copy_in_user(void __user *to, const void __user *from,
3482 unsigned long n)
3483 {
3484@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3485 return n;
3486 }
3487
3488-EXPORT_SYMBOL(copy_from_user);
3489-EXPORT_SYMBOL(copy_to_user);
3490 EXPORT_SYMBOL(copy_in_user);
3491
3492diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3493index e7dae82..877ce0d 100644
3494--- a/arch/powerpc/mm/fault.c
3495+++ b/arch/powerpc/mm/fault.c
3496@@ -30,6 +30,10 @@
3497 #include <linux/kprobes.h>
3498 #include <linux/kdebug.h>
3499 #include <linux/perf_event.h>
3500+#include <linux/slab.h>
3501+#include <linux/pagemap.h>
3502+#include <linux/compiler.h>
3503+#include <linux/unistd.h>
3504
3505 #include <asm/firmware.h>
3506 #include <asm/page.h>
3507@@ -40,6 +44,7 @@
3508 #include <asm/uaccess.h>
3509 #include <asm/tlbflush.h>
3510 #include <asm/siginfo.h>
3511+#include <asm/ptrace.h>
3512
3513
3514 #ifdef CONFIG_KPROBES
3515@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3516 }
3517 #endif
3518
3519+#ifdef CONFIG_PAX_PAGEEXEC
3520+/*
3521+ * PaX: decide what to do with offenders (regs->nip = fault address)
3522+ *
3523+ * returns 1 when task should be killed
3524+ */
3525+static int pax_handle_fetch_fault(struct pt_regs *regs)
3526+{
3527+ return 1;
3528+}
3529+
3530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3531+{
3532+ unsigned long i;
3533+
3534+ printk(KERN_ERR "PAX: bytes at PC: ");
3535+ for (i = 0; i < 5; i++) {
3536+ unsigned int c;
3537+ if (get_user(c, (unsigned int __user *)pc+i))
3538+ printk(KERN_CONT "???????? ");
3539+ else
3540+ printk(KERN_CONT "%08x ", c);
3541+ }
3542+ printk("\n");
3543+}
3544+#endif
3545+
3546 /*
3547 * Check whether the instruction at regs->nip is a store using
3548 * an update addressing form which will update r1.
3549@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3550 * indicate errors in DSISR but can validly be set in SRR1.
3551 */
3552 if (trap == 0x400)
3553- error_code &= 0x48200000;
3554+ error_code &= 0x58200000;
3555 else
3556 is_write = error_code & DSISR_ISSTORE;
3557 #else
3558@@ -250,7 +282,7 @@ good_area:
3559 * "undefined". Of those that can be set, this is the only
3560 * one which seems bad.
3561 */
3562- if (error_code & 0x10000000)
3563+ if (error_code & DSISR_GUARDED)
3564 /* Guarded storage error. */
3565 goto bad_area;
3566 #endif /* CONFIG_8xx */
3567@@ -265,7 +297,7 @@ good_area:
3568 * processors use the same I/D cache coherency mechanism
3569 * as embedded.
3570 */
3571- if (error_code & DSISR_PROTFAULT)
3572+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3573 goto bad_area;
3574 #endif /* CONFIG_PPC_STD_MMU */
3575
3576@@ -335,6 +367,23 @@ bad_area:
3577 bad_area_nosemaphore:
3578 /* User mode accesses cause a SIGSEGV */
3579 if (user_mode(regs)) {
3580+
3581+#ifdef CONFIG_PAX_PAGEEXEC
3582+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3583+#ifdef CONFIG_PPC_STD_MMU
3584+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3585+#else
3586+ if (is_exec && regs->nip == address) {
3587+#endif
3588+ switch (pax_handle_fetch_fault(regs)) {
3589+ }
3590+
3591+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3592+ do_group_exit(SIGKILL);
3593+ }
3594+ }
3595+#endif
3596+
3597 _exception(SIGSEGV, regs, code, address);
3598 return 0;
3599 }
3600diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3601index 5973631..ad617af 100644
3602--- a/arch/powerpc/mm/mem.c
3603+++ b/arch/powerpc/mm/mem.c
3604@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3605 {
3606 unsigned long lmb_next_region_start_pfn,
3607 lmb_region_max_pfn;
3608- int i;
3609+ unsigned int i;
3610
3611 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3612 lmb_region_max_pfn =
3613diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3614index 0d957a4..26d968f 100644
3615--- a/arch/powerpc/mm/mmap_64.c
3616+++ b/arch/powerpc/mm/mmap_64.c
3617@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3618 */
3619 if (mmap_is_legacy()) {
3620 mm->mmap_base = TASK_UNMAPPED_BASE;
3621+
3622+#ifdef CONFIG_PAX_RANDMMAP
3623+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3624+ mm->mmap_base += mm->delta_mmap;
3625+#endif
3626+
3627 mm->get_unmapped_area = arch_get_unmapped_area;
3628 mm->unmap_area = arch_unmap_area;
3629 } else {
3630 mm->mmap_base = mmap_base();
3631+
3632+#ifdef CONFIG_PAX_RANDMMAP
3633+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3634+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3635+#endif
3636+
3637 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3638 mm->unmap_area = arch_unmap_area_topdown;
3639 }
3640diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3641index ba51948..23009d9 100644
3642--- a/arch/powerpc/mm/slice.c
3643+++ b/arch/powerpc/mm/slice.c
3644@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3645 if ((mm->task_size - len) < addr)
3646 return 0;
3647 vma = find_vma(mm, addr);
3648- return (!vma || (addr + len) <= vma->vm_start);
3649+ return check_heap_stack_gap(vma, addr, len);
3650 }
3651
3652 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3653@@ -256,7 +256,7 @@ full_search:
3654 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3655 continue;
3656 }
3657- if (!vma || addr + len <= vma->vm_start) {
3658+ if (check_heap_stack_gap(vma, addr, len)) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3663 }
3664 }
3665
3666- addr = mm->mmap_base;
3667- while (addr > len) {
3668+ if (mm->mmap_base < len)
3669+ addr = -ENOMEM;
3670+ else
3671+ addr = mm->mmap_base - len;
3672+
3673+ while (!IS_ERR_VALUE(addr)) {
3674 /* Go down by chunk size */
3675- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3676+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3677
3678 /* Check for hit with different page size */
3679 mask = slice_range_to_mask(addr, len);
3680@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3681 * return with success:
3682 */
3683 vma = find_vma(mm, addr);
3684- if (!vma || (addr + len) <= vma->vm_start) {
3685+ if (check_heap_stack_gap(vma, addr, len)) {
3686 /* remember the address as a hint for next time */
3687 if (use_cache)
3688 mm->free_area_cache = addr;
3689@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3690 mm->cached_hole_size = vma->vm_start - addr;
3691
3692 /* try just below the current vma->vm_start */
3693- addr = vma->vm_start;
3694+ addr = skip_heap_stack_gap(vma, len);
3695 }
3696
3697 /*
3698@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3699 if (fixed && addr > (mm->task_size - len))
3700 return -EINVAL;
3701
3702+#ifdef CONFIG_PAX_RANDMMAP
3703+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3704+ addr = 0;
3705+#endif
3706+
3707 /* If hint, make sure it matches our alignment restrictions */
3708 if (!fixed && addr) {
3709 addr = _ALIGN_UP(addr, 1ul << pshift);
3710diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3711index b5c753d..8f01abe 100644
3712--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3713+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3714@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3715 lite5200_pm_target_state = PM_SUSPEND_ON;
3716 }
3717
3718-static struct platform_suspend_ops lite5200_pm_ops = {
3719+static const struct platform_suspend_ops lite5200_pm_ops = {
3720 .valid = lite5200_pm_valid,
3721 .begin = lite5200_pm_begin,
3722 .prepare = lite5200_pm_prepare,
3723diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3724index a55b0b6..478c18e 100644
3725--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3726+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3727@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3728 iounmap(mbar);
3729 }
3730
3731-static struct platform_suspend_ops mpc52xx_pm_ops = {
3732+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3733 .valid = mpc52xx_pm_valid,
3734 .prepare = mpc52xx_pm_prepare,
3735 .enter = mpc52xx_pm_enter,
3736diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3737index 08e65fc..643d3ac 100644
3738--- a/arch/powerpc/platforms/83xx/suspend.c
3739+++ b/arch/powerpc/platforms/83xx/suspend.c
3740@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3741 return ret;
3742 }
3743
3744-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3745+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3746 .valid = mpc83xx_suspend_valid,
3747 .begin = mpc83xx_suspend_begin,
3748 .enter = mpc83xx_suspend_enter,
3749diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3750index ca5bfdf..1602e09 100644
3751--- a/arch/powerpc/platforms/cell/iommu.c
3752+++ b/arch/powerpc/platforms/cell/iommu.c
3753@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3754
3755 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3756
3757-struct dma_map_ops dma_iommu_fixed_ops = {
3758+const struct dma_map_ops dma_iommu_fixed_ops = {
3759 .alloc_coherent = dma_fixed_alloc_coherent,
3760 .free_coherent = dma_fixed_free_coherent,
3761 .map_sg = dma_fixed_map_sg,
3762diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3763index e34b305..20e48ec 100644
3764--- a/arch/powerpc/platforms/ps3/system-bus.c
3765+++ b/arch/powerpc/platforms/ps3/system-bus.c
3766@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3767 return mask >= DMA_BIT_MASK(32);
3768 }
3769
3770-static struct dma_map_ops ps3_sb_dma_ops = {
3771+static const struct dma_map_ops ps3_sb_dma_ops = {
3772 .alloc_coherent = ps3_alloc_coherent,
3773 .free_coherent = ps3_free_coherent,
3774 .map_sg = ps3_sb_map_sg,
3775@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3776 .unmap_page = ps3_unmap_page,
3777 };
3778
3779-static struct dma_map_ops ps3_ioc0_dma_ops = {
3780+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3781 .alloc_coherent = ps3_alloc_coherent,
3782 .free_coherent = ps3_free_coherent,
3783 .map_sg = ps3_ioc0_map_sg,
3784diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3785index f0e6f28..60d53ed 100644
3786--- a/arch/powerpc/platforms/pseries/Kconfig
3787+++ b/arch/powerpc/platforms/pseries/Kconfig
3788@@ -2,6 +2,8 @@ config PPC_PSERIES
3789 depends on PPC64 && PPC_BOOK3S
3790 bool "IBM pSeries & new (POWER5-based) iSeries"
3791 select MPIC
3792+ select PCI_MSI
3793+ select XICS
3794 select PPC_I8259
3795 select PPC_RTAS
3796 select RTAS_ERROR_LOGGING
3797diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3798index 43c0aca..42c045b 100644
3799--- a/arch/s390/Kconfig
3800+++ b/arch/s390/Kconfig
3801@@ -194,28 +194,26 @@ config AUDIT_ARCH
3802
3803 config S390_SWITCH_AMODE
3804 bool "Switch kernel/user addressing modes"
3805+ default y
3806 help
3807 This option allows to switch the addressing modes of kernel and user
3808- space. The kernel parameter switch_amode=on will enable this feature,
3809- default is disabled. Enabling this (via kernel parameter) on machines
3810- earlier than IBM System z9-109 EC/BC will reduce system performance.
3811+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3812+ will reduce system performance.
3813
3814 Note that this option will also be selected by selecting the execute
3815- protection option below. Enabling the execute protection via the
3816- noexec kernel parameter will also switch the addressing modes,
3817- independent of the switch_amode kernel parameter.
3818+ protection option below. Enabling the execute protection will also
3819+ switch the addressing modes, independent of this option.
3820
3821
3822 config S390_EXEC_PROTECT
3823 bool "Data execute protection"
3824+ default y
3825 select S390_SWITCH_AMODE
3826 help
3827 This option allows to enable a buffer overflow protection for user
3828 space programs and it also selects the addressing mode option above.
3829- The kernel parameter noexec=on will enable this feature and also
3830- switch the addressing modes, default is disabled. Enabling this (via
3831- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3832- will reduce system performance.
3833+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3834+ reduce system performance.
3835
3836 comment "Code generation options"
3837
3838diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3839index e885442..5e6c303 100644
3840--- a/arch/s390/include/asm/elf.h
3841+++ b/arch/s390/include/asm/elf.h
3842@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3843 that it will "exec", and that there is sufficient room for the brk. */
3844 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3845
3846+#ifdef CONFIG_PAX_ASLR
3847+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3848+
3849+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3850+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3851+#endif
3852+
3853 /* This yields a mask that user programs can use to figure out what
3854 instruction set this CPU supports. */
3855
3856diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3857index e37478e..9ce0e9f 100644
3858--- a/arch/s390/include/asm/setup.h
3859+++ b/arch/s390/include/asm/setup.h
3860@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3861 void detect_memory_layout(struct mem_chunk chunk[]);
3862
3863 #ifdef CONFIG_S390_SWITCH_AMODE
3864-extern unsigned int switch_amode;
3865+#define switch_amode (1)
3866 #else
3867 #define switch_amode (0)
3868 #endif
3869
3870 #ifdef CONFIG_S390_EXEC_PROTECT
3871-extern unsigned int s390_noexec;
3872+#define s390_noexec (1)
3873 #else
3874 #define s390_noexec (0)
3875 #endif
3876diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3877index 8377e91..e28e6f1 100644
3878--- a/arch/s390/include/asm/uaccess.h
3879+++ b/arch/s390/include/asm/uaccess.h
3880@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3881 copy_to_user(void __user *to, const void *from, unsigned long n)
3882 {
3883 might_fault();
3884+
3885+ if ((long)n < 0)
3886+ return n;
3887+
3888 if (access_ok(VERIFY_WRITE, to, n))
3889 n = __copy_to_user(to, from, n);
3890 return n;
3891@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3892 static inline unsigned long __must_check
3893 __copy_from_user(void *to, const void __user *from, unsigned long n)
3894 {
3895+ if ((long)n < 0)
3896+ return n;
3897+
3898 if (__builtin_constant_p(n) && (n <= 256))
3899 return uaccess.copy_from_user_small(n, from, to);
3900 else
3901@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3902 copy_from_user(void *to, const void __user *from, unsigned long n)
3903 {
3904 might_fault();
3905+
3906+ if ((long)n < 0)
3907+ return n;
3908+
3909 if (access_ok(VERIFY_READ, from, n))
3910 n = __copy_from_user(to, from, n);
3911 else
3912diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3913index 639380a..72e3c02 100644
3914--- a/arch/s390/kernel/module.c
3915+++ b/arch/s390/kernel/module.c
3916@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3917
3918 /* Increase core size by size of got & plt and set start
3919 offsets for got and plt. */
3920- me->core_size = ALIGN(me->core_size, 4);
3921- me->arch.got_offset = me->core_size;
3922- me->core_size += me->arch.got_size;
3923- me->arch.plt_offset = me->core_size;
3924- me->core_size += me->arch.plt_size;
3925+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3926+ me->arch.got_offset = me->core_size_rw;
3927+ me->core_size_rw += me->arch.got_size;
3928+ me->arch.plt_offset = me->core_size_rx;
3929+ me->core_size_rx += me->arch.plt_size;
3930 return 0;
3931 }
3932
3933@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3934 if (info->got_initialized == 0) {
3935 Elf_Addr *gotent;
3936
3937- gotent = me->module_core + me->arch.got_offset +
3938+ gotent = me->module_core_rw + me->arch.got_offset +
3939 info->got_offset;
3940 *gotent = val;
3941 info->got_initialized = 1;
3942@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3943 else if (r_type == R_390_GOTENT ||
3944 r_type == R_390_GOTPLTENT)
3945 *(unsigned int *) loc =
3946- (val + (Elf_Addr) me->module_core - loc) >> 1;
3947+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3948 else if (r_type == R_390_GOT64 ||
3949 r_type == R_390_GOTPLT64)
3950 *(unsigned long *) loc = val;
3951@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3952 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3953 if (info->plt_initialized == 0) {
3954 unsigned int *ip;
3955- ip = me->module_core + me->arch.plt_offset +
3956+ ip = me->module_core_rx + me->arch.plt_offset +
3957 info->plt_offset;
3958 #ifndef CONFIG_64BIT
3959 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3960@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3961 val - loc + 0xffffUL < 0x1ffffeUL) ||
3962 (r_type == R_390_PLT32DBL &&
3963 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3964- val = (Elf_Addr) me->module_core +
3965+ val = (Elf_Addr) me->module_core_rx +
3966 me->arch.plt_offset +
3967 info->plt_offset;
3968 val += rela->r_addend - loc;
3969@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3970 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3971 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3972 val = val + rela->r_addend -
3973- ((Elf_Addr) me->module_core + me->arch.got_offset);
3974+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3975 if (r_type == R_390_GOTOFF16)
3976 *(unsigned short *) loc = val;
3977 else if (r_type == R_390_GOTOFF32)
3978@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3979 break;
3980 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3981 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3982- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3983+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3984 rela->r_addend - loc;
3985 if (r_type == R_390_GOTPC)
3986 *(unsigned int *) loc = val;
3987diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3988index 061479f..dbfb08c 100644
3989--- a/arch/s390/kernel/setup.c
3990+++ b/arch/s390/kernel/setup.c
3991@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3992 early_param("mem", early_parse_mem);
3993
3994 #ifdef CONFIG_S390_SWITCH_AMODE
3995-unsigned int switch_amode = 0;
3996-EXPORT_SYMBOL_GPL(switch_amode);
3997-
3998 static int set_amode_and_uaccess(unsigned long user_amode,
3999 unsigned long user32_amode)
4000 {
4001@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4002 return 0;
4003 }
4004 }
4005-
4006-/*
4007- * Switch kernel/user addressing modes?
4008- */
4009-static int __init early_parse_switch_amode(char *p)
4010-{
4011- switch_amode = 1;
4012- return 0;
4013-}
4014-early_param("switch_amode", early_parse_switch_amode);
4015-
4016 #else /* CONFIG_S390_SWITCH_AMODE */
4017 static inline int set_amode_and_uaccess(unsigned long user_amode,
4018 unsigned long user32_amode)
4019@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4020 }
4021 #endif /* CONFIG_S390_SWITCH_AMODE */
4022
4023-#ifdef CONFIG_S390_EXEC_PROTECT
4024-unsigned int s390_noexec = 0;
4025-EXPORT_SYMBOL_GPL(s390_noexec);
4026-
4027-/*
4028- * Enable execute protection?
4029- */
4030-static int __init early_parse_noexec(char *p)
4031-{
4032- if (!strncmp(p, "off", 3))
4033- return 0;
4034- switch_amode = 1;
4035- s390_noexec = 1;
4036- return 0;
4037-}
4038-early_param("noexec", early_parse_noexec);
4039-#endif /* CONFIG_S390_EXEC_PROTECT */
4040-
4041 static void setup_addressing_mode(void)
4042 {
4043 if (s390_noexec) {
4044diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4045index f4558cc..e461f37 100644
4046--- a/arch/s390/mm/mmap.c
4047+++ b/arch/s390/mm/mmap.c
4048@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4049 */
4050 if (mmap_is_legacy()) {
4051 mm->mmap_base = TASK_UNMAPPED_BASE;
4052+
4053+#ifdef CONFIG_PAX_RANDMMAP
4054+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4055+ mm->mmap_base += mm->delta_mmap;
4056+#endif
4057+
4058 mm->get_unmapped_area = arch_get_unmapped_area;
4059 mm->unmap_area = arch_unmap_area;
4060 } else {
4061 mm->mmap_base = mmap_base();
4062+
4063+#ifdef CONFIG_PAX_RANDMMAP
4064+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4065+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4066+#endif
4067+
4068 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4069 mm->unmap_area = arch_unmap_area_topdown;
4070 }
4071@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4072 */
4073 if (mmap_is_legacy()) {
4074 mm->mmap_base = TASK_UNMAPPED_BASE;
4075+
4076+#ifdef CONFIG_PAX_RANDMMAP
4077+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4078+ mm->mmap_base += mm->delta_mmap;
4079+#endif
4080+
4081 mm->get_unmapped_area = s390_get_unmapped_area;
4082 mm->unmap_area = arch_unmap_area;
4083 } else {
4084 mm->mmap_base = mmap_base();
4085+
4086+#ifdef CONFIG_PAX_RANDMMAP
4087+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4088+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4089+#endif
4090+
4091 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4092 mm->unmap_area = arch_unmap_area_topdown;
4093 }
4094diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4095index 589d5c7..669e274 100644
4096--- a/arch/score/include/asm/system.h
4097+++ b/arch/score/include/asm/system.h
4098@@ -17,7 +17,7 @@ do { \
4099 #define finish_arch_switch(prev) do {} while (0)
4100
4101 typedef void (*vi_handler_t)(void);
4102-extern unsigned long arch_align_stack(unsigned long sp);
4103+#define arch_align_stack(x) (x)
4104
4105 #define mb() barrier()
4106 #define rmb() barrier()
4107diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4108index 25d0803..d6c8e36 100644
4109--- a/arch/score/kernel/process.c
4110+++ b/arch/score/kernel/process.c
4111@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4112
4113 return task_pt_regs(task)->cp0_epc;
4114 }
4115-
4116-unsigned long arch_align_stack(unsigned long sp)
4117-{
4118- return sp;
4119-}
4120diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4121index d936c1a..304a252 100644
4122--- a/arch/sh/boards/mach-hp6xx/pm.c
4123+++ b/arch/sh/boards/mach-hp6xx/pm.c
4124@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4125 return 0;
4126 }
4127
4128-static struct platform_suspend_ops hp6x0_pm_ops = {
4129+static const struct platform_suspend_ops hp6x0_pm_ops = {
4130 .enter = hp6x0_pm_enter,
4131 .valid = suspend_valid_only_mem,
4132 };
4133diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4134index 8a8a993..7b3079b 100644
4135--- a/arch/sh/kernel/cpu/sh4/sq.c
4136+++ b/arch/sh/kernel/cpu/sh4/sq.c
4137@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4138 NULL,
4139 };
4140
4141-static struct sysfs_ops sq_sysfs_ops = {
4142+static const struct sysfs_ops sq_sysfs_ops = {
4143 .show = sq_sysfs_show,
4144 .store = sq_sysfs_store,
4145 };
4146diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4147index ee3c2aa..c49cee6 100644
4148--- a/arch/sh/kernel/cpu/shmobile/pm.c
4149+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4150@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4151 return 0;
4152 }
4153
4154-static struct platform_suspend_ops sh_pm_ops = {
4155+static const struct platform_suspend_ops sh_pm_ops = {
4156 .enter = sh_pm_enter,
4157 .valid = suspend_valid_only_mem,
4158 };
4159diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4160index 3e532d0..9faa306 100644
4161--- a/arch/sh/kernel/kgdb.c
4162+++ b/arch/sh/kernel/kgdb.c
4163@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4164 {
4165 }
4166
4167-struct kgdb_arch arch_kgdb_ops = {
4168+const struct kgdb_arch arch_kgdb_ops = {
4169 /* Breakpoint instruction: trapa #0x3c */
4170 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4171 .gdb_bpt_instr = { 0x3c, 0xc3 },
4172diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4173index afeb710..d1d1289 100644
4174--- a/arch/sh/mm/mmap.c
4175+++ b/arch/sh/mm/mmap.c
4176@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4177 addr = PAGE_ALIGN(addr);
4178
4179 vma = find_vma(mm, addr);
4180- if (TASK_SIZE - len >= addr &&
4181- (!vma || addr + len <= vma->vm_start))
4182+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4183 return addr;
4184 }
4185
4186@@ -106,7 +105,7 @@ full_search:
4187 }
4188 return -ENOMEM;
4189 }
4190- if (likely(!vma || addr + len <= vma->vm_start)) {
4191+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4192 /*
4193 * Remember the place where we stopped the search:
4194 */
4195@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4196 addr = PAGE_ALIGN(addr);
4197
4198 vma = find_vma(mm, addr);
4199- if (TASK_SIZE - len >= addr &&
4200- (!vma || addr + len <= vma->vm_start))
4201+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4202 return addr;
4203 }
4204
4205@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4206 /* make sure it can fit in the remaining address space */
4207 if (likely(addr > len)) {
4208 vma = find_vma(mm, addr-len);
4209- if (!vma || addr <= vma->vm_start) {
4210+ if (check_heap_stack_gap(vma, addr - len, len)) {
4211 /* remember the address as a hint for next time */
4212 return (mm->free_area_cache = addr-len);
4213 }
4214@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4215 if (unlikely(mm->mmap_base < len))
4216 goto bottomup;
4217
4218- addr = mm->mmap_base-len;
4219- if (do_colour_align)
4220- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4221+ addr = mm->mmap_base - len;
4222
4223 do {
4224+ if (do_colour_align)
4225+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4226 /*
4227 * Lookup failure means no vma is above this address,
4228 * else if new region fits below vma->vm_start,
4229 * return with success:
4230 */
4231 vma = find_vma(mm, addr);
4232- if (likely(!vma || addr+len <= vma->vm_start)) {
4233+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4234 /* remember the address as a hint for next time */
4235 return (mm->free_area_cache = addr);
4236 }
4237@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4238 mm->cached_hole_size = vma->vm_start - addr;
4239
4240 /* try just below the current vma->vm_start */
4241- addr = vma->vm_start-len;
4242- if (do_colour_align)
4243- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4244- } while (likely(len < vma->vm_start));
4245+ addr = skip_heap_stack_gap(vma, len);
4246+ } while (!IS_ERR_VALUE(addr));
4247
4248 bottomup:
4249 /*
4250diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4251index 113225b..7fd04e7 100644
4252--- a/arch/sparc/Makefile
4253+++ b/arch/sparc/Makefile
4254@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4255 # Export what is needed by arch/sparc/boot/Makefile
4256 export VMLINUX_INIT VMLINUX_MAIN
4257 VMLINUX_INIT := $(head-y) $(init-y)
4258-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4259+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4260 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4261 VMLINUX_MAIN += $(drivers-y) $(net-y)
4262
4263diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4264index f5cc06f..f858d47 100644
4265--- a/arch/sparc/include/asm/atomic_64.h
4266+++ b/arch/sparc/include/asm/atomic_64.h
4267@@ -14,18 +14,40 @@
4268 #define ATOMIC64_INIT(i) { (i) }
4269
4270 #define atomic_read(v) ((v)->counter)
4271+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4272+{
4273+ return v->counter;
4274+}
4275 #define atomic64_read(v) ((v)->counter)
4276+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4277+{
4278+ return v->counter;
4279+}
4280
4281 #define atomic_set(v, i) (((v)->counter) = i)
4282+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4283+{
4284+ v->counter = i;
4285+}
4286 #define atomic64_set(v, i) (((v)->counter) = i)
4287+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4288+{
4289+ v->counter = i;
4290+}
4291
4292 extern void atomic_add(int, atomic_t *);
4293+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4294 extern void atomic64_add(long, atomic64_t *);
4295+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4296 extern void atomic_sub(int, atomic_t *);
4297+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_sub(long, atomic64_t *);
4299+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4300
4301 extern int atomic_add_ret(int, atomic_t *);
4302+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4303 extern long atomic64_add_ret(long, atomic64_t *);
4304+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4305 extern int atomic_sub_ret(int, atomic_t *);
4306 extern long atomic64_sub_ret(long, atomic64_t *);
4307
4308@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4309 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4310
4311 #define atomic_inc_return(v) atomic_add_ret(1, v)
4312+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4313+{
4314+ return atomic_add_ret_unchecked(1, v);
4315+}
4316 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4317+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4318+{
4319+ return atomic64_add_ret_unchecked(1, v);
4320+}
4321
4322 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4323 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4324
4325 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4326+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4327+{
4328+ return atomic_add_ret_unchecked(i, v);
4329+}
4330 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4331+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4332+{
4333+ return atomic64_add_ret_unchecked(i, v);
4334+}
4335
4336 /*
4337 * atomic_inc_and_test - increment and test
4338@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4339 * other cases.
4340 */
4341 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4342+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4343+{
4344+ return atomic_inc_return_unchecked(v) == 0;
4345+}
4346 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4347
4348 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4349@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4350 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4351
4352 #define atomic_inc(v) atomic_add(1, v)
4353+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4354+{
4355+ atomic_add_unchecked(1, v);
4356+}
4357 #define atomic64_inc(v) atomic64_add(1, v)
4358+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4359+{
4360+ atomic64_add_unchecked(1, v);
4361+}
4362
4363 #define atomic_dec(v) atomic_sub(1, v)
4364+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4365+{
4366+ atomic_sub_unchecked(1, v);
4367+}
4368 #define atomic64_dec(v) atomic64_sub(1, v)
4369+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4370+{
4371+ atomic64_sub_unchecked(1, v);
4372+}
4373
4374 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4375 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4376
4377 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4378+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4379+{
4380+ return cmpxchg(&v->counter, old, new);
4381+}
4382 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4383+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4384+{
4385+ return xchg(&v->counter, new);
4386+}
4387
4388 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4389 {
4390- int c, old;
4391+ int c, old, new;
4392 c = atomic_read(v);
4393 for (;;) {
4394- if (unlikely(c == (u)))
4395+ if (unlikely(c == u))
4396 break;
4397- old = atomic_cmpxchg((v), c, c + (a));
4398+
4399+ asm volatile("addcc %2, %0, %0\n"
4400+
4401+#ifdef CONFIG_PAX_REFCOUNT
4402+ "tvs %%icc, 6\n"
4403+#endif
4404+
4405+ : "=r" (new)
4406+ : "0" (c), "ir" (a)
4407+ : "cc");
4408+
4409+ old = atomic_cmpxchg(v, c, new);
4410 if (likely(old == c))
4411 break;
4412 c = old;
4413 }
4414- return c != (u);
4415+ return c != u;
4416 }
4417
4418 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4419@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4420 #define atomic64_cmpxchg(v, o, n) \
4421 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4422 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4423+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4424+{
4425+ return xchg(&v->counter, new);
4426+}
4427
4428 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4429 {
4430- long c, old;
4431+ long c, old, new;
4432 c = atomic64_read(v);
4433 for (;;) {
4434- if (unlikely(c == (u)))
4435+ if (unlikely(c == u))
4436 break;
4437- old = atomic64_cmpxchg((v), c, c + (a));
4438+
4439+ asm volatile("addcc %2, %0, %0\n"
4440+
4441+#ifdef CONFIG_PAX_REFCOUNT
4442+ "tvs %%xcc, 6\n"
4443+#endif
4444+
4445+ : "=r" (new)
4446+ : "0" (c), "ir" (a)
4447+ : "cc");
4448+
4449+ old = atomic64_cmpxchg(v, c, new);
4450 if (likely(old == c))
4451 break;
4452 c = old;
4453 }
4454- return c != (u);
4455+ return c != u;
4456 }
4457
4458 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4459diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4460index 41f85ae..fb54d5e 100644
4461--- a/arch/sparc/include/asm/cache.h
4462+++ b/arch/sparc/include/asm/cache.h
4463@@ -8,7 +8,7 @@
4464 #define _SPARC_CACHE_H
4465
4466 #define L1_CACHE_SHIFT 5
4467-#define L1_CACHE_BYTES 32
4468+#define L1_CACHE_BYTES 32UL
4469 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4470
4471 #ifdef CONFIG_SPARC32
4472diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4473index 5a8c308..38def92 100644
4474--- a/arch/sparc/include/asm/dma-mapping.h
4475+++ b/arch/sparc/include/asm/dma-mapping.h
4476@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4477 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4478 #define dma_is_consistent(d, h) (1)
4479
4480-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4481+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4482 extern struct bus_type pci_bus_type;
4483
4484-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4485+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4486 {
4487 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4488 if (dev->bus == &pci_bus_type)
4489@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4490 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4491 dma_addr_t *dma_handle, gfp_t flag)
4492 {
4493- struct dma_map_ops *ops = get_dma_ops(dev);
4494+ const struct dma_map_ops *ops = get_dma_ops(dev);
4495 void *cpu_addr;
4496
4497 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4498@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4499 static inline void dma_free_coherent(struct device *dev, size_t size,
4500 void *cpu_addr, dma_addr_t dma_handle)
4501 {
4502- struct dma_map_ops *ops = get_dma_ops(dev);
4503+ const struct dma_map_ops *ops = get_dma_ops(dev);
4504
4505 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4506 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4507diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4508index 381a1b5..b97e3ff 100644
4509--- a/arch/sparc/include/asm/elf_32.h
4510+++ b/arch/sparc/include/asm/elf_32.h
4511@@ -116,6 +116,13 @@ typedef struct {
4512
4513 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4514
4515+#ifdef CONFIG_PAX_ASLR
4516+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4517+
4518+#define PAX_DELTA_MMAP_LEN 16
4519+#define PAX_DELTA_STACK_LEN 16
4520+#endif
4521+
4522 /* This yields a mask that user programs can use to figure out what
4523 instruction set this cpu supports. This can NOT be done in userspace
4524 on Sparc. */
4525diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4526index 9968085..c2106ef 100644
4527--- a/arch/sparc/include/asm/elf_64.h
4528+++ b/arch/sparc/include/asm/elf_64.h
4529@@ -163,6 +163,12 @@ typedef struct {
4530 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4531 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4532
4533+#ifdef CONFIG_PAX_ASLR
4534+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4535+
4536+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4537+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4538+#endif
4539
4540 /* This yields a mask that user programs can use to figure out what
4541 instruction set this cpu supports. */
4542diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4543index e0cabe7..efd60f1 100644
4544--- a/arch/sparc/include/asm/pgtable_32.h
4545+++ b/arch/sparc/include/asm/pgtable_32.h
4546@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4547 BTFIXUPDEF_INT(page_none)
4548 BTFIXUPDEF_INT(page_copy)
4549 BTFIXUPDEF_INT(page_readonly)
4550+
4551+#ifdef CONFIG_PAX_PAGEEXEC
4552+BTFIXUPDEF_INT(page_shared_noexec)
4553+BTFIXUPDEF_INT(page_copy_noexec)
4554+BTFIXUPDEF_INT(page_readonly_noexec)
4555+#endif
4556+
4557 BTFIXUPDEF_INT(page_kernel)
4558
4559 #define PMD_SHIFT SUN4C_PMD_SHIFT
4560@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4561 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4562 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4563
4564+#ifdef CONFIG_PAX_PAGEEXEC
4565+extern pgprot_t PAGE_SHARED_NOEXEC;
4566+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4567+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4568+#else
4569+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4570+# define PAGE_COPY_NOEXEC PAGE_COPY
4571+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4572+#endif
4573+
4574 extern unsigned long page_kernel;
4575
4576 #ifdef MODULE
4577diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4578index 1407c07..7e10231 100644
4579--- a/arch/sparc/include/asm/pgtsrmmu.h
4580+++ b/arch/sparc/include/asm/pgtsrmmu.h
4581@@ -115,6 +115,13 @@
4582 SRMMU_EXEC | SRMMU_REF)
4583 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4584 SRMMU_EXEC | SRMMU_REF)
4585+
4586+#ifdef CONFIG_PAX_PAGEEXEC
4587+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4588+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4589+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4590+#endif
4591+
4592 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4593 SRMMU_DIRTY | SRMMU_REF)
4594
4595diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4596index 43e5147..47622a1 100644
4597--- a/arch/sparc/include/asm/spinlock_64.h
4598+++ b/arch/sparc/include/asm/spinlock_64.h
4599@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4600
4601 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4602
4603-static void inline arch_read_lock(raw_rwlock_t *lock)
4604+static inline void arch_read_lock(raw_rwlock_t *lock)
4605 {
4606 unsigned long tmp1, tmp2;
4607
4608 __asm__ __volatile__ (
4609 "1: ldsw [%2], %0\n"
4610 " brlz,pn %0, 2f\n"
4611-"4: add %0, 1, %1\n"
4612+"4: addcc %0, 1, %1\n"
4613+
4614+#ifdef CONFIG_PAX_REFCOUNT
4615+" tvs %%icc, 6\n"
4616+#endif
4617+
4618 " cas [%2], %0, %1\n"
4619 " cmp %0, %1\n"
4620 " bne,pn %%icc, 1b\n"
4621@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4622 " .previous"
4623 : "=&r" (tmp1), "=&r" (tmp2)
4624 : "r" (lock)
4625- : "memory");
4626+ : "memory", "cc");
4627 }
4628
4629-static int inline arch_read_trylock(raw_rwlock_t *lock)
4630+static inline int arch_read_trylock(raw_rwlock_t *lock)
4631 {
4632 int tmp1, tmp2;
4633
4634@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4635 "1: ldsw [%2], %0\n"
4636 " brlz,a,pn %0, 2f\n"
4637 " mov 0, %0\n"
4638-" add %0, 1, %1\n"
4639+" addcc %0, 1, %1\n"
4640+
4641+#ifdef CONFIG_PAX_REFCOUNT
4642+" tvs %%icc, 6\n"
4643+#endif
4644+
4645 " cas [%2], %0, %1\n"
4646 " cmp %0, %1\n"
4647 " bne,pn %%icc, 1b\n"
4648@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4649 return tmp1;
4650 }
4651
4652-static void inline arch_read_unlock(raw_rwlock_t *lock)
4653+static inline void arch_read_unlock(raw_rwlock_t *lock)
4654 {
4655 unsigned long tmp1, tmp2;
4656
4657 __asm__ __volatile__(
4658 "1: lduw [%2], %0\n"
4659-" sub %0, 1, %1\n"
4660+" subcc %0, 1, %1\n"
4661+
4662+#ifdef CONFIG_PAX_REFCOUNT
4663+" tvs %%icc, 6\n"
4664+#endif
4665+
4666 " cas [%2], %0, %1\n"
4667 " cmp %0, %1\n"
4668 " bne,pn %%xcc, 1b\n"
4669@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4670 : "memory");
4671 }
4672
4673-static void inline arch_write_lock(raw_rwlock_t *lock)
4674+static inline void arch_write_lock(raw_rwlock_t *lock)
4675 {
4676 unsigned long mask, tmp1, tmp2;
4677
4678@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4679 : "memory");
4680 }
4681
4682-static void inline arch_write_unlock(raw_rwlock_t *lock)
4683+static inline void arch_write_unlock(raw_rwlock_t *lock)
4684 {
4685 __asm__ __volatile__(
4686 " stw %%g0, [%0]"
4687@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4688 : "memory");
4689 }
4690
4691-static int inline arch_write_trylock(raw_rwlock_t *lock)
4692+static inline int arch_write_trylock(raw_rwlock_t *lock)
4693 {
4694 unsigned long mask, tmp1, tmp2, result;
4695
4696diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4697index 844d73a..f787fb9 100644
4698--- a/arch/sparc/include/asm/thread_info_32.h
4699+++ b/arch/sparc/include/asm/thread_info_32.h
4700@@ -50,6 +50,8 @@ struct thread_info {
4701 unsigned long w_saved;
4702
4703 struct restart_block restart_block;
4704+
4705+ unsigned long lowest_stack;
4706 };
4707
4708 /*
4709diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4710index f78ad9a..9f55fc7 100644
4711--- a/arch/sparc/include/asm/thread_info_64.h
4712+++ b/arch/sparc/include/asm/thread_info_64.h
4713@@ -68,6 +68,8 @@ struct thread_info {
4714 struct pt_regs *kern_una_regs;
4715 unsigned int kern_una_insn;
4716
4717+ unsigned long lowest_stack;
4718+
4719 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4720 };
4721
4722diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4723index e88fbe5..96b0ce5 100644
4724--- a/arch/sparc/include/asm/uaccess.h
4725+++ b/arch/sparc/include/asm/uaccess.h
4726@@ -1,5 +1,13 @@
4727 #ifndef ___ASM_SPARC_UACCESS_H
4728 #define ___ASM_SPARC_UACCESS_H
4729+
4730+#ifdef __KERNEL__
4731+#ifndef __ASSEMBLY__
4732+#include <linux/types.h>
4733+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4734+#endif
4735+#endif
4736+
4737 #if defined(__sparc__) && defined(__arch64__)
4738 #include <asm/uaccess_64.h>
4739 #else
4740diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4741index 8303ac4..07f333d 100644
4742--- a/arch/sparc/include/asm/uaccess_32.h
4743+++ b/arch/sparc/include/asm/uaccess_32.h
4744@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4745
4746 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4747 {
4748- if (n && __access_ok((unsigned long) to, n))
4749+ if ((long)n < 0)
4750+ return n;
4751+
4752+ if (n && __access_ok((unsigned long) to, n)) {
4753+ if (!__builtin_constant_p(n))
4754+ check_object_size(from, n, true);
4755 return __copy_user(to, (__force void __user *) from, n);
4756- else
4757+ } else
4758 return n;
4759 }
4760
4761 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4762 {
4763+ if ((long)n < 0)
4764+ return n;
4765+
4766+ if (!__builtin_constant_p(n))
4767+ check_object_size(from, n, true);
4768+
4769 return __copy_user(to, (__force void __user *) from, n);
4770 }
4771
4772 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774- if (n && __access_ok((unsigned long) from, n))
4775+ if ((long)n < 0)
4776+ return n;
4777+
4778+ if (n && __access_ok((unsigned long) from, n)) {
4779+ if (!__builtin_constant_p(n))
4780+ check_object_size(to, n, false);
4781 return __copy_user((__force void __user *) to, from, n);
4782- else
4783+ } else
4784 return n;
4785 }
4786
4787 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789+ if ((long)n < 0)
4790+ return n;
4791+
4792 return __copy_user((__force void __user *) to, from, n);
4793 }
4794
4795diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4796index 9ea271e..7b8a271 100644
4797--- a/arch/sparc/include/asm/uaccess_64.h
4798+++ b/arch/sparc/include/asm/uaccess_64.h
4799@@ -9,6 +9,7 @@
4800 #include <linux/compiler.h>
4801 #include <linux/string.h>
4802 #include <linux/thread_info.h>
4803+#include <linux/kernel.h>
4804 #include <asm/asi.h>
4805 #include <asm/system.h>
4806 #include <asm/spitfire.h>
4807@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4808 static inline unsigned long __must_check
4809 copy_from_user(void *to, const void __user *from, unsigned long size)
4810 {
4811- unsigned long ret = ___copy_from_user(to, from, size);
4812+ unsigned long ret;
4813
4814+ if ((long)size < 0 || size > INT_MAX)
4815+ return size;
4816+
4817+ if (!__builtin_constant_p(size))
4818+ check_object_size(to, size, false);
4819+
4820+ ret = ___copy_from_user(to, from, size);
4821 if (unlikely(ret))
4822 ret = copy_from_user_fixup(to, from, size);
4823 return ret;
4824@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4825 static inline unsigned long __must_check
4826 copy_to_user(void __user *to, const void *from, unsigned long size)
4827 {
4828- unsigned long ret = ___copy_to_user(to, from, size);
4829+ unsigned long ret;
4830
4831+ if ((long)size < 0 || size > INT_MAX)
4832+ return size;
4833+
4834+ if (!__builtin_constant_p(size))
4835+ check_object_size(from, size, true);
4836+
4837+ ret = ___copy_to_user(to, from, size);
4838 if (unlikely(ret))
4839 ret = copy_to_user_fixup(to, from, size);
4840 return ret;
4841diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4842index 2782681..77ded84 100644
4843--- a/arch/sparc/kernel/Makefile
4844+++ b/arch/sparc/kernel/Makefile
4845@@ -3,7 +3,7 @@
4846 #
4847
4848 asflags-y := -ansi
4849-ccflags-y := -Werror
4850+#ccflags-y := -Werror
4851
4852 extra-y := head_$(BITS).o
4853 extra-y += init_task.o
4854diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4855index 7690cc2..ece64c9 100644
4856--- a/arch/sparc/kernel/iommu.c
4857+++ b/arch/sparc/kernel/iommu.c
4858@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4859 spin_unlock_irqrestore(&iommu->lock, flags);
4860 }
4861
4862-static struct dma_map_ops sun4u_dma_ops = {
4863+static const struct dma_map_ops sun4u_dma_ops = {
4864 .alloc_coherent = dma_4u_alloc_coherent,
4865 .free_coherent = dma_4u_free_coherent,
4866 .map_page = dma_4u_map_page,
4867@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4868 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4869 };
4870
4871-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4872+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4873 EXPORT_SYMBOL(dma_ops);
4874
4875 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4876diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4877index 9f61fd8..bd048db 100644
4878--- a/arch/sparc/kernel/ioport.c
4879+++ b/arch/sparc/kernel/ioport.c
4880@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4881 BUG();
4882 }
4883
4884-struct dma_map_ops sbus_dma_ops = {
4885+const struct dma_map_ops sbus_dma_ops = {
4886 .alloc_coherent = sbus_alloc_coherent,
4887 .free_coherent = sbus_free_coherent,
4888 .map_page = sbus_map_page,
4889@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4890 .sync_sg_for_device = sbus_sync_sg_for_device,
4891 };
4892
4893-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4894+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4895 EXPORT_SYMBOL(dma_ops);
4896
4897 static int __init sparc_register_ioport(void)
4898@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4899 }
4900 }
4901
4902-struct dma_map_ops pci32_dma_ops = {
4903+const struct dma_map_ops pci32_dma_ops = {
4904 .alloc_coherent = pci32_alloc_coherent,
4905 .free_coherent = pci32_free_coherent,
4906 .map_page = pci32_map_page,
4907diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4908index 04df4ed..55c4b6e 100644
4909--- a/arch/sparc/kernel/kgdb_32.c
4910+++ b/arch/sparc/kernel/kgdb_32.c
4911@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4912 {
4913 }
4914
4915-struct kgdb_arch arch_kgdb_ops = {
4916+const struct kgdb_arch arch_kgdb_ops = {
4917 /* Breakpoint instruction: ta 0x7d */
4918 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4919 };
4920diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4921index f5a0fd4..d886f71 100644
4922--- a/arch/sparc/kernel/kgdb_64.c
4923+++ b/arch/sparc/kernel/kgdb_64.c
4924@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4925 {
4926 }
4927
4928-struct kgdb_arch arch_kgdb_ops = {
4929+const struct kgdb_arch arch_kgdb_ops = {
4930 /* Breakpoint instruction: ta 0x72 */
4931 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4932 };
4933diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4934index 23c33ff..d137fbd 100644
4935--- a/arch/sparc/kernel/pci_sun4v.c
4936+++ b/arch/sparc/kernel/pci_sun4v.c
4937@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4938 spin_unlock_irqrestore(&iommu->lock, flags);
4939 }
4940
4941-static struct dma_map_ops sun4v_dma_ops = {
4942+static const struct dma_map_ops sun4v_dma_ops = {
4943 .alloc_coherent = dma_4v_alloc_coherent,
4944 .free_coherent = dma_4v_free_coherent,
4945 .map_page = dma_4v_map_page,
4946diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4947index c49865b..b41a81b 100644
4948--- a/arch/sparc/kernel/process_32.c
4949+++ b/arch/sparc/kernel/process_32.c
4950@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4951 rw->ins[4], rw->ins[5],
4952 rw->ins[6],
4953 rw->ins[7]);
4954- printk("%pS\n", (void *) rw->ins[7]);
4955+ printk("%pA\n", (void *) rw->ins[7]);
4956 rw = (struct reg_window32 *) rw->ins[6];
4957 }
4958 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4959@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4960
4961 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4962 r->psr, r->pc, r->npc, r->y, print_tainted());
4963- printk("PC: <%pS>\n", (void *) r->pc);
4964+ printk("PC: <%pA>\n", (void *) r->pc);
4965 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4966 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4967 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4968 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4969 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4970 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4971- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4972+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4973
4974 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4975 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4976@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4977 rw = (struct reg_window32 *) fp;
4978 pc = rw->ins[7];
4979 printk("[%08lx : ", pc);
4980- printk("%pS ] ", (void *) pc);
4981+ printk("%pA ] ", (void *) pc);
4982 fp = rw->ins[6];
4983 } while (++count < 16);
4984 printk("\n");
4985diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4986index cb70476..3d0c191 100644
4987--- a/arch/sparc/kernel/process_64.c
4988+++ b/arch/sparc/kernel/process_64.c
4989@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4990 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4991 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4992 if (regs->tstate & TSTATE_PRIV)
4993- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4994+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4995 }
4996
4997 void show_regs(struct pt_regs *regs)
4998 {
4999 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5000 regs->tpc, regs->tnpc, regs->y, print_tainted());
5001- printk("TPC: <%pS>\n", (void *) regs->tpc);
5002+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5003 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5004 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5005 regs->u_regs[3]);
5006@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5007 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5008 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5009 regs->u_regs[15]);
5010- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5011+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5012 show_regwindow(regs);
5013 }
5014
5015@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5016 ((tp && tp->task) ? tp->task->pid : -1));
5017
5018 if (gp->tstate & TSTATE_PRIV) {
5019- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5020+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5021 (void *) gp->tpc,
5022 (void *) gp->o7,
5023 (void *) gp->i7,
5024diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5025index 6edc4e5..06a69b4 100644
5026--- a/arch/sparc/kernel/sigutil_64.c
5027+++ b/arch/sparc/kernel/sigutil_64.c
5028@@ -2,6 +2,7 @@
5029 #include <linux/types.h>
5030 #include <linux/thread_info.h>
5031 #include <linux/uaccess.h>
5032+#include <linux/errno.h>
5033
5034 #include <asm/sigcontext.h>
5035 #include <asm/fpumacro.h>
5036diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5037index 3a82e65..ce0a53a 100644
5038--- a/arch/sparc/kernel/sys_sparc_32.c
5039+++ b/arch/sparc/kernel/sys_sparc_32.c
5040@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5041 if (ARCH_SUN4C && len > 0x20000000)
5042 return -ENOMEM;
5043 if (!addr)
5044- addr = TASK_UNMAPPED_BASE;
5045+ addr = current->mm->mmap_base;
5046
5047 if (flags & MAP_SHARED)
5048 addr = COLOUR_ALIGN(addr);
5049@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5050 }
5051 if (TASK_SIZE - PAGE_SIZE - len < addr)
5052 return -ENOMEM;
5053- if (!vmm || addr + len <= vmm->vm_start)
5054+ if (check_heap_stack_gap(vmm, addr, len))
5055 return addr;
5056 addr = vmm->vm_end;
5057 if (flags & MAP_SHARED)
5058diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5059index cfa0e19..98972ac 100644
5060--- a/arch/sparc/kernel/sys_sparc_64.c
5061+++ b/arch/sparc/kernel/sys_sparc_64.c
5062@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 /* We do not accept a shared mapping if it would violate
5064 * cache aliasing constraints.
5065 */
5066- if ((flags & MAP_SHARED) &&
5067+ if ((filp || (flags & MAP_SHARED)) &&
5068 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5069 return -EINVAL;
5070 return addr;
5071@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5072 if (filp || (flags & MAP_SHARED))
5073 do_color_align = 1;
5074
5075+#ifdef CONFIG_PAX_RANDMMAP
5076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5077+#endif
5078+
5079 if (addr) {
5080 if (do_color_align)
5081 addr = COLOUR_ALIGN(addr, pgoff);
5082@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5083 addr = PAGE_ALIGN(addr);
5084
5085 vma = find_vma(mm, addr);
5086- if (task_size - len >= addr &&
5087- (!vma || addr + len <= vma->vm_start))
5088+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5089 return addr;
5090 }
5091
5092 if (len > mm->cached_hole_size) {
5093- start_addr = addr = mm->free_area_cache;
5094+ start_addr = addr = mm->free_area_cache;
5095 } else {
5096- start_addr = addr = TASK_UNMAPPED_BASE;
5097+ start_addr = addr = mm->mmap_base;
5098 mm->cached_hole_size = 0;
5099 }
5100
5101@@ -175,14 +178,14 @@ full_search:
5102 vma = find_vma(mm, VA_EXCLUDE_END);
5103 }
5104 if (unlikely(task_size < addr)) {
5105- if (start_addr != TASK_UNMAPPED_BASE) {
5106- start_addr = addr = TASK_UNMAPPED_BASE;
5107+ if (start_addr != mm->mmap_base) {
5108+ start_addr = addr = mm->mmap_base;
5109 mm->cached_hole_size = 0;
5110 goto full_search;
5111 }
5112 return -ENOMEM;
5113 }
5114- if (likely(!vma || addr + len <= vma->vm_start)) {
5115+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5116 /*
5117 * Remember the place where we stopped the search:
5118 */
5119@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5120 /* We do not accept a shared mapping if it would violate
5121 * cache aliasing constraints.
5122 */
5123- if ((flags & MAP_SHARED) &&
5124+ if ((filp || (flags & MAP_SHARED)) &&
5125 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5126 return -EINVAL;
5127 return addr;
5128@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5129 addr = PAGE_ALIGN(addr);
5130
5131 vma = find_vma(mm, addr);
5132- if (task_size - len >= addr &&
5133- (!vma || addr + len <= vma->vm_start))
5134+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5135 return addr;
5136 }
5137
5138@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5139 /* make sure it can fit in the remaining address space */
5140 if (likely(addr > len)) {
5141 vma = find_vma(mm, addr-len);
5142- if (!vma || addr <= vma->vm_start) {
5143+ if (check_heap_stack_gap(vma, addr - len, len)) {
5144 /* remember the address as a hint for next time */
5145 return (mm->free_area_cache = addr-len);
5146 }
5147@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5148 if (unlikely(mm->mmap_base < len))
5149 goto bottomup;
5150
5151- addr = mm->mmap_base-len;
5152- if (do_color_align)
5153- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5154+ addr = mm->mmap_base - len;
5155
5156 do {
5157+ if (do_color_align)
5158+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5159 /*
5160 * Lookup failure means no vma is above this address,
5161 * else if new region fits below vma->vm_start,
5162 * return with success:
5163 */
5164 vma = find_vma(mm, addr);
5165- if (likely(!vma || addr+len <= vma->vm_start)) {
5166+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5167 /* remember the address as a hint for next time */
5168 return (mm->free_area_cache = addr);
5169 }
5170@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5171 mm->cached_hole_size = vma->vm_start - addr;
5172
5173 /* try just below the current vma->vm_start */
5174- addr = vma->vm_start-len;
5175- if (do_color_align)
5176- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5177- } while (likely(len < vma->vm_start));
5178+ addr = skip_heap_stack_gap(vma, len);
5179+ } while (!IS_ERR_VALUE(addr));
5180
5181 bottomup:
5182 /*
5183@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5184 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5185 sysctl_legacy_va_layout) {
5186 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5187+
5188+#ifdef CONFIG_PAX_RANDMMAP
5189+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5190+ mm->mmap_base += mm->delta_mmap;
5191+#endif
5192+
5193 mm->get_unmapped_area = arch_get_unmapped_area;
5194 mm->unmap_area = arch_unmap_area;
5195 } else {
5196@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5197 gap = (task_size / 6 * 5);
5198
5199 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5200+
5201+#ifdef CONFIG_PAX_RANDMMAP
5202+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5203+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5204+#endif
5205+
5206 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5207 mm->unmap_area = arch_unmap_area_topdown;
5208 }
5209diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5210index c0490c7..84959d1 100644
5211--- a/arch/sparc/kernel/traps_32.c
5212+++ b/arch/sparc/kernel/traps_32.c
5213@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5214 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5215 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5216
5217+extern void gr_handle_kernel_exploit(void);
5218+
5219 void die_if_kernel(char *str, struct pt_regs *regs)
5220 {
5221 static int die_counter;
5222@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5223 count++ < 30 &&
5224 (((unsigned long) rw) >= PAGE_OFFSET) &&
5225 !(((unsigned long) rw) & 0x7)) {
5226- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5227+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5228 (void *) rw->ins[7]);
5229 rw = (struct reg_window32 *)rw->ins[6];
5230 }
5231 }
5232 printk("Instruction DUMP:");
5233 instruction_dump ((unsigned long *) regs->pc);
5234- if(regs->psr & PSR_PS)
5235+ if(regs->psr & PSR_PS) {
5236+ gr_handle_kernel_exploit();
5237 do_exit(SIGKILL);
5238+ }
5239 do_exit(SIGSEGV);
5240 }
5241
5242diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5243index 10f7bb9..cdb6793 100644
5244--- a/arch/sparc/kernel/traps_64.c
5245+++ b/arch/sparc/kernel/traps_64.c
5246@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5247 i + 1,
5248 p->trapstack[i].tstate, p->trapstack[i].tpc,
5249 p->trapstack[i].tnpc, p->trapstack[i].tt);
5250- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5251+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5252 }
5253 }
5254
5255@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5256
5257 lvl -= 0x100;
5258 if (regs->tstate & TSTATE_PRIV) {
5259+
5260+#ifdef CONFIG_PAX_REFCOUNT
5261+ if (lvl == 6)
5262+ pax_report_refcount_overflow(regs);
5263+#endif
5264+
5265 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5266 die_if_kernel(buffer, regs);
5267 }
5268@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5269 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5270 {
5271 char buffer[32];
5272-
5273+
5274 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5275 0, lvl, SIGTRAP) == NOTIFY_STOP)
5276 return;
5277
5278+#ifdef CONFIG_PAX_REFCOUNT
5279+ if (lvl == 6)
5280+ pax_report_refcount_overflow(regs);
5281+#endif
5282+
5283 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5284
5285 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5286@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5287 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5288 printk("%s" "ERROR(%d): ",
5289 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5290- printk("TPC<%pS>\n", (void *) regs->tpc);
5291+ printk("TPC<%pA>\n", (void *) regs->tpc);
5292 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5294 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5295@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5296 smp_processor_id(),
5297 (type & 0x1) ? 'I' : 'D',
5298 regs->tpc);
5299- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5300+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5301 panic("Irrecoverable Cheetah+ parity error.");
5302 }
5303
5304@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5305 smp_processor_id(),
5306 (type & 0x1) ? 'I' : 'D',
5307 regs->tpc);
5308- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5309+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5310 }
5311
5312 struct sun4v_error_entry {
5313@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5314
5315 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5316 regs->tpc, tl);
5317- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5318+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5319 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5320- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5321+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5322 (void *) regs->u_regs[UREG_I7]);
5323 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5324 "pte[%lx] error[%lx]\n",
5325@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5326
5327 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5328 regs->tpc, tl);
5329- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5330+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5331 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5332- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5333+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5334 (void *) regs->u_regs[UREG_I7]);
5335 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5336 "pte[%lx] error[%lx]\n",
5337@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5338 fp = (unsigned long)sf->fp + STACK_BIAS;
5339 }
5340
5341- printk(" [%016lx] %pS\n", pc, (void *) pc);
5342+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5343 } while (++count < 16);
5344 }
5345
5346@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5347 return (struct reg_window *) (fp + STACK_BIAS);
5348 }
5349
5350+extern void gr_handle_kernel_exploit(void);
5351+
5352 void die_if_kernel(char *str, struct pt_regs *regs)
5353 {
5354 static int die_counter;
5355@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5356 while (rw &&
5357 count++ < 30&&
5358 is_kernel_stack(current, rw)) {
5359- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5360+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5361 (void *) rw->ins[7]);
5362
5363 rw = kernel_stack_up(rw);
5364@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5365 }
5366 user_instruction_dump ((unsigned int __user *) regs->tpc);
5367 }
5368- if (regs->tstate & TSTATE_PRIV)
5369+ if (regs->tstate & TSTATE_PRIV) {
5370+ gr_handle_kernel_exploit();
5371 do_exit(SIGKILL);
5372+ }
5373+
5374 do_exit(SIGSEGV);
5375 }
5376 EXPORT_SYMBOL(die_if_kernel);
5377diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5378index be183fe..1c8d332 100644
5379--- a/arch/sparc/kernel/una_asm_64.S
5380+++ b/arch/sparc/kernel/una_asm_64.S
5381@@ -127,7 +127,7 @@ do_int_load:
5382 wr %o5, 0x0, %asi
5383 retl
5384 mov 0, %o0
5385- .size __do_int_load, .-__do_int_load
5386+ .size do_int_load, .-do_int_load
5387
5388 .section __ex_table,"a"
5389 .word 4b, __retl_efault
5390diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5391index 3792099..2af17d8 100644
5392--- a/arch/sparc/kernel/unaligned_64.c
5393+++ b/arch/sparc/kernel/unaligned_64.c
5394@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5395 if (count < 5) {
5396 last_time = jiffies;
5397 count++;
5398- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5399+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5400 regs->tpc, (void *) regs->tpc);
5401 }
5402 }
5403diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5404index e75faf0..24f12f9 100644
5405--- a/arch/sparc/lib/Makefile
5406+++ b/arch/sparc/lib/Makefile
5407@@ -2,7 +2,7 @@
5408 #
5409
5410 asflags-y := -ansi -DST_DIV0=0x02
5411-ccflags-y := -Werror
5412+#ccflags-y := -Werror
5413
5414 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5415 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5416diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5417index 0268210..f0291ca 100644
5418--- a/arch/sparc/lib/atomic_64.S
5419+++ b/arch/sparc/lib/atomic_64.S
5420@@ -18,7 +18,12 @@
5421 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5422 BACKOFF_SETUP(%o2)
5423 1: lduw [%o1], %g1
5424- add %g1, %o0, %g7
5425+ addcc %g1, %o0, %g7
5426+
5427+#ifdef CONFIG_PAX_REFCOUNT
5428+ tvs %icc, 6
5429+#endif
5430+
5431 cas [%o1], %g1, %g7
5432 cmp %g1, %g7
5433 bne,pn %icc, 2f
5434@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5435 2: BACKOFF_SPIN(%o2, %o3, 1b)
5436 .size atomic_add, .-atomic_add
5437
5438+ .globl atomic_add_unchecked
5439+ .type atomic_add_unchecked,#function
5440+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5441+ BACKOFF_SETUP(%o2)
5442+1: lduw [%o1], %g1
5443+ add %g1, %o0, %g7
5444+ cas [%o1], %g1, %g7
5445+ cmp %g1, %g7
5446+ bne,pn %icc, 2f
5447+ nop
5448+ retl
5449+ nop
5450+2: BACKOFF_SPIN(%o2, %o3, 1b)
5451+ .size atomic_add_unchecked, .-atomic_add_unchecked
5452+
5453 .globl atomic_sub
5454 .type atomic_sub,#function
5455 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5456 BACKOFF_SETUP(%o2)
5457 1: lduw [%o1], %g1
5458- sub %g1, %o0, %g7
5459+ subcc %g1, %o0, %g7
5460+
5461+#ifdef CONFIG_PAX_REFCOUNT
5462+ tvs %icc, 6
5463+#endif
5464+
5465 cas [%o1], %g1, %g7
5466 cmp %g1, %g7
5467 bne,pn %icc, 2f
5468@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5469 2: BACKOFF_SPIN(%o2, %o3, 1b)
5470 .size atomic_sub, .-atomic_sub
5471
5472+ .globl atomic_sub_unchecked
5473+ .type atomic_sub_unchecked,#function
5474+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5475+ BACKOFF_SETUP(%o2)
5476+1: lduw [%o1], %g1
5477+ sub %g1, %o0, %g7
5478+ cas [%o1], %g1, %g7
5479+ cmp %g1, %g7
5480+ bne,pn %icc, 2f
5481+ nop
5482+ retl
5483+ nop
5484+2: BACKOFF_SPIN(%o2, %o3, 1b)
5485+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5486+
5487 .globl atomic_add_ret
5488 .type atomic_add_ret,#function
5489 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5490 BACKOFF_SETUP(%o2)
5491 1: lduw [%o1], %g1
5492- add %g1, %o0, %g7
5493+ addcc %g1, %o0, %g7
5494+
5495+#ifdef CONFIG_PAX_REFCOUNT
5496+ tvs %icc, 6
5497+#endif
5498+
5499 cas [%o1], %g1, %g7
5500 cmp %g1, %g7
5501 bne,pn %icc, 2f
5502@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5503 2: BACKOFF_SPIN(%o2, %o3, 1b)
5504 .size atomic_add_ret, .-atomic_add_ret
5505
5506+ .globl atomic_add_ret_unchecked
5507+ .type atomic_add_ret_unchecked,#function
5508+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5509+ BACKOFF_SETUP(%o2)
5510+1: lduw [%o1], %g1
5511+ addcc %g1, %o0, %g7
5512+ cas [%o1], %g1, %g7
5513+ cmp %g1, %g7
5514+ bne,pn %icc, 2f
5515+ add %g7, %o0, %g7
5516+ sra %g7, 0, %o0
5517+ retl
5518+ nop
5519+2: BACKOFF_SPIN(%o2, %o3, 1b)
5520+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5521+
5522 .globl atomic_sub_ret
5523 .type atomic_sub_ret,#function
5524 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5525 BACKOFF_SETUP(%o2)
5526 1: lduw [%o1], %g1
5527- sub %g1, %o0, %g7
5528+ subcc %g1, %o0, %g7
5529+
5530+#ifdef CONFIG_PAX_REFCOUNT
5531+ tvs %icc, 6
5532+#endif
5533+
5534 cas [%o1], %g1, %g7
5535 cmp %g1, %g7
5536 bne,pn %icc, 2f
5537@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5538 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5539 BACKOFF_SETUP(%o2)
5540 1: ldx [%o1], %g1
5541- add %g1, %o0, %g7
5542+ addcc %g1, %o0, %g7
5543+
5544+#ifdef CONFIG_PAX_REFCOUNT
5545+ tvs %xcc, 6
5546+#endif
5547+
5548 casx [%o1], %g1, %g7
5549 cmp %g1, %g7
5550 bne,pn %xcc, 2f
5551@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5552 2: BACKOFF_SPIN(%o2, %o3, 1b)
5553 .size atomic64_add, .-atomic64_add
5554
5555+ .globl atomic64_add_unchecked
5556+ .type atomic64_add_unchecked,#function
5557+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5558+ BACKOFF_SETUP(%o2)
5559+1: ldx [%o1], %g1
5560+ addcc %g1, %o0, %g7
5561+ casx [%o1], %g1, %g7
5562+ cmp %g1, %g7
5563+ bne,pn %xcc, 2f
5564+ nop
5565+ retl
5566+ nop
5567+2: BACKOFF_SPIN(%o2, %o3, 1b)
5568+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5569+
5570 .globl atomic64_sub
5571 .type atomic64_sub,#function
5572 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5573 BACKOFF_SETUP(%o2)
5574 1: ldx [%o1], %g1
5575- sub %g1, %o0, %g7
5576+ subcc %g1, %o0, %g7
5577+
5578+#ifdef CONFIG_PAX_REFCOUNT
5579+ tvs %xcc, 6
5580+#endif
5581+
5582 casx [%o1], %g1, %g7
5583 cmp %g1, %g7
5584 bne,pn %xcc, 2f
5585@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5586 2: BACKOFF_SPIN(%o2, %o3, 1b)
5587 .size atomic64_sub, .-atomic64_sub
5588
5589+ .globl atomic64_sub_unchecked
5590+ .type atomic64_sub_unchecked,#function
5591+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5592+ BACKOFF_SETUP(%o2)
5593+1: ldx [%o1], %g1
5594+ subcc %g1, %o0, %g7
5595+ casx [%o1], %g1, %g7
5596+ cmp %g1, %g7
5597+ bne,pn %xcc, 2f
5598+ nop
5599+ retl
5600+ nop
5601+2: BACKOFF_SPIN(%o2, %o3, 1b)
5602+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5603+
5604 .globl atomic64_add_ret
5605 .type atomic64_add_ret,#function
5606 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5607 BACKOFF_SETUP(%o2)
5608 1: ldx [%o1], %g1
5609- add %g1, %o0, %g7
5610+ addcc %g1, %o0, %g7
5611+
5612+#ifdef CONFIG_PAX_REFCOUNT
5613+ tvs %xcc, 6
5614+#endif
5615+
5616 casx [%o1], %g1, %g7
5617 cmp %g1, %g7
5618 bne,pn %xcc, 2f
5619@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5620 2: BACKOFF_SPIN(%o2, %o3, 1b)
5621 .size atomic64_add_ret, .-atomic64_add_ret
5622
5623+ .globl atomic64_add_ret_unchecked
5624+ .type atomic64_add_ret_unchecked,#function
5625+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5626+ BACKOFF_SETUP(%o2)
5627+1: ldx [%o1], %g1
5628+ addcc %g1, %o0, %g7
5629+ casx [%o1], %g1, %g7
5630+ cmp %g1, %g7
5631+ bne,pn %xcc, 2f
5632+ add %g7, %o0, %g7
5633+ mov %g7, %o0
5634+ retl
5635+ nop
5636+2: BACKOFF_SPIN(%o2, %o3, 1b)
5637+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5638+
5639 .globl atomic64_sub_ret
5640 .type atomic64_sub_ret,#function
5641 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5642 BACKOFF_SETUP(%o2)
5643 1: ldx [%o1], %g1
5644- sub %g1, %o0, %g7
5645+ subcc %g1, %o0, %g7
5646+
5647+#ifdef CONFIG_PAX_REFCOUNT
5648+ tvs %xcc, 6
5649+#endif
5650+
5651 casx [%o1], %g1, %g7
5652 cmp %g1, %g7
5653 bne,pn %xcc, 2f
5654diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5655index 704b126..2e79d76 100644
5656--- a/arch/sparc/lib/ksyms.c
5657+++ b/arch/sparc/lib/ksyms.c
5658@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5659
5660 /* Atomic counter implementation. */
5661 EXPORT_SYMBOL(atomic_add);
5662+EXPORT_SYMBOL(atomic_add_unchecked);
5663 EXPORT_SYMBOL(atomic_add_ret);
5664+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5665 EXPORT_SYMBOL(atomic_sub);
5666+EXPORT_SYMBOL(atomic_sub_unchecked);
5667 EXPORT_SYMBOL(atomic_sub_ret);
5668 EXPORT_SYMBOL(atomic64_add);
5669+EXPORT_SYMBOL(atomic64_add_unchecked);
5670 EXPORT_SYMBOL(atomic64_add_ret);
5671+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5672 EXPORT_SYMBOL(atomic64_sub);
5673+EXPORT_SYMBOL(atomic64_sub_unchecked);
5674 EXPORT_SYMBOL(atomic64_sub_ret);
5675
5676 /* Atomic bit operations. */
5677diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5678index 91a7d29..ce75c29 100644
5679--- a/arch/sparc/lib/rwsem_64.S
5680+++ b/arch/sparc/lib/rwsem_64.S
5681@@ -11,7 +11,12 @@
5682 .globl __down_read
5683 __down_read:
5684 1: lduw [%o0], %g1
5685- add %g1, 1, %g7
5686+ addcc %g1, 1, %g7
5687+
5688+#ifdef CONFIG_PAX_REFCOUNT
5689+ tvs %icc, 6
5690+#endif
5691+
5692 cas [%o0], %g1, %g7
5693 cmp %g1, %g7
5694 bne,pn %icc, 1b
5695@@ -33,7 +38,12 @@ __down_read:
5696 .globl __down_read_trylock
5697 __down_read_trylock:
5698 1: lduw [%o0], %g1
5699- add %g1, 1, %g7
5700+ addcc %g1, 1, %g7
5701+
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ tvs %icc, 6
5704+#endif
5705+
5706 cmp %g7, 0
5707 bl,pn %icc, 2f
5708 mov 0, %o1
5709@@ -51,7 +61,12 @@ __down_write:
5710 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5711 1:
5712 lduw [%o0], %g3
5713- add %g3, %g1, %g7
5714+ addcc %g3, %g1, %g7
5715+
5716+#ifdef CONFIG_PAX_REFCOUNT
5717+ tvs %icc, 6
5718+#endif
5719+
5720 cas [%o0], %g3, %g7
5721 cmp %g3, %g7
5722 bne,pn %icc, 1b
5723@@ -77,7 +92,12 @@ __down_write_trylock:
5724 cmp %g3, 0
5725 bne,pn %icc, 2f
5726 mov 0, %o1
5727- add %g3, %g1, %g7
5728+ addcc %g3, %g1, %g7
5729+
5730+#ifdef CONFIG_PAX_REFCOUNT
5731+ tvs %icc, 6
5732+#endif
5733+
5734 cas [%o0], %g3, %g7
5735 cmp %g3, %g7
5736 bne,pn %icc, 1b
5737@@ -90,7 +110,12 @@ __down_write_trylock:
5738 __up_read:
5739 1:
5740 lduw [%o0], %g1
5741- sub %g1, 1, %g7
5742+ subcc %g1, 1, %g7
5743+
5744+#ifdef CONFIG_PAX_REFCOUNT
5745+ tvs %icc, 6
5746+#endif
5747+
5748 cas [%o0], %g1, %g7
5749 cmp %g1, %g7
5750 bne,pn %icc, 1b
5751@@ -118,7 +143,12 @@ __up_write:
5752 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5753 1:
5754 lduw [%o0], %g3
5755- sub %g3, %g1, %g7
5756+ subcc %g3, %g1, %g7
5757+
5758+#ifdef CONFIG_PAX_REFCOUNT
5759+ tvs %icc, 6
5760+#endif
5761+
5762 cas [%o0], %g3, %g7
5763 cmp %g3, %g7
5764 bne,pn %icc, 1b
5765@@ -143,7 +173,12 @@ __downgrade_write:
5766 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5767 1:
5768 lduw [%o0], %g3
5769- sub %g3, %g1, %g7
5770+ subcc %g3, %g1, %g7
5771+
5772+#ifdef CONFIG_PAX_REFCOUNT
5773+ tvs %icc, 6
5774+#endif
5775+
5776 cas [%o0], %g3, %g7
5777 cmp %g3, %g7
5778 bne,pn %icc, 1b
5779diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5780index 79836a7..62f47a2 100644
5781--- a/arch/sparc/mm/Makefile
5782+++ b/arch/sparc/mm/Makefile
5783@@ -2,7 +2,7 @@
5784 #
5785
5786 asflags-y := -ansi
5787-ccflags-y := -Werror
5788+#ccflags-y := -Werror
5789
5790 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5791 obj-y += fault_$(BITS).o
5792diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5793index b99f81c..3453e93 100644
5794--- a/arch/sparc/mm/fault_32.c
5795+++ b/arch/sparc/mm/fault_32.c
5796@@ -21,6 +21,9 @@
5797 #include <linux/interrupt.h>
5798 #include <linux/module.h>
5799 #include <linux/kdebug.h>
5800+#include <linux/slab.h>
5801+#include <linux/pagemap.h>
5802+#include <linux/compiler.h>
5803
5804 #include <asm/system.h>
5805 #include <asm/page.h>
5806@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5807 return safe_compute_effective_address(regs, insn);
5808 }
5809
5810+#ifdef CONFIG_PAX_PAGEEXEC
5811+#ifdef CONFIG_PAX_DLRESOLVE
5812+static void pax_emuplt_close(struct vm_area_struct *vma)
5813+{
5814+ vma->vm_mm->call_dl_resolve = 0UL;
5815+}
5816+
5817+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5818+{
5819+ unsigned int *kaddr;
5820+
5821+ vmf->page = alloc_page(GFP_HIGHUSER);
5822+ if (!vmf->page)
5823+ return VM_FAULT_OOM;
5824+
5825+ kaddr = kmap(vmf->page);
5826+ memset(kaddr, 0, PAGE_SIZE);
5827+ kaddr[0] = 0x9DE3BFA8U; /* save */
5828+ flush_dcache_page(vmf->page);
5829+ kunmap(vmf->page);
5830+ return VM_FAULT_MAJOR;
5831+}
5832+
5833+static const struct vm_operations_struct pax_vm_ops = {
5834+ .close = pax_emuplt_close,
5835+ .fault = pax_emuplt_fault
5836+};
5837+
5838+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5839+{
5840+ int ret;
5841+
5842+ vma->vm_mm = current->mm;
5843+ vma->vm_start = addr;
5844+ vma->vm_end = addr + PAGE_SIZE;
5845+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5846+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5847+ vma->vm_ops = &pax_vm_ops;
5848+
5849+ ret = insert_vm_struct(current->mm, vma);
5850+ if (ret)
5851+ return ret;
5852+
5853+ ++current->mm->total_vm;
5854+ return 0;
5855+}
5856+#endif
5857+
5858+/*
5859+ * PaX: decide what to do with offenders (regs->pc = fault address)
5860+ *
5861+ * returns 1 when task should be killed
5862+ * 2 when patched PLT trampoline was detected
5863+ * 3 when unpatched PLT trampoline was detected
5864+ */
5865+static int pax_handle_fetch_fault(struct pt_regs *regs)
5866+{
5867+
5868+#ifdef CONFIG_PAX_EMUPLT
5869+ int err;
5870+
5871+ do { /* PaX: patched PLT emulation #1 */
5872+ unsigned int sethi1, sethi2, jmpl;
5873+
5874+ err = get_user(sethi1, (unsigned int *)regs->pc);
5875+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5876+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5877+
5878+ if (err)
5879+ break;
5880+
5881+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5882+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5883+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5884+ {
5885+ unsigned int addr;
5886+
5887+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5888+ addr = regs->u_regs[UREG_G1];
5889+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5890+ regs->pc = addr;
5891+ regs->npc = addr+4;
5892+ return 2;
5893+ }
5894+ } while (0);
5895+
5896+ { /* PaX: patched PLT emulation #2 */
5897+ unsigned int ba;
5898+
5899+ err = get_user(ba, (unsigned int *)regs->pc);
5900+
5901+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5902+ unsigned int addr;
5903+
5904+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5905+ regs->pc = addr;
5906+ regs->npc = addr+4;
5907+ return 2;
5908+ }
5909+ }
5910+
5911+ do { /* PaX: patched PLT emulation #3 */
5912+ unsigned int sethi, jmpl, nop;
5913+
5914+ err = get_user(sethi, (unsigned int *)regs->pc);
5915+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5916+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5917+
5918+ if (err)
5919+ break;
5920+
5921+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5922+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5923+ nop == 0x01000000U)
5924+ {
5925+ unsigned int addr;
5926+
5927+ addr = (sethi & 0x003FFFFFU) << 10;
5928+ regs->u_regs[UREG_G1] = addr;
5929+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5930+ regs->pc = addr;
5931+ regs->npc = addr+4;
5932+ return 2;
5933+ }
5934+ } while (0);
5935+
5936+ do { /* PaX: unpatched PLT emulation step 1 */
5937+ unsigned int sethi, ba, nop;
5938+
5939+ err = get_user(sethi, (unsigned int *)regs->pc);
5940+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5941+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5942+
5943+ if (err)
5944+ break;
5945+
5946+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5947+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5948+ nop == 0x01000000U)
5949+ {
5950+ unsigned int addr, save, call;
5951+
5952+ if ((ba & 0xFFC00000U) == 0x30800000U)
5953+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5954+ else
5955+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5956+
5957+ err = get_user(save, (unsigned int *)addr);
5958+ err |= get_user(call, (unsigned int *)(addr+4));
5959+ err |= get_user(nop, (unsigned int *)(addr+8));
5960+ if (err)
5961+ break;
5962+
5963+#ifdef CONFIG_PAX_DLRESOLVE
5964+ if (save == 0x9DE3BFA8U &&
5965+ (call & 0xC0000000U) == 0x40000000U &&
5966+ nop == 0x01000000U)
5967+ {
5968+ struct vm_area_struct *vma;
5969+ unsigned long call_dl_resolve;
5970+
5971+ down_read(&current->mm->mmap_sem);
5972+ call_dl_resolve = current->mm->call_dl_resolve;
5973+ up_read(&current->mm->mmap_sem);
5974+ if (likely(call_dl_resolve))
5975+ goto emulate;
5976+
5977+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5978+
5979+ down_write(&current->mm->mmap_sem);
5980+ if (current->mm->call_dl_resolve) {
5981+ call_dl_resolve = current->mm->call_dl_resolve;
5982+ up_write(&current->mm->mmap_sem);
5983+ if (vma)
5984+ kmem_cache_free(vm_area_cachep, vma);
5985+ goto emulate;
5986+ }
5987+
5988+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5989+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5990+ up_write(&current->mm->mmap_sem);
5991+ if (vma)
5992+ kmem_cache_free(vm_area_cachep, vma);
5993+ return 1;
5994+ }
5995+
5996+ if (pax_insert_vma(vma, call_dl_resolve)) {
5997+ up_write(&current->mm->mmap_sem);
5998+ kmem_cache_free(vm_area_cachep, vma);
5999+ return 1;
6000+ }
6001+
6002+ current->mm->call_dl_resolve = call_dl_resolve;
6003+ up_write(&current->mm->mmap_sem);
6004+
6005+emulate:
6006+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6007+ regs->pc = call_dl_resolve;
6008+ regs->npc = addr+4;
6009+ return 3;
6010+ }
6011+#endif
6012+
6013+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6014+ if ((save & 0xFFC00000U) == 0x05000000U &&
6015+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6016+ nop == 0x01000000U)
6017+ {
6018+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6019+ regs->u_regs[UREG_G2] = addr + 4;
6020+ addr = (save & 0x003FFFFFU) << 10;
6021+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6022+ regs->pc = addr;
6023+ regs->npc = addr+4;
6024+ return 3;
6025+ }
6026+ }
6027+ } while (0);
6028+
6029+ do { /* PaX: unpatched PLT emulation step 2 */
6030+ unsigned int save, call, nop;
6031+
6032+ err = get_user(save, (unsigned int *)(regs->pc-4));
6033+ err |= get_user(call, (unsigned int *)regs->pc);
6034+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6035+ if (err)
6036+ break;
6037+
6038+ if (save == 0x9DE3BFA8U &&
6039+ (call & 0xC0000000U) == 0x40000000U &&
6040+ nop == 0x01000000U)
6041+ {
6042+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6043+
6044+ regs->u_regs[UREG_RETPC] = regs->pc;
6045+ regs->pc = dl_resolve;
6046+ regs->npc = dl_resolve+4;
6047+ return 3;
6048+ }
6049+ } while (0);
6050+#endif
6051+
6052+ return 1;
6053+}
6054+
6055+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6056+{
6057+ unsigned long i;
6058+
6059+ printk(KERN_ERR "PAX: bytes at PC: ");
6060+ for (i = 0; i < 8; i++) {
6061+ unsigned int c;
6062+ if (get_user(c, (unsigned int *)pc+i))
6063+ printk(KERN_CONT "???????? ");
6064+ else
6065+ printk(KERN_CONT "%08x ", c);
6066+ }
6067+ printk("\n");
6068+}
6069+#endif
6070+
6071 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6072 unsigned long address)
6073 {
6074@@ -231,6 +495,24 @@ good_area:
6075 if(!(vma->vm_flags & VM_WRITE))
6076 goto bad_area;
6077 } else {
6078+
6079+#ifdef CONFIG_PAX_PAGEEXEC
6080+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6081+ up_read(&mm->mmap_sem);
6082+ switch (pax_handle_fetch_fault(regs)) {
6083+
6084+#ifdef CONFIG_PAX_EMUPLT
6085+ case 2:
6086+ case 3:
6087+ return;
6088+#endif
6089+
6090+ }
6091+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6092+ do_group_exit(SIGKILL);
6093+ }
6094+#endif
6095+
6096 /* Allow reads even for write-only mappings */
6097 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6098 goto bad_area;
6099diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6100index 43b0da9..a0b78f9 100644
6101--- a/arch/sparc/mm/fault_64.c
6102+++ b/arch/sparc/mm/fault_64.c
6103@@ -20,6 +20,9 @@
6104 #include <linux/kprobes.h>
6105 #include <linux/kdebug.h>
6106 #include <linux/percpu.h>
6107+#include <linux/slab.h>
6108+#include <linux/pagemap.h>
6109+#include <linux/compiler.h>
6110
6111 #include <asm/page.h>
6112 #include <asm/pgtable.h>
6113@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6114 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6115 regs->tpc);
6116 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6117- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6118+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6119 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6120 dump_stack();
6121 unhandled_fault(regs->tpc, current, regs);
6122@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6123 show_regs(regs);
6124 }
6125
6126+#ifdef CONFIG_PAX_PAGEEXEC
6127+#ifdef CONFIG_PAX_DLRESOLVE
6128+static void pax_emuplt_close(struct vm_area_struct *vma)
6129+{
6130+ vma->vm_mm->call_dl_resolve = 0UL;
6131+}
6132+
6133+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6134+{
6135+ unsigned int *kaddr;
6136+
6137+ vmf->page = alloc_page(GFP_HIGHUSER);
6138+ if (!vmf->page)
6139+ return VM_FAULT_OOM;
6140+
6141+ kaddr = kmap(vmf->page);
6142+ memset(kaddr, 0, PAGE_SIZE);
6143+ kaddr[0] = 0x9DE3BFA8U; /* save */
6144+ flush_dcache_page(vmf->page);
6145+ kunmap(vmf->page);
6146+ return VM_FAULT_MAJOR;
6147+}
6148+
6149+static const struct vm_operations_struct pax_vm_ops = {
6150+ .close = pax_emuplt_close,
6151+ .fault = pax_emuplt_fault
6152+};
6153+
6154+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6155+{
6156+ int ret;
6157+
6158+ vma->vm_mm = current->mm;
6159+ vma->vm_start = addr;
6160+ vma->vm_end = addr + PAGE_SIZE;
6161+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6162+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6163+ vma->vm_ops = &pax_vm_ops;
6164+
6165+ ret = insert_vm_struct(current->mm, vma);
6166+ if (ret)
6167+ return ret;
6168+
6169+ ++current->mm->total_vm;
6170+ return 0;
6171+}
6172+#endif
6173+
6174+/*
6175+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6176+ *
6177+ * returns 1 when task should be killed
6178+ * 2 when patched PLT trampoline was detected
6179+ * 3 when unpatched PLT trampoline was detected
6180+ */
6181+static int pax_handle_fetch_fault(struct pt_regs *regs)
6182+{
6183+
6184+#ifdef CONFIG_PAX_EMUPLT
6185+ int err;
6186+
6187+ do { /* PaX: patched PLT emulation #1 */
6188+ unsigned int sethi1, sethi2, jmpl;
6189+
6190+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6191+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6192+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6193+
6194+ if (err)
6195+ break;
6196+
6197+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6198+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6199+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6200+ {
6201+ unsigned long addr;
6202+
6203+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6204+ addr = regs->u_regs[UREG_G1];
6205+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6206+
6207+ if (test_thread_flag(TIF_32BIT))
6208+ addr &= 0xFFFFFFFFUL;
6209+
6210+ regs->tpc = addr;
6211+ regs->tnpc = addr+4;
6212+ return 2;
6213+ }
6214+ } while (0);
6215+
6216+ { /* PaX: patched PLT emulation #2 */
6217+ unsigned int ba;
6218+
6219+ err = get_user(ba, (unsigned int *)regs->tpc);
6220+
6221+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6222+ unsigned long addr;
6223+
6224+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6225+
6226+ if (test_thread_flag(TIF_32BIT))
6227+ addr &= 0xFFFFFFFFUL;
6228+
6229+ regs->tpc = addr;
6230+ regs->tnpc = addr+4;
6231+ return 2;
6232+ }
6233+ }
6234+
6235+ do { /* PaX: patched PLT emulation #3 */
6236+ unsigned int sethi, jmpl, nop;
6237+
6238+ err = get_user(sethi, (unsigned int *)regs->tpc);
6239+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6240+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6241+
6242+ if (err)
6243+ break;
6244+
6245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6246+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6247+ nop == 0x01000000U)
6248+ {
6249+ unsigned long addr;
6250+
6251+ addr = (sethi & 0x003FFFFFU) << 10;
6252+ regs->u_regs[UREG_G1] = addr;
6253+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6254+
6255+ if (test_thread_flag(TIF_32BIT))
6256+ addr &= 0xFFFFFFFFUL;
6257+
6258+ regs->tpc = addr;
6259+ regs->tnpc = addr+4;
6260+ return 2;
6261+ }
6262+ } while (0);
6263+
6264+ do { /* PaX: patched PLT emulation #4 */
6265+ unsigned int sethi, mov1, call, mov2;
6266+
6267+ err = get_user(sethi, (unsigned int *)regs->tpc);
6268+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6269+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6270+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6271+
6272+ if (err)
6273+ break;
6274+
6275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6276+ mov1 == 0x8210000FU &&
6277+ (call & 0xC0000000U) == 0x40000000U &&
6278+ mov2 == 0x9E100001U)
6279+ {
6280+ unsigned long addr;
6281+
6282+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6283+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6284+
6285+ if (test_thread_flag(TIF_32BIT))
6286+ addr &= 0xFFFFFFFFUL;
6287+
6288+ regs->tpc = addr;
6289+ regs->tnpc = addr+4;
6290+ return 2;
6291+ }
6292+ } while (0);
6293+
6294+ do { /* PaX: patched PLT emulation #5 */
6295+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6296+
6297+ err = get_user(sethi, (unsigned int *)regs->tpc);
6298+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6299+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6300+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6301+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6302+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6303+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6304+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6305+
6306+ if (err)
6307+ break;
6308+
6309+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6310+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6311+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6312+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6313+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6314+ sllx == 0x83287020U &&
6315+ jmpl == 0x81C04005U &&
6316+ nop == 0x01000000U)
6317+ {
6318+ unsigned long addr;
6319+
6320+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6321+ regs->u_regs[UREG_G1] <<= 32;
6322+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6323+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6324+ regs->tpc = addr;
6325+ regs->tnpc = addr+4;
6326+ return 2;
6327+ }
6328+ } while (0);
6329+
6330+ do { /* PaX: patched PLT emulation #6 */
6331+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6332+
6333+ err = get_user(sethi, (unsigned int *)regs->tpc);
6334+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6335+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6336+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6337+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6338+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6339+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6340+
6341+ if (err)
6342+ break;
6343+
6344+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6345+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6346+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6347+ sllx == 0x83287020U &&
6348+ (or & 0xFFFFE000U) == 0x8A116000U &&
6349+ jmpl == 0x81C04005U &&
6350+ nop == 0x01000000U)
6351+ {
6352+ unsigned long addr;
6353+
6354+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6355+ regs->u_regs[UREG_G1] <<= 32;
6356+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6357+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6358+ regs->tpc = addr;
6359+ regs->tnpc = addr+4;
6360+ return 2;
6361+ }
6362+ } while (0);
6363+
6364+ do { /* PaX: unpatched PLT emulation step 1 */
6365+ unsigned int sethi, ba, nop;
6366+
6367+ err = get_user(sethi, (unsigned int *)regs->tpc);
6368+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6369+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6370+
6371+ if (err)
6372+ break;
6373+
6374+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6375+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6376+ nop == 0x01000000U)
6377+ {
6378+ unsigned long addr;
6379+ unsigned int save, call;
6380+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6381+
6382+ if ((ba & 0xFFC00000U) == 0x30800000U)
6383+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384+ else
6385+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6386+
6387+ if (test_thread_flag(TIF_32BIT))
6388+ addr &= 0xFFFFFFFFUL;
6389+
6390+ err = get_user(save, (unsigned int *)addr);
6391+ err |= get_user(call, (unsigned int *)(addr+4));
6392+ err |= get_user(nop, (unsigned int *)(addr+8));
6393+ if (err)
6394+ break;
6395+
6396+#ifdef CONFIG_PAX_DLRESOLVE
6397+ if (save == 0x9DE3BFA8U &&
6398+ (call & 0xC0000000U) == 0x40000000U &&
6399+ nop == 0x01000000U)
6400+ {
6401+ struct vm_area_struct *vma;
6402+ unsigned long call_dl_resolve;
6403+
6404+ down_read(&current->mm->mmap_sem);
6405+ call_dl_resolve = current->mm->call_dl_resolve;
6406+ up_read(&current->mm->mmap_sem);
6407+ if (likely(call_dl_resolve))
6408+ goto emulate;
6409+
6410+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6411+
6412+ down_write(&current->mm->mmap_sem);
6413+ if (current->mm->call_dl_resolve) {
6414+ call_dl_resolve = current->mm->call_dl_resolve;
6415+ up_write(&current->mm->mmap_sem);
6416+ if (vma)
6417+ kmem_cache_free(vm_area_cachep, vma);
6418+ goto emulate;
6419+ }
6420+
6421+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6422+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6423+ up_write(&current->mm->mmap_sem);
6424+ if (vma)
6425+ kmem_cache_free(vm_area_cachep, vma);
6426+ return 1;
6427+ }
6428+
6429+ if (pax_insert_vma(vma, call_dl_resolve)) {
6430+ up_write(&current->mm->mmap_sem);
6431+ kmem_cache_free(vm_area_cachep, vma);
6432+ return 1;
6433+ }
6434+
6435+ current->mm->call_dl_resolve = call_dl_resolve;
6436+ up_write(&current->mm->mmap_sem);
6437+
6438+emulate:
6439+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6440+ regs->tpc = call_dl_resolve;
6441+ regs->tnpc = addr+4;
6442+ return 3;
6443+ }
6444+#endif
6445+
6446+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6447+ if ((save & 0xFFC00000U) == 0x05000000U &&
6448+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6449+ nop == 0x01000000U)
6450+ {
6451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6452+ regs->u_regs[UREG_G2] = addr + 4;
6453+ addr = (save & 0x003FFFFFU) << 10;
6454+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6455+
6456+ if (test_thread_flag(TIF_32BIT))
6457+ addr &= 0xFFFFFFFFUL;
6458+
6459+ regs->tpc = addr;
6460+ regs->tnpc = addr+4;
6461+ return 3;
6462+ }
6463+
6464+ /* PaX: 64-bit PLT stub */
6465+ err = get_user(sethi1, (unsigned int *)addr);
6466+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6467+ err |= get_user(or1, (unsigned int *)(addr+8));
6468+ err |= get_user(or2, (unsigned int *)(addr+12));
6469+ err |= get_user(sllx, (unsigned int *)(addr+16));
6470+ err |= get_user(add, (unsigned int *)(addr+20));
6471+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6472+ err |= get_user(nop, (unsigned int *)(addr+28));
6473+ if (err)
6474+ break;
6475+
6476+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6477+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6478+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6479+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6480+ sllx == 0x89293020U &&
6481+ add == 0x8A010005U &&
6482+ jmpl == 0x89C14000U &&
6483+ nop == 0x01000000U)
6484+ {
6485+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6486+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6487+ regs->u_regs[UREG_G4] <<= 32;
6488+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6489+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6490+ regs->u_regs[UREG_G4] = addr + 24;
6491+ addr = regs->u_regs[UREG_G5];
6492+ regs->tpc = addr;
6493+ regs->tnpc = addr+4;
6494+ return 3;
6495+ }
6496+ }
6497+ } while (0);
6498+
6499+#ifdef CONFIG_PAX_DLRESOLVE
6500+ do { /* PaX: unpatched PLT emulation step 2 */
6501+ unsigned int save, call, nop;
6502+
6503+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6504+ err |= get_user(call, (unsigned int *)regs->tpc);
6505+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6506+ if (err)
6507+ break;
6508+
6509+ if (save == 0x9DE3BFA8U &&
6510+ (call & 0xC0000000U) == 0x40000000U &&
6511+ nop == 0x01000000U)
6512+ {
6513+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6514+
6515+ if (test_thread_flag(TIF_32BIT))
6516+ dl_resolve &= 0xFFFFFFFFUL;
6517+
6518+ regs->u_regs[UREG_RETPC] = regs->tpc;
6519+ regs->tpc = dl_resolve;
6520+ regs->tnpc = dl_resolve+4;
6521+ return 3;
6522+ }
6523+ } while (0);
6524+#endif
6525+
6526+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6527+ unsigned int sethi, ba, nop;
6528+
6529+ err = get_user(sethi, (unsigned int *)regs->tpc);
6530+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6531+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6532+
6533+ if (err)
6534+ break;
6535+
6536+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6537+ (ba & 0xFFF00000U) == 0x30600000U &&
6538+ nop == 0x01000000U)
6539+ {
6540+ unsigned long addr;
6541+
6542+ addr = (sethi & 0x003FFFFFU) << 10;
6543+ regs->u_regs[UREG_G1] = addr;
6544+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545+
6546+ if (test_thread_flag(TIF_32BIT))
6547+ addr &= 0xFFFFFFFFUL;
6548+
6549+ regs->tpc = addr;
6550+ regs->tnpc = addr+4;
6551+ return 2;
6552+ }
6553+ } while (0);
6554+
6555+#endif
6556+
6557+ return 1;
6558+}
6559+
6560+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6561+{
6562+ unsigned long i;
6563+
6564+ printk(KERN_ERR "PAX: bytes at PC: ");
6565+ for (i = 0; i < 8; i++) {
6566+ unsigned int c;
6567+ if (get_user(c, (unsigned int *)pc+i))
6568+ printk(KERN_CONT "???????? ");
6569+ else
6570+ printk(KERN_CONT "%08x ", c);
6571+ }
6572+ printk("\n");
6573+}
6574+#endif
6575+
6576 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6577 {
6578 struct mm_struct *mm = current->mm;
6579@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6580 if (!vma)
6581 goto bad_area;
6582
6583+#ifdef CONFIG_PAX_PAGEEXEC
6584+ /* PaX: detect ITLB misses on non-exec pages */
6585+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6586+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6587+ {
6588+ if (address != regs->tpc)
6589+ goto good_area;
6590+
6591+ up_read(&mm->mmap_sem);
6592+ switch (pax_handle_fetch_fault(regs)) {
6593+
6594+#ifdef CONFIG_PAX_EMUPLT
6595+ case 2:
6596+ case 3:
6597+ return;
6598+#endif
6599+
6600+ }
6601+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6602+ do_group_exit(SIGKILL);
6603+ }
6604+#endif
6605+
6606 /* Pure DTLB misses do not tell us whether the fault causing
6607 * load/store/atomic was a write or not, it only says that there
6608 * was no match. So in such a case we (carefully) read the
6609diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6610index f27d103..1b06377 100644
6611--- a/arch/sparc/mm/hugetlbpage.c
6612+++ b/arch/sparc/mm/hugetlbpage.c
6613@@ -69,7 +69,7 @@ full_search:
6614 }
6615 return -ENOMEM;
6616 }
6617- if (likely(!vma || addr + len <= vma->vm_start)) {
6618+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6619 /*
6620 * Remember the place where we stopped the search:
6621 */
6622@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6623 /* make sure it can fit in the remaining address space */
6624 if (likely(addr > len)) {
6625 vma = find_vma(mm, addr-len);
6626- if (!vma || addr <= vma->vm_start) {
6627+ if (check_heap_stack_gap(vma, addr - len, len)) {
6628 /* remember the address as a hint for next time */
6629 return (mm->free_area_cache = addr-len);
6630 }
6631@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6632 if (unlikely(mm->mmap_base < len))
6633 goto bottomup;
6634
6635- addr = (mm->mmap_base-len) & HPAGE_MASK;
6636+ addr = mm->mmap_base - len;
6637
6638 do {
6639+ addr &= HPAGE_MASK;
6640 /*
6641 * Lookup failure means no vma is above this address,
6642 * else if new region fits below vma->vm_start,
6643 * return with success:
6644 */
6645 vma = find_vma(mm, addr);
6646- if (likely(!vma || addr+len <= vma->vm_start)) {
6647+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6648 /* remember the address as a hint for next time */
6649 return (mm->free_area_cache = addr);
6650 }
6651@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6652 mm->cached_hole_size = vma->vm_start - addr;
6653
6654 /* try just below the current vma->vm_start */
6655- addr = (vma->vm_start-len) & HPAGE_MASK;
6656- } while (likely(len < vma->vm_start));
6657+ addr = skip_heap_stack_gap(vma, len);
6658+ } while (!IS_ERR_VALUE(addr));
6659
6660 bottomup:
6661 /*
6662@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6663 if (addr) {
6664 addr = ALIGN(addr, HPAGE_SIZE);
6665 vma = find_vma(mm, addr);
6666- if (task_size - len >= addr &&
6667- (!vma || addr + len <= vma->vm_start))
6668+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6669 return addr;
6670 }
6671 if (mm->get_unmapped_area == arch_get_unmapped_area)
6672diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6673index dc7c3b1..34c0070 100644
6674--- a/arch/sparc/mm/init_32.c
6675+++ b/arch/sparc/mm/init_32.c
6676@@ -317,6 +317,9 @@ extern void device_scan(void);
6677 pgprot_t PAGE_SHARED __read_mostly;
6678 EXPORT_SYMBOL(PAGE_SHARED);
6679
6680+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6681+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6682+
6683 void __init paging_init(void)
6684 {
6685 switch(sparc_cpu_model) {
6686@@ -345,17 +348,17 @@ void __init paging_init(void)
6687
6688 /* Initialize the protection map with non-constant, MMU dependent values. */
6689 protection_map[0] = PAGE_NONE;
6690- protection_map[1] = PAGE_READONLY;
6691- protection_map[2] = PAGE_COPY;
6692- protection_map[3] = PAGE_COPY;
6693+ protection_map[1] = PAGE_READONLY_NOEXEC;
6694+ protection_map[2] = PAGE_COPY_NOEXEC;
6695+ protection_map[3] = PAGE_COPY_NOEXEC;
6696 protection_map[4] = PAGE_READONLY;
6697 protection_map[5] = PAGE_READONLY;
6698 protection_map[6] = PAGE_COPY;
6699 protection_map[7] = PAGE_COPY;
6700 protection_map[8] = PAGE_NONE;
6701- protection_map[9] = PAGE_READONLY;
6702- protection_map[10] = PAGE_SHARED;
6703- protection_map[11] = PAGE_SHARED;
6704+ protection_map[9] = PAGE_READONLY_NOEXEC;
6705+ protection_map[10] = PAGE_SHARED_NOEXEC;
6706+ protection_map[11] = PAGE_SHARED_NOEXEC;
6707 protection_map[12] = PAGE_READONLY;
6708 protection_map[13] = PAGE_READONLY;
6709 protection_map[14] = PAGE_SHARED;
6710diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6711index 509b1ff..bfd7118 100644
6712--- a/arch/sparc/mm/srmmu.c
6713+++ b/arch/sparc/mm/srmmu.c
6714@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6715 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6716 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6717 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6718+
6719+#ifdef CONFIG_PAX_PAGEEXEC
6720+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6721+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6722+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6723+#endif
6724+
6725 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6726 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6727
6728diff --git a/arch/um/Makefile b/arch/um/Makefile
6729index fc633db..5e1a1c2 100644
6730--- a/arch/um/Makefile
6731+++ b/arch/um/Makefile
6732@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6733 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6734 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6735
6736+ifdef CONSTIFY_PLUGIN
6737+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6738+endif
6739+
6740 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6741
6742 #This will adjust *FLAGS accordingly to the platform.
6743diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6744index 6c03acd..a5e0215 100644
6745--- a/arch/um/include/asm/kmap_types.h
6746+++ b/arch/um/include/asm/kmap_types.h
6747@@ -23,6 +23,7 @@ enum km_type {
6748 KM_IRQ1,
6749 KM_SOFTIRQ0,
6750 KM_SOFTIRQ1,
6751+ KM_CLEARPAGE,
6752 KM_TYPE_NR
6753 };
6754
6755diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6756index 4cc9b6c..02e5029 100644
6757--- a/arch/um/include/asm/page.h
6758+++ b/arch/um/include/asm/page.h
6759@@ -14,6 +14,9 @@
6760 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6761 #define PAGE_MASK (~(PAGE_SIZE-1))
6762
6763+#define ktla_ktva(addr) (addr)
6764+#define ktva_ktla(addr) (addr)
6765+
6766 #ifndef __ASSEMBLY__
6767
6768 struct page;
6769diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6770index 4a28a15..654dc2a 100644
6771--- a/arch/um/kernel/process.c
6772+++ b/arch/um/kernel/process.c
6773@@ -393,22 +393,6 @@ int singlestepping(void * t)
6774 return 2;
6775 }
6776
6777-/*
6778- * Only x86 and x86_64 have an arch_align_stack().
6779- * All other arches have "#define arch_align_stack(x) (x)"
6780- * in their asm/system.h
6781- * As this is included in UML from asm-um/system-generic.h,
6782- * we can use it to behave as the subarch does.
6783- */
6784-#ifndef arch_align_stack
6785-unsigned long arch_align_stack(unsigned long sp)
6786-{
6787- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6788- sp -= get_random_int() % 8192;
6789- return sp & ~0xf;
6790-}
6791-#endif
6792-
6793 unsigned long get_wchan(struct task_struct *p)
6794 {
6795 unsigned long stack_page, sp, ip;
6796diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6797index d1b93c4..ae1b7fd 100644
6798--- a/arch/um/sys-i386/shared/sysdep/system.h
6799+++ b/arch/um/sys-i386/shared/sysdep/system.h
6800@@ -17,7 +17,7 @@
6801 # define AT_VECTOR_SIZE_ARCH 1
6802 #endif
6803
6804-extern unsigned long arch_align_stack(unsigned long sp);
6805+#define arch_align_stack(x) ((x) & ~0xfUL)
6806
6807 void default_idle(void);
6808
6809diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6810index 857ca0b..9a2669d 100644
6811--- a/arch/um/sys-i386/syscalls.c
6812+++ b/arch/um/sys-i386/syscalls.c
6813@@ -11,6 +11,21 @@
6814 #include "asm/uaccess.h"
6815 #include "asm/unistd.h"
6816
6817+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6818+{
6819+ unsigned long pax_task_size = TASK_SIZE;
6820+
6821+#ifdef CONFIG_PAX_SEGMEXEC
6822+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6823+ pax_task_size = SEGMEXEC_TASK_SIZE;
6824+#endif
6825+
6826+ if (len > pax_task_size || addr > pax_task_size - len)
6827+ return -EINVAL;
6828+
6829+ return 0;
6830+}
6831+
6832 /*
6833 * Perform the select(nd, in, out, ex, tv) and mmap() system
6834 * calls. Linux/i386 didn't use to be able to handle more than
6835diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6836index d1b93c4..ae1b7fd 100644
6837--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6838+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6839@@ -17,7 +17,7 @@
6840 # define AT_VECTOR_SIZE_ARCH 1
6841 #endif
6842
6843-extern unsigned long arch_align_stack(unsigned long sp);
6844+#define arch_align_stack(x) ((x) & ~0xfUL)
6845
6846 void default_idle(void);
6847
6848diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6849index 73ae02a..f932de5 100644
6850--- a/arch/x86/Kconfig
6851+++ b/arch/x86/Kconfig
6852@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6853
6854 config X86_32_LAZY_GS
6855 def_bool y
6856- depends on X86_32 && !CC_STACKPROTECTOR
6857+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6858
6859 config KTIME_SCALAR
6860 def_bool X86_32
6861@@ -1008,7 +1008,7 @@ choice
6862
6863 config NOHIGHMEM
6864 bool "off"
6865- depends on !X86_NUMAQ
6866+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6867 ---help---
6868 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6869 However, the address space of 32-bit x86 processors is only 4
6870@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6871
6872 config HIGHMEM4G
6873 bool "4GB"
6874- depends on !X86_NUMAQ
6875+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6876 ---help---
6877 Select this if you have a 32-bit processor and between 1 and 4
6878 gigabytes of physical RAM.
6879@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6880 hex
6881 default 0xB0000000 if VMSPLIT_3G_OPT
6882 default 0x80000000 if VMSPLIT_2G
6883- default 0x78000000 if VMSPLIT_2G_OPT
6884+ default 0x70000000 if VMSPLIT_2G_OPT
6885 default 0x40000000 if VMSPLIT_1G
6886 default 0xC0000000
6887 depends on X86_32
6888@@ -1460,6 +1460,7 @@ config SECCOMP
6889
6890 config CC_STACKPROTECTOR
6891 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6892+ depends on X86_64 || !PAX_MEMORY_UDEREF
6893 ---help---
6894 This option turns on the -fstack-protector GCC feature. This
6895 feature puts, at the beginning of functions, a canary value on
6896@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6897 config PHYSICAL_START
6898 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6899 default "0x1000000"
6900+ range 0x400000 0x40000000
6901 ---help---
6902 This gives the physical address where the kernel is loaded.
6903
6904@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6905 hex
6906 prompt "Alignment value to which kernel should be aligned" if X86_32
6907 default "0x1000000"
6908+ range 0x400000 0x1000000 if PAX_KERNEXEC
6909 range 0x2000 0x1000000
6910 ---help---
6911 This value puts the alignment restrictions on physical address
6912@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6913 Say N if you want to disable CPU hotplug.
6914
6915 config COMPAT_VDSO
6916- def_bool y
6917+ def_bool n
6918 prompt "Compat VDSO support"
6919 depends on X86_32 || IA32_EMULATION
6920+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6921 ---help---
6922 Map the 32-bit VDSO to the predictable old-style address too.
6923 ---help---
6924diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6925index 0e566103..1a6b57e 100644
6926--- a/arch/x86/Kconfig.cpu
6927+++ b/arch/x86/Kconfig.cpu
6928@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6929
6930 config X86_F00F_BUG
6931 def_bool y
6932- depends on M586MMX || M586TSC || M586 || M486 || M386
6933+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6934
6935 config X86_WP_WORKS_OK
6936 def_bool y
6937@@ -360,7 +360,7 @@ config X86_POPAD_OK
6938
6939 config X86_ALIGNMENT_16
6940 def_bool y
6941- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6942+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6943
6944 config X86_INTEL_USERCOPY
6945 def_bool y
6946@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6947 # generates cmov.
6948 config X86_CMOV
6949 def_bool y
6950- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6951+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6952
6953 config X86_MINIMUM_CPU_FAMILY
6954 int
6955diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6956index d105f29..c928727 100644
6957--- a/arch/x86/Kconfig.debug
6958+++ b/arch/x86/Kconfig.debug
6959@@ -99,7 +99,7 @@ config X86_PTDUMP
6960 config DEBUG_RODATA
6961 bool "Write protect kernel read-only data structures"
6962 default y
6963- depends on DEBUG_KERNEL
6964+ depends on DEBUG_KERNEL && BROKEN
6965 ---help---
6966 Mark the kernel read-only data as write-protected in the pagetables,
6967 in order to catch accidental (and incorrect) writes to such const
6968diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6969index d2d24c9..0f21f8d 100644
6970--- a/arch/x86/Makefile
6971+++ b/arch/x86/Makefile
6972@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6973 else
6974 BITS := 64
6975 UTS_MACHINE := x86_64
6976+ biarch := $(call cc-option,-m64)
6977 CHECKFLAGS += -D__x86_64__ -m64
6978
6979 KBUILD_AFLAGS += -m64
6980@@ -189,3 +190,12 @@ define archhelp
6981 echo ' FDARGS="..." arguments for the booted kernel'
6982 echo ' FDINITRD=file initrd for the booted kernel'
6983 endef
6984+
6985+define OLD_LD
6986+
6987+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6988+*** Please upgrade your binutils to 2.18 or newer
6989+endef
6990+
6991+archprepare:
6992+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6993diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6994index ec749c2..bbb5319 100644
6995--- a/arch/x86/boot/Makefile
6996+++ b/arch/x86/boot/Makefile
6997@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
6998 $(call cc-option, -fno-stack-protector) \
6999 $(call cc-option, -mpreferred-stack-boundary=2)
7000 KBUILD_CFLAGS += $(call cc-option, -m32)
7001+ifdef CONSTIFY_PLUGIN
7002+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7003+endif
7004 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7005 GCOV_PROFILE := n
7006
7007diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7008index 878e4b9..20537ab 100644
7009--- a/arch/x86/boot/bitops.h
7010+++ b/arch/x86/boot/bitops.h
7011@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7012 u8 v;
7013 const u32 *p = (const u32 *)addr;
7014
7015- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7016+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7017 return v;
7018 }
7019
7020@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7021
7022 static inline void set_bit(int nr, void *addr)
7023 {
7024- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7025+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7026 }
7027
7028 #endif /* BOOT_BITOPS_H */
7029diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7030index 98239d2..f40214c 100644
7031--- a/arch/x86/boot/boot.h
7032+++ b/arch/x86/boot/boot.h
7033@@ -82,7 +82,7 @@ static inline void io_delay(void)
7034 static inline u16 ds(void)
7035 {
7036 u16 seg;
7037- asm("movw %%ds,%0" : "=rm" (seg));
7038+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7039 return seg;
7040 }
7041
7042@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7043 static inline int memcmp(const void *s1, const void *s2, size_t len)
7044 {
7045 u8 diff;
7046- asm("repe; cmpsb; setnz %0"
7047+ asm volatile("repe; cmpsb; setnz %0"
7048 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7049 return diff;
7050 }
7051diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7052index f8ed065..5bf5ff3 100644
7053--- a/arch/x86/boot/compressed/Makefile
7054+++ b/arch/x86/boot/compressed/Makefile
7055@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7056 KBUILD_CFLAGS += $(cflags-y)
7057 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7058 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7059+ifdef CONSTIFY_PLUGIN
7060+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7061+endif
7062
7063 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7064 GCOV_PROFILE := n
7065diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7066index f543b70..b60fba8 100644
7067--- a/arch/x86/boot/compressed/head_32.S
7068+++ b/arch/x86/boot/compressed/head_32.S
7069@@ -76,7 +76,7 @@ ENTRY(startup_32)
7070 notl %eax
7071 andl %eax, %ebx
7072 #else
7073- movl $LOAD_PHYSICAL_ADDR, %ebx
7074+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7075 #endif
7076
7077 /* Target address to relocate to for decompression */
7078@@ -149,7 +149,7 @@ relocated:
7079 * and where it was actually loaded.
7080 */
7081 movl %ebp, %ebx
7082- subl $LOAD_PHYSICAL_ADDR, %ebx
7083+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7084 jz 2f /* Nothing to be done if loaded at compiled addr. */
7085 /*
7086 * Process relocations.
7087@@ -157,8 +157,7 @@ relocated:
7088
7089 1: subl $4, %edi
7090 movl (%edi), %ecx
7091- testl %ecx, %ecx
7092- jz 2f
7093+ jecxz 2f
7094 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7095 jmp 1b
7096 2:
7097diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7098index 077e1b6..2c6b13b 100644
7099--- a/arch/x86/boot/compressed/head_64.S
7100+++ b/arch/x86/boot/compressed/head_64.S
7101@@ -91,7 +91,7 @@ ENTRY(startup_32)
7102 notl %eax
7103 andl %eax, %ebx
7104 #else
7105- movl $LOAD_PHYSICAL_ADDR, %ebx
7106+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7107 #endif
7108
7109 /* Target address to relocate to for decompression */
7110@@ -183,7 +183,7 @@ no_longmode:
7111 hlt
7112 jmp 1b
7113
7114-#include "../../kernel/verify_cpu_64.S"
7115+#include "../../kernel/verify_cpu.S"
7116
7117 /*
7118 * Be careful here startup_64 needs to be at a predictable
7119@@ -234,7 +234,7 @@ ENTRY(startup_64)
7120 notq %rax
7121 andq %rax, %rbp
7122 #else
7123- movq $LOAD_PHYSICAL_ADDR, %rbp
7124+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7125 #endif
7126
7127 /* Target address to relocate to for decompression */
7128diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7129index 842b2a3..f00178b 100644
7130--- a/arch/x86/boot/compressed/misc.c
7131+++ b/arch/x86/boot/compressed/misc.c
7132@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7133 case PT_LOAD:
7134 #ifdef CONFIG_RELOCATABLE
7135 dest = output;
7136- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7137+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7138 #else
7139 dest = (void *)(phdr->p_paddr);
7140 #endif
7141@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7142 error("Destination address too large");
7143 #endif
7144 #ifndef CONFIG_RELOCATABLE
7145- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7146+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7147 error("Wrong destination address");
7148 #endif
7149
7150diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7151index bcbd36c..b1754af 100644
7152--- a/arch/x86/boot/compressed/mkpiggy.c
7153+++ b/arch/x86/boot/compressed/mkpiggy.c
7154@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7155
7156 offs = (olen > ilen) ? olen - ilen : 0;
7157 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7158- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7159+ offs += 64*1024; /* Add 64K bytes slack */
7160 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7161
7162 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7163diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7164index bbeb0c3..f5167ab 100644
7165--- a/arch/x86/boot/compressed/relocs.c
7166+++ b/arch/x86/boot/compressed/relocs.c
7167@@ -10,8 +10,11 @@
7168 #define USE_BSD
7169 #include <endian.h>
7170
7171+#include "../../../../include/linux/autoconf.h"
7172+
7173 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7174 static Elf32_Ehdr ehdr;
7175+static Elf32_Phdr *phdr;
7176 static unsigned long reloc_count, reloc_idx;
7177 static unsigned long *relocs;
7178
7179@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7180
7181 static int is_safe_abs_reloc(const char* sym_name)
7182 {
7183- int i;
7184+ unsigned int i;
7185
7186 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7187 if (!strcmp(sym_name, safe_abs_relocs[i]))
7188@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7189 }
7190 }
7191
7192+static void read_phdrs(FILE *fp)
7193+{
7194+ unsigned int i;
7195+
7196+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7197+ if (!phdr) {
7198+ die("Unable to allocate %d program headers\n",
7199+ ehdr.e_phnum);
7200+ }
7201+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7202+ die("Seek to %d failed: %s\n",
7203+ ehdr.e_phoff, strerror(errno));
7204+ }
7205+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7206+ die("Cannot read ELF program headers: %s\n",
7207+ strerror(errno));
7208+ }
7209+ for(i = 0; i < ehdr.e_phnum; i++) {
7210+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7211+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7212+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7213+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7214+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7215+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7216+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7217+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7218+ }
7219+
7220+}
7221+
7222 static void read_shdrs(FILE *fp)
7223 {
7224- int i;
7225+ unsigned int i;
7226 Elf32_Shdr shdr;
7227
7228 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7229@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7230
7231 static void read_strtabs(FILE *fp)
7232 {
7233- int i;
7234+ unsigned int i;
7235 for (i = 0; i < ehdr.e_shnum; i++) {
7236 struct section *sec = &secs[i];
7237 if (sec->shdr.sh_type != SHT_STRTAB) {
7238@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7239
7240 static void read_symtabs(FILE *fp)
7241 {
7242- int i,j;
7243+ unsigned int i,j;
7244 for (i = 0; i < ehdr.e_shnum; i++) {
7245 struct section *sec = &secs[i];
7246 if (sec->shdr.sh_type != SHT_SYMTAB) {
7247@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7248
7249 static void read_relocs(FILE *fp)
7250 {
7251- int i,j;
7252+ unsigned int i,j;
7253+ uint32_t base;
7254+
7255 for (i = 0; i < ehdr.e_shnum; i++) {
7256 struct section *sec = &secs[i];
7257 if (sec->shdr.sh_type != SHT_REL) {
7258@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7259 die("Cannot read symbol table: %s\n",
7260 strerror(errno));
7261 }
7262+ base = 0;
7263+ for (j = 0; j < ehdr.e_phnum; j++) {
7264+ if (phdr[j].p_type != PT_LOAD )
7265+ continue;
7266+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7267+ continue;
7268+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7269+ break;
7270+ }
7271 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7272 Elf32_Rel *rel = &sec->reltab[j];
7273- rel->r_offset = elf32_to_cpu(rel->r_offset);
7274+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7275 rel->r_info = elf32_to_cpu(rel->r_info);
7276 }
7277 }
7278@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7279
7280 static void print_absolute_symbols(void)
7281 {
7282- int i;
7283+ unsigned int i;
7284 printf("Absolute symbols\n");
7285 printf(" Num: Value Size Type Bind Visibility Name\n");
7286 for (i = 0; i < ehdr.e_shnum; i++) {
7287 struct section *sec = &secs[i];
7288 char *sym_strtab;
7289 Elf32_Sym *sh_symtab;
7290- int j;
7291+ unsigned int j;
7292
7293 if (sec->shdr.sh_type != SHT_SYMTAB) {
7294 continue;
7295@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7296
7297 static void print_absolute_relocs(void)
7298 {
7299- int i, printed = 0;
7300+ unsigned int i, printed = 0;
7301
7302 for (i = 0; i < ehdr.e_shnum; i++) {
7303 struct section *sec = &secs[i];
7304 struct section *sec_applies, *sec_symtab;
7305 char *sym_strtab;
7306 Elf32_Sym *sh_symtab;
7307- int j;
7308+ unsigned int j;
7309 if (sec->shdr.sh_type != SHT_REL) {
7310 continue;
7311 }
7312@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7313
7314 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7315 {
7316- int i;
7317+ unsigned int i;
7318 /* Walk through the relocations */
7319 for (i = 0; i < ehdr.e_shnum; i++) {
7320 char *sym_strtab;
7321 Elf32_Sym *sh_symtab;
7322 struct section *sec_applies, *sec_symtab;
7323- int j;
7324+ unsigned int j;
7325 struct section *sec = &secs[i];
7326
7327 if (sec->shdr.sh_type != SHT_REL) {
7328@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7329 if (sym->st_shndx == SHN_ABS) {
7330 continue;
7331 }
7332+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7333+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7334+ continue;
7335+
7336+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7337+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7338+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7339+ continue;
7340+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7341+ continue;
7342+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7345+ continue;
7346+#endif
7347 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7348 /*
7349 * NONE can be ignored and and PC relative
7350@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7351
7352 static void emit_relocs(int as_text)
7353 {
7354- int i;
7355+ unsigned int i;
7356 /* Count how many relocations I have and allocate space for them. */
7357 reloc_count = 0;
7358 walk_relocs(count_reloc);
7359@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7360 fname, strerror(errno));
7361 }
7362 read_ehdr(fp);
7363+ read_phdrs(fp);
7364 read_shdrs(fp);
7365 read_strtabs(fp);
7366 read_symtabs(fp);
7367diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7368index 4d3ff03..e4972ff 100644
7369--- a/arch/x86/boot/cpucheck.c
7370+++ b/arch/x86/boot/cpucheck.c
7371@@ -74,7 +74,7 @@ static int has_fpu(void)
7372 u16 fcw = -1, fsw = -1;
7373 u32 cr0;
7374
7375- asm("movl %%cr0,%0" : "=r" (cr0));
7376+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7377 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7378 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7379 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7380@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7381 {
7382 u32 f0, f1;
7383
7384- asm("pushfl ; "
7385+ asm volatile("pushfl ; "
7386 "pushfl ; "
7387 "popl %0 ; "
7388 "movl %0,%1 ; "
7389@@ -115,7 +115,7 @@ static void get_flags(void)
7390 set_bit(X86_FEATURE_FPU, cpu.flags);
7391
7392 if (has_eflag(X86_EFLAGS_ID)) {
7393- asm("cpuid"
7394+ asm volatile("cpuid"
7395 : "=a" (max_intel_level),
7396 "=b" (cpu_vendor[0]),
7397 "=d" (cpu_vendor[1]),
7398@@ -124,7 +124,7 @@ static void get_flags(void)
7399
7400 if (max_intel_level >= 0x00000001 &&
7401 max_intel_level <= 0x0000ffff) {
7402- asm("cpuid"
7403+ asm volatile("cpuid"
7404 : "=a" (tfms),
7405 "=c" (cpu.flags[4]),
7406 "=d" (cpu.flags[0])
7407@@ -136,7 +136,7 @@ static void get_flags(void)
7408 cpu.model += ((tfms >> 16) & 0xf) << 4;
7409 }
7410
7411- asm("cpuid"
7412+ asm volatile("cpuid"
7413 : "=a" (max_amd_level)
7414 : "a" (0x80000000)
7415 : "ebx", "ecx", "edx");
7416@@ -144,7 +144,7 @@ static void get_flags(void)
7417 if (max_amd_level >= 0x80000001 &&
7418 max_amd_level <= 0x8000ffff) {
7419 u32 eax = 0x80000001;
7420- asm("cpuid"
7421+ asm volatile("cpuid"
7422 : "+a" (eax),
7423 "=c" (cpu.flags[6]),
7424 "=d" (cpu.flags[1])
7425@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7426 u32 ecx = MSR_K7_HWCR;
7427 u32 eax, edx;
7428
7429- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7430+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7431 eax &= ~(1 << 15);
7432- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7433+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7434
7435 get_flags(); /* Make sure it really did something */
7436 err = check_flags();
7437@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7438 u32 ecx = MSR_VIA_FCR;
7439 u32 eax, edx;
7440
7441- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7442+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7443 eax |= (1<<1)|(1<<7);
7444- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7445+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7446
7447 set_bit(X86_FEATURE_CX8, cpu.flags);
7448 err = check_flags();
7449@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7450 u32 eax, edx;
7451 u32 level = 1;
7452
7453- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7454- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7455- asm("cpuid"
7456+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7457+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7458+ asm volatile("cpuid"
7459 : "+a" (level), "=d" (cpu.flags[0])
7460 : : "ecx", "ebx");
7461- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7462+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7463
7464 err = check_flags();
7465 }
7466diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7467index b31cc54..8d69237 100644
7468--- a/arch/x86/boot/header.S
7469+++ b/arch/x86/boot/header.S
7470@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7471 # single linked list of
7472 # struct setup_data
7473
7474-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7475+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7476
7477 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7478 #define VO_INIT_SIZE (VO__end - VO__text)
7479diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7480index cae3feb..ff8ff2a 100644
7481--- a/arch/x86/boot/memory.c
7482+++ b/arch/x86/boot/memory.c
7483@@ -19,7 +19,7 @@
7484
7485 static int detect_memory_e820(void)
7486 {
7487- int count = 0;
7488+ unsigned int count = 0;
7489 struct biosregs ireg, oreg;
7490 struct e820entry *desc = boot_params.e820_map;
7491 static struct e820entry buf; /* static so it is zeroed */
7492diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7493index 11e8c6e..fdbb1ed 100644
7494--- a/arch/x86/boot/video-vesa.c
7495+++ b/arch/x86/boot/video-vesa.c
7496@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7497
7498 boot_params.screen_info.vesapm_seg = oreg.es;
7499 boot_params.screen_info.vesapm_off = oreg.di;
7500+ boot_params.screen_info.vesapm_size = oreg.cx;
7501 }
7502
7503 /*
7504diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7505index d42da38..787cdf3 100644
7506--- a/arch/x86/boot/video.c
7507+++ b/arch/x86/boot/video.c
7508@@ -90,7 +90,7 @@ static void store_mode_params(void)
7509 static unsigned int get_entry(void)
7510 {
7511 char entry_buf[4];
7512- int i, len = 0;
7513+ unsigned int i, len = 0;
7514 int key;
7515 unsigned int v;
7516
7517diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7518index 5b577d5..3c1fed4 100644
7519--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7520+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7521@@ -8,6 +8,8 @@
7522 * including this sentence is retained in full.
7523 */
7524
7525+#include <asm/alternative-asm.h>
7526+
7527 .extern crypto_ft_tab
7528 .extern crypto_it_tab
7529 .extern crypto_fl_tab
7530@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7531 je B192; \
7532 leaq 32(r9),r9;
7533
7534+#define ret pax_force_retaddr 0, 1; ret
7535+
7536 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7537 movq r1,r2; \
7538 movq r3,r4; \
7539diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7540index eb0566e..e3ebad8 100644
7541--- a/arch/x86/crypto/aesni-intel_asm.S
7542+++ b/arch/x86/crypto/aesni-intel_asm.S
7543@@ -16,6 +16,7 @@
7544 */
7545
7546 #include <linux/linkage.h>
7547+#include <asm/alternative-asm.h>
7548
7549 .text
7550
7551@@ -52,6 +53,7 @@ _key_expansion_256a:
7552 pxor %xmm1, %xmm0
7553 movaps %xmm0, (%rcx)
7554 add $0x10, %rcx
7555+ pax_force_retaddr_bts
7556 ret
7557
7558 _key_expansion_192a:
7559@@ -75,6 +77,7 @@ _key_expansion_192a:
7560 shufps $0b01001110, %xmm2, %xmm1
7561 movaps %xmm1, 16(%rcx)
7562 add $0x20, %rcx
7563+ pax_force_retaddr_bts
7564 ret
7565
7566 _key_expansion_192b:
7567@@ -93,6 +96,7 @@ _key_expansion_192b:
7568
7569 movaps %xmm0, (%rcx)
7570 add $0x10, %rcx
7571+ pax_force_retaddr_bts
7572 ret
7573
7574 _key_expansion_256b:
7575@@ -104,6 +108,7 @@ _key_expansion_256b:
7576 pxor %xmm1, %xmm2
7577 movaps %xmm2, (%rcx)
7578 add $0x10, %rcx
7579+ pax_force_retaddr_bts
7580 ret
7581
7582 /*
7583@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7584 cmp %rcx, %rdi
7585 jb .Ldec_key_loop
7586 xor %rax, %rax
7587+ pax_force_retaddr 0, 1
7588 ret
7589+ENDPROC(aesni_set_key)
7590
7591 /*
7592 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7593@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7594 movups (INP), STATE # input
7595 call _aesni_enc1
7596 movups STATE, (OUTP) # output
7597+ pax_force_retaddr 0, 1
7598 ret
7599+ENDPROC(aesni_enc)
7600
7601 /*
7602 * _aesni_enc1: internal ABI
7603@@ -319,6 +328,7 @@ _aesni_enc1:
7604 movaps 0x70(TKEYP), KEY
7605 # aesenclast KEY, STATE # last round
7606 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7607+ pax_force_retaddr_bts
7608 ret
7609
7610 /*
7611@@ -482,6 +492,7 @@ _aesni_enc4:
7612 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7613 # aesenclast KEY, STATE4
7614 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7615+ pax_force_retaddr_bts
7616 ret
7617
7618 /*
7619@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7620 movups (INP), STATE # input
7621 call _aesni_dec1
7622 movups STATE, (OUTP) #output
7623+ pax_force_retaddr 0, 1
7624 ret
7625+ENDPROC(aesni_dec)
7626
7627 /*
7628 * _aesni_dec1: internal ABI
7629@@ -563,6 +576,7 @@ _aesni_dec1:
7630 movaps 0x70(TKEYP), KEY
7631 # aesdeclast KEY, STATE # last round
7632 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7633+ pax_force_retaddr_bts
7634 ret
7635
7636 /*
7637@@ -726,6 +740,7 @@ _aesni_dec4:
7638 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7639 # aesdeclast KEY, STATE4
7640 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7641+ pax_force_retaddr_bts
7642 ret
7643
7644 /*
7645@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7646 cmp $16, LEN
7647 jge .Lecb_enc_loop1
7648 .Lecb_enc_ret:
7649+ pax_force_retaddr 0, 1
7650 ret
7651+ENDPROC(aesni_ecb_enc)
7652
7653 /*
7654 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7655@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7656 cmp $16, LEN
7657 jge .Lecb_dec_loop1
7658 .Lecb_dec_ret:
7659+ pax_force_retaddr 0, 1
7660 ret
7661+ENDPROC(aesni_ecb_dec)
7662
7663 /*
7664 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7665@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7666 jge .Lcbc_enc_loop
7667 movups STATE, (IVP)
7668 .Lcbc_enc_ret:
7669+ pax_force_retaddr 0, 1
7670 ret
7671+ENDPROC(aesni_cbc_enc)
7672
7673 /*
7674 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7675@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7676 .Lcbc_dec_ret:
7677 movups IV, (IVP)
7678 .Lcbc_dec_just_ret:
7679+ pax_force_retaddr 0, 1
7680 ret
7681+ENDPROC(aesni_cbc_dec)
7682diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7683index 6214a9b..1f4fc9a 100644
7684--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7685+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7686@@ -1,3 +1,5 @@
7687+#include <asm/alternative-asm.h>
7688+
7689 # enter ECRYPT_encrypt_bytes
7690 .text
7691 .p2align 5
7692@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7693 add %r11,%rsp
7694 mov %rdi,%rax
7695 mov %rsi,%rdx
7696+ pax_force_retaddr 0, 1
7697 ret
7698 # bytesatleast65:
7699 ._bytesatleast65:
7700@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7701 add %r11,%rsp
7702 mov %rdi,%rax
7703 mov %rsi,%rdx
7704+ pax_force_retaddr
7705 ret
7706 # enter ECRYPT_ivsetup
7707 .text
7708@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7709 add %r11,%rsp
7710 mov %rdi,%rax
7711 mov %rsi,%rdx
7712+ pax_force_retaddr
7713 ret
7714diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7715index 35974a5..5662ae2 100644
7716--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7717+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7718@@ -21,6 +21,7 @@
7719 .text
7720
7721 #include <asm/asm-offsets.h>
7722+#include <asm/alternative-asm.h>
7723
7724 #define a_offset 0
7725 #define b_offset 4
7726@@ -269,6 +270,7 @@ twofish_enc_blk:
7727
7728 popq R1
7729 movq $1,%rax
7730+ pax_force_retaddr 0, 1
7731 ret
7732
7733 twofish_dec_blk:
7734@@ -321,4 +323,5 @@ twofish_dec_blk:
7735
7736 popq R1
7737 movq $1,%rax
7738+ pax_force_retaddr 0, 1
7739 ret
7740diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7741index 14531ab..a89a0c0 100644
7742--- a/arch/x86/ia32/ia32_aout.c
7743+++ b/arch/x86/ia32/ia32_aout.c
7744@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7745 unsigned long dump_start, dump_size;
7746 struct user32 dump;
7747
7748+ memset(&dump, 0, sizeof(dump));
7749+
7750 fs = get_fs();
7751 set_fs(KERNEL_DS);
7752 has_dumped = 1;
7753@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7754 dump_size = dump.u_ssize << PAGE_SHIFT;
7755 DUMP_WRITE(dump_start, dump_size);
7756 }
7757- /*
7758- * Finally dump the task struct. Not be used by gdb, but
7759- * could be useful
7760- */
7761- set_fs(KERNEL_DS);
7762- DUMP_WRITE(current, sizeof(*current));
7763 end_coredump:
7764 set_fs(fs);
7765 return has_dumped;
7766diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7767index 588a7aa..a3468b0 100644
7768--- a/arch/x86/ia32/ia32_signal.c
7769+++ b/arch/x86/ia32/ia32_signal.c
7770@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7771 }
7772 seg = get_fs();
7773 set_fs(KERNEL_DS);
7774- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7775+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7776 set_fs(seg);
7777 if (ret >= 0 && uoss_ptr) {
7778 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7779@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7780 */
7781 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7782 size_t frame_size,
7783- void **fpstate)
7784+ void __user **fpstate)
7785 {
7786 unsigned long sp;
7787
7788@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7789
7790 if (used_math()) {
7791 sp = sp - sig_xstate_ia32_size;
7792- *fpstate = (struct _fpstate_ia32 *) sp;
7793+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7794 if (save_i387_xstate_ia32(*fpstate) < 0)
7795 return (void __user *) -1L;
7796 }
7797@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7798 sp -= frame_size;
7799 /* Align the stack pointer according to the i386 ABI,
7800 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7801- sp = ((sp + 4) & -16ul) - 4;
7802+ sp = ((sp - 12) & -16ul) - 4;
7803 return (void __user *) sp;
7804 }
7805
7806@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7807 * These are actually not used anymore, but left because some
7808 * gdb versions depend on them as a marker.
7809 */
7810- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7811+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7812 } put_user_catch(err);
7813
7814 if (err)
7815@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7816 0xb8,
7817 __NR_ia32_rt_sigreturn,
7818 0x80cd,
7819- 0,
7820+ 0
7821 };
7822
7823 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7824@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7825
7826 if (ka->sa.sa_flags & SA_RESTORER)
7827 restorer = ka->sa.sa_restorer;
7828+ else if (current->mm->context.vdso)
7829+ /* Return stub is in 32bit vsyscall page */
7830+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7831 else
7832- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7833- rt_sigreturn);
7834+ restorer = &frame->retcode;
7835 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7836
7837 /*
7838 * Not actually used anymore, but left because some gdb
7839 * versions need it.
7840 */
7841- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7842+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7843 } put_user_catch(err);
7844
7845 if (err)
7846diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7847index 4edd8eb..07ac7fd 100644
7848--- a/arch/x86/ia32/ia32entry.S
7849+++ b/arch/x86/ia32/ia32entry.S
7850@@ -13,7 +13,9 @@
7851 #include <asm/thread_info.h>
7852 #include <asm/segment.h>
7853 #include <asm/irqflags.h>
7854+#include <asm/pgtable.h>
7855 #include <linux/linkage.h>
7856+#include <asm/alternative-asm.h>
7857
7858 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7859 #include <linux/elf-em.h>
7860@@ -93,6 +95,30 @@ ENTRY(native_irq_enable_sysexit)
7861 ENDPROC(native_irq_enable_sysexit)
7862 #endif
7863
7864+ .macro pax_enter_kernel_user
7865+ pax_set_fptr_mask
7866+#ifdef CONFIG_PAX_MEMORY_UDEREF
7867+ call pax_enter_kernel_user
7868+#endif
7869+ .endm
7870+
7871+ .macro pax_exit_kernel_user
7872+#ifdef CONFIG_PAX_MEMORY_UDEREF
7873+ call pax_exit_kernel_user
7874+#endif
7875+#ifdef CONFIG_PAX_RANDKSTACK
7876+ pushq %rax
7877+ call pax_randomize_kstack
7878+ popq %rax
7879+#endif
7880+ .endm
7881+
7882+.macro pax_erase_kstack
7883+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7884+ call pax_erase_kstack
7885+#endif
7886+.endm
7887+
7888 /*
7889 * 32bit SYSENTER instruction entry.
7890 *
7891@@ -119,12 +145,6 @@ ENTRY(ia32_sysenter_target)
7892 CFI_REGISTER rsp,rbp
7893 SWAPGS_UNSAFE_STACK
7894 movq PER_CPU_VAR(kernel_stack), %rsp
7895- addq $(KERNEL_STACK_OFFSET),%rsp
7896- /*
7897- * No need to follow this irqs on/off section: the syscall
7898- * disabled irqs, here we enable it straight after entry:
7899- */
7900- ENABLE_INTERRUPTS(CLBR_NONE)
7901 movl %ebp,%ebp /* zero extension */
7902 pushq $__USER32_DS
7903 CFI_ADJUST_CFA_OFFSET 8
7904@@ -135,28 +155,41 @@ ENTRY(ia32_sysenter_target)
7905 pushfq
7906 CFI_ADJUST_CFA_OFFSET 8
7907 /*CFI_REL_OFFSET rflags,0*/
7908- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7909- CFI_REGISTER rip,r10
7910+ GET_THREAD_INFO(%r11)
7911+ movl TI_sysenter_return(%r11), %r11d
7912+ CFI_REGISTER rip,r11
7913 pushq $__USER32_CS
7914 CFI_ADJUST_CFA_OFFSET 8
7915 /*CFI_REL_OFFSET cs,0*/
7916 movl %eax, %eax
7917- pushq %r10
7918+ pushq %r11
7919 CFI_ADJUST_CFA_OFFSET 8
7920 CFI_REL_OFFSET rip,0
7921 pushq %rax
7922 CFI_ADJUST_CFA_OFFSET 8
7923 cld
7924 SAVE_ARGS 0,0,1
7925+ pax_enter_kernel_user
7926+ /*
7927+ * No need to follow this irqs on/off section: the syscall
7928+ * disabled irqs, here we enable it straight after entry:
7929+ */
7930+ ENABLE_INTERRUPTS(CLBR_NONE)
7931 /* no need to do an access_ok check here because rbp has been
7932 32bit zero extended */
7933+
7934+#ifdef CONFIG_PAX_MEMORY_UDEREF
7935+ mov $PAX_USER_SHADOW_BASE,%r11
7936+ add %r11,%rbp
7937+#endif
7938+
7939 1: movl (%rbp),%ebp
7940 .section __ex_table,"a"
7941 .quad 1b,ia32_badarg
7942 .previous
7943- GET_THREAD_INFO(%r10)
7944- orl $TS_COMPAT,TI_status(%r10)
7945- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7946+ GET_THREAD_INFO(%r11)
7947+ orl $TS_COMPAT,TI_status(%r11)
7948+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7949 CFI_REMEMBER_STATE
7950 jnz sysenter_tracesys
7951 cmpq $(IA32_NR_syscalls-1),%rax
7952@@ -166,13 +199,15 @@ sysenter_do_call:
7953 sysenter_dispatch:
7954 call *ia32_sys_call_table(,%rax,8)
7955 movq %rax,RAX-ARGOFFSET(%rsp)
7956- GET_THREAD_INFO(%r10)
7957+ GET_THREAD_INFO(%r11)
7958 DISABLE_INTERRUPTS(CLBR_NONE)
7959 TRACE_IRQS_OFF
7960- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7961+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7962 jnz sysexit_audit
7963 sysexit_from_sys_call:
7964- andl $~TS_COMPAT,TI_status(%r10)
7965+ pax_exit_kernel_user
7966+ pax_erase_kstack
7967+ andl $~TS_COMPAT,TI_status(%r11)
7968 /* clear IF, that popfq doesn't enable interrupts early */
7969 andl $~0x200,EFLAGS-R11(%rsp)
7970 movl RIP-R11(%rsp),%edx /* User %eip */
7971@@ -200,6 +235,9 @@ sysexit_from_sys_call:
7972 movl %eax,%esi /* 2nd arg: syscall number */
7973 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7974 call audit_syscall_entry
7975+
7976+ pax_erase_kstack
7977+
7978 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7979 cmpq $(IA32_NR_syscalls-1),%rax
7980 ja ia32_badsys
7981@@ -211,7 +249,7 @@ sysexit_from_sys_call:
7982 .endm
7983
7984 .macro auditsys_exit exit
7985- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7986+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7987 jnz ia32_ret_from_sys_call
7988 TRACE_IRQS_ON
7989 sti
7990@@ -221,12 +259,12 @@ sysexit_from_sys_call:
7991 movzbl %al,%edi /* zero-extend that into %edi */
7992 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7993 call audit_syscall_exit
7994- GET_THREAD_INFO(%r10)
7995+ GET_THREAD_INFO(%r11)
7996 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7997 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7998 cli
7999 TRACE_IRQS_OFF
8000- testl %edi,TI_flags(%r10)
8001+ testl %edi,TI_flags(%r11)
8002 jz \exit
8003 CLEAR_RREGS -ARGOFFSET
8004 jmp int_with_check
8005@@ -244,7 +282,7 @@ sysexit_audit:
8006
8007 sysenter_tracesys:
8008 #ifdef CONFIG_AUDITSYSCALL
8009- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8010+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8011 jz sysenter_auditsys
8012 #endif
8013 SAVE_REST
8014@@ -252,6 +290,9 @@ sysenter_tracesys:
8015 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8016 movq %rsp,%rdi /* &pt_regs -> arg1 */
8017 call syscall_trace_enter
8018+
8019+ pax_erase_kstack
8020+
8021 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8022 RESTORE_REST
8023 cmpq $(IA32_NR_syscalls-1),%rax
8024@@ -283,19 +324,20 @@ ENDPROC(ia32_sysenter_target)
8025 ENTRY(ia32_cstar_target)
8026 CFI_STARTPROC32 simple
8027 CFI_SIGNAL_FRAME
8028- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8029+ CFI_DEF_CFA rsp,0
8030 CFI_REGISTER rip,rcx
8031 /*CFI_REGISTER rflags,r11*/
8032 SWAPGS_UNSAFE_STACK
8033 movl %esp,%r8d
8034 CFI_REGISTER rsp,r8
8035 movq PER_CPU_VAR(kernel_stack),%rsp
8036+ SAVE_ARGS 8*6,1,1
8037+ pax_enter_kernel_user
8038 /*
8039 * No need to follow this irqs on/off section: the syscall
8040 * disabled irqs and here we enable it straight after entry:
8041 */
8042 ENABLE_INTERRUPTS(CLBR_NONE)
8043- SAVE_ARGS 8,1,1
8044 movl %eax,%eax /* zero extension */
8045 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8046 movq %rcx,RIP-ARGOFFSET(%rsp)
8047@@ -311,13 +353,19 @@ ENTRY(ia32_cstar_target)
8048 /* no need to do an access_ok check here because r8 has been
8049 32bit zero extended */
8050 /* hardware stack frame is complete now */
8051+
8052+#ifdef CONFIG_PAX_MEMORY_UDEREF
8053+ mov $PAX_USER_SHADOW_BASE,%r11
8054+ add %r11,%r8
8055+#endif
8056+
8057 1: movl (%r8),%r9d
8058 .section __ex_table,"a"
8059 .quad 1b,ia32_badarg
8060 .previous
8061- GET_THREAD_INFO(%r10)
8062- orl $TS_COMPAT,TI_status(%r10)
8063- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8064+ GET_THREAD_INFO(%r11)
8065+ orl $TS_COMPAT,TI_status(%r11)
8066+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8067 CFI_REMEMBER_STATE
8068 jnz cstar_tracesys
8069 cmpq $IA32_NR_syscalls-1,%rax
8070@@ -327,13 +375,15 @@ cstar_do_call:
8071 cstar_dispatch:
8072 call *ia32_sys_call_table(,%rax,8)
8073 movq %rax,RAX-ARGOFFSET(%rsp)
8074- GET_THREAD_INFO(%r10)
8075+ GET_THREAD_INFO(%r11)
8076 DISABLE_INTERRUPTS(CLBR_NONE)
8077 TRACE_IRQS_OFF
8078- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8079+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8080 jnz sysretl_audit
8081 sysretl_from_sys_call:
8082- andl $~TS_COMPAT,TI_status(%r10)
8083+ pax_exit_kernel_user
8084+ pax_erase_kstack
8085+ andl $~TS_COMPAT,TI_status(%r11)
8086 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8087 movl RIP-ARGOFFSET(%rsp),%ecx
8088 CFI_REGISTER rip,rcx
8089@@ -361,7 +411,7 @@ sysretl_audit:
8090
8091 cstar_tracesys:
8092 #ifdef CONFIG_AUDITSYSCALL
8093- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8094+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8095 jz cstar_auditsys
8096 #endif
8097 xchgl %r9d,%ebp
8098@@ -370,6 +420,9 @@ cstar_tracesys:
8099 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8100 movq %rsp,%rdi /* &pt_regs -> arg1 */
8101 call syscall_trace_enter
8102+
8103+ pax_erase_kstack
8104+
8105 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8106 RESTORE_REST
8107 xchgl %ebp,%r9d
8108@@ -415,11 +468,6 @@ ENTRY(ia32_syscall)
8109 CFI_REL_OFFSET rip,RIP-RIP
8110 PARAVIRT_ADJUST_EXCEPTION_FRAME
8111 SWAPGS
8112- /*
8113- * No need to follow this irqs on/off section: the syscall
8114- * disabled irqs and here we enable it straight after entry:
8115- */
8116- ENABLE_INTERRUPTS(CLBR_NONE)
8117 movl %eax,%eax
8118 pushq %rax
8119 CFI_ADJUST_CFA_OFFSET 8
8120@@ -427,9 +475,15 @@ ENTRY(ia32_syscall)
8121 /* note the registers are not zero extended to the sf.
8122 this could be a problem. */
8123 SAVE_ARGS 0,0,1
8124- GET_THREAD_INFO(%r10)
8125- orl $TS_COMPAT,TI_status(%r10)
8126- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8127+ pax_enter_kernel_user
8128+ /*
8129+ * No need to follow this irqs on/off section: the syscall
8130+ * disabled irqs and here we enable it straight after entry:
8131+ */
8132+ ENABLE_INTERRUPTS(CLBR_NONE)
8133+ GET_THREAD_INFO(%r11)
8134+ orl $TS_COMPAT,TI_status(%r11)
8135+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8136 jnz ia32_tracesys
8137 cmpq $(IA32_NR_syscalls-1),%rax
8138 ja ia32_badsys
8139@@ -448,6 +502,9 @@ ia32_tracesys:
8140 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8141 movq %rsp,%rdi /* &pt_regs -> arg1 */
8142 call syscall_trace_enter
8143+
8144+ pax_erase_kstack
8145+
8146 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8147 RESTORE_REST
8148 cmpq $(IA32_NR_syscalls-1),%rax
8149@@ -462,6 +519,7 @@ ia32_badsys:
8150
8151 quiet_ni_syscall:
8152 movq $-ENOSYS,%rax
8153+ pax_force_retaddr
8154 ret
8155 CFI_ENDPROC
8156
8157diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8158index 016218c..47ccbdd 100644
8159--- a/arch/x86/ia32/sys_ia32.c
8160+++ b/arch/x86/ia32/sys_ia32.c
8161@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8162 */
8163 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8164 {
8165- typeof(ubuf->st_uid) uid = 0;
8166- typeof(ubuf->st_gid) gid = 0;
8167+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8168+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8169 SET_UID(uid, stat->uid);
8170 SET_GID(gid, stat->gid);
8171 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8172@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8173 }
8174 set_fs(KERNEL_DS);
8175 ret = sys_rt_sigprocmask(how,
8176- set ? (sigset_t __user *)&s : NULL,
8177- oset ? (sigset_t __user *)&s : NULL,
8178+ set ? (sigset_t __force_user *)&s : NULL,
8179+ oset ? (sigset_t __force_user *)&s : NULL,
8180 sigsetsize);
8181 set_fs(old_fs);
8182 if (ret)
8183@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8184 mm_segment_t old_fs = get_fs();
8185
8186 set_fs(KERNEL_DS);
8187- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8188+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8189 set_fs(old_fs);
8190 if (put_compat_timespec(&t, interval))
8191 return -EFAULT;
8192@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8193 mm_segment_t old_fs = get_fs();
8194
8195 set_fs(KERNEL_DS);
8196- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8197+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8198 set_fs(old_fs);
8199 if (!ret) {
8200 switch (_NSIG_WORDS) {
8201@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8202 if (copy_siginfo_from_user32(&info, uinfo))
8203 return -EFAULT;
8204 set_fs(KERNEL_DS);
8205- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8206+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8207 set_fs(old_fs);
8208 return ret;
8209 }
8210@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8211 return -EFAULT;
8212
8213 set_fs(KERNEL_DS);
8214- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8215+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8216 count);
8217 set_fs(old_fs);
8218
8219diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8220index e2077d3..e134a5e 100644
8221--- a/arch/x86/include/asm/alternative-asm.h
8222+++ b/arch/x86/include/asm/alternative-asm.h
8223@@ -19,4 +19,43 @@
8224 .endm
8225 #endif
8226
8227+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8228+ .macro pax_force_retaddr_bts rip=0
8229+ btsq $63,\rip(%rsp)
8230+ .endm
8231+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8232+ .macro pax_force_retaddr rip=0, reload=0
8233+ btsq $63,\rip(%rsp)
8234+ .endm
8235+ .macro pax_force_fptr ptr
8236+ btsq $63,\ptr
8237+ .endm
8238+ .macro pax_set_fptr_mask
8239+ .endm
8240+#endif
8241+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8242+ .macro pax_force_retaddr rip=0, reload=0
8243+ .if \reload
8244+ pax_set_fptr_mask
8245+ .endif
8246+ orq %r10,\rip(%rsp)
8247+ .endm
8248+ .macro pax_force_fptr ptr
8249+ orq %r10,\ptr
8250+ .endm
8251+ .macro pax_set_fptr_mask
8252+ movabs $0x8000000000000000,%r10
8253+ .endm
8254+#endif
8255+#else
8256+ .macro pax_force_retaddr rip=0, reload=0
8257+ .endm
8258+ .macro pax_force_fptr ptr
8259+ .endm
8260+ .macro pax_force_retaddr_bts rip=0
8261+ .endm
8262+ .macro pax_set_fptr_mask
8263+ .endm
8264+#endif
8265+
8266 #endif /* __ASSEMBLY__ */
8267diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8268index c240efc..fdfadf3 100644
8269--- a/arch/x86/include/asm/alternative.h
8270+++ b/arch/x86/include/asm/alternative.h
8271@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8272 " .byte 662b-661b\n" /* sourcelen */ \
8273 " .byte 664f-663f\n" /* replacementlen */ \
8274 ".previous\n" \
8275- ".section .altinstr_replacement, \"ax\"\n" \
8276+ ".section .altinstr_replacement, \"a\"\n" \
8277 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8278 ".previous"
8279
8280diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8281index 474d80d..1f97d58 100644
8282--- a/arch/x86/include/asm/apic.h
8283+++ b/arch/x86/include/asm/apic.h
8284@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8285
8286 #ifdef CONFIG_X86_LOCAL_APIC
8287
8288-extern unsigned int apic_verbosity;
8289+extern int apic_verbosity;
8290 extern int local_apic_timer_c2_ok;
8291
8292 extern int disable_apic;
8293diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8294index 20370c6..a2eb9b0 100644
8295--- a/arch/x86/include/asm/apm.h
8296+++ b/arch/x86/include/asm/apm.h
8297@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8298 __asm__ __volatile__(APM_DO_ZERO_SEGS
8299 "pushl %%edi\n\t"
8300 "pushl %%ebp\n\t"
8301- "lcall *%%cs:apm_bios_entry\n\t"
8302+ "lcall *%%ss:apm_bios_entry\n\t"
8303 "setc %%al\n\t"
8304 "popl %%ebp\n\t"
8305 "popl %%edi\n\t"
8306@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8307 __asm__ __volatile__(APM_DO_ZERO_SEGS
8308 "pushl %%edi\n\t"
8309 "pushl %%ebp\n\t"
8310- "lcall *%%cs:apm_bios_entry\n\t"
8311+ "lcall *%%ss:apm_bios_entry\n\t"
8312 "setc %%bl\n\t"
8313 "popl %%ebp\n\t"
8314 "popl %%edi\n\t"
8315diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8316index dc5a667..fbed878 100644
8317--- a/arch/x86/include/asm/atomic_32.h
8318+++ b/arch/x86/include/asm/atomic_32.h
8319@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8320 }
8321
8322 /**
8323+ * atomic_read_unchecked - read atomic variable
8324+ * @v: pointer of type atomic_unchecked_t
8325+ *
8326+ * Atomically reads the value of @v.
8327+ */
8328+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8329+{
8330+ return v->counter;
8331+}
8332+
8333+/**
8334 * atomic_set - set atomic variable
8335 * @v: pointer of type atomic_t
8336 * @i: required value
8337@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8338 }
8339
8340 /**
8341+ * atomic_set_unchecked - set atomic variable
8342+ * @v: pointer of type atomic_unchecked_t
8343+ * @i: required value
8344+ *
8345+ * Atomically sets the value of @v to @i.
8346+ */
8347+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8348+{
8349+ v->counter = i;
8350+}
8351+
8352+/**
8353 * atomic_add - add integer to atomic variable
8354 * @i: integer value to add
8355 * @v: pointer of type atomic_t
8356@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8357 */
8358 static inline void atomic_add(int i, atomic_t *v)
8359 {
8360- asm volatile(LOCK_PREFIX "addl %1,%0"
8361+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8362+
8363+#ifdef CONFIG_PAX_REFCOUNT
8364+ "jno 0f\n"
8365+ LOCK_PREFIX "subl %1,%0\n"
8366+ "int $4\n0:\n"
8367+ _ASM_EXTABLE(0b, 0b)
8368+#endif
8369+
8370+ : "+m" (v->counter)
8371+ : "ir" (i));
8372+}
8373+
8374+/**
8375+ * atomic_add_unchecked - add integer to atomic variable
8376+ * @i: integer value to add
8377+ * @v: pointer of type atomic_unchecked_t
8378+ *
8379+ * Atomically adds @i to @v.
8380+ */
8381+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8382+{
8383+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8384 : "+m" (v->counter)
8385 : "ir" (i));
8386 }
8387@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8388 */
8389 static inline void atomic_sub(int i, atomic_t *v)
8390 {
8391- asm volatile(LOCK_PREFIX "subl %1,%0"
8392+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8393+
8394+#ifdef CONFIG_PAX_REFCOUNT
8395+ "jno 0f\n"
8396+ LOCK_PREFIX "addl %1,%0\n"
8397+ "int $4\n0:\n"
8398+ _ASM_EXTABLE(0b, 0b)
8399+#endif
8400+
8401+ : "+m" (v->counter)
8402+ : "ir" (i));
8403+}
8404+
8405+/**
8406+ * atomic_sub_unchecked - subtract integer from atomic variable
8407+ * @i: integer value to subtract
8408+ * @v: pointer of type atomic_unchecked_t
8409+ *
8410+ * Atomically subtracts @i from @v.
8411+ */
8412+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8413+{
8414+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8415 : "+m" (v->counter)
8416 : "ir" (i));
8417 }
8418@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8419 {
8420 unsigned char c;
8421
8422- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8423+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8424+
8425+#ifdef CONFIG_PAX_REFCOUNT
8426+ "jno 0f\n"
8427+ LOCK_PREFIX "addl %2,%0\n"
8428+ "int $4\n0:\n"
8429+ _ASM_EXTABLE(0b, 0b)
8430+#endif
8431+
8432+ "sete %1\n"
8433 : "+m" (v->counter), "=qm" (c)
8434 : "ir" (i) : "memory");
8435 return c;
8436@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8437 */
8438 static inline void atomic_inc(atomic_t *v)
8439 {
8440- asm volatile(LOCK_PREFIX "incl %0"
8441+ asm volatile(LOCK_PREFIX "incl %0\n"
8442+
8443+#ifdef CONFIG_PAX_REFCOUNT
8444+ "jno 0f\n"
8445+ LOCK_PREFIX "decl %0\n"
8446+ "int $4\n0:\n"
8447+ _ASM_EXTABLE(0b, 0b)
8448+#endif
8449+
8450+ : "+m" (v->counter));
8451+}
8452+
8453+/**
8454+ * atomic_inc_unchecked - increment atomic variable
8455+ * @v: pointer of type atomic_unchecked_t
8456+ *
8457+ * Atomically increments @v by 1.
8458+ */
8459+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8460+{
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462 : "+m" (v->counter));
8463 }
8464
8465@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8466 */
8467 static inline void atomic_dec(atomic_t *v)
8468 {
8469- asm volatile(LOCK_PREFIX "decl %0"
8470+ asm volatile(LOCK_PREFIX "decl %0\n"
8471+
8472+#ifdef CONFIG_PAX_REFCOUNT
8473+ "jno 0f\n"
8474+ LOCK_PREFIX "incl %0\n"
8475+ "int $4\n0:\n"
8476+ _ASM_EXTABLE(0b, 0b)
8477+#endif
8478+
8479+ : "+m" (v->counter));
8480+}
8481+
8482+/**
8483+ * atomic_dec_unchecked - decrement atomic variable
8484+ * @v: pointer of type atomic_unchecked_t
8485+ *
8486+ * Atomically decrements @v by 1.
8487+ */
8488+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8489+{
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491 : "+m" (v->counter));
8492 }
8493
8494@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8495 {
8496 unsigned char c;
8497
8498- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8499+ asm volatile(LOCK_PREFIX "decl %0\n"
8500+
8501+#ifdef CONFIG_PAX_REFCOUNT
8502+ "jno 0f\n"
8503+ LOCK_PREFIX "incl %0\n"
8504+ "int $4\n0:\n"
8505+ _ASM_EXTABLE(0b, 0b)
8506+#endif
8507+
8508+ "sete %1\n"
8509 : "+m" (v->counter), "=qm" (c)
8510 : : "memory");
8511 return c != 0;
8512@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8513 {
8514 unsigned char c;
8515
8516- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8517+ asm volatile(LOCK_PREFIX "incl %0\n"
8518+
8519+#ifdef CONFIG_PAX_REFCOUNT
8520+ "jno 0f\n"
8521+ LOCK_PREFIX "decl %0\n"
8522+ "int $4\n0:\n"
8523+ _ASM_EXTABLE(0b, 0b)
8524+#endif
8525+
8526+ "sete %1\n"
8527+ : "+m" (v->counter), "=qm" (c)
8528+ : : "memory");
8529+ return c != 0;
8530+}
8531+
8532+/**
8533+ * atomic_inc_and_test_unchecked - increment and test
8534+ * @v: pointer of type atomic_unchecked_t
8535+ *
8536+ * Atomically increments @v by 1
8537+ * and returns true if the result is zero, or false for all
8538+ * other cases.
8539+ */
8540+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8541+{
8542+ unsigned char c;
8543+
8544+ asm volatile(LOCK_PREFIX "incl %0\n"
8545+ "sete %1\n"
8546 : "+m" (v->counter), "=qm" (c)
8547 : : "memory");
8548 return c != 0;
8549@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8550 {
8551 unsigned char c;
8552
8553- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8554+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8555+
8556+#ifdef CONFIG_PAX_REFCOUNT
8557+ "jno 0f\n"
8558+ LOCK_PREFIX "subl %2,%0\n"
8559+ "int $4\n0:\n"
8560+ _ASM_EXTABLE(0b, 0b)
8561+#endif
8562+
8563+ "sets %1\n"
8564 : "+m" (v->counter), "=qm" (c)
8565 : "ir" (i) : "memory");
8566 return c;
8567@@ -179,6 +341,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
8568 #endif
8569 /* Modern 486+ processor */
8570 __i = i;
8571+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "movl %0, %1\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580+ : "+r" (i), "+m" (v->counter)
8581+ : : "memory");
8582+ return i + __i;
8583+
8584+#ifdef CONFIG_M386
8585+no_xadd: /* Legacy 386 processor */
8586+ local_irq_save(flags);
8587+ __i = atomic_read(v);
8588+ atomic_set(v, i + __i);
8589+ local_irq_restore(flags);
8590+ return i + __i;
8591+#endif
8592+}
8593+
8594+/**
8595+ * atomic_add_return_unchecked - add integer and return
8596+ * @i: integer value to add
8597+ * @v: pointer of type atomic_unchecked_t
8598+ *
8599+ * Atomically adds @i to @v and returns @i + @v
8600+ */
8601+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8602+{
8603+ int __i;
8604+#ifdef CONFIG_M386
8605+ unsigned long flags;
8606+ if (unlikely(boot_cpu_data.x86 <= 3))
8607+ goto no_xadd;
8608+#endif
8609+ /* Modern 486+ processor */
8610+ __i = i;
8611 asm volatile(LOCK_PREFIX "xaddl %0, %1"
8612 : "+r" (i), "+m" (v->counter)
8613 : : "memory");
8614@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8615 return cmpxchg(&v->counter, old, new);
8616 }
8617
8618+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8619+{
8620+ return cmpxchg(&v->counter, old, new);
8621+}
8622+
8623 static inline int atomic_xchg(atomic_t *v, int new)
8624 {
8625 return xchg(&v->counter, new);
8626 }
8627
8628+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8629+{
8630+ return xchg(&v->counter, new);
8631+}
8632+
8633 /**
8634 * atomic_add_unless - add unless the number is already a given value
8635 * @v: pointer of type atomic_t
8636@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8637 */
8638 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8639 {
8640- int c, old;
8641+ int c, old, new;
8642 c = atomic_read(v);
8643 for (;;) {
8644- if (unlikely(c == (u)))
8645+ if (unlikely(c == u))
8646 break;
8647- old = atomic_cmpxchg((v), c, c + (a));
8648+
8649+ asm volatile("addl %2,%0\n"
8650+
8651+#ifdef CONFIG_PAX_REFCOUNT
8652+ "jno 0f\n"
8653+ "subl %2,%0\n"
8654+ "int $4\n0:\n"
8655+ _ASM_EXTABLE(0b, 0b)
8656+#endif
8657+
8658+ : "=r" (new)
8659+ : "0" (c), "ir" (a));
8660+
8661+ old = atomic_cmpxchg(v, c, new);
8662 if (likely(old == c))
8663 break;
8664 c = old;
8665 }
8666- return c != (u);
8667+ return c != u;
8668 }
8669
8670 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8671
8672 #define atomic_inc_return(v) (atomic_add_return(1, v))
8673+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8674+{
8675+ return atomic_add_return_unchecked(1, v);
8676+}
8677 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8678
8679 /* These are x86-specific, used by some header files */
8680@@ -266,9 +495,18 @@ typedef struct {
8681 u64 __aligned(8) counter;
8682 } atomic64_t;
8683
8684+#ifdef CONFIG_PAX_REFCOUNT
8685+typedef struct {
8686+ u64 __aligned(8) counter;
8687+} atomic64_unchecked_t;
8688+#else
8689+typedef atomic64_t atomic64_unchecked_t;
8690+#endif
8691+
8692 #define ATOMIC64_INIT(val) { (val) }
8693
8694 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8695+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8696
8697 /**
8698 * atomic64_xchg - xchg atomic64 variable
8699@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8700 * the old value.
8701 */
8702 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8703+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8704
8705 /**
8706 * atomic64_set - set atomic64 variable
8707@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8708 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8709
8710 /**
8711+ * atomic64_set_unchecked - set atomic64 variable
8712+ * @ptr: pointer to type atomic64_unchecked_t
8713+ * @new_val: value to assign
8714+ *
8715+ * Atomically sets the value of @ptr to @new_val.
8716+ */
8717+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8718+
8719+/**
8720 * atomic64_read - read atomic64 variable
8721 * @ptr: pointer to type atomic64_t
8722 *
8723@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8724 return res;
8725 }
8726
8727-extern u64 atomic64_read(atomic64_t *ptr);
8728+/**
8729+ * atomic64_read_unchecked - read atomic64 variable
8730+ * @ptr: pointer to type atomic64_unchecked_t
8731+ *
8732+ * Atomically reads the value of @ptr and returns it.
8733+ */
8734+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8735+{
8736+ u64 res;
8737+
8738+ /*
8739+ * Note, we inline this atomic64_unchecked_t primitive because
8740+ * it only clobbers EAX/EDX and leaves the others
8741+ * untouched. We also (somewhat subtly) rely on the
8742+ * fact that cmpxchg8b returns the current 64-bit value
8743+ * of the memory location we are touching:
8744+ */
8745+ asm volatile(
8746+ "mov %%ebx, %%eax\n\t"
8747+ "mov %%ecx, %%edx\n\t"
8748+ LOCK_PREFIX "cmpxchg8b %1\n"
8749+ : "=&A" (res)
8750+ : "m" (*ptr)
8751+ );
8752+
8753+ return res;
8754+}
8755
8756 /**
8757 * atomic64_add_return - add and return
8758@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8759 * Other variants with different arithmetic operators:
8760 */
8761 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8762+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8763 extern u64 atomic64_inc_return(atomic64_t *ptr);
8764+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8765 extern u64 atomic64_dec_return(atomic64_t *ptr);
8766+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8767
8768 /**
8769 * atomic64_add - add integer to atomic64 variable
8770@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8771 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8772
8773 /**
8774+ * atomic64_add_unchecked - add integer to atomic64 variable
8775+ * @delta: integer value to add
8776+ * @ptr: pointer to type atomic64_unchecked_t
8777+ *
8778+ * Atomically adds @delta to @ptr.
8779+ */
8780+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8781+
8782+/**
8783 * atomic64_sub - subtract the atomic64 variable
8784 * @delta: integer value to subtract
8785 * @ptr: pointer to type atomic64_t
8786@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8787 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8788
8789 /**
8790+ * atomic64_sub_unchecked - subtract the atomic64 variable
8791+ * @delta: integer value to subtract
8792+ * @ptr: pointer to type atomic64_unchecked_t
8793+ *
8794+ * Atomically subtracts @delta from @ptr.
8795+ */
8796+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8797+
8798+/**
8799 * atomic64_sub_and_test - subtract value from variable and test result
8800 * @delta: integer value to subtract
8801 * @ptr: pointer to type atomic64_t
8802@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8803 extern void atomic64_inc(atomic64_t *ptr);
8804
8805 /**
8806+ * atomic64_inc_unchecked - increment atomic64 variable
8807+ * @ptr: pointer to type atomic64_unchecked_t
8808+ *
8809+ * Atomically increments @ptr by 1.
8810+ */
8811+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8812+
8813+/**
8814 * atomic64_dec - decrement atomic64 variable
8815 * @ptr: pointer to type atomic64_t
8816 *
8817@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8818 extern void atomic64_dec(atomic64_t *ptr);
8819
8820 /**
8821+ * atomic64_dec_unchecked - decrement atomic64 variable
8822+ * @ptr: pointer to type atomic64_unchecked_t
8823+ *
8824+ * Atomically decrements @ptr by 1.
8825+ */
8826+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8827+
8828+/**
8829 * atomic64_dec_and_test - decrement and test
8830 * @ptr: pointer to type atomic64_t
8831 *
8832diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8833index d605dc2..fafd7bd 100644
8834--- a/arch/x86/include/asm/atomic_64.h
8835+++ b/arch/x86/include/asm/atomic_64.h
8836@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8837 }
8838
8839 /**
8840+ * atomic_read_unchecked - read atomic variable
8841+ * @v: pointer of type atomic_unchecked_t
8842+ *
8843+ * Atomically reads the value of @v.
8844+ */
8845+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8846+{
8847+ return v->counter;
8848+}
8849+
8850+/**
8851 * atomic_set - set atomic variable
8852 * @v: pointer of type atomic_t
8853 * @i: required value
8854@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8855 }
8856
8857 /**
8858+ * atomic_set_unchecked - set atomic variable
8859+ * @v: pointer of type atomic_unchecked_t
8860+ * @i: required value
8861+ *
8862+ * Atomically sets the value of @v to @i.
8863+ */
8864+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8865+{
8866+ v->counter = i;
8867+}
8868+
8869+/**
8870 * atomic_add - add integer to atomic variable
8871 * @i: integer value to add
8872 * @v: pointer of type atomic_t
8873@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8874 */
8875 static inline void atomic_add(int i, atomic_t *v)
8876 {
8877- asm volatile(LOCK_PREFIX "addl %1,%0"
8878+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8879+
8880+#ifdef CONFIG_PAX_REFCOUNT
8881+ "jno 0f\n"
8882+ LOCK_PREFIX "subl %1,%0\n"
8883+ "int $4\n0:\n"
8884+ _ASM_EXTABLE(0b, 0b)
8885+#endif
8886+
8887+ : "=m" (v->counter)
8888+ : "ir" (i), "m" (v->counter));
8889+}
8890+
8891+/**
8892+ * atomic_add_unchecked - add integer to atomic variable
8893+ * @i: integer value to add
8894+ * @v: pointer of type atomic_unchecked_t
8895+ *
8896+ * Atomically adds @i to @v.
8897+ */
8898+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8899+{
8900+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8901 : "=m" (v->counter)
8902 : "ir" (i), "m" (v->counter));
8903 }
8904@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8905 */
8906 static inline void atomic_sub(int i, atomic_t *v)
8907 {
8908- asm volatile(LOCK_PREFIX "subl %1,%0"
8909+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8910+
8911+#ifdef CONFIG_PAX_REFCOUNT
8912+ "jno 0f\n"
8913+ LOCK_PREFIX "addl %1,%0\n"
8914+ "int $4\n0:\n"
8915+ _ASM_EXTABLE(0b, 0b)
8916+#endif
8917+
8918+ : "=m" (v->counter)
8919+ : "ir" (i), "m" (v->counter));
8920+}
8921+
8922+/**
8923+ * atomic_sub_unchecked - subtract the atomic variable
8924+ * @i: integer value to subtract
8925+ * @v: pointer of type atomic_unchecked_t
8926+ *
8927+ * Atomically subtracts @i from @v.
8928+ */
8929+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8930+{
8931+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8932 : "=m" (v->counter)
8933 : "ir" (i), "m" (v->counter));
8934 }
8935@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8936 {
8937 unsigned char c;
8938
8939- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8940+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8941+
8942+#ifdef CONFIG_PAX_REFCOUNT
8943+ "jno 0f\n"
8944+ LOCK_PREFIX "addl %2,%0\n"
8945+ "int $4\n0:\n"
8946+ _ASM_EXTABLE(0b, 0b)
8947+#endif
8948+
8949+ "sete %1\n"
8950 : "=m" (v->counter), "=qm" (c)
8951 : "ir" (i), "m" (v->counter) : "memory");
8952 return c;
8953@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8954 */
8955 static inline void atomic_inc(atomic_t *v)
8956 {
8957- asm volatile(LOCK_PREFIX "incl %0"
8958+ asm volatile(LOCK_PREFIX "incl %0\n"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+ "jno 0f\n"
8962+ LOCK_PREFIX "decl %0\n"
8963+ "int $4\n0:\n"
8964+ _ASM_EXTABLE(0b, 0b)
8965+#endif
8966+
8967+ : "=m" (v->counter)
8968+ : "m" (v->counter));
8969+}
8970+
8971+/**
8972+ * atomic_inc_unchecked - increment atomic variable
8973+ * @v: pointer of type atomic_unchecked_t
8974+ *
8975+ * Atomically increments @v by 1.
8976+ */
8977+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8978+{
8979+ asm volatile(LOCK_PREFIX "incl %0\n"
8980 : "=m" (v->counter)
8981 : "m" (v->counter));
8982 }
8983@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
8984 */
8985 static inline void atomic_dec(atomic_t *v)
8986 {
8987- asm volatile(LOCK_PREFIX "decl %0"
8988+ asm volatile(LOCK_PREFIX "decl %0\n"
8989+
8990+#ifdef CONFIG_PAX_REFCOUNT
8991+ "jno 0f\n"
8992+ LOCK_PREFIX "incl %0\n"
8993+ "int $4\n0:\n"
8994+ _ASM_EXTABLE(0b, 0b)
8995+#endif
8996+
8997+ : "=m" (v->counter)
8998+ : "m" (v->counter));
8999+}
9000+
9001+/**
9002+ * atomic_dec_unchecked - decrement atomic variable
9003+ * @v: pointer of type atomic_unchecked_t
9004+ *
9005+ * Atomically decrements @v by 1.
9006+ */
9007+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9008+{
9009+ asm volatile(LOCK_PREFIX "decl %0\n"
9010 : "=m" (v->counter)
9011 : "m" (v->counter));
9012 }
9013@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9014 {
9015 unsigned char c;
9016
9017- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9018+ asm volatile(LOCK_PREFIX "decl %0\n"
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ "jno 0f\n"
9022+ LOCK_PREFIX "incl %0\n"
9023+ "int $4\n0:\n"
9024+ _ASM_EXTABLE(0b, 0b)
9025+#endif
9026+
9027+ "sete %1\n"
9028 : "=m" (v->counter), "=qm" (c)
9029 : "m" (v->counter) : "memory");
9030 return c != 0;
9031@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9032 {
9033 unsigned char c;
9034
9035- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9036+ asm volatile(LOCK_PREFIX "incl %0\n"
9037+
9038+#ifdef CONFIG_PAX_REFCOUNT
9039+ "jno 0f\n"
9040+ LOCK_PREFIX "decl %0\n"
9041+ "int $4\n0:\n"
9042+ _ASM_EXTABLE(0b, 0b)
9043+#endif
9044+
9045+ "sete %1\n"
9046+ : "=m" (v->counter), "=qm" (c)
9047+ : "m" (v->counter) : "memory");
9048+ return c != 0;
9049+}
9050+
9051+/**
9052+ * atomic_inc_and_test_unchecked - increment and test
9053+ * @v: pointer of type atomic_unchecked_t
9054+ *
9055+ * Atomically increments @v by 1
9056+ * and returns true if the result is zero, or false for all
9057+ * other cases.
9058+ */
9059+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9060+{
9061+ unsigned char c;
9062+
9063+ asm volatile(LOCK_PREFIX "incl %0\n"
9064+ "sete %1\n"
9065 : "=m" (v->counter), "=qm" (c)
9066 : "m" (v->counter) : "memory");
9067 return c != 0;
9068@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9069 {
9070 unsigned char c;
9071
9072- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9073+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9074+
9075+#ifdef CONFIG_PAX_REFCOUNT
9076+ "jno 0f\n"
9077+ LOCK_PREFIX "subl %2,%0\n"
9078+ "int $4\n0:\n"
9079+ _ASM_EXTABLE(0b, 0b)
9080+#endif
9081+
9082+ "sets %1\n"
9083 : "=m" (v->counter), "=qm" (c)
9084 : "ir" (i), "m" (v->counter) : "memory");
9085 return c;
9086@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9087 static inline int atomic_add_return(int i, atomic_t *v)
9088 {
9089 int __i = i;
9090- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9091+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9092+
9093+#ifdef CONFIG_PAX_REFCOUNT
9094+ "jno 0f\n"
9095+ "movl %0, %1\n"
9096+ "int $4\n0:\n"
9097+ _ASM_EXTABLE(0b, 0b)
9098+#endif
9099+
9100+ : "+r" (i), "+m" (v->counter)
9101+ : : "memory");
9102+ return i + __i;
9103+}
9104+
9105+/**
9106+ * atomic_add_return_unchecked - add and return
9107+ * @i: integer value to add
9108+ * @v: pointer of type atomic_unchecked_t
9109+ *
9110+ * Atomically adds @i to @v and returns @i + @v
9111+ */
9112+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9113+{
9114+ int __i = i;
9115+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9116 : "+r" (i), "+m" (v->counter)
9117 : : "memory");
9118 return i + __i;
9119@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9120 }
9121
9122 #define atomic_inc_return(v) (atomic_add_return(1, v))
9123+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9124+{
9125+ return atomic_add_return_unchecked(1, v);
9126+}
9127 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9128
9129 /* The 64-bit atomic type */
9130@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134+ * atomic64_read_unchecked - read atomic64 variable
9135+ * @v: pointer of type atomic64_unchecked_t
9136+ *
9137+ * Atomically reads the value of @v.
9138+ * Doesn't imply a read memory barrier.
9139+ */
9140+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9141+{
9142+ return v->counter;
9143+}
9144+
9145+/**
9146 * atomic64_set - set atomic64 variable
9147 * @v: pointer to type atomic64_t
9148 * @i: required value
9149@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9150 }
9151
9152 /**
9153+ * atomic64_set_unchecked - set atomic64 variable
9154+ * @v: pointer to type atomic64_unchecked_t
9155+ * @i: required value
9156+ *
9157+ * Atomically sets the value of @v to @i.
9158+ */
9159+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9160+{
9161+ v->counter = i;
9162+}
9163+
9164+/**
9165 * atomic64_add - add integer to atomic64 variable
9166 * @i: integer value to add
9167 * @v: pointer to type atomic64_t
9168@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9169 */
9170 static inline void atomic64_add(long i, atomic64_t *v)
9171 {
9172+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9173+
9174+#ifdef CONFIG_PAX_REFCOUNT
9175+ "jno 0f\n"
9176+ LOCK_PREFIX "subq %1,%0\n"
9177+ "int $4\n0:\n"
9178+ _ASM_EXTABLE(0b, 0b)
9179+#endif
9180+
9181+ : "=m" (v->counter)
9182+ : "er" (i), "m" (v->counter));
9183+}
9184+
9185+/**
9186+ * atomic64_add_unchecked - add integer to atomic64 variable
9187+ * @i: integer value to add
9188+ * @v: pointer to type atomic64_unchecked_t
9189+ *
9190+ * Atomically adds @i to @v.
9191+ */
9192+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9193+{
9194 asm volatile(LOCK_PREFIX "addq %1,%0"
9195 : "=m" (v->counter)
9196 : "er" (i), "m" (v->counter));
9197@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9198 */
9199 static inline void atomic64_sub(long i, atomic64_t *v)
9200 {
9201- asm volatile(LOCK_PREFIX "subq %1,%0"
9202+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9203+
9204+#ifdef CONFIG_PAX_REFCOUNT
9205+ "jno 0f\n"
9206+ LOCK_PREFIX "addq %1,%0\n"
9207+ "int $4\n0:\n"
9208+ _ASM_EXTABLE(0b, 0b)
9209+#endif
9210+
9211 : "=m" (v->counter)
9212 : "er" (i), "m" (v->counter));
9213 }
9214@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9215 {
9216 unsigned char c;
9217
9218- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9219+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9220+
9221+#ifdef CONFIG_PAX_REFCOUNT
9222+ "jno 0f\n"
9223+ LOCK_PREFIX "addq %2,%0\n"
9224+ "int $4\n0:\n"
9225+ _ASM_EXTABLE(0b, 0b)
9226+#endif
9227+
9228+ "sete %1\n"
9229 : "=m" (v->counter), "=qm" (c)
9230 : "er" (i), "m" (v->counter) : "memory");
9231 return c;
9232@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9233 */
9234 static inline void atomic64_inc(atomic64_t *v)
9235 {
9236+ asm volatile(LOCK_PREFIX "incq %0\n"
9237+
9238+#ifdef CONFIG_PAX_REFCOUNT
9239+ "jno 0f\n"
9240+ LOCK_PREFIX "decq %0\n"
9241+ "int $4\n0:\n"
9242+ _ASM_EXTABLE(0b, 0b)
9243+#endif
9244+
9245+ : "=m" (v->counter)
9246+ : "m" (v->counter));
9247+}
9248+
9249+/**
9250+ * atomic64_inc_unchecked - increment atomic64 variable
9251+ * @v: pointer to type atomic64_unchecked_t
9252+ *
9253+ * Atomically increments @v by 1.
9254+ */
9255+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9256+{
9257 asm volatile(LOCK_PREFIX "incq %0"
9258 : "=m" (v->counter)
9259 : "m" (v->counter));
9260@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9261 */
9262 static inline void atomic64_dec(atomic64_t *v)
9263 {
9264- asm volatile(LOCK_PREFIX "decq %0"
9265+ asm volatile(LOCK_PREFIX "decq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "incq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_dec_unchecked - decrement atomic64 variable
9280+ * @v: pointer to type atomic64_unchecked_t
9281+ *
9282+ * Atomically decrements @v by 1.
9283+ */
9284+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9285+{
9286+ asm volatile(LOCK_PREFIX "decq %0\n"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289 }
9290@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9291 {
9292 unsigned char c;
9293
9294- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9295+ asm volatile(LOCK_PREFIX "decq %0\n"
9296+
9297+#ifdef CONFIG_PAX_REFCOUNT
9298+ "jno 0f\n"
9299+ LOCK_PREFIX "incq %0\n"
9300+ "int $4\n0:\n"
9301+ _ASM_EXTABLE(0b, 0b)
9302+#endif
9303+
9304+ "sete %1\n"
9305 : "=m" (v->counter), "=qm" (c)
9306 : "m" (v->counter) : "memory");
9307 return c != 0;
9308@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9309 {
9310 unsigned char c;
9311
9312- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9313+ asm volatile(LOCK_PREFIX "incq %0\n"
9314+
9315+#ifdef CONFIG_PAX_REFCOUNT
9316+ "jno 0f\n"
9317+ LOCK_PREFIX "decq %0\n"
9318+ "int $4\n0:\n"
9319+ _ASM_EXTABLE(0b, 0b)
9320+#endif
9321+
9322+ "sete %1\n"
9323 : "=m" (v->counter), "=qm" (c)
9324 : "m" (v->counter) : "memory");
9325 return c != 0;
9326@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9327 {
9328 unsigned char c;
9329
9330- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9331+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9332+
9333+#ifdef CONFIG_PAX_REFCOUNT
9334+ "jno 0f\n"
9335+ LOCK_PREFIX "subq %2,%0\n"
9336+ "int $4\n0:\n"
9337+ _ASM_EXTABLE(0b, 0b)
9338+#endif
9339+
9340+ "sets %1\n"
9341 : "=m" (v->counter), "=qm" (c)
9342 : "er" (i), "m" (v->counter) : "memory");
9343 return c;
9344@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9345 static inline long atomic64_add_return(long i, atomic64_t *v)
9346 {
9347 long __i = i;
9348- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9349+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9350+
9351+#ifdef CONFIG_PAX_REFCOUNT
9352+ "jno 0f\n"
9353+ "movq %0, %1\n"
9354+ "int $4\n0:\n"
9355+ _ASM_EXTABLE(0b, 0b)
9356+#endif
9357+
9358+ : "+r" (i), "+m" (v->counter)
9359+ : : "memory");
9360+ return i + __i;
9361+}
9362+
9363+/**
9364+ * atomic64_add_return_unchecked - add and return
9365+ * @i: integer value to add
9366+ * @v: pointer to type atomic64_unchecked_t
9367+ *
9368+ * Atomically adds @i to @v and returns @i + @v
9369+ */
9370+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9371+{
9372+ long __i = i;
9373+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9374 : "+r" (i), "+m" (v->counter)
9375 : : "memory");
9376 return i + __i;
9377@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9378 }
9379
9380 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9381+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9382+{
9383+ return atomic64_add_return_unchecked(1, v);
9384+}
9385 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9386
9387 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9388@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9389 return cmpxchg(&v->counter, old, new);
9390 }
9391
9392+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9393+{
9394+ return cmpxchg(&v->counter, old, new);
9395+}
9396+
9397 static inline long atomic64_xchg(atomic64_t *v, long new)
9398 {
9399 return xchg(&v->counter, new);
9400 }
9401
9402+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9403+{
9404+ return xchg(&v->counter, new);
9405+}
9406+
9407 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9408 {
9409 return cmpxchg(&v->counter, old, new);
9410 }
9411
9412+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9413+{
9414+ return cmpxchg(&v->counter, old, new);
9415+}
9416+
9417 static inline long atomic_xchg(atomic_t *v, int new)
9418 {
9419 return xchg(&v->counter, new);
9420 }
9421
9422+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9423+{
9424+ return xchg(&v->counter, new);
9425+}
9426+
9427 /**
9428 * atomic_add_unless - add unless the number is a given value
9429 * @v: pointer of type atomic_t
9430@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9431 */
9432 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9433 {
9434- int c, old;
9435+ int c, old, new;
9436 c = atomic_read(v);
9437 for (;;) {
9438- if (unlikely(c == (u)))
9439+ if (unlikely(c == u))
9440 break;
9441- old = atomic_cmpxchg((v), c, c + (a));
9442+
9443+ asm volatile("addl %2,%0\n"
9444+
9445+#ifdef CONFIG_PAX_REFCOUNT
9446+ "jno 0f\n"
9447+ "subl %2,%0\n"
9448+ "int $4\n0:\n"
9449+ _ASM_EXTABLE(0b, 0b)
9450+#endif
9451+
9452+ : "=r" (new)
9453+ : "0" (c), "ir" (a));
9454+
9455+ old = atomic_cmpxchg(v, c, new);
9456 if (likely(old == c))
9457 break;
9458 c = old;
9459 }
9460- return c != (u);
9461+ return c != u;
9462 }
9463
9464 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9465@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9466 */
9467 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9468 {
9469- long c, old;
9470+ long c, old, new;
9471 c = atomic64_read(v);
9472 for (;;) {
9473- if (unlikely(c == (u)))
9474+ if (unlikely(c == u))
9475 break;
9476- old = atomic64_cmpxchg((v), c, c + (a));
9477+
9478+ asm volatile("addq %2,%0\n"
9479+
9480+#ifdef CONFIG_PAX_REFCOUNT
9481+ "jno 0f\n"
9482+ "subq %2,%0\n"
9483+ "int $4\n0:\n"
9484+ _ASM_EXTABLE(0b, 0b)
9485+#endif
9486+
9487+ : "=r" (new)
9488+ : "0" (c), "er" (a));
9489+
9490+ old = atomic64_cmpxchg(v, c, new);
9491 if (likely(old == c))
9492 break;
9493 c = old;
9494 }
9495- return c != (u);
9496+ return c != u;
9497 }
9498
9499 /**
9500diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9501index 02b47a6..d5c4b15 100644
9502--- a/arch/x86/include/asm/bitops.h
9503+++ b/arch/x86/include/asm/bitops.h
9504@@ -38,7 +38,7 @@
9505 * a mask operation on a byte.
9506 */
9507 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9508-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9509+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9510 #define CONST_MASK(nr) (1 << ((nr) & 7))
9511
9512 /**
9513diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9514index 7a10659..8bbf355 100644
9515--- a/arch/x86/include/asm/boot.h
9516+++ b/arch/x86/include/asm/boot.h
9517@@ -11,10 +11,15 @@
9518 #include <asm/pgtable_types.h>
9519
9520 /* Physical address where kernel should be loaded. */
9521-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9522+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9523 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9524 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9525
9526+#ifndef __ASSEMBLY__
9527+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9528+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9529+#endif
9530+
9531 /* Minimum kernel alignment, as a power of two */
9532 #ifdef CONFIG_X86_64
9533 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9534diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9535index 549860d..7d45f68 100644
9536--- a/arch/x86/include/asm/cache.h
9537+++ b/arch/x86/include/asm/cache.h
9538@@ -5,9 +5,10 @@
9539
9540 /* L1 cache line size */
9541 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9542-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9543+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9544
9545 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9546+#define __read_only __attribute__((__section__(".data.read_only")))
9547
9548 #ifdef CONFIG_X86_VSMP
9549 /* vSMP Internode cacheline shift */
9550diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9551index b54f6af..5b376a6 100644
9552--- a/arch/x86/include/asm/cacheflush.h
9553+++ b/arch/x86/include/asm/cacheflush.h
9554@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9555 static inline unsigned long get_page_memtype(struct page *pg)
9556 {
9557 if (!PageUncached(pg) && !PageWC(pg))
9558- return -1;
9559+ return ~0UL;
9560 else if (!PageUncached(pg) && PageWC(pg))
9561 return _PAGE_CACHE_WC;
9562 else if (PageUncached(pg) && !PageWC(pg))
9563@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9564 SetPageWC(pg);
9565 break;
9566 default:
9567- case -1:
9568+ case ~0UL:
9569 ClearPageUncached(pg);
9570 ClearPageWC(pg);
9571 break;
9572diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9573index 0e63c9a..ab8d972 100644
9574--- a/arch/x86/include/asm/calling.h
9575+++ b/arch/x86/include/asm/calling.h
9576@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9577 * for assembly code:
9578 */
9579
9580-#define R15 0
9581-#define R14 8
9582-#define R13 16
9583-#define R12 24
9584-#define RBP 32
9585-#define RBX 40
9586+#define R15 (0)
9587+#define R14 (8)
9588+#define R13 (16)
9589+#define R12 (24)
9590+#define RBP (32)
9591+#define RBX (40)
9592
9593 /* arguments: interrupts/non tracing syscalls only save up to here: */
9594-#define R11 48
9595-#define R10 56
9596-#define R9 64
9597-#define R8 72
9598-#define RAX 80
9599-#define RCX 88
9600-#define RDX 96
9601-#define RSI 104
9602-#define RDI 112
9603-#define ORIG_RAX 120 /* + error_code */
9604+#define R11 (48)
9605+#define R10 (56)
9606+#define R9 (64)
9607+#define R8 (72)
9608+#define RAX (80)
9609+#define RCX (88)
9610+#define RDX (96)
9611+#define RSI (104)
9612+#define RDI (112)
9613+#define ORIG_RAX (120) /* + error_code */
9614 /* end of arguments */
9615
9616 /* cpu exception frame or undefined in case of fast syscall: */
9617-#define RIP 128
9618-#define CS 136
9619-#define EFLAGS 144
9620-#define RSP 152
9621-#define SS 160
9622+#define RIP (128)
9623+#define CS (136)
9624+#define EFLAGS (144)
9625+#define RSP (152)
9626+#define SS (160)
9627
9628 #define ARGOFFSET R11
9629 #define SWFRAME ORIG_RAX
9630diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9631index 46fc474..b02b0f9 100644
9632--- a/arch/x86/include/asm/checksum_32.h
9633+++ b/arch/x86/include/asm/checksum_32.h
9634@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9635 int len, __wsum sum,
9636 int *src_err_ptr, int *dst_err_ptr);
9637
9638+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9639+ int len, __wsum sum,
9640+ int *src_err_ptr, int *dst_err_ptr);
9641+
9642+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9643+ int len, __wsum sum,
9644+ int *src_err_ptr, int *dst_err_ptr);
9645+
9646 /*
9647 * Note: when you get a NULL pointer exception here this means someone
9648 * passed in an incorrect kernel address to one of these functions.
9649@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9650 int *err_ptr)
9651 {
9652 might_sleep();
9653- return csum_partial_copy_generic((__force void *)src, dst,
9654+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9655 len, sum, err_ptr, NULL);
9656 }
9657
9658@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9659 {
9660 might_sleep();
9661 if (access_ok(VERIFY_WRITE, dst, len))
9662- return csum_partial_copy_generic(src, (__force void *)dst,
9663+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9664 len, sum, NULL, err_ptr);
9665
9666 if (len)
9667diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9668index 617bd56..7b047a1 100644
9669--- a/arch/x86/include/asm/desc.h
9670+++ b/arch/x86/include/asm/desc.h
9671@@ -4,6 +4,7 @@
9672 #include <asm/desc_defs.h>
9673 #include <asm/ldt.h>
9674 #include <asm/mmu.h>
9675+#include <asm/pgtable.h>
9676 #include <linux/smp.h>
9677
9678 static inline void fill_ldt(struct desc_struct *desc,
9679@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9680 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9681 desc->type = (info->read_exec_only ^ 1) << 1;
9682 desc->type |= info->contents << 2;
9683+ desc->type |= info->seg_not_present ^ 1;
9684 desc->s = 1;
9685 desc->dpl = 0x3;
9686 desc->p = info->seg_not_present ^ 1;
9687@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9688 }
9689
9690 extern struct desc_ptr idt_descr;
9691-extern gate_desc idt_table[];
9692-
9693-struct gdt_page {
9694- struct desc_struct gdt[GDT_ENTRIES];
9695-} __attribute__((aligned(PAGE_SIZE)));
9696-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9697+extern gate_desc idt_table[256];
9698
9699+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9700 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9701 {
9702- return per_cpu(gdt_page, cpu).gdt;
9703+ return cpu_gdt_table[cpu];
9704 }
9705
9706 #ifdef CONFIG_X86_64
9707@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9708 unsigned long base, unsigned dpl, unsigned flags,
9709 unsigned short seg)
9710 {
9711- gate->a = (seg << 16) | (base & 0xffff);
9712- gate->b = (base & 0xffff0000) |
9713- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9714+ gate->gate.offset_low = base;
9715+ gate->gate.seg = seg;
9716+ gate->gate.reserved = 0;
9717+ gate->gate.type = type;
9718+ gate->gate.s = 0;
9719+ gate->gate.dpl = dpl;
9720+ gate->gate.p = 1;
9721+ gate->gate.offset_high = base >> 16;
9722 }
9723
9724 #endif
9725@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9726 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9727 const gate_desc *gate)
9728 {
9729+ pax_open_kernel();
9730 memcpy(&idt[entry], gate, sizeof(*gate));
9731+ pax_close_kernel();
9732 }
9733
9734 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9735 const void *desc)
9736 {
9737+ pax_open_kernel();
9738 memcpy(&ldt[entry], desc, 8);
9739+ pax_close_kernel();
9740 }
9741
9742 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9743@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9744 size = sizeof(struct desc_struct);
9745 break;
9746 }
9747+
9748+ pax_open_kernel();
9749 memcpy(&gdt[entry], desc, size);
9750+ pax_close_kernel();
9751 }
9752
9753 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9754@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9755
9756 static inline void native_load_tr_desc(void)
9757 {
9758+ pax_open_kernel();
9759 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_load_gdt(const struct desc_ptr *dtr)
9764@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9765 unsigned int i;
9766 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9767
9768+ pax_open_kernel();
9769 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9770 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9771+ pax_close_kernel();
9772 }
9773
9774 #define _LDT_empty(info) \
9775@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9776 desc->limit = (limit >> 16) & 0xf;
9777 }
9778
9779-static inline void _set_gate(int gate, unsigned type, void *addr,
9780+static inline void _set_gate(int gate, unsigned type, const void *addr,
9781 unsigned dpl, unsigned ist, unsigned seg)
9782 {
9783 gate_desc s;
9784@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9785 * Pentium F0 0F bugfix can have resulted in the mapped
9786 * IDT being write-protected.
9787 */
9788-static inline void set_intr_gate(unsigned int n, void *addr)
9789+static inline void set_intr_gate(unsigned int n, const void *addr)
9790 {
9791 BUG_ON((unsigned)n > 0xFF);
9792 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9793@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9794 /*
9795 * This routine sets up an interrupt gate at directory privilege level 3.
9796 */
9797-static inline void set_system_intr_gate(unsigned int n, void *addr)
9798+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9799 {
9800 BUG_ON((unsigned)n > 0xFF);
9801 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9802 }
9803
9804-static inline void set_system_trap_gate(unsigned int n, void *addr)
9805+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9806 {
9807 BUG_ON((unsigned)n > 0xFF);
9808 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9809 }
9810
9811-static inline void set_trap_gate(unsigned int n, void *addr)
9812+static inline void set_trap_gate(unsigned int n, const void *addr)
9813 {
9814 BUG_ON((unsigned)n > 0xFF);
9815 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9816@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9817 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9818 {
9819 BUG_ON((unsigned)n > 0xFF);
9820- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9821+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9822 }
9823
9824-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9825+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9826 {
9827 BUG_ON((unsigned)n > 0xFF);
9828 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9829 }
9830
9831-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9832+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9833 {
9834 BUG_ON((unsigned)n > 0xFF);
9835 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9836 }
9837
9838+#ifdef CONFIG_X86_32
9839+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9840+{
9841+ struct desc_struct d;
9842+
9843+ if (likely(limit))
9844+ limit = (limit - 1UL) >> PAGE_SHIFT;
9845+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9846+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9847+}
9848+#endif
9849+
9850 #endif /* _ASM_X86_DESC_H */
9851diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9852index 9d66848..6b4a691 100644
9853--- a/arch/x86/include/asm/desc_defs.h
9854+++ b/arch/x86/include/asm/desc_defs.h
9855@@ -31,6 +31,12 @@ struct desc_struct {
9856 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9857 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9858 };
9859+ struct {
9860+ u16 offset_low;
9861+ u16 seg;
9862+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9863+ unsigned offset_high: 16;
9864+ } gate;
9865 };
9866 } __attribute__((packed));
9867
9868diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9869index cee34e9..a7c3fa2 100644
9870--- a/arch/x86/include/asm/device.h
9871+++ b/arch/x86/include/asm/device.h
9872@@ -6,7 +6,7 @@ struct dev_archdata {
9873 void *acpi_handle;
9874 #endif
9875 #ifdef CONFIG_X86_64
9876-struct dma_map_ops *dma_ops;
9877+ const struct dma_map_ops *dma_ops;
9878 #endif
9879 #ifdef CONFIG_DMAR
9880 void *iommu; /* hook for IOMMU specific extension */
9881diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9882index 6a25d5d..786b202 100644
9883--- a/arch/x86/include/asm/dma-mapping.h
9884+++ b/arch/x86/include/asm/dma-mapping.h
9885@@ -25,9 +25,9 @@ extern int iommu_merge;
9886 extern struct device x86_dma_fallback_dev;
9887 extern int panic_on_overflow;
9888
9889-extern struct dma_map_ops *dma_ops;
9890+extern const struct dma_map_ops *dma_ops;
9891
9892-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9893+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9894 {
9895 #ifdef CONFIG_X86_32
9896 return dma_ops;
9897@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9898 /* Make sure we keep the same behaviour */
9899 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9900 {
9901- struct dma_map_ops *ops = get_dma_ops(dev);
9902+ const struct dma_map_ops *ops = get_dma_ops(dev);
9903 if (ops->mapping_error)
9904 return ops->mapping_error(dev, dma_addr);
9905
9906@@ -122,7 +122,7 @@ static inline void *
9907 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9908 gfp_t gfp)
9909 {
9910- struct dma_map_ops *ops = get_dma_ops(dev);
9911+ const struct dma_map_ops *ops = get_dma_ops(dev);
9912 void *memory;
9913
9914 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9915@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9916 static inline void dma_free_coherent(struct device *dev, size_t size,
9917 void *vaddr, dma_addr_t bus)
9918 {
9919- struct dma_map_ops *ops = get_dma_ops(dev);
9920+ const struct dma_map_ops *ops = get_dma_ops(dev);
9921
9922 WARN_ON(irqs_disabled()); /* for portability */
9923
9924diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9925index 40b4e61..40d8133 100644
9926--- a/arch/x86/include/asm/e820.h
9927+++ b/arch/x86/include/asm/e820.h
9928@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9929 #define ISA_END_ADDRESS 0x100000
9930 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9931
9932-#define BIOS_BEGIN 0x000a0000
9933+#define BIOS_BEGIN 0x000c0000
9934 #define BIOS_END 0x00100000
9935
9936 #ifdef __KERNEL__
9937diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9938index 8ac9d9a..0a6c96e 100644
9939--- a/arch/x86/include/asm/elf.h
9940+++ b/arch/x86/include/asm/elf.h
9941@@ -257,7 +257,25 @@ extern int force_personality32;
9942 the loader. We need to make sure that it is out of the way of the program
9943 that it will "exec", and that there is sufficient room for the brk. */
9944
9945+#ifdef CONFIG_PAX_SEGMEXEC
9946+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9947+#else
9948 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9949+#endif
9950+
9951+#ifdef CONFIG_PAX_ASLR
9952+#ifdef CONFIG_X86_32
9953+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9954+
9955+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9956+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9957+#else
9958+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9959+
9960+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9961+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9962+#endif
9963+#endif
9964
9965 /* This yields a mask that user programs can use to figure out what
9966 instruction set this CPU supports. This could be done in user space,
9967@@ -310,9 +328,7 @@ do { \
9968
9969 #define ARCH_DLINFO \
9970 do { \
9971- if (vdso_enabled) \
9972- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9973- (unsigned long)current->mm->context.vdso); \
9974+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9975 } while (0)
9976
9977 #define AT_SYSINFO 32
9978@@ -323,7 +339,7 @@ do { \
9979
9980 #endif /* !CONFIG_X86_32 */
9981
9982-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9983+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9984
9985 #define VDSO_ENTRY \
9986 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9987@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9988 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9989 #define compat_arch_setup_additional_pages syscall32_setup_pages
9990
9991-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9992-#define arch_randomize_brk arch_randomize_brk
9993-
9994 #endif /* _ASM_X86_ELF_H */
9995diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9996index cc70c1c..d96d011 100644
9997--- a/arch/x86/include/asm/emergency-restart.h
9998+++ b/arch/x86/include/asm/emergency-restart.h
9999@@ -15,6 +15,6 @@ enum reboot_type {
10000
10001 extern enum reboot_type reboot_type;
10002
10003-extern void machine_emergency_restart(void);
10004+extern void machine_emergency_restart(void) __noreturn;
10005
10006 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10007diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10008index 1f11ce4..7caabd1 100644
10009--- a/arch/x86/include/asm/futex.h
10010+++ b/arch/x86/include/asm/futex.h
10011@@ -12,16 +12,18 @@
10012 #include <asm/system.h>
10013
10014 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10015+ typecheck(u32 __user *, uaddr); \
10016 asm volatile("1:\t" insn "\n" \
10017 "2:\t.section .fixup,\"ax\"\n" \
10018 "3:\tmov\t%3, %1\n" \
10019 "\tjmp\t2b\n" \
10020 "\t.previous\n" \
10021 _ASM_EXTABLE(1b, 3b) \
10022- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10023+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10024 : "i" (-EFAULT), "0" (oparg), "1" (0))
10025
10026 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10027+ typecheck(u32 __user *, uaddr); \
10028 asm volatile("1:\tmovl %2, %0\n" \
10029 "\tmovl\t%0, %3\n" \
10030 "\t" insn "\n" \
10031@@ -34,10 +36,10 @@
10032 _ASM_EXTABLE(1b, 4b) \
10033 _ASM_EXTABLE(2b, 4b) \
10034 : "=&a" (oldval), "=&r" (ret), \
10035- "+m" (*uaddr), "=&r" (tem) \
10036+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10037 : "r" (oparg), "i" (-EFAULT), "1" (0))
10038
10039-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10040+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10041 {
10042 int op = (encoded_op >> 28) & 7;
10043 int cmp = (encoded_op >> 24) & 15;
10044@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10045
10046 switch (op) {
10047 case FUTEX_OP_SET:
10048- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10049+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10050 break;
10051 case FUTEX_OP_ADD:
10052- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10053+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10054 uaddr, oparg);
10055 break;
10056 case FUTEX_OP_OR:
10057@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10058 return ret;
10059 }
10060
10061-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10062+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10063 int newval)
10064 {
10065
10066@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10067 return -ENOSYS;
10068 #endif
10069
10070- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10071+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10072 return -EFAULT;
10073
10074- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10075+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10076 "2:\t.section .fixup, \"ax\"\n"
10077 "3:\tmov %2, %0\n"
10078 "\tjmp 2b\n"
10079 "\t.previous\n"
10080 _ASM_EXTABLE(1b, 3b)
10081- : "=a" (oldval), "+m" (*uaddr)
10082+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10083 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10084 : "memory"
10085 );
10086diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10087index ba180d9..3bad351 100644
10088--- a/arch/x86/include/asm/hw_irq.h
10089+++ b/arch/x86/include/asm/hw_irq.h
10090@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10091 extern void enable_IO_APIC(void);
10092
10093 /* Statistics */
10094-extern atomic_t irq_err_count;
10095-extern atomic_t irq_mis_count;
10096+extern atomic_unchecked_t irq_err_count;
10097+extern atomic_unchecked_t irq_mis_count;
10098
10099 /* EISA */
10100 extern void eisa_set_level_irq(unsigned int irq);
10101diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10102index 0b20bbb..4cb1396 100644
10103--- a/arch/x86/include/asm/i387.h
10104+++ b/arch/x86/include/asm/i387.h
10105@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10106 {
10107 int err;
10108
10109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10110+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10111+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10112+#endif
10113+
10114 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10115 "2:\n"
10116 ".section .fixup,\"ax\"\n"
10117@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10118 {
10119 int err;
10120
10121+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10122+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10123+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10124+#endif
10125+
10126 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10127 "2:\n"
10128 ".section .fixup,\"ax\"\n"
10129@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10130 }
10131
10132 /* We need a safe address that is cheap to find and that is already
10133- in L1 during context switch. The best choices are unfortunately
10134- different for UP and SMP */
10135-#ifdef CONFIG_SMP
10136-#define safe_address (__per_cpu_offset[0])
10137-#else
10138-#define safe_address (kstat_cpu(0).cpustat.user)
10139-#endif
10140+ in L1 during context switch. */
10141+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10142
10143 /*
10144 * These must be called with preempt disabled
10145@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10146 struct thread_info *me = current_thread_info();
10147 preempt_disable();
10148 if (me->status & TS_USEDFPU)
10149- __save_init_fpu(me->task);
10150+ __save_init_fpu(current);
10151 else
10152 clts();
10153 }
10154diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10155index a299900..15c5410 100644
10156--- a/arch/x86/include/asm/io_32.h
10157+++ b/arch/x86/include/asm/io_32.h
10158@@ -3,6 +3,7 @@
10159
10160 #include <linux/string.h>
10161 #include <linux/compiler.h>
10162+#include <asm/processor.h>
10163
10164 /*
10165 * This file contains the definitions for the x86 IO instructions
10166@@ -42,6 +43,17 @@
10167
10168 #ifdef __KERNEL__
10169
10170+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10171+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10172+{
10173+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10174+}
10175+
10176+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10177+{
10178+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10179+}
10180+
10181 #include <asm-generic/iomap.h>
10182
10183 #include <linux/vmalloc.h>
10184diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10185index 2440678..c158b88 100644
10186--- a/arch/x86/include/asm/io_64.h
10187+++ b/arch/x86/include/asm/io_64.h
10188@@ -140,6 +140,17 @@ __OUTS(l)
10189
10190 #include <linux/vmalloc.h>
10191
10192+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10193+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10194+{
10195+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10196+}
10197+
10198+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10199+{
10200+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10201+}
10202+
10203 #include <asm-generic/iomap.h>
10204
10205 void __memcpy_fromio(void *, unsigned long, unsigned);
10206diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10207index fd6d21b..8b13915 100644
10208--- a/arch/x86/include/asm/iommu.h
10209+++ b/arch/x86/include/asm/iommu.h
10210@@ -3,7 +3,7 @@
10211
10212 extern void pci_iommu_shutdown(void);
10213 extern void no_iommu_init(void);
10214-extern struct dma_map_ops nommu_dma_ops;
10215+extern const struct dma_map_ops nommu_dma_ops;
10216 extern int force_iommu, no_iommu;
10217 extern int iommu_detected;
10218 extern int iommu_pass_through;
10219diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10220index 9e2b952..557206e 100644
10221--- a/arch/x86/include/asm/irqflags.h
10222+++ b/arch/x86/include/asm/irqflags.h
10223@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10224 sti; \
10225 sysexit
10226
10227+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10228+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10229+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10230+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10231+
10232 #else
10233 #define INTERRUPT_RETURN iret
10234 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10235diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10236index 4fe681d..bb6d40c 100644
10237--- a/arch/x86/include/asm/kprobes.h
10238+++ b/arch/x86/include/asm/kprobes.h
10239@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10240 #define BREAKPOINT_INSTRUCTION 0xcc
10241 #define RELATIVEJUMP_INSTRUCTION 0xe9
10242 #define MAX_INSN_SIZE 16
10243-#define MAX_STACK_SIZE 64
10244-#define MIN_STACK_SIZE(ADDR) \
10245- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10246- THREAD_SIZE - (unsigned long)(ADDR))) \
10247- ? (MAX_STACK_SIZE) \
10248- : (((unsigned long)current_thread_info()) + \
10249- THREAD_SIZE - (unsigned long)(ADDR)))
10250+#define MAX_STACK_SIZE 64UL
10251+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10252
10253 #define flush_insn_slot(p) do { } while (0)
10254
10255diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10256index 08bc2ff..2e88d1f 100644
10257--- a/arch/x86/include/asm/kvm_host.h
10258+++ b/arch/x86/include/asm/kvm_host.h
10259@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10260 bool (*gb_page_enable)(void);
10261
10262 const struct trace_print_flags *exit_reasons_str;
10263-};
10264+} __do_const;
10265
10266-extern struct kvm_x86_ops *kvm_x86_ops;
10267+extern const struct kvm_x86_ops *kvm_x86_ops;
10268
10269 int kvm_mmu_module_init(void);
10270 void kvm_mmu_module_exit(void);
10271diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10272index 47b9b6f..815aaa1 100644
10273--- a/arch/x86/include/asm/local.h
10274+++ b/arch/x86/include/asm/local.h
10275@@ -18,26 +18,58 @@ typedef struct {
10276
10277 static inline void local_inc(local_t *l)
10278 {
10279- asm volatile(_ASM_INC "%0"
10280+ asm volatile(_ASM_INC "%0\n"
10281+
10282+#ifdef CONFIG_PAX_REFCOUNT
10283+ "jno 0f\n"
10284+ _ASM_DEC "%0\n"
10285+ "int $4\n0:\n"
10286+ _ASM_EXTABLE(0b, 0b)
10287+#endif
10288+
10289 : "+m" (l->a.counter));
10290 }
10291
10292 static inline void local_dec(local_t *l)
10293 {
10294- asm volatile(_ASM_DEC "%0"
10295+ asm volatile(_ASM_DEC "%0\n"
10296+
10297+#ifdef CONFIG_PAX_REFCOUNT
10298+ "jno 0f\n"
10299+ _ASM_INC "%0\n"
10300+ "int $4\n0:\n"
10301+ _ASM_EXTABLE(0b, 0b)
10302+#endif
10303+
10304 : "+m" (l->a.counter));
10305 }
10306
10307 static inline void local_add(long i, local_t *l)
10308 {
10309- asm volatile(_ASM_ADD "%1,%0"
10310+ asm volatile(_ASM_ADD "%1,%0\n"
10311+
10312+#ifdef CONFIG_PAX_REFCOUNT
10313+ "jno 0f\n"
10314+ _ASM_SUB "%1,%0\n"
10315+ "int $4\n0:\n"
10316+ _ASM_EXTABLE(0b, 0b)
10317+#endif
10318+
10319 : "+m" (l->a.counter)
10320 : "ir" (i));
10321 }
10322
10323 static inline void local_sub(long i, local_t *l)
10324 {
10325- asm volatile(_ASM_SUB "%1,%0"
10326+ asm volatile(_ASM_SUB "%1,%0\n"
10327+
10328+#ifdef CONFIG_PAX_REFCOUNT
10329+ "jno 0f\n"
10330+ _ASM_ADD "%1,%0\n"
10331+ "int $4\n0:\n"
10332+ _ASM_EXTABLE(0b, 0b)
10333+#endif
10334+
10335 : "+m" (l->a.counter)
10336 : "ir" (i));
10337 }
10338@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10339 {
10340 unsigned char c;
10341
10342- asm volatile(_ASM_SUB "%2,%0; sete %1"
10343+ asm volatile(_ASM_SUB "%2,%0\n"
10344+
10345+#ifdef CONFIG_PAX_REFCOUNT
10346+ "jno 0f\n"
10347+ _ASM_ADD "%2,%0\n"
10348+ "int $4\n0:\n"
10349+ _ASM_EXTABLE(0b, 0b)
10350+#endif
10351+
10352+ "sete %1\n"
10353 : "+m" (l->a.counter), "=qm" (c)
10354 : "ir" (i) : "memory");
10355 return c;
10356@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10357 {
10358 unsigned char c;
10359
10360- asm volatile(_ASM_DEC "%0; sete %1"
10361+ asm volatile(_ASM_DEC "%0\n"
10362+
10363+#ifdef CONFIG_PAX_REFCOUNT
10364+ "jno 0f\n"
10365+ _ASM_INC "%0\n"
10366+ "int $4\n0:\n"
10367+ _ASM_EXTABLE(0b, 0b)
10368+#endif
10369+
10370+ "sete %1\n"
10371 : "+m" (l->a.counter), "=qm" (c)
10372 : : "memory");
10373 return c != 0;
10374@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10375 {
10376 unsigned char c;
10377
10378- asm volatile(_ASM_INC "%0; sete %1"
10379+ asm volatile(_ASM_INC "%0\n"
10380+
10381+#ifdef CONFIG_PAX_REFCOUNT
10382+ "jno 0f\n"
10383+ _ASM_DEC "%0\n"
10384+ "int $4\n0:\n"
10385+ _ASM_EXTABLE(0b, 0b)
10386+#endif
10387+
10388+ "sete %1\n"
10389 : "+m" (l->a.counter), "=qm" (c)
10390 : : "memory");
10391 return c != 0;
10392@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10393 {
10394 unsigned char c;
10395
10396- asm volatile(_ASM_ADD "%2,%0; sets %1"
10397+ asm volatile(_ASM_ADD "%2,%0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ _ASM_SUB "%2,%0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ "sets %1\n"
10407 : "+m" (l->a.counter), "=qm" (c)
10408 : "ir" (i) : "memory");
10409 return c;
10410@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10411 #endif
10412 /* Modern 486+ processor */
10413 __i = i;
10414- asm volatile(_ASM_XADD "%0, %1;"
10415+ asm volatile(_ASM_XADD "%0, %1\n"
10416+
10417+#ifdef CONFIG_PAX_REFCOUNT
10418+ "jno 0f\n"
10419+ _ASM_MOV "%0,%1\n"
10420+ "int $4\n0:\n"
10421+ _ASM_EXTABLE(0b, 0b)
10422+#endif
10423+
10424 : "+r" (i), "+m" (l->a.counter)
10425 : : "memory");
10426 return i + __i;
10427diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10428index ef51b50..514ba37 100644
10429--- a/arch/x86/include/asm/microcode.h
10430+++ b/arch/x86/include/asm/microcode.h
10431@@ -12,13 +12,13 @@ struct device;
10432 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10433
10434 struct microcode_ops {
10435- enum ucode_state (*request_microcode_user) (int cpu,
10436+ enum ucode_state (* const request_microcode_user) (int cpu,
10437 const void __user *buf, size_t size);
10438
10439- enum ucode_state (*request_microcode_fw) (int cpu,
10440+ enum ucode_state (* const request_microcode_fw) (int cpu,
10441 struct device *device);
10442
10443- void (*microcode_fini_cpu) (int cpu);
10444+ void (* const microcode_fini_cpu) (int cpu);
10445
10446 /*
10447 * The generic 'microcode_core' part guarantees that
10448@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10449 extern struct ucode_cpu_info ucode_cpu_info[];
10450
10451 #ifdef CONFIG_MICROCODE_INTEL
10452-extern struct microcode_ops * __init init_intel_microcode(void);
10453+extern const struct microcode_ops * __init init_intel_microcode(void);
10454 #else
10455-static inline struct microcode_ops * __init init_intel_microcode(void)
10456+static inline const struct microcode_ops * __init init_intel_microcode(void)
10457 {
10458 return NULL;
10459 }
10460 #endif /* CONFIG_MICROCODE_INTEL */
10461
10462 #ifdef CONFIG_MICROCODE_AMD
10463-extern struct microcode_ops * __init init_amd_microcode(void);
10464+extern const struct microcode_ops * __init init_amd_microcode(void);
10465 #else
10466-static inline struct microcode_ops * __init init_amd_microcode(void)
10467+static inline const struct microcode_ops * __init init_amd_microcode(void)
10468 {
10469 return NULL;
10470 }
10471diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10472index 593e51d..fa69c9a 100644
10473--- a/arch/x86/include/asm/mman.h
10474+++ b/arch/x86/include/asm/mman.h
10475@@ -5,4 +5,14 @@
10476
10477 #include <asm-generic/mman.h>
10478
10479+#ifdef __KERNEL__
10480+#ifndef __ASSEMBLY__
10481+#ifdef CONFIG_X86_32
10482+#define arch_mmap_check i386_mmap_check
10483+int i386_mmap_check(unsigned long addr, unsigned long len,
10484+ unsigned long flags);
10485+#endif
10486+#endif
10487+#endif
10488+
10489 #endif /* _ASM_X86_MMAN_H */
10490diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10491index 80a1dee..239c67d 100644
10492--- a/arch/x86/include/asm/mmu.h
10493+++ b/arch/x86/include/asm/mmu.h
10494@@ -9,10 +9,23 @@
10495 * we put the segment information here.
10496 */
10497 typedef struct {
10498- void *ldt;
10499+ struct desc_struct *ldt;
10500 int size;
10501 struct mutex lock;
10502- void *vdso;
10503+ unsigned long vdso;
10504+
10505+#ifdef CONFIG_X86_32
10506+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10507+ unsigned long user_cs_base;
10508+ unsigned long user_cs_limit;
10509+
10510+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10511+ cpumask_t cpu_user_cs_mask;
10512+#endif
10513+
10514+#endif
10515+#endif
10516+
10517 } mm_context_t;
10518
10519 #ifdef CONFIG_SMP
10520diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10521index 8b5393e..8143173 100644
10522--- a/arch/x86/include/asm/mmu_context.h
10523+++ b/arch/x86/include/asm/mmu_context.h
10524@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10525
10526 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10527 {
10528+
10529+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10530+ unsigned int i;
10531+ pgd_t *pgd;
10532+
10533+ pax_open_kernel();
10534+ pgd = get_cpu_pgd(smp_processor_id());
10535+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10536+ set_pgd_batched(pgd+i, native_make_pgd(0));
10537+ pax_close_kernel();
10538+#endif
10539+
10540 #ifdef CONFIG_SMP
10541 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10542 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10543@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10544 struct task_struct *tsk)
10545 {
10546 unsigned cpu = smp_processor_id();
10547+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10548+ int tlbstate = TLBSTATE_OK;
10549+#endif
10550
10551 if (likely(prev != next)) {
10552 #ifdef CONFIG_SMP
10553+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10554+ tlbstate = percpu_read(cpu_tlbstate.state);
10555+#endif
10556 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10557 percpu_write(cpu_tlbstate.active_mm, next);
10558 #endif
10559 cpumask_set_cpu(cpu, mm_cpumask(next));
10560
10561 /* Re-load page tables */
10562+#ifdef CONFIG_PAX_PER_CPU_PGD
10563+ pax_open_kernel();
10564+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10565+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10566+ pax_close_kernel();
10567+ load_cr3(get_cpu_pgd(cpu));
10568+#else
10569 load_cr3(next->pgd);
10570+#endif
10571
10572 /* stop flush ipis for the previous mm */
10573 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10574@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10575 */
10576 if (unlikely(prev->context.ldt != next->context.ldt))
10577 load_LDT_nolock(&next->context);
10578- }
10579+
10580+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10581+ if (!nx_enabled) {
10582+ smp_mb__before_clear_bit();
10583+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10584+ smp_mb__after_clear_bit();
10585+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10586+ }
10587+#endif
10588+
10589+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10590+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10591+ prev->context.user_cs_limit != next->context.user_cs_limit))
10592+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10593 #ifdef CONFIG_SMP
10594+ else if (unlikely(tlbstate != TLBSTATE_OK))
10595+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10596+#endif
10597+#endif
10598+
10599+ }
10600 else {
10601+
10602+#ifdef CONFIG_PAX_PER_CPU_PGD
10603+ pax_open_kernel();
10604+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10605+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10606+ pax_close_kernel();
10607+ load_cr3(get_cpu_pgd(cpu));
10608+#endif
10609+
10610+#ifdef CONFIG_SMP
10611 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10612 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10613
10614@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10615 * tlb flush IPI delivery. We must reload CR3
10616 * to make sure to use no freed page tables.
10617 */
10618+
10619+#ifndef CONFIG_PAX_PER_CPU_PGD
10620 load_cr3(next->pgd);
10621+#endif
10622+
10623 load_LDT_nolock(&next->context);
10624+
10625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10626+ if (!nx_enabled)
10627+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10628+#endif
10629+
10630+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10631+#ifdef CONFIG_PAX_PAGEEXEC
10632+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10633+#endif
10634+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10635+#endif
10636+
10637 }
10638+#endif
10639 }
10640-#endif
10641 }
10642
10643 #define activate_mm(prev, next) \
10644diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10645index 3e2ce58..caaf478 100644
10646--- a/arch/x86/include/asm/module.h
10647+++ b/arch/x86/include/asm/module.h
10648@@ -5,6 +5,7 @@
10649
10650 #ifdef CONFIG_X86_64
10651 /* X86_64 does not define MODULE_PROC_FAMILY */
10652+#define MODULE_PROC_FAMILY ""
10653 #elif defined CONFIG_M386
10654 #define MODULE_PROC_FAMILY "386 "
10655 #elif defined CONFIG_M486
10656@@ -59,13 +60,26 @@
10657 #error unknown processor family
10658 #endif
10659
10660-#ifdef CONFIG_X86_32
10661-# ifdef CONFIG_4KSTACKS
10662-# define MODULE_STACKSIZE "4KSTACKS "
10663-# else
10664-# define MODULE_STACKSIZE ""
10665-# endif
10666-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10667+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10668+#define MODULE_STACKSIZE "4KSTACKS "
10669+#else
10670+#define MODULE_STACKSIZE ""
10671 #endif
10672
10673+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10674+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10675+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10676+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10677+#else
10678+#define MODULE_PAX_KERNEXEC ""
10679+#endif
10680+
10681+#ifdef CONFIG_PAX_MEMORY_UDEREF
10682+#define MODULE_PAX_UDEREF "UDEREF "
10683+#else
10684+#define MODULE_PAX_UDEREF ""
10685+#endif
10686+
10687+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10688+
10689 #endif /* _ASM_X86_MODULE_H */
10690diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10691index 7639dbf..e08a58c 100644
10692--- a/arch/x86/include/asm/page_64_types.h
10693+++ b/arch/x86/include/asm/page_64_types.h
10694@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10695
10696 /* duplicated to the one in bootmem.h */
10697 extern unsigned long max_pfn;
10698-extern unsigned long phys_base;
10699+extern const unsigned long phys_base;
10700
10701 extern unsigned long __phys_addr(unsigned long);
10702 #define __phys_reloc_hide(x) (x)
10703diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10704index efb3899..ef30687 100644
10705--- a/arch/x86/include/asm/paravirt.h
10706+++ b/arch/x86/include/asm/paravirt.h
10707@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10708 val);
10709 }
10710
10711+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10712+{
10713+ pgdval_t val = native_pgd_val(pgd);
10714+
10715+ if (sizeof(pgdval_t) > sizeof(long))
10716+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10717+ val, (u64)val >> 32);
10718+ else
10719+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10720+ val);
10721+}
10722+
10723 static inline void pgd_clear(pgd_t *pgdp)
10724 {
10725 set_pgd(pgdp, __pgd(0));
10726@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10727 pv_mmu_ops.set_fixmap(idx, phys, flags);
10728 }
10729
10730+#ifdef CONFIG_PAX_KERNEXEC
10731+static inline unsigned long pax_open_kernel(void)
10732+{
10733+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10734+}
10735+
10736+static inline unsigned long pax_close_kernel(void)
10737+{
10738+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10739+}
10740+#else
10741+static inline unsigned long pax_open_kernel(void) { return 0; }
10742+static inline unsigned long pax_close_kernel(void) { return 0; }
10743+#endif
10744+
10745 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10746
10747 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10748@@ -945,7 +972,7 @@ extern void default_banner(void);
10749
10750 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10751 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10752-#define PARA_INDIRECT(addr) *%cs:addr
10753+#define PARA_INDIRECT(addr) *%ss:addr
10754 #endif
10755
10756 #define INTERRUPT_RETURN \
10757@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10758 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10759 CLBR_NONE, \
10760 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10761+
10762+#define GET_CR0_INTO_RDI \
10763+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10764+ mov %rax,%rdi
10765+
10766+#define SET_RDI_INTO_CR0 \
10767+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10768+
10769+#define GET_CR3_INTO_RDI \
10770+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10771+ mov %rax,%rdi
10772+
10773+#define SET_RDI_INTO_CR3 \
10774+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10775+
10776 #endif /* CONFIG_X86_32 */
10777
10778 #endif /* __ASSEMBLY__ */
10779diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10780index 9357473..aeb2de5 100644
10781--- a/arch/x86/include/asm/paravirt_types.h
10782+++ b/arch/x86/include/asm/paravirt_types.h
10783@@ -78,19 +78,19 @@ struct pv_init_ops {
10784 */
10785 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10786 unsigned long addr, unsigned len);
10787-};
10788+} __no_const;
10789
10790
10791 struct pv_lazy_ops {
10792 /* Set deferred update mode, used for batching operations. */
10793 void (*enter)(void);
10794 void (*leave)(void);
10795-};
10796+} __no_const;
10797
10798 struct pv_time_ops {
10799 unsigned long long (*sched_clock)(void);
10800 unsigned long (*get_tsc_khz)(void);
10801-};
10802+} __no_const;
10803
10804 struct pv_cpu_ops {
10805 /* hooks for various privileged instructions */
10806@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10807
10808 void (*start_context_switch)(struct task_struct *prev);
10809 void (*end_context_switch)(struct task_struct *next);
10810-};
10811+} __no_const;
10812
10813 struct pv_irq_ops {
10814 /*
10815@@ -217,7 +217,7 @@ struct pv_apic_ops {
10816 unsigned long start_eip,
10817 unsigned long start_esp);
10818 #endif
10819-};
10820+} __no_const;
10821
10822 struct pv_mmu_ops {
10823 unsigned long (*read_cr2)(void);
10824@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10825 struct paravirt_callee_save make_pud;
10826
10827 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10828+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10829 #endif /* PAGETABLE_LEVELS == 4 */
10830 #endif /* PAGETABLE_LEVELS >= 3 */
10831
10832@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10833 an mfn. We can tell which is which from the index. */
10834 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10835 phys_addr_t phys, pgprot_t flags);
10836+
10837+#ifdef CONFIG_PAX_KERNEXEC
10838+ unsigned long (*pax_open_kernel)(void);
10839+ unsigned long (*pax_close_kernel)(void);
10840+#endif
10841+
10842 };
10843
10844 struct raw_spinlock;
10845@@ -326,7 +333,7 @@ struct pv_lock_ops {
10846 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10847 int (*spin_trylock)(struct raw_spinlock *lock);
10848 void (*spin_unlock)(struct raw_spinlock *lock);
10849-};
10850+} __no_const;
10851
10852 /* This contains all the paravirt structures: we get a convenient
10853 * number for each function using the offset which we use to indicate
10854diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10855index b399988..3f47c38 100644
10856--- a/arch/x86/include/asm/pci_x86.h
10857+++ b/arch/x86/include/asm/pci_x86.h
10858@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10859 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10860
10861 struct pci_raw_ops {
10862- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10863+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10864 int reg, int len, u32 *val);
10865- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10866+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10867 int reg, int len, u32 val);
10868 };
10869
10870-extern struct pci_raw_ops *raw_pci_ops;
10871-extern struct pci_raw_ops *raw_pci_ext_ops;
10872+extern const struct pci_raw_ops *raw_pci_ops;
10873+extern const struct pci_raw_ops *raw_pci_ext_ops;
10874
10875-extern struct pci_raw_ops pci_direct_conf1;
10876+extern const struct pci_raw_ops pci_direct_conf1;
10877 extern bool port_cf9_safe;
10878
10879 /* arch_initcall level */
10880diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10881index b65a36d..50345a4 100644
10882--- a/arch/x86/include/asm/percpu.h
10883+++ b/arch/x86/include/asm/percpu.h
10884@@ -78,6 +78,7 @@ do { \
10885 if (0) { \
10886 T__ tmp__; \
10887 tmp__ = (val); \
10888+ (void)tmp__; \
10889 } \
10890 switch (sizeof(var)) { \
10891 case 1: \
10892diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10893index 271de94..ef944d6 100644
10894--- a/arch/x86/include/asm/pgalloc.h
10895+++ b/arch/x86/include/asm/pgalloc.h
10896@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10897 pmd_t *pmd, pte_t *pte)
10898 {
10899 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10900+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10901+}
10902+
10903+static inline void pmd_populate_user(struct mm_struct *mm,
10904+ pmd_t *pmd, pte_t *pte)
10905+{
10906+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10907 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10908 }
10909
10910diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10911index 2334982..70bc412 100644
10912--- a/arch/x86/include/asm/pgtable-2level.h
10913+++ b/arch/x86/include/asm/pgtable-2level.h
10914@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10915
10916 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10917 {
10918+ pax_open_kernel();
10919 *pmdp = pmd;
10920+ pax_close_kernel();
10921 }
10922
10923 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10924diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10925index 33927d2..ccde329 100644
10926--- a/arch/x86/include/asm/pgtable-3level.h
10927+++ b/arch/x86/include/asm/pgtable-3level.h
10928@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10929
10930 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10931 {
10932+ pax_open_kernel();
10933 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10934+ pax_close_kernel();
10935 }
10936
10937 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10938 {
10939+ pax_open_kernel();
10940 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10941+ pax_close_kernel();
10942 }
10943
10944 /*
10945diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10946index af6fd36..867ff74 100644
10947--- a/arch/x86/include/asm/pgtable.h
10948+++ b/arch/x86/include/asm/pgtable.h
10949@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10950
10951 #ifndef __PAGETABLE_PUD_FOLDED
10952 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10953+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10954 #define pgd_clear(pgd) native_pgd_clear(pgd)
10955 #endif
10956
10957@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10958
10959 #define arch_end_context_switch(prev) do {} while(0)
10960
10961+#define pax_open_kernel() native_pax_open_kernel()
10962+#define pax_close_kernel() native_pax_close_kernel()
10963 #endif /* CONFIG_PARAVIRT */
10964
10965+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10966+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10967+
10968+#ifdef CONFIG_PAX_KERNEXEC
10969+static inline unsigned long native_pax_open_kernel(void)
10970+{
10971+ unsigned long cr0;
10972+
10973+ preempt_disable();
10974+ barrier();
10975+ cr0 = read_cr0() ^ X86_CR0_WP;
10976+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
10977+ write_cr0(cr0);
10978+ return cr0 ^ X86_CR0_WP;
10979+}
10980+
10981+static inline unsigned long native_pax_close_kernel(void)
10982+{
10983+ unsigned long cr0;
10984+
10985+ cr0 = read_cr0() ^ X86_CR0_WP;
10986+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10987+ write_cr0(cr0);
10988+ barrier();
10989+ preempt_enable_no_resched();
10990+ return cr0 ^ X86_CR0_WP;
10991+}
10992+#else
10993+static inline unsigned long native_pax_open_kernel(void) { return 0; }
10994+static inline unsigned long native_pax_close_kernel(void) { return 0; }
10995+#endif
10996+
10997 /*
10998 * The following only work if pte_present() is true.
10999 * Undefined behaviour if not..
11000 */
11001+static inline int pte_user(pte_t pte)
11002+{
11003+ return pte_val(pte) & _PAGE_USER;
11004+}
11005+
11006 static inline int pte_dirty(pte_t pte)
11007 {
11008 return pte_flags(pte) & _PAGE_DIRTY;
11009@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11010 return pte_clear_flags(pte, _PAGE_RW);
11011 }
11012
11013+static inline pte_t pte_mkread(pte_t pte)
11014+{
11015+ return __pte(pte_val(pte) | _PAGE_USER);
11016+}
11017+
11018 static inline pte_t pte_mkexec(pte_t pte)
11019 {
11020- return pte_clear_flags(pte, _PAGE_NX);
11021+#ifdef CONFIG_X86_PAE
11022+ if (__supported_pte_mask & _PAGE_NX)
11023+ return pte_clear_flags(pte, _PAGE_NX);
11024+ else
11025+#endif
11026+ return pte_set_flags(pte, _PAGE_USER);
11027+}
11028+
11029+static inline pte_t pte_exprotect(pte_t pte)
11030+{
11031+#ifdef CONFIG_X86_PAE
11032+ if (__supported_pte_mask & _PAGE_NX)
11033+ return pte_set_flags(pte, _PAGE_NX);
11034+ else
11035+#endif
11036+ return pte_clear_flags(pte, _PAGE_USER);
11037 }
11038
11039 static inline pte_t pte_mkdirty(pte_t pte)
11040@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11041 #endif
11042
11043 #ifndef __ASSEMBLY__
11044+
11045+#ifdef CONFIG_PAX_PER_CPU_PGD
11046+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11047+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11048+{
11049+ return cpu_pgd[cpu];
11050+}
11051+#endif
11052+
11053 #include <linux/mm_types.h>
11054
11055 static inline int pte_none(pte_t pte)
11056@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11057
11058 static inline int pgd_bad(pgd_t pgd)
11059 {
11060- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11061+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11062 }
11063
11064 static inline int pgd_none(pgd_t pgd)
11065@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11066 * pgd_offset() returns a (pgd_t *)
11067 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11068 */
11069-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11070+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11071+
11072+#ifdef CONFIG_PAX_PER_CPU_PGD
11073+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11074+#endif
11075+
11076 /*
11077 * a shortcut which implies the use of the kernel's pgd, instead
11078 * of a process's
11079@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11080 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11081 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11082
11083+#ifdef CONFIG_X86_32
11084+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11085+#else
11086+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11087+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11088+
11089+#ifdef CONFIG_PAX_MEMORY_UDEREF
11090+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11091+#else
11092+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11093+#endif
11094+
11095+#endif
11096+
11097 #ifndef __ASSEMBLY__
11098
11099 extern int direct_gbpages;
11100@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11101 * dst and src can be on the same page, but the range must not overlap,
11102 * and must not cross a page boundary.
11103 */
11104-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11105+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11106 {
11107- memcpy(dst, src, count * sizeof(pgd_t));
11108+ pax_open_kernel();
11109+ while (count--)
11110+ *dst++ = *src++;
11111+ pax_close_kernel();
11112 }
11113
11114+#ifdef CONFIG_PAX_PER_CPU_PGD
11115+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11116+#endif
11117+
11118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11119+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11120+#else
11121+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11122+#endif
11123
11124 #include <asm-generic/pgtable.h>
11125 #endif /* __ASSEMBLY__ */
11126diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11127index 750f1bf..971e839 100644
11128--- a/arch/x86/include/asm/pgtable_32.h
11129+++ b/arch/x86/include/asm/pgtable_32.h
11130@@ -26,9 +26,6 @@
11131 struct mm_struct;
11132 struct vm_area_struct;
11133
11134-extern pgd_t swapper_pg_dir[1024];
11135-extern pgd_t trampoline_pg_dir[1024];
11136-
11137 static inline void pgtable_cache_init(void) { }
11138 static inline void check_pgt_cache(void) { }
11139 void paging_init(void);
11140@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11141 # include <asm/pgtable-2level.h>
11142 #endif
11143
11144+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11145+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11146+#ifdef CONFIG_X86_PAE
11147+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11148+#endif
11149+
11150 #if defined(CONFIG_HIGHPTE)
11151 #define __KM_PTE \
11152 (in_nmi() ? KM_NMI_PTE : \
11153@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11154 /* Clear a kernel PTE and flush it from the TLB */
11155 #define kpte_clear_flush(ptep, vaddr) \
11156 do { \
11157+ pax_open_kernel(); \
11158 pte_clear(&init_mm, (vaddr), (ptep)); \
11159+ pax_close_kernel(); \
11160 __flush_tlb_one((vaddr)); \
11161 } while (0)
11162
11163@@ -85,6 +90,9 @@ do { \
11164
11165 #endif /* !__ASSEMBLY__ */
11166
11167+#define HAVE_ARCH_UNMAPPED_AREA
11168+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11169+
11170 /*
11171 * kern_addr_valid() is (1) for FLATMEM and (0) for
11172 * SPARSEMEM and DISCONTIGMEM
11173diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11174index 5e67c15..12d5c47 100644
11175--- a/arch/x86/include/asm/pgtable_32_types.h
11176+++ b/arch/x86/include/asm/pgtable_32_types.h
11177@@ -8,7 +8,7 @@
11178 */
11179 #ifdef CONFIG_X86_PAE
11180 # include <asm/pgtable-3level_types.h>
11181-# define PMD_SIZE (1UL << PMD_SHIFT)
11182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11183 # define PMD_MASK (~(PMD_SIZE - 1))
11184 #else
11185 # include <asm/pgtable-2level_types.h>
11186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11188 #endif
11189
11190+#ifdef CONFIG_PAX_KERNEXEC
11191+#ifndef __ASSEMBLY__
11192+extern unsigned char MODULES_EXEC_VADDR[];
11193+extern unsigned char MODULES_EXEC_END[];
11194+#endif
11195+#include <asm/boot.h>
11196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11198+#else
11199+#define ktla_ktva(addr) (addr)
11200+#define ktva_ktla(addr) (addr)
11201+#endif
11202+
11203 #define MODULES_VADDR VMALLOC_START
11204 #define MODULES_END VMALLOC_END
11205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11206diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11207index c57a301..312bdb4 100644
11208--- a/arch/x86/include/asm/pgtable_64.h
11209+++ b/arch/x86/include/asm/pgtable_64.h
11210@@ -16,10 +16,13 @@
11211
11212 extern pud_t level3_kernel_pgt[512];
11213 extern pud_t level3_ident_pgt[512];
11214+extern pud_t level3_vmalloc_pgt[512];
11215+extern pud_t level3_vmemmap_pgt[512];
11216+extern pud_t level2_vmemmap_pgt[512];
11217 extern pmd_t level2_kernel_pgt[512];
11218 extern pmd_t level2_fixmap_pgt[512];
11219-extern pmd_t level2_ident_pgt[512];
11220-extern pgd_t init_level4_pgt[];
11221+extern pmd_t level2_ident_pgt[512*2];
11222+extern pgd_t init_level4_pgt[512];
11223
11224 #define swapper_pg_dir init_level4_pgt
11225
11226@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11227
11228 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11229 {
11230+ pax_open_kernel();
11231 *pmdp = pmd;
11232+ pax_close_kernel();
11233 }
11234
11235 static inline void native_pmd_clear(pmd_t *pmd)
11236@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_t *pud)
11237
11238 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11239 {
11240+ pax_open_kernel();
11241+ *pgdp = pgd;
11242+ pax_close_kernel();
11243+}
11244+
11245+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11246+{
11247 *pgdp = pgd;
11248 }
11249
11250diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11251index 766ea16..5b96cb3 100644
11252--- a/arch/x86/include/asm/pgtable_64_types.h
11253+++ b/arch/x86/include/asm/pgtable_64_types.h
11254@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11255 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11256 #define MODULES_END _AC(0xffffffffff000000, UL)
11257 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11258+#define MODULES_EXEC_VADDR MODULES_VADDR
11259+#define MODULES_EXEC_END MODULES_END
11260+
11261+#define ktla_ktva(addr) (addr)
11262+#define ktva_ktla(addr) (addr)
11263
11264 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11265diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11266index d1f4a76..2f46ba1 100644
11267--- a/arch/x86/include/asm/pgtable_types.h
11268+++ b/arch/x86/include/asm/pgtable_types.h
11269@@ -16,12 +16,11 @@
11270 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11271 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11272 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11273-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11274+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11275 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11276 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11277 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11278-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11279-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11280+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11281 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11282
11283 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11284@@ -39,7 +38,6 @@
11285 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11286 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11287 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11288-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11289 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11290 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11291 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11292@@ -55,8 +53,10 @@
11293
11294 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11295 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11296-#else
11297+#elif defined(CONFIG_KMEMCHECK)
11298 #define _PAGE_NX (_AT(pteval_t, 0))
11299+#else
11300+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11301 #endif
11302
11303 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11304@@ -93,6 +93,9 @@
11305 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11306 _PAGE_ACCESSED)
11307
11308+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11309+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11310+
11311 #define __PAGE_KERNEL_EXEC \
11312 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11313 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11314@@ -103,8 +106,8 @@
11315 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11316 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11317 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11318-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11319-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11320+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11321+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11322 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11323 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11324 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11325@@ -163,8 +166,8 @@
11326 * bits are combined, this will alow user to access the high address mapped
11327 * VDSO in the presence of CONFIG_COMPAT_VDSO
11328 */
11329-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11330-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11331+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11332+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11333 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11334 #endif
11335
11336@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11337 {
11338 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11339 }
11340+#endif
11341
11342+#if PAGETABLE_LEVELS == 3
11343+#include <asm-generic/pgtable-nopud.h>
11344+#endif
11345+
11346+#if PAGETABLE_LEVELS == 2
11347+#include <asm-generic/pgtable-nopmd.h>
11348+#endif
11349+
11350+#ifndef __ASSEMBLY__
11351 #if PAGETABLE_LEVELS > 3
11352 typedef struct { pudval_t pud; } pud_t;
11353
11354@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11355 return pud.pud;
11356 }
11357 #else
11358-#include <asm-generic/pgtable-nopud.h>
11359-
11360 static inline pudval_t native_pud_val(pud_t pud)
11361 {
11362 return native_pgd_val(pud.pgd);
11363@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11364 return pmd.pmd;
11365 }
11366 #else
11367-#include <asm-generic/pgtable-nopmd.h>
11368-
11369 static inline pmdval_t native_pmd_val(pmd_t pmd)
11370 {
11371 return native_pgd_val(pmd.pud.pgd);
11372@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11373
11374 extern pteval_t __supported_pte_mask;
11375 extern void set_nx(void);
11376+
11377+#ifdef CONFIG_X86_32
11378+#ifdef CONFIG_X86_PAE
11379 extern int nx_enabled;
11380+#else
11381+#define nx_enabled (0)
11382+#endif
11383+#else
11384+#define nx_enabled (1)
11385+#endif
11386
11387 #define pgprot_writecombine pgprot_writecombine
11388 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11389diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11390index fa04dea..5f823fc 100644
11391--- a/arch/x86/include/asm/processor.h
11392+++ b/arch/x86/include/asm/processor.h
11393@@ -272,7 +272,7 @@ struct tss_struct {
11394
11395 } ____cacheline_aligned;
11396
11397-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11398+extern struct tss_struct init_tss[NR_CPUS];
11399
11400 /*
11401 * Save the original ist values for checking stack pointers during debugging
11402@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11403 */
11404 #define TASK_SIZE PAGE_OFFSET
11405 #define TASK_SIZE_MAX TASK_SIZE
11406+
11407+#ifdef CONFIG_PAX_SEGMEXEC
11408+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11409+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11410+#else
11411 #define STACK_TOP TASK_SIZE
11412-#define STACK_TOP_MAX STACK_TOP
11413+#endif
11414+
11415+#define STACK_TOP_MAX TASK_SIZE
11416
11417 #define INIT_THREAD { \
11418- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11419+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11420 .vm86_info = NULL, \
11421 .sysenter_cs = __KERNEL_CS, \
11422 .io_bitmap_ptr = NULL, \
11423@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11424 */
11425 #define INIT_TSS { \
11426 .x86_tss = { \
11427- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11428+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11429 .ss0 = __KERNEL_DS, \
11430 .ss1 = __KERNEL_CS, \
11431 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11432@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11433 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11434
11435 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11436-#define KSTK_TOP(info) \
11437-({ \
11438- unsigned long *__ptr = (unsigned long *)(info); \
11439- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11440-})
11441+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11442
11443 /*
11444 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11445@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11446 #define task_pt_regs(task) \
11447 ({ \
11448 struct pt_regs *__regs__; \
11449- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11450+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11451 __regs__ - 1; \
11452 })
11453
11454@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11455 /*
11456 * User space process size. 47bits minus one guard page.
11457 */
11458-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11459+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11460
11461 /* This decides where the kernel will search for a free chunk of vm
11462 * space during mmap's.
11463 */
11464 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11465- 0xc0000000 : 0xFFFFe000)
11466+ 0xc0000000 : 0xFFFFf000)
11467
11468 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11469 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11470@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11471 #define STACK_TOP_MAX TASK_SIZE_MAX
11472
11473 #define INIT_THREAD { \
11474- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11475+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11476 }
11477
11478 #define INIT_TSS { \
11479- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11480+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11481 }
11482
11483 /*
11484@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11485 */
11486 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11487
11488+#ifdef CONFIG_PAX_SEGMEXEC
11489+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11490+#endif
11491+
11492 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11493
11494 /* Get/set a process' ability to use the timestamp counter instruction */
11495diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11496index 0f0d908..f2e3da2 100644
11497--- a/arch/x86/include/asm/ptrace.h
11498+++ b/arch/x86/include/asm/ptrace.h
11499@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11500 }
11501
11502 /*
11503- * user_mode_vm(regs) determines whether a register set came from user mode.
11504+ * user_mode(regs) determines whether a register set came from user mode.
11505 * This is true if V8086 mode was enabled OR if the register set was from
11506 * protected mode with RPL-3 CS value. This tricky test checks that with
11507 * one comparison. Many places in the kernel can bypass this full check
11508- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11509+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11510+ * be used.
11511 */
11512-static inline int user_mode(struct pt_regs *regs)
11513+static inline int user_mode_novm(struct pt_regs *regs)
11514 {
11515 #ifdef CONFIG_X86_32
11516 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11517 #else
11518- return !!(regs->cs & 3);
11519+ return !!(regs->cs & SEGMENT_RPL_MASK);
11520 #endif
11521 }
11522
11523-static inline int user_mode_vm(struct pt_regs *regs)
11524+static inline int user_mode(struct pt_regs *regs)
11525 {
11526 #ifdef CONFIG_X86_32
11527 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11528 USER_RPL;
11529 #else
11530- return user_mode(regs);
11531+ return user_mode_novm(regs);
11532 #endif
11533 }
11534
11535diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11536index 562d4fd..6e39df1 100644
11537--- a/arch/x86/include/asm/reboot.h
11538+++ b/arch/x86/include/asm/reboot.h
11539@@ -6,19 +6,19 @@
11540 struct pt_regs;
11541
11542 struct machine_ops {
11543- void (*restart)(char *cmd);
11544- void (*halt)(void);
11545- void (*power_off)(void);
11546+ void (* __noreturn restart)(char *cmd);
11547+ void (* __noreturn halt)(void);
11548+ void (* __noreturn power_off)(void);
11549 void (*shutdown)(void);
11550 void (*crash_shutdown)(struct pt_regs *);
11551- void (*emergency_restart)(void);
11552-};
11553+ void (* __noreturn emergency_restart)(void);
11554+} __no_const;
11555
11556 extern struct machine_ops machine_ops;
11557
11558 void native_machine_crash_shutdown(struct pt_regs *regs);
11559 void native_machine_shutdown(void);
11560-void machine_real_restart(const unsigned char *code, int length);
11561+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11562
11563 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11564 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11565diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11566index 606ede1..dbfff37 100644
11567--- a/arch/x86/include/asm/rwsem.h
11568+++ b/arch/x86/include/asm/rwsem.h
11569@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11570 {
11571 asm volatile("# beginning down_read\n\t"
11572 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11573+
11574+#ifdef CONFIG_PAX_REFCOUNT
11575+ "jno 0f\n"
11576+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11577+ "int $4\n0:\n"
11578+ _ASM_EXTABLE(0b, 0b)
11579+#endif
11580+
11581 /* adds 0x00000001, returns the old value */
11582 " jns 1f\n"
11583 " call call_rwsem_down_read_failed\n"
11584@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11585 "1:\n\t"
11586 " mov %1,%2\n\t"
11587 " add %3,%2\n\t"
11588+
11589+#ifdef CONFIG_PAX_REFCOUNT
11590+ "jno 0f\n"
11591+ "sub %3,%2\n"
11592+ "int $4\n0:\n"
11593+ _ASM_EXTABLE(0b, 0b)
11594+#endif
11595+
11596 " jle 2f\n\t"
11597 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11598 " jnz 1b\n\t"
11599@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11600 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11601 asm volatile("# beginning down_write\n\t"
11602 LOCK_PREFIX " xadd %1,(%2)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ "mov %1,(%2)\n"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* subtract 0x0000ffff, returns the old value */
11612 " test %1,%1\n\t"
11613 /* was the count 0 before? */
11614@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11615 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11616 asm volatile("# beginning __up_read\n\t"
11617 LOCK_PREFIX " xadd %1,(%2)\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "mov %1,(%2)\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 /* subtracts 1, returns the old value */
11627 " jns 1f\n\t"
11628 " call call_rwsem_wake\n"
11629@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11630 rwsem_count_t tmp;
11631 asm volatile("# beginning __up_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* tries to transition
11642 0xffff0001 -> 0x00000000 */
11643 " jz 1f\n"
11644@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11645 {
11646 asm volatile("# beginning __downgrade_write\n\t"
11647 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /*
11657 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11658 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11659@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11660 static inline void rwsem_atomic_add(rwsem_count_t delta,
11661 struct rw_semaphore *sem)
11662 {
11663- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11664+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11665+
11666+#ifdef CONFIG_PAX_REFCOUNT
11667+ "jno 0f\n"
11668+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11669+ "int $4\n0:\n"
11670+ _ASM_EXTABLE(0b, 0b)
11671+#endif
11672+
11673 : "+m" (sem->count)
11674 : "er" (delta));
11675 }
11676@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11677 {
11678 rwsem_count_t tmp = delta;
11679
11680- asm volatile(LOCK_PREFIX "xadd %0,%1"
11681+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11682+
11683+#ifdef CONFIG_PAX_REFCOUNT
11684+ "jno 0f\n"
11685+ "mov %0,%1\n"
11686+ "int $4\n0:\n"
11687+ _ASM_EXTABLE(0b, 0b)
11688+#endif
11689+
11690 : "+r" (tmp), "+m" (sem->count)
11691 : : "memory");
11692
11693diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11694index 14e0ed8..7f7dd5e 100644
11695--- a/arch/x86/include/asm/segment.h
11696+++ b/arch/x86/include/asm/segment.h
11697@@ -62,10 +62,15 @@
11698 * 26 - ESPFIX small SS
11699 * 27 - per-cpu [ offset to per-cpu data area ]
11700 * 28 - stack_canary-20 [ for stack protector ]
11701- * 29 - unused
11702- * 30 - unused
11703+ * 29 - PCI BIOS CS
11704+ * 30 - PCI BIOS DS
11705 * 31 - TSS for double fault handler
11706 */
11707+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11708+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11709+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11710+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11711+
11712 #define GDT_ENTRY_TLS_MIN 6
11713 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11714
11715@@ -77,6 +82,8 @@
11716
11717 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11718
11719+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11720+
11721 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11722
11723 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11724@@ -88,7 +95,7 @@
11725 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11726 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11727
11728-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11729+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11730 #ifdef CONFIG_SMP
11731 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11732 #else
11733@@ -102,6 +109,12 @@
11734 #define __KERNEL_STACK_CANARY 0
11735 #endif
11736
11737+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11738+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11739+
11740+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11741+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11742+
11743 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11744
11745 /*
11746@@ -139,7 +152,7 @@
11747 */
11748
11749 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11750-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11751+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11752
11753
11754 #else
11755@@ -163,6 +176,8 @@
11756 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11757 #define __USER32_DS __USER_DS
11758
11759+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11760+
11761 #define GDT_ENTRY_TSS 8 /* needs two entries */
11762 #define GDT_ENTRY_LDT 10 /* needs two entries */
11763 #define GDT_ENTRY_TLS_MIN 12
11764@@ -183,6 +198,7 @@
11765 #endif
11766
11767 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11768+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11769 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11770 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11771 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11772diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11773index 4c2f63c..5685db2 100644
11774--- a/arch/x86/include/asm/smp.h
11775+++ b/arch/x86/include/asm/smp.h
11776@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11777 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11778 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11779 DECLARE_PER_CPU(u16, cpu_llc_id);
11780-DECLARE_PER_CPU(int, cpu_number);
11781+DECLARE_PER_CPU(unsigned int, cpu_number);
11782
11783 static inline struct cpumask *cpu_sibling_mask(int cpu)
11784 {
11785@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11786 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11787
11788 /* Static state in head.S used to set up a CPU */
11789-extern struct {
11790- void *sp;
11791- unsigned short ss;
11792-} stack_start;
11793+extern unsigned long stack_start; /* Initial stack pointer address */
11794
11795 struct smp_ops {
11796 void (*smp_prepare_boot_cpu)(void);
11797@@ -60,7 +57,7 @@ struct smp_ops {
11798
11799 void (*send_call_func_ipi)(const struct cpumask *mask);
11800 void (*send_call_func_single_ipi)(int cpu);
11801-};
11802+} __no_const;
11803
11804 /* Globals due to paravirt */
11805 extern void set_cpu_sibling_map(int cpu);
11806@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11807 extern int safe_smp_processor_id(void);
11808
11809 #elif defined(CONFIG_X86_64_SMP)
11810-#define raw_smp_processor_id() (percpu_read(cpu_number))
11811-
11812-#define stack_smp_processor_id() \
11813-({ \
11814- struct thread_info *ti; \
11815- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11816- ti->cpu; \
11817-})
11818+#define raw_smp_processor_id() (percpu_read(cpu_number))
11819+#define stack_smp_processor_id() raw_smp_processor_id()
11820 #define safe_smp_processor_id() smp_processor_id()
11821
11822 #endif
11823diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11824index 4e77853..4359783 100644
11825--- a/arch/x86/include/asm/spinlock.h
11826+++ b/arch/x86/include/asm/spinlock.h
11827@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11828 static inline void __raw_read_lock(raw_rwlock_t *rw)
11829 {
11830 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11831+
11832+#ifdef CONFIG_PAX_REFCOUNT
11833+ "jno 0f\n"
11834+ LOCK_PREFIX " addl $1,(%0)\n"
11835+ "int $4\n0:\n"
11836+ _ASM_EXTABLE(0b, 0b)
11837+#endif
11838+
11839 "jns 1f\n"
11840 "call __read_lock_failed\n\t"
11841 "1:\n"
11842@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11843 static inline void __raw_write_lock(raw_rwlock_t *rw)
11844 {
11845 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11846+
11847+#ifdef CONFIG_PAX_REFCOUNT
11848+ "jno 0f\n"
11849+ LOCK_PREFIX " addl %1,(%0)\n"
11850+ "int $4\n0:\n"
11851+ _ASM_EXTABLE(0b, 0b)
11852+#endif
11853+
11854 "jz 1f\n"
11855 "call __write_lock_failed\n\t"
11856 "1:\n"
11857@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11858
11859 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11860 {
11861- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11862+ asm volatile(LOCK_PREFIX "incl %0\n"
11863+
11864+#ifdef CONFIG_PAX_REFCOUNT
11865+ "jno 0f\n"
11866+ LOCK_PREFIX "decl %0\n"
11867+ "int $4\n0:\n"
11868+ _ASM_EXTABLE(0b, 0b)
11869+#endif
11870+
11871+ :"+m" (rw->lock) : : "memory");
11872 }
11873
11874 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11875 {
11876- asm volatile(LOCK_PREFIX "addl %1, %0"
11877+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11878+
11879+#ifdef CONFIG_PAX_REFCOUNT
11880+ "jno 0f\n"
11881+ LOCK_PREFIX "subl %1, %0\n"
11882+ "int $4\n0:\n"
11883+ _ASM_EXTABLE(0b, 0b)
11884+#endif
11885+
11886 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11887 }
11888
11889diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11890index 1575177..cb23f52 100644
11891--- a/arch/x86/include/asm/stackprotector.h
11892+++ b/arch/x86/include/asm/stackprotector.h
11893@@ -48,7 +48,7 @@
11894 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11895 */
11896 #define GDT_STACK_CANARY_INIT \
11897- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11898+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11899
11900 /*
11901 * Initialize the stackprotector canary value.
11902@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11903
11904 static inline void load_stack_canary_segment(void)
11905 {
11906-#ifdef CONFIG_X86_32
11907+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11908 asm volatile ("mov %0, %%gs" : : "r" (0));
11909 #endif
11910 }
11911diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11912index e0fbf29..858ef4a 100644
11913--- a/arch/x86/include/asm/system.h
11914+++ b/arch/x86/include/asm/system.h
11915@@ -132,7 +132,7 @@ do { \
11916 "thread_return:\n\t" \
11917 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11918 __switch_canary \
11919- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11920+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11921 "movq %%rax,%%rdi\n\t" \
11922 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11923 "jnz ret_from_fork\n\t" \
11924@@ -143,7 +143,7 @@ do { \
11925 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11926 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11927 [_tif_fork] "i" (_TIF_FORK), \
11928- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11929+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11930 [current_task] "m" (per_cpu_var(current_task)) \
11931 __switch_canary_iparam \
11932 : "memory", "cc" __EXTRA_CLOBBER)
11933@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11934 {
11935 unsigned long __limit;
11936 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11937- return __limit + 1;
11938+ return __limit;
11939 }
11940
11941 static inline void native_clts(void)
11942@@ -340,12 +340,12 @@ void enable_hlt(void);
11943
11944 void cpu_idle_wait(void);
11945
11946-extern unsigned long arch_align_stack(unsigned long sp);
11947+#define arch_align_stack(x) ((x) & ~0xfUL)
11948 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11949
11950 void default_idle(void);
11951
11952-void stop_this_cpu(void *dummy);
11953+void stop_this_cpu(void *dummy) __noreturn;
11954
11955 /*
11956 * Force strict CPU ordering.
11957diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11958index 19c3ce4..8962535 100644
11959--- a/arch/x86/include/asm/thread_info.h
11960+++ b/arch/x86/include/asm/thread_info.h
11961@@ -10,6 +10,7 @@
11962 #include <linux/compiler.h>
11963 #include <asm/page.h>
11964 #include <asm/types.h>
11965+#include <asm/percpu.h>
11966
11967 /*
11968 * low level task data that entry.S needs immediate access to
11969@@ -24,7 +25,6 @@ struct exec_domain;
11970 #include <asm/atomic.h>
11971
11972 struct thread_info {
11973- struct task_struct *task; /* main task structure */
11974 struct exec_domain *exec_domain; /* execution domain */
11975 __u32 flags; /* low level flags */
11976 __u32 status; /* thread synchronous flags */
11977@@ -34,18 +34,12 @@ struct thread_info {
11978 mm_segment_t addr_limit;
11979 struct restart_block restart_block;
11980 void __user *sysenter_return;
11981-#ifdef CONFIG_X86_32
11982- unsigned long previous_esp; /* ESP of the previous stack in
11983- case of nested (IRQ) stacks
11984- */
11985- __u8 supervisor_stack[0];
11986-#endif
11987+ unsigned long lowest_stack;
11988 int uaccess_err;
11989 };
11990
11991-#define INIT_THREAD_INFO(tsk) \
11992+#define INIT_THREAD_INFO \
11993 { \
11994- .task = &tsk, \
11995 .exec_domain = &default_exec_domain, \
11996 .flags = 0, \
11997 .cpu = 0, \
11998@@ -56,7 +50,7 @@ struct thread_info {
11999 }, \
12000 }
12001
12002-#define init_thread_info (init_thread_union.thread_info)
12003+#define init_thread_info (init_thread_union.stack)
12004 #define init_stack (init_thread_union.stack)
12005
12006 #else /* !__ASSEMBLY__ */
12007@@ -163,45 +157,40 @@ struct thread_info {
12008 #define alloc_thread_info(tsk) \
12009 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12010
12011-#ifdef CONFIG_X86_32
12012-
12013-#define STACK_WARN (THREAD_SIZE/8)
12014-/*
12015- * macros/functions for gaining access to the thread information structure
12016- *
12017- * preempt_count needs to be 1 initially, until the scheduler is functional.
12018- */
12019-#ifndef __ASSEMBLY__
12020-
12021-
12022-/* how to get the current stack pointer from C */
12023-register unsigned long current_stack_pointer asm("esp") __used;
12024-
12025-/* how to get the thread information struct from C */
12026-static inline struct thread_info *current_thread_info(void)
12027-{
12028- return (struct thread_info *)
12029- (current_stack_pointer & ~(THREAD_SIZE - 1));
12030-}
12031-
12032-#else /* !__ASSEMBLY__ */
12033-
12034+#ifdef __ASSEMBLY__
12035 /* how to get the thread information struct from ASM */
12036 #define GET_THREAD_INFO(reg) \
12037- movl $-THREAD_SIZE, reg; \
12038- andl %esp, reg
12039+ mov PER_CPU_VAR(current_tinfo), reg
12040
12041 /* use this one if reg already contains %esp */
12042-#define GET_THREAD_INFO_WITH_ESP(reg) \
12043- andl $-THREAD_SIZE, reg
12044+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12045+#else
12046+/* how to get the thread information struct from C */
12047+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12048+
12049+static __always_inline struct thread_info *current_thread_info(void)
12050+{
12051+ return percpu_read_stable(current_tinfo);
12052+}
12053+#endif
12054+
12055+#ifdef CONFIG_X86_32
12056+
12057+#define STACK_WARN (THREAD_SIZE/8)
12058+/*
12059+ * macros/functions for gaining access to the thread information structure
12060+ *
12061+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12062+ */
12063+#ifndef __ASSEMBLY__
12064+
12065+/* how to get the current stack pointer from C */
12066+register unsigned long current_stack_pointer asm("esp") __used;
12067
12068 #endif
12069
12070 #else /* X86_32 */
12071
12072-#include <asm/percpu.h>
12073-#define KERNEL_STACK_OFFSET (5*8)
12074-
12075 /*
12076 * macros/functions for gaining access to the thread information structure
12077 * preempt_count needs to be 1 initially, until the scheduler is functional.
12078@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12079 #ifndef __ASSEMBLY__
12080 DECLARE_PER_CPU(unsigned long, kernel_stack);
12081
12082-static inline struct thread_info *current_thread_info(void)
12083-{
12084- struct thread_info *ti;
12085- ti = (void *)(percpu_read_stable(kernel_stack) +
12086- KERNEL_STACK_OFFSET - THREAD_SIZE);
12087- return ti;
12088-}
12089-
12090-#else /* !__ASSEMBLY__ */
12091-
12092-/* how to get the thread information struct from ASM */
12093-#define GET_THREAD_INFO(reg) \
12094- movq PER_CPU_VAR(kernel_stack),reg ; \
12095- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12096-
12097+/* how to get the current stack pointer from C */
12098+register unsigned long current_stack_pointer asm("rsp") __used;
12099 #endif
12100
12101 #endif /* !X86_32 */
12102@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12103 extern void free_thread_info(struct thread_info *ti);
12104 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12105 #define arch_task_cache_init arch_task_cache_init
12106+
12107+#define __HAVE_THREAD_FUNCTIONS
12108+#define task_thread_info(task) (&(task)->tinfo)
12109+#define task_stack_page(task) ((task)->stack)
12110+#define setup_thread_stack(p, org) do {} while (0)
12111+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12112+
12113+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12114+extern struct task_struct *alloc_task_struct(void);
12115+extern void free_task_struct(struct task_struct *);
12116+
12117 #endif
12118 #endif /* _ASM_X86_THREAD_INFO_H */
12119diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12120index 61c5874..8a046e9 100644
12121--- a/arch/x86/include/asm/uaccess.h
12122+++ b/arch/x86/include/asm/uaccess.h
12123@@ -8,12 +8,15 @@
12124 #include <linux/thread_info.h>
12125 #include <linux/prefetch.h>
12126 #include <linux/string.h>
12127+#include <linux/sched.h>
12128 #include <asm/asm.h>
12129 #include <asm/page.h>
12130
12131 #define VERIFY_READ 0
12132 #define VERIFY_WRITE 1
12133
12134+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12135+
12136 /*
12137 * The fs value determines whether argument validity checking should be
12138 * performed or not. If get_fs() == USER_DS, checking is performed, with
12139@@ -29,7 +32,12 @@
12140
12141 #define get_ds() (KERNEL_DS)
12142 #define get_fs() (current_thread_info()->addr_limit)
12143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12144+void __set_fs(mm_segment_t x);
12145+void set_fs(mm_segment_t x);
12146+#else
12147 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12148+#endif
12149
12150 #define segment_eq(a, b) ((a).seg == (b).seg)
12151
12152@@ -77,7 +85,33 @@
12153 * checks that the pointer is in the user space range - after calling
12154 * this function, memory access functions may still return -EFAULT.
12155 */
12156-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12157+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12158+#define access_ok(type, addr, size) \
12159+({ \
12160+ long __size = size; \
12161+ unsigned long __addr = (unsigned long)addr; \
12162+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12163+ unsigned long __end_ao = __addr + __size - 1; \
12164+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12165+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12166+ while(__addr_ao <= __end_ao) { \
12167+ char __c_ao; \
12168+ __addr_ao += PAGE_SIZE; \
12169+ if (__size > PAGE_SIZE) \
12170+ cond_resched(); \
12171+ if (__get_user(__c_ao, (char __user *)__addr)) \
12172+ break; \
12173+ if (type != VERIFY_WRITE) { \
12174+ __addr = __addr_ao; \
12175+ continue; \
12176+ } \
12177+ if (__put_user(__c_ao, (char __user *)__addr)) \
12178+ break; \
12179+ __addr = __addr_ao; \
12180+ } \
12181+ } \
12182+ __ret_ao; \
12183+})
12184
12185 /*
12186 * The exception table consists of pairs of addresses: the first is the
12187@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12188 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12189 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12190
12191-
12192+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12193+#define __copyuser_seg "gs;"
12194+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12195+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12196+#else
12197+#define __copyuser_seg
12198+#define __COPYUSER_SET_ES
12199+#define __COPYUSER_RESTORE_ES
12200+#endif
12201
12202 #ifdef CONFIG_X86_32
12203 #define __put_user_asm_u64(x, addr, err, errret) \
12204- asm volatile("1: movl %%eax,0(%2)\n" \
12205- "2: movl %%edx,4(%2)\n" \
12206+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12207+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12208 "3:\n" \
12209 ".section .fixup,\"ax\"\n" \
12210 "4: movl %3,%0\n" \
12211@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12212 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12213
12214 #define __put_user_asm_ex_u64(x, addr) \
12215- asm volatile("1: movl %%eax,0(%1)\n" \
12216- "2: movl %%edx,4(%1)\n" \
12217+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12218+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12219 "3:\n" \
12220 _ASM_EXTABLE(1b, 2b - 1b) \
12221 _ASM_EXTABLE(2b, 3b - 2b) \
12222@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12223 __typeof__(*(ptr)) __pu_val; \
12224 __chk_user_ptr(ptr); \
12225 might_fault(); \
12226- __pu_val = x; \
12227+ __pu_val = (x); \
12228 switch (sizeof(*(ptr))) { \
12229 case 1: \
12230 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12231@@ -374,7 +416,7 @@ do { \
12232 } while (0)
12233
12234 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12235- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12236+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12237 "2:\n" \
12238 ".section .fixup,\"ax\"\n" \
12239 "3: mov %3,%0\n" \
12240@@ -382,7 +424,7 @@ do { \
12241 " jmp 2b\n" \
12242 ".previous\n" \
12243 _ASM_EXTABLE(1b, 3b) \
12244- : "=r" (err), ltype(x) \
12245+ : "=r" (err), ltype (x) \
12246 : "m" (__m(addr)), "i" (errret), "0" (err))
12247
12248 #define __get_user_size_ex(x, ptr, size) \
12249@@ -407,7 +449,7 @@ do { \
12250 } while (0)
12251
12252 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12253- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12254+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12255 "2:\n" \
12256 _ASM_EXTABLE(1b, 2b - 1b) \
12257 : ltype(x) : "m" (__m(addr)))
12258@@ -424,13 +466,24 @@ do { \
12259 int __gu_err; \
12260 unsigned long __gu_val; \
12261 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12262- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12263+ (x) = (__typeof__(*(ptr)))__gu_val; \
12264 __gu_err; \
12265 })
12266
12267 /* FIXME: this hack is definitely wrong -AK */
12268 struct __large_struct { unsigned long buf[100]; };
12269-#define __m(x) (*(struct __large_struct __user *)(x))
12270+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12271+#define ____m(x) \
12272+({ \
12273+ unsigned long ____x = (unsigned long)(x); \
12274+ if (____x < PAX_USER_SHADOW_BASE) \
12275+ ____x += PAX_USER_SHADOW_BASE; \
12276+ (void __user *)____x; \
12277+})
12278+#else
12279+#define ____m(x) (x)
12280+#endif
12281+#define __m(x) (*(struct __large_struct __user *)____m(x))
12282
12283 /*
12284 * Tell gcc we read from memory instead of writing: this is because
12285@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12286 * aliasing issues.
12287 */
12288 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12289- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12290+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12291 "2:\n" \
12292 ".section .fixup,\"ax\"\n" \
12293 "3: mov %3,%0\n" \
12294@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12295 ".previous\n" \
12296 _ASM_EXTABLE(1b, 3b) \
12297 : "=r"(err) \
12298- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12299+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12300
12301 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12302- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12303+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12304 "2:\n" \
12305 _ASM_EXTABLE(1b, 2b - 1b) \
12306 : : ltype(x), "m" (__m(addr)))
12307@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12308 * On error, the variable @x is set to zero.
12309 */
12310
12311+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12312+#define __get_user(x, ptr) get_user((x), (ptr))
12313+#else
12314 #define __get_user(x, ptr) \
12315 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12316+#endif
12317
12318 /**
12319 * __put_user: - Write a simple value into user space, with less checking.
12320@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12321 * Returns zero on success, or -EFAULT on error.
12322 */
12323
12324+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12325+#define __put_user(x, ptr) put_user((x), (ptr))
12326+#else
12327 #define __put_user(x, ptr) \
12328 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12329+#endif
12330
12331 #define __get_user_unaligned __get_user
12332 #define __put_user_unaligned __put_user
12333@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12334 #define get_user_ex(x, ptr) do { \
12335 unsigned long __gue_val; \
12336 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12337- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12338+ (x) = (__typeof__(*(ptr)))__gue_val; \
12339 } while (0)
12340
12341 #ifdef CONFIG_X86_WP_WORKS_OK
12342@@ -567,6 +628,7 @@ extern struct movsl_mask {
12343
12344 #define ARCH_HAS_NOCACHE_UACCESS 1
12345
12346+#define ARCH_HAS_SORT_EXTABLE
12347 #ifdef CONFIG_X86_32
12348 # include "uaccess_32.h"
12349 #else
12350diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12351index 632fb44..e30e334 100644
12352--- a/arch/x86/include/asm/uaccess_32.h
12353+++ b/arch/x86/include/asm/uaccess_32.h
12354@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12355 static __always_inline unsigned long __must_check
12356 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12357 {
12358+ pax_track_stack();
12359+
12360+ if ((long)n < 0)
12361+ return n;
12362+
12363 if (__builtin_constant_p(n)) {
12364 unsigned long ret;
12365
12366@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12367 return ret;
12368 }
12369 }
12370+ if (!__builtin_constant_p(n))
12371+ check_object_size(from, n, true);
12372 return __copy_to_user_ll(to, from, n);
12373 }
12374
12375@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12376 __copy_to_user(void __user *to, const void *from, unsigned long n)
12377 {
12378 might_fault();
12379+
12380 return __copy_to_user_inatomic(to, from, n);
12381 }
12382
12383 static __always_inline unsigned long
12384 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12385 {
12386+ if ((long)n < 0)
12387+ return n;
12388+
12389 /* Avoid zeroing the tail if the copy fails..
12390 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12391 * but as the zeroing behaviour is only significant when n is not
12392@@ -138,6 +149,12 @@ static __always_inline unsigned long
12393 __copy_from_user(void *to, const void __user *from, unsigned long n)
12394 {
12395 might_fault();
12396+
12397+ pax_track_stack();
12398+
12399+ if ((long)n < 0)
12400+ return n;
12401+
12402 if (__builtin_constant_p(n)) {
12403 unsigned long ret;
12404
12405@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12406 return ret;
12407 }
12408 }
12409+ if (!__builtin_constant_p(n))
12410+ check_object_size(to, n, false);
12411 return __copy_from_user_ll(to, from, n);
12412 }
12413
12414@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12415 const void __user *from, unsigned long n)
12416 {
12417 might_fault();
12418+
12419+ if ((long)n < 0)
12420+ return n;
12421+
12422 if (__builtin_constant_p(n)) {
12423 unsigned long ret;
12424
12425@@ -182,14 +205,62 @@ static __always_inline unsigned long
12426 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12427 unsigned long n)
12428 {
12429- return __copy_from_user_ll_nocache_nozero(to, from, n);
12430+ if ((long)n < 0)
12431+ return n;
12432+
12433+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12434+}
12435+
12436+/**
12437+ * copy_to_user: - Copy a block of data into user space.
12438+ * @to: Destination address, in user space.
12439+ * @from: Source address, in kernel space.
12440+ * @n: Number of bytes to copy.
12441+ *
12442+ * Context: User context only. This function may sleep.
12443+ *
12444+ * Copy data from kernel space to user space.
12445+ *
12446+ * Returns number of bytes that could not be copied.
12447+ * On success, this will be zero.
12448+ */
12449+static __always_inline unsigned long __must_check
12450+copy_to_user(void __user *to, const void *from, unsigned long n)
12451+{
12452+ if (access_ok(VERIFY_WRITE, to, n))
12453+ n = __copy_to_user(to, from, n);
12454+ return n;
12455+}
12456+
12457+/**
12458+ * copy_from_user: - Copy a block of data from user space.
12459+ * @to: Destination address, in kernel space.
12460+ * @from: Source address, in user space.
12461+ * @n: Number of bytes to copy.
12462+ *
12463+ * Context: User context only. This function may sleep.
12464+ *
12465+ * Copy data from user space to kernel space.
12466+ *
12467+ * Returns number of bytes that could not be copied.
12468+ * On success, this will be zero.
12469+ *
12470+ * If some data could not be copied, this function will pad the copied
12471+ * data to the requested size using zero bytes.
12472+ */
12473+static __always_inline unsigned long __must_check
12474+copy_from_user(void *to, const void __user *from, unsigned long n)
12475+{
12476+ if (access_ok(VERIFY_READ, from, n))
12477+ n = __copy_from_user(to, from, n);
12478+ else if ((long)n > 0) {
12479+ if (!__builtin_constant_p(n))
12480+ check_object_size(to, n, false);
12481+ memset(to, 0, n);
12482+ }
12483+ return n;
12484 }
12485
12486-unsigned long __must_check copy_to_user(void __user *to,
12487- const void *from, unsigned long n);
12488-unsigned long __must_check copy_from_user(void *to,
12489- const void __user *from,
12490- unsigned long n);
12491 long __must_check strncpy_from_user(char *dst, const char __user *src,
12492 long count);
12493 long __must_check __strncpy_from_user(char *dst,
12494diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12495index db24b21..f595ae7 100644
12496--- a/arch/x86/include/asm/uaccess_64.h
12497+++ b/arch/x86/include/asm/uaccess_64.h
12498@@ -9,6 +9,9 @@
12499 #include <linux/prefetch.h>
12500 #include <linux/lockdep.h>
12501 #include <asm/page.h>
12502+#include <asm/pgtable.h>
12503+
12504+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12505
12506 /*
12507 * Copy To/From Userspace
12508@@ -16,116 +19,205 @@
12509
12510 /* Handles exceptions in both to and from, but doesn't do access_ok */
12511 __must_check unsigned long
12512-copy_user_generic(void *to, const void *from, unsigned len);
12513+copy_user_generic(void *to, const void *from, unsigned long len);
12514
12515 __must_check unsigned long
12516-copy_to_user(void __user *to, const void *from, unsigned len);
12517-__must_check unsigned long
12518-copy_from_user(void *to, const void __user *from, unsigned len);
12519-__must_check unsigned long
12520-copy_in_user(void __user *to, const void __user *from, unsigned len);
12521+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12522
12523 static __always_inline __must_check
12524-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12525+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12526 {
12527- int ret = 0;
12528+ unsigned ret = 0;
12529
12530 might_fault();
12531- if (!__builtin_constant_p(size))
12532- return copy_user_generic(dst, (__force void *)src, size);
12533+
12534+ if (size > INT_MAX)
12535+ return size;
12536+
12537+#ifdef CONFIG_PAX_MEMORY_UDEREF
12538+ if (!__access_ok(VERIFY_READ, src, size))
12539+ return size;
12540+#endif
12541+
12542+ if (!__builtin_constant_p(size)) {
12543+ check_object_size(dst, size, false);
12544+
12545+#ifdef CONFIG_PAX_MEMORY_UDEREF
12546+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12547+ src += PAX_USER_SHADOW_BASE;
12548+#endif
12549+
12550+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12551+ }
12552 switch (size) {
12553- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12554+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12555 ret, "b", "b", "=q", 1);
12556 return ret;
12557- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12558+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12559 ret, "w", "w", "=r", 2);
12560 return ret;
12561- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12562+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12563 ret, "l", "k", "=r", 4);
12564 return ret;
12565- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12566+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12567 ret, "q", "", "=r", 8);
12568 return ret;
12569 case 10:
12570- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12571+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12572 ret, "q", "", "=r", 10);
12573 if (unlikely(ret))
12574 return ret;
12575 __get_user_asm(*(u16 *)(8 + (char *)dst),
12576- (u16 __user *)(8 + (char __user *)src),
12577+ (const u16 __user *)(8 + (const char __user *)src),
12578 ret, "w", "w", "=r", 2);
12579 return ret;
12580 case 16:
12581- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12582+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12583 ret, "q", "", "=r", 16);
12584 if (unlikely(ret))
12585 return ret;
12586 __get_user_asm(*(u64 *)(8 + (char *)dst),
12587- (u64 __user *)(8 + (char __user *)src),
12588+ (const u64 __user *)(8 + (const char __user *)src),
12589 ret, "q", "", "=r", 8);
12590 return ret;
12591 default:
12592- return copy_user_generic(dst, (__force void *)src, size);
12593+
12594+#ifdef CONFIG_PAX_MEMORY_UDEREF
12595+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12596+ src += PAX_USER_SHADOW_BASE;
12597+#endif
12598+
12599+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12600 }
12601 }
12602
12603 static __always_inline __must_check
12604-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12605+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12606 {
12607- int ret = 0;
12608+ unsigned ret = 0;
12609
12610 might_fault();
12611- if (!__builtin_constant_p(size))
12612- return copy_user_generic((__force void *)dst, src, size);
12613+
12614+ pax_track_stack();
12615+
12616+ if (size > INT_MAX)
12617+ return size;
12618+
12619+#ifdef CONFIG_PAX_MEMORY_UDEREF
12620+ if (!__access_ok(VERIFY_WRITE, dst, size))
12621+ return size;
12622+#endif
12623+
12624+ if (!__builtin_constant_p(size)) {
12625+ check_object_size(src, size, true);
12626+
12627+#ifdef CONFIG_PAX_MEMORY_UDEREF
12628+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12629+ dst += PAX_USER_SHADOW_BASE;
12630+#endif
12631+
12632+ return copy_user_generic((__force_kernel void *)dst, src, size);
12633+ }
12634 switch (size) {
12635- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12636+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12637 ret, "b", "b", "iq", 1);
12638 return ret;
12639- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12640+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12641 ret, "w", "w", "ir", 2);
12642 return ret;
12643- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12644+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12645 ret, "l", "k", "ir", 4);
12646 return ret;
12647- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12648+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12649 ret, "q", "", "er", 8);
12650 return ret;
12651 case 10:
12652- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12653+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12654 ret, "q", "", "er", 10);
12655 if (unlikely(ret))
12656 return ret;
12657 asm("":::"memory");
12658- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12659+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12660 ret, "w", "w", "ir", 2);
12661 return ret;
12662 case 16:
12663- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12664+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12665 ret, "q", "", "er", 16);
12666 if (unlikely(ret))
12667 return ret;
12668 asm("":::"memory");
12669- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12670+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12671 ret, "q", "", "er", 8);
12672 return ret;
12673 default:
12674- return copy_user_generic((__force void *)dst, src, size);
12675+
12676+#ifdef CONFIG_PAX_MEMORY_UDEREF
12677+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12678+ dst += PAX_USER_SHADOW_BASE;
12679+#endif
12680+
12681+ return copy_user_generic((__force_kernel void *)dst, src, size);
12682+ }
12683+}
12684+
12685+static __always_inline __must_check
12686+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12687+{
12688+ if (access_ok(VERIFY_WRITE, to, len))
12689+ len = __copy_to_user(to, from, len);
12690+ return len;
12691+}
12692+
12693+static __always_inline __must_check
12694+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12695+{
12696+ might_fault();
12697+
12698+ if (access_ok(VERIFY_READ, from, len))
12699+ len = __copy_from_user(to, from, len);
12700+ else if (len < INT_MAX) {
12701+ if (!__builtin_constant_p(len))
12702+ check_object_size(to, len, false);
12703+ memset(to, 0, len);
12704 }
12705+ return len;
12706 }
12707
12708 static __always_inline __must_check
12709-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12710+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12711 {
12712- int ret = 0;
12713+ unsigned ret = 0;
12714
12715 might_fault();
12716- if (!__builtin_constant_p(size))
12717- return copy_user_generic((__force void *)dst,
12718- (__force void *)src, size);
12719+
12720+ pax_track_stack();
12721+
12722+ if (size > INT_MAX)
12723+ return size;
12724+
12725+#ifdef CONFIG_PAX_MEMORY_UDEREF
12726+ if (!__access_ok(VERIFY_READ, src, size))
12727+ return size;
12728+ if (!__access_ok(VERIFY_WRITE, dst, size))
12729+ return size;
12730+#endif
12731+
12732+ if (!__builtin_constant_p(size)) {
12733+
12734+#ifdef CONFIG_PAX_MEMORY_UDEREF
12735+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12736+ src += PAX_USER_SHADOW_BASE;
12737+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12738+ dst += PAX_USER_SHADOW_BASE;
12739+#endif
12740+
12741+ return copy_user_generic((__force_kernel void *)dst,
12742+ (__force_kernel const void *)src, size);
12743+ }
12744 switch (size) {
12745 case 1: {
12746 u8 tmp;
12747- __get_user_asm(tmp, (u8 __user *)src,
12748+ __get_user_asm(tmp, (const u8 __user *)src,
12749 ret, "b", "b", "=q", 1);
12750 if (likely(!ret))
12751 __put_user_asm(tmp, (u8 __user *)dst,
12752@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12753 }
12754 case 2: {
12755 u16 tmp;
12756- __get_user_asm(tmp, (u16 __user *)src,
12757+ __get_user_asm(tmp, (const u16 __user *)src,
12758 ret, "w", "w", "=r", 2);
12759 if (likely(!ret))
12760 __put_user_asm(tmp, (u16 __user *)dst,
12761@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12762
12763 case 4: {
12764 u32 tmp;
12765- __get_user_asm(tmp, (u32 __user *)src,
12766+ __get_user_asm(tmp, (const u32 __user *)src,
12767 ret, "l", "k", "=r", 4);
12768 if (likely(!ret))
12769 __put_user_asm(tmp, (u32 __user *)dst,
12770@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12771 }
12772 case 8: {
12773 u64 tmp;
12774- __get_user_asm(tmp, (u64 __user *)src,
12775+ __get_user_asm(tmp, (const u64 __user *)src,
12776 ret, "q", "", "=r", 8);
12777 if (likely(!ret))
12778 __put_user_asm(tmp, (u64 __user *)dst,
12779@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12780 return ret;
12781 }
12782 default:
12783- return copy_user_generic((__force void *)dst,
12784- (__force void *)src, size);
12785+
12786+#ifdef CONFIG_PAX_MEMORY_UDEREF
12787+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12788+ src += PAX_USER_SHADOW_BASE;
12789+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12790+ dst += PAX_USER_SHADOW_BASE;
12791+#endif
12792+
12793+ return copy_user_generic((__force_kernel void *)dst,
12794+ (__force_kernel const void *)src, size);
12795 }
12796 }
12797
12798@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12799 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12800 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12801
12802-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12803- unsigned size);
12804+static __must_check __always_inline unsigned long
12805+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12806+{
12807+ pax_track_stack();
12808+
12809+ if (size > INT_MAX)
12810+ return size;
12811+
12812+#ifdef CONFIG_PAX_MEMORY_UDEREF
12813+ if (!__access_ok(VERIFY_READ, src, size))
12814+ return size;
12815
12816-static __must_check __always_inline int
12817-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12818+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12819+ src += PAX_USER_SHADOW_BASE;
12820+#endif
12821+
12822+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12823+}
12824+
12825+static __must_check __always_inline unsigned long
12826+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12827 {
12828- return copy_user_generic((__force void *)dst, src, size);
12829+ if (size > INT_MAX)
12830+ return size;
12831+
12832+#ifdef CONFIG_PAX_MEMORY_UDEREF
12833+ if (!__access_ok(VERIFY_WRITE, dst, size))
12834+ return size;
12835+
12836+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12837+ dst += PAX_USER_SHADOW_BASE;
12838+#endif
12839+
12840+ return copy_user_generic((__force_kernel void *)dst, src, size);
12841 }
12842
12843-extern long __copy_user_nocache(void *dst, const void __user *src,
12844- unsigned size, int zerorest);
12845+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12846+ unsigned long size, int zerorest);
12847
12848-static inline int
12849-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12850+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12851 {
12852 might_sleep();
12853+
12854+ if (size > INT_MAX)
12855+ return size;
12856+
12857+#ifdef CONFIG_PAX_MEMORY_UDEREF
12858+ if (!__access_ok(VERIFY_READ, src, size))
12859+ return size;
12860+#endif
12861+
12862 return __copy_user_nocache(dst, src, size, 1);
12863 }
12864
12865-static inline int
12866-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12867- unsigned size)
12868+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12869+ unsigned long size)
12870 {
12871+ if (size > INT_MAX)
12872+ return size;
12873+
12874+#ifdef CONFIG_PAX_MEMORY_UDEREF
12875+ if (!__access_ok(VERIFY_READ, src, size))
12876+ return size;
12877+#endif
12878+
12879 return __copy_user_nocache(dst, src, size, 0);
12880 }
12881
12882-unsigned long
12883-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12884+extern unsigned long
12885+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12886
12887 #endif /* _ASM_X86_UACCESS_64_H */
12888diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12889index 9064052..786cfbc 100644
12890--- a/arch/x86/include/asm/vdso.h
12891+++ b/arch/x86/include/asm/vdso.h
12892@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12893 #define VDSO32_SYMBOL(base, name) \
12894 ({ \
12895 extern const char VDSO32_##name[]; \
12896- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12897+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12898 })
12899 #endif
12900
12901diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12902index 3d61e20..9507180 100644
12903--- a/arch/x86/include/asm/vgtod.h
12904+++ b/arch/x86/include/asm/vgtod.h
12905@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12906 int sysctl_enabled;
12907 struct timezone sys_tz;
12908 struct { /* extract of a clocksource struct */
12909+ char name[8];
12910 cycle_t (*vread)(void);
12911 cycle_t cycle_last;
12912 cycle_t mask;
12913diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12914index 61e08c0..b0da582 100644
12915--- a/arch/x86/include/asm/vmi.h
12916+++ b/arch/x86/include/asm/vmi.h
12917@@ -191,6 +191,7 @@ struct vrom_header {
12918 u8 reserved[96]; /* Reserved for headers */
12919 char vmi_init[8]; /* VMI_Init jump point */
12920 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12921+ char rom_data[8048]; /* rest of the option ROM */
12922 } __attribute__((packed));
12923
12924 struct pnp_header {
12925diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12926index c6e0bee..fcb9f74 100644
12927--- a/arch/x86/include/asm/vmi_time.h
12928+++ b/arch/x86/include/asm/vmi_time.h
12929@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12930 int (*wallclock_updated)(void);
12931 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12932 void (*cancel_alarm)(u32 flags);
12933-} vmi_timer_ops;
12934+} __no_const vmi_timer_ops;
12935
12936 /* Prototypes */
12937 extern void __init vmi_time_init(void);
12938diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12939index d0983d2..1f7c9e9 100644
12940--- a/arch/x86/include/asm/vsyscall.h
12941+++ b/arch/x86/include/asm/vsyscall.h
12942@@ -15,9 +15,10 @@ enum vsyscall_num {
12943
12944 #ifdef __KERNEL__
12945 #include <linux/seqlock.h>
12946+#include <linux/getcpu.h>
12947+#include <linux/time.h>
12948
12949 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12950-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12951
12952 /* Definitions for CONFIG_GENERIC_TIME definitions */
12953 #define __section_vsyscall_gtod_data __attribute__ \
12954@@ -31,7 +32,6 @@ enum vsyscall_num {
12955 #define VGETCPU_LSL 2
12956
12957 extern int __vgetcpu_mode;
12958-extern volatile unsigned long __jiffies;
12959
12960 /* kernel space (writeable) */
12961 extern int vgetcpu_mode;
12962@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12963
12964 extern void map_vsyscall(void);
12965
12966+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12967+extern time_t vtime(time_t *t);
12968+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12969 #endif /* __KERNEL__ */
12970
12971 #endif /* _ASM_X86_VSYSCALL_H */
12972diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12973index 2c756fd..3377e37 100644
12974--- a/arch/x86/include/asm/x86_init.h
12975+++ b/arch/x86/include/asm/x86_init.h
12976@@ -28,7 +28,7 @@ struct x86_init_mpparse {
12977 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12978 void (*find_smp_config)(unsigned int reserve);
12979 void (*get_smp_config)(unsigned int early);
12980-};
12981+} __no_const;
12982
12983 /**
12984 * struct x86_init_resources - platform specific resource related ops
12985@@ -42,7 +42,7 @@ struct x86_init_resources {
12986 void (*probe_roms)(void);
12987 void (*reserve_resources)(void);
12988 char *(*memory_setup)(void);
12989-};
12990+} __no_const;
12991
12992 /**
12993 * struct x86_init_irqs - platform specific interrupt setup
12994@@ -55,7 +55,7 @@ struct x86_init_irqs {
12995 void (*pre_vector_init)(void);
12996 void (*intr_init)(void);
12997 void (*trap_init)(void);
12998-};
12999+} __no_const;
13000
13001 /**
13002 * struct x86_init_oem - oem platform specific customizing functions
13003@@ -65,7 +65,7 @@ struct x86_init_irqs {
13004 struct x86_init_oem {
13005 void (*arch_setup)(void);
13006 void (*banner)(void);
13007-};
13008+} __no_const;
13009
13010 /**
13011 * struct x86_init_paging - platform specific paging functions
13012@@ -75,7 +75,7 @@ struct x86_init_oem {
13013 struct x86_init_paging {
13014 void (*pagetable_setup_start)(pgd_t *base);
13015 void (*pagetable_setup_done)(pgd_t *base);
13016-};
13017+} __no_const;
13018
13019 /**
13020 * struct x86_init_timers - platform specific timer setup
13021@@ -88,7 +88,7 @@ struct x86_init_timers {
13022 void (*setup_percpu_clockev)(void);
13023 void (*tsc_pre_init)(void);
13024 void (*timer_init)(void);
13025-};
13026+} __no_const;
13027
13028 /**
13029 * struct x86_init_ops - functions for platform specific setup
13030@@ -101,7 +101,7 @@ struct x86_init_ops {
13031 struct x86_init_oem oem;
13032 struct x86_init_paging paging;
13033 struct x86_init_timers timers;
13034-};
13035+} __no_const;
13036
13037 /**
13038 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13039@@ -109,7 +109,7 @@ struct x86_init_ops {
13040 */
13041 struct x86_cpuinit_ops {
13042 void (*setup_percpu_clockev)(void);
13043-};
13044+} __no_const;
13045
13046 /**
13047 * struct x86_platform_ops - platform specific runtime functions
13048@@ -121,7 +121,7 @@ struct x86_platform_ops {
13049 unsigned long (*calibrate_tsc)(void);
13050 unsigned long (*get_wallclock)(void);
13051 int (*set_wallclock)(unsigned long nowtime);
13052-};
13053+} __no_const;
13054
13055 extern struct x86_init_ops x86_init;
13056 extern struct x86_cpuinit_ops x86_cpuinit;
13057diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13058index 727acc1..554f3eb 100644
13059--- a/arch/x86/include/asm/xsave.h
13060+++ b/arch/x86/include/asm/xsave.h
13061@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13062 static inline int xsave_user(struct xsave_struct __user *buf)
13063 {
13064 int err;
13065+
13066+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13067+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13068+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13069+#endif
13070+
13071 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13072 "2:\n"
13073 ".section .fixup,\"ax\"\n"
13074@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13075 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13076 {
13077 int err;
13078- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13079+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13080 u32 lmask = mask;
13081 u32 hmask = mask >> 32;
13082
13083+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13084+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13085+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13086+#endif
13087+
13088 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13089 "2:\n"
13090 ".section .fixup,\"ax\"\n"
13091diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13092index 6a564ac..9b1340c 100644
13093--- a/arch/x86/kernel/acpi/realmode/Makefile
13094+++ b/arch/x86/kernel/acpi/realmode/Makefile
13095@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13096 $(call cc-option, -fno-stack-protector) \
13097 $(call cc-option, -mpreferred-stack-boundary=2)
13098 KBUILD_CFLAGS += $(call cc-option, -m32)
13099+ifdef CONSTIFY_PLUGIN
13100+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13101+endif
13102 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13103 GCOV_PROFILE := n
13104
13105diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13106index 580b4e2..d4129e4 100644
13107--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13108+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13109@@ -91,6 +91,9 @@ _start:
13110 /* Do any other stuff... */
13111
13112 #ifndef CONFIG_64BIT
13113+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13114+ call verify_cpu
13115+
13116 /* This could also be done in C code... */
13117 movl pmode_cr3, %eax
13118 movl %eax, %cr3
13119@@ -104,7 +107,7 @@ _start:
13120 movl %eax, %ecx
13121 orl %edx, %ecx
13122 jz 1f
13123- movl $0xc0000080, %ecx
13124+ mov $MSR_EFER, %ecx
13125 wrmsr
13126 1:
13127
13128@@ -114,6 +117,7 @@ _start:
13129 movl pmode_cr0, %eax
13130 movl %eax, %cr0
13131 jmp pmode_return
13132+# include "../../verify_cpu.S"
13133 #else
13134 pushw $0
13135 pushw trampoline_segment
13136diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13137index ca93638..7042f24 100644
13138--- a/arch/x86/kernel/acpi/sleep.c
13139+++ b/arch/x86/kernel/acpi/sleep.c
13140@@ -11,11 +11,12 @@
13141 #include <linux/cpumask.h>
13142 #include <asm/segment.h>
13143 #include <asm/desc.h>
13144+#include <asm/e820.h>
13145
13146 #include "realmode/wakeup.h"
13147 #include "sleep.h"
13148
13149-unsigned long acpi_wakeup_address;
13150+unsigned long acpi_wakeup_address = 0x2000;
13151 unsigned long acpi_realmode_flags;
13152
13153 /* address in low memory of the wakeup routine. */
13154@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13155 #else /* CONFIG_64BIT */
13156 header->trampoline_segment = setup_trampoline() >> 4;
13157 #ifdef CONFIG_SMP
13158- stack_start.sp = temp_stack + sizeof(temp_stack);
13159+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13160+
13161+ pax_open_kernel();
13162 early_gdt_descr.address =
13163 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13164+ pax_close_kernel();
13165+
13166 initial_gs = per_cpu_offset(smp_processor_id());
13167 #endif
13168 initial_code = (unsigned long)wakeup_long64;
13169@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13170 return;
13171 }
13172
13173- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13174-
13175- if (!acpi_realmode) {
13176- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13177- return;
13178- }
13179-
13180- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13181+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13182+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13183 }
13184
13185
13186diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13187index 8ded418..079961e 100644
13188--- a/arch/x86/kernel/acpi/wakeup_32.S
13189+++ b/arch/x86/kernel/acpi/wakeup_32.S
13190@@ -30,13 +30,11 @@ wakeup_pmode_return:
13191 # and restore the stack ... but you need gdt for this to work
13192 movl saved_context_esp, %esp
13193
13194- movl %cs:saved_magic, %eax
13195- cmpl $0x12345678, %eax
13196+ cmpl $0x12345678, saved_magic
13197 jne bogus_magic
13198
13199 # jump to place where we left off
13200- movl saved_eip, %eax
13201- jmp *%eax
13202+ jmp *(saved_eip)
13203
13204 bogus_magic:
13205 jmp bogus_magic
13206diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13207index de7353c..075da5f 100644
13208--- a/arch/x86/kernel/alternative.c
13209+++ b/arch/x86/kernel/alternative.c
13210@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13211
13212 BUG_ON(p->len > MAX_PATCH_LEN);
13213 /* prep the buffer with the original instructions */
13214- memcpy(insnbuf, p->instr, p->len);
13215+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13216 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13217 (unsigned long)p->instr, p->len);
13218
13219@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13220 if (smp_alt_once)
13221 free_init_pages("SMP alternatives",
13222 (unsigned long)__smp_locks,
13223- (unsigned long)__smp_locks_end);
13224+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13225
13226 restart_nmi();
13227 }
13228@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13229 * instructions. And on the local CPU you need to be protected again NMI or MCE
13230 * handlers seeing an inconsistent instruction while you patch.
13231 */
13232-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13233+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13234 size_t len)
13235 {
13236 unsigned long flags;
13237 local_irq_save(flags);
13238- memcpy(addr, opcode, len);
13239+
13240+ pax_open_kernel();
13241+ memcpy(ktla_ktva(addr), opcode, len);
13242 sync_core();
13243+ pax_close_kernel();
13244+
13245 local_irq_restore(flags);
13246 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13247 that causes hangs on some VIA CPUs. */
13248@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13249 */
13250 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13251 {
13252- unsigned long flags;
13253- char *vaddr;
13254+ unsigned char *vaddr = ktla_ktva(addr);
13255 struct page *pages[2];
13256- int i;
13257+ size_t i;
13258
13259 if (!core_kernel_text((unsigned long)addr)) {
13260- pages[0] = vmalloc_to_page(addr);
13261- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13262+ pages[0] = vmalloc_to_page(vaddr);
13263+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13264 } else {
13265- pages[0] = virt_to_page(addr);
13266+ pages[0] = virt_to_page(vaddr);
13267 WARN_ON(!PageReserved(pages[0]));
13268- pages[1] = virt_to_page(addr + PAGE_SIZE);
13269+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13270 }
13271 BUG_ON(!pages[0]);
13272- local_irq_save(flags);
13273- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13274- if (pages[1])
13275- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13276- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13277- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13278- clear_fixmap(FIX_TEXT_POKE0);
13279- if (pages[1])
13280- clear_fixmap(FIX_TEXT_POKE1);
13281- local_flush_tlb();
13282- sync_core();
13283- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13284- that causes hangs on some VIA CPUs. */
13285+ text_poke_early(addr, opcode, len);
13286 for (i = 0; i < len; i++)
13287- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13288- local_irq_restore(flags);
13289+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13290 return addr;
13291 }
13292diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13293index 3a44b75..1601800 100644
13294--- a/arch/x86/kernel/amd_iommu.c
13295+++ b/arch/x86/kernel/amd_iommu.c
13296@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13297 }
13298 }
13299
13300-static struct dma_map_ops amd_iommu_dma_ops = {
13301+static const struct dma_map_ops amd_iommu_dma_ops = {
13302 .alloc_coherent = alloc_coherent,
13303 .free_coherent = free_coherent,
13304 .map_page = map_page,
13305diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13306index 1d2d670..8e3f477 100644
13307--- a/arch/x86/kernel/apic/apic.c
13308+++ b/arch/x86/kernel/apic/apic.c
13309@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13310 /*
13311 * Debug level, exported for io_apic.c
13312 */
13313-unsigned int apic_verbosity;
13314+int apic_verbosity;
13315
13316 int pic_mode;
13317
13318@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13319 apic_write(APIC_ESR, 0);
13320 v1 = apic_read(APIC_ESR);
13321 ack_APIC_irq();
13322- atomic_inc(&irq_err_count);
13323+ atomic_inc_unchecked(&irq_err_count);
13324
13325 /*
13326 * Here is what the APIC error bits mean:
13327@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13328 u16 *bios_cpu_apicid;
13329 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13330
13331+ pax_track_stack();
13332+
13333 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13334 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13335
13336diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13337index 8928d97..f799cea 100644
13338--- a/arch/x86/kernel/apic/io_apic.c
13339+++ b/arch/x86/kernel/apic/io_apic.c
13340@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13341 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13342 GFP_ATOMIC);
13343 if (!ioapic_entries)
13344- return 0;
13345+ return NULL;
13346
13347 for (apic = 0; apic < nr_ioapics; apic++) {
13348 ioapic_entries[apic] =
13349@@ -733,7 +733,7 @@ nomem:
13350 kfree(ioapic_entries[apic]);
13351 kfree(ioapic_entries);
13352
13353- return 0;
13354+ return NULL;
13355 }
13356
13357 /*
13358@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13359 }
13360 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13361
13362-void lock_vector_lock(void)
13363+void lock_vector_lock(void) __acquires(vector_lock)
13364 {
13365 /* Used to the online set of cpus does not change
13366 * during assign_irq_vector.
13367@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13368 spin_lock(&vector_lock);
13369 }
13370
13371-void unlock_vector_lock(void)
13372+void unlock_vector_lock(void) __releases(vector_lock)
13373 {
13374 spin_unlock(&vector_lock);
13375 }
13376@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13377 ack_APIC_irq();
13378 }
13379
13380-atomic_t irq_mis_count;
13381+atomic_unchecked_t irq_mis_count;
13382
13383 static void ack_apic_level(unsigned int irq)
13384 {
13385@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13386
13387 /* Tail end of version 0x11 I/O APIC bug workaround */
13388 if (!(v & (1 << (i & 0x1f)))) {
13389- atomic_inc(&irq_mis_count);
13390+ atomic_inc_unchecked(&irq_mis_count);
13391 spin_lock(&ioapic_lock);
13392 __mask_and_edge_IO_APIC_irq(cfg);
13393 __unmask_and_level_IO_APIC_irq(cfg);
13394diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13395index 151ace6..f317474 100644
13396--- a/arch/x86/kernel/apm_32.c
13397+++ b/arch/x86/kernel/apm_32.c
13398@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13399 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13400 * even though they are called in protected mode.
13401 */
13402-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13403+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13404 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13405
13406 static const char driver_version[] = "1.16ac"; /* no spaces */
13407@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13408 BUG_ON(cpu != 0);
13409 gdt = get_cpu_gdt_table(cpu);
13410 save_desc_40 = gdt[0x40 / 8];
13411+
13412+ pax_open_kernel();
13413 gdt[0x40 / 8] = bad_bios_desc;
13414+ pax_close_kernel();
13415
13416 apm_irq_save(flags);
13417 APM_DO_SAVE_SEGS;
13418@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13419 &call->esi);
13420 APM_DO_RESTORE_SEGS;
13421 apm_irq_restore(flags);
13422+
13423+ pax_open_kernel();
13424 gdt[0x40 / 8] = save_desc_40;
13425+ pax_close_kernel();
13426+
13427 put_cpu();
13428
13429 return call->eax & 0xff;
13430@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13431 BUG_ON(cpu != 0);
13432 gdt = get_cpu_gdt_table(cpu);
13433 save_desc_40 = gdt[0x40 / 8];
13434+
13435+ pax_open_kernel();
13436 gdt[0x40 / 8] = bad_bios_desc;
13437+ pax_close_kernel();
13438
13439 apm_irq_save(flags);
13440 APM_DO_SAVE_SEGS;
13441@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13442 &call->eax);
13443 APM_DO_RESTORE_SEGS;
13444 apm_irq_restore(flags);
13445+
13446+ pax_open_kernel();
13447 gdt[0x40 / 8] = save_desc_40;
13448+ pax_close_kernel();
13449+
13450 put_cpu();
13451 return error;
13452 }
13453@@ -975,7 +989,7 @@ recalc:
13454
13455 static void apm_power_off(void)
13456 {
13457- unsigned char po_bios_call[] = {
13458+ const unsigned char po_bios_call[] = {
13459 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13460 0x8e, 0xd0, /* movw ax,ss */
13461 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13462@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13463 * code to that CPU.
13464 */
13465 gdt = get_cpu_gdt_table(0);
13466+
13467+ pax_open_kernel();
13468 set_desc_base(&gdt[APM_CS >> 3],
13469 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13470 set_desc_base(&gdt[APM_CS_16 >> 3],
13471 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13472 set_desc_base(&gdt[APM_DS >> 3],
13473 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13474+ pax_close_kernel();
13475
13476 proc_create("apm", 0, NULL, &apm_file_ops);
13477
13478diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13479index dfdbf64..9b2b6ce 100644
13480--- a/arch/x86/kernel/asm-offsets_32.c
13481+++ b/arch/x86/kernel/asm-offsets_32.c
13482@@ -51,7 +51,6 @@ void foo(void)
13483 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13484 BLANK();
13485
13486- OFFSET(TI_task, thread_info, task);
13487 OFFSET(TI_exec_domain, thread_info, exec_domain);
13488 OFFSET(TI_flags, thread_info, flags);
13489 OFFSET(TI_status, thread_info, status);
13490@@ -60,6 +59,8 @@ void foo(void)
13491 OFFSET(TI_restart_block, thread_info, restart_block);
13492 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13493 OFFSET(TI_cpu, thread_info, cpu);
13494+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13495+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13496 BLANK();
13497
13498 OFFSET(GDS_size, desc_ptr, size);
13499@@ -99,6 +100,7 @@ void foo(void)
13500
13501 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13502 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13503+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13504 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13505 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13506 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13507@@ -115,6 +117,11 @@ void foo(void)
13508 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13509 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13510 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13511+
13512+#ifdef CONFIG_PAX_KERNEXEC
13513+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13514+#endif
13515+
13516 #endif
13517
13518 #ifdef CONFIG_XEN
13519diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13520index 4a6aeed..371de20 100644
13521--- a/arch/x86/kernel/asm-offsets_64.c
13522+++ b/arch/x86/kernel/asm-offsets_64.c
13523@@ -44,6 +44,8 @@ int main(void)
13524 ENTRY(addr_limit);
13525 ENTRY(preempt_count);
13526 ENTRY(status);
13527+ ENTRY(lowest_stack);
13528+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13529 #ifdef CONFIG_IA32_EMULATION
13530 ENTRY(sysenter_return);
13531 #endif
13532@@ -63,6 +65,18 @@ int main(void)
13533 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13534 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13535 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13536+
13537+#ifdef CONFIG_PAX_KERNEXEC
13538+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13539+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13540+#endif
13541+
13542+#ifdef CONFIG_PAX_MEMORY_UDEREF
13543+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13544+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13545+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13546+#endif
13547+
13548 #endif
13549
13550
13551@@ -115,6 +129,7 @@ int main(void)
13552 ENTRY(cr8);
13553 BLANK();
13554 #undef ENTRY
13555+ DEFINE(TSS_size, sizeof(struct tss_struct));
13556 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13557 BLANK();
13558 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13559@@ -130,6 +145,7 @@ int main(void)
13560
13561 BLANK();
13562 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13563+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13564 #ifdef CONFIG_XEN
13565 BLANK();
13566 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13567diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13568index ff502cc..dc5133e 100644
13569--- a/arch/x86/kernel/cpu/Makefile
13570+++ b/arch/x86/kernel/cpu/Makefile
13571@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13572 CFLAGS_REMOVE_common.o = -pg
13573 endif
13574
13575-# Make sure load_percpu_segment has no stackprotector
13576-nostackp := $(call cc-option, -fno-stack-protector)
13577-CFLAGS_common.o := $(nostackp)
13578-
13579 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13580 obj-y += proc.o capflags.o powerflags.o common.o
13581 obj-y += vmware.o hypervisor.o sched.o
13582diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13583index 6e082dc..a0b5f36 100644
13584--- a/arch/x86/kernel/cpu/amd.c
13585+++ b/arch/x86/kernel/cpu/amd.c
13586@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13587 unsigned int size)
13588 {
13589 /* AMD errata T13 (order #21922) */
13590- if ((c->x86 == 6)) {
13591+ if (c->x86 == 6) {
13592 /* Duron Rev A0 */
13593 if (c->x86_model == 3 && c->x86_mask == 0)
13594 size = 64;
13595diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13596index 4e34d10..a53b130a 100644
13597--- a/arch/x86/kernel/cpu/common.c
13598+++ b/arch/x86/kernel/cpu/common.c
13599@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13600
13601 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13602
13603-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13604-#ifdef CONFIG_X86_64
13605- /*
13606- * We need valid kernel segments for data and code in long mode too
13607- * IRET will check the segment types kkeil 2000/10/28
13608- * Also sysret mandates a special GDT layout
13609- *
13610- * TLS descriptors are currently at a different place compared to i386.
13611- * Hopefully nobody expects them at a fixed place (Wine?)
13612- */
13613- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13614- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13615- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13616- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13617- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13618- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13619-#else
13620- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13621- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13622- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13623- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13624- /*
13625- * Segments used for calling PnP BIOS have byte granularity.
13626- * They code segments and data segments have fixed 64k limits,
13627- * the transfer segment sizes are set at run time.
13628- */
13629- /* 32-bit code */
13630- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13631- /* 16-bit code */
13632- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13633- /* 16-bit data */
13634- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13635- /* 16-bit data */
13636- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13637- /* 16-bit data */
13638- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13639- /*
13640- * The APM segments have byte granularity and their bases
13641- * are set at run time. All have 64k limits.
13642- */
13643- /* 32-bit code */
13644- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13645- /* 16-bit code */
13646- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13647- /* data */
13648- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13649-
13650- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13651- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13652- GDT_STACK_CANARY_INIT
13653-#endif
13654-} };
13655-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13656-
13657 static int __init x86_xsave_setup(char *s)
13658 {
13659 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13660@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13661 {
13662 struct desc_ptr gdt_descr;
13663
13664- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13665+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13666 gdt_descr.size = GDT_SIZE - 1;
13667 load_gdt(&gdt_descr);
13668 /* Reload the per-cpu base */
13669@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13670 /* Filter out anything that depends on CPUID levels we don't have */
13671 filter_cpuid_features(c, true);
13672
13673+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
13674+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13675+#endif
13676+
13677 /* If the model name is still unset, do table lookup. */
13678 if (!c->x86_model_id[0]) {
13679 const char *p;
13680@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13681 }
13682 __setup("clearcpuid=", setup_disablecpuid);
13683
13684+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13685+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13686+
13687 #ifdef CONFIG_X86_64
13688 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13689
13690@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13691 EXPORT_PER_CPU_SYMBOL(current_task);
13692
13693 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13694- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13695+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13696 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13697
13698 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13699@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13700 {
13701 memset(regs, 0, sizeof(struct pt_regs));
13702 regs->fs = __KERNEL_PERCPU;
13703- regs->gs = __KERNEL_STACK_CANARY;
13704+ savesegment(gs, regs->gs);
13705
13706 return regs;
13707 }
13708@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13709 int i;
13710
13711 cpu = stack_smp_processor_id();
13712- t = &per_cpu(init_tss, cpu);
13713+ t = init_tss + cpu;
13714 orig_ist = &per_cpu(orig_ist, cpu);
13715
13716 #ifdef CONFIG_NUMA
13717@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13718 switch_to_new_gdt(cpu);
13719 loadsegment(fs, 0);
13720
13721- load_idt((const struct desc_ptr *)&idt_descr);
13722+ load_idt(&idt_descr);
13723
13724 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13725 syscall_init();
13726@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13727 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13728 barrier();
13729
13730- check_efer();
13731 if (cpu != 0)
13732 enable_x2apic();
13733
13734@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13735 {
13736 int cpu = smp_processor_id();
13737 struct task_struct *curr = current;
13738- struct tss_struct *t = &per_cpu(init_tss, cpu);
13739+ struct tss_struct *t = init_tss + cpu;
13740 struct thread_struct *thread = &curr->thread;
13741
13742 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13743diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13744index 6a77cca..4f4fca0 100644
13745--- a/arch/x86/kernel/cpu/intel.c
13746+++ b/arch/x86/kernel/cpu/intel.c
13747@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13748 * Update the IDT descriptor and reload the IDT so that
13749 * it uses the read-only mapped virtual address.
13750 */
13751- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13752+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13753 load_idt(&idt_descr);
13754 }
13755 #endif
13756diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13757index 417990f..96dc36b 100644
13758--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13759+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13760@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13761 return ret;
13762 }
13763
13764-static struct sysfs_ops sysfs_ops = {
13765+static const struct sysfs_ops sysfs_ops = {
13766 .show = show,
13767 .store = store,
13768 };
13769diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13770index 472763d..9831e11 100644
13771--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13772+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13773@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13774 static int inject_init(void)
13775 {
13776 printk(KERN_INFO "Machine check injector initialized\n");
13777- mce_chrdev_ops.write = mce_write;
13778+ pax_open_kernel();
13779+ *(void **)&mce_chrdev_ops.write = mce_write;
13780+ pax_close_kernel();
13781 register_die_notifier(&mce_raise_nb);
13782 return 0;
13783 }
13784diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13785index 0f16a2b..21740f5 100644
13786--- a/arch/x86/kernel/cpu/mcheck/mce.c
13787+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13788@@ -43,6 +43,7 @@
13789 #include <asm/ipi.h>
13790 #include <asm/mce.h>
13791 #include <asm/msr.h>
13792+#include <asm/local.h>
13793
13794 #include "mce-internal.h"
13795
13796@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13797 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13798 m->cs, m->ip);
13799
13800- if (m->cs == __KERNEL_CS)
13801+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13802 print_symbol("{%s}", m->ip);
13803 pr_cont("\n");
13804 }
13805@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13806
13807 #define PANIC_TIMEOUT 5 /* 5 seconds */
13808
13809-static atomic_t mce_paniced;
13810+static atomic_unchecked_t mce_paniced;
13811
13812 static int fake_panic;
13813-static atomic_t mce_fake_paniced;
13814+static atomic_unchecked_t mce_fake_paniced;
13815
13816 /* Panic in progress. Enable interrupts and wait for final IPI */
13817 static void wait_for_panic(void)
13818@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13819 /*
13820 * Make sure only one CPU runs in machine check panic
13821 */
13822- if (atomic_inc_return(&mce_paniced) > 1)
13823+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13824 wait_for_panic();
13825 barrier();
13826
13827@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13828 console_verbose();
13829 } else {
13830 /* Don't log too much for fake panic */
13831- if (atomic_inc_return(&mce_fake_paniced) > 1)
13832+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13833 return;
13834 }
13835 print_mce_head();
13836@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13837 * might have been modified by someone else.
13838 */
13839 rmb();
13840- if (atomic_read(&mce_paniced))
13841+ if (atomic_read_unchecked(&mce_paniced))
13842 wait_for_panic();
13843 if (!monarch_timeout)
13844 goto out;
13845@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13846 }
13847
13848 /* Call the installed machine check handler for this CPU setup. */
13849-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13850+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13851 unexpected_machine_check;
13852
13853 /*
13854@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13855 return;
13856 }
13857
13858+ pax_open_kernel();
13859 machine_check_vector = do_machine_check;
13860+ pax_close_kernel();
13861
13862 mce_init();
13863 mce_cpu_features(c);
13864@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13865 */
13866
13867 static DEFINE_SPINLOCK(mce_state_lock);
13868-static int open_count; /* #times opened */
13869+static local_t open_count; /* #times opened */
13870 static int open_exclu; /* already open exclusive? */
13871
13872 static int mce_open(struct inode *inode, struct file *file)
13873 {
13874 spin_lock(&mce_state_lock);
13875
13876- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13877+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13878 spin_unlock(&mce_state_lock);
13879
13880 return -EBUSY;
13881@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13882
13883 if (file->f_flags & O_EXCL)
13884 open_exclu = 1;
13885- open_count++;
13886+ local_inc(&open_count);
13887
13888 spin_unlock(&mce_state_lock);
13889
13890@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13891 {
13892 spin_lock(&mce_state_lock);
13893
13894- open_count--;
13895+ local_dec(&open_count);
13896 open_exclu = 0;
13897
13898 spin_unlock(&mce_state_lock);
13899@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13900 static void mce_reset(void)
13901 {
13902 cpu_missing = 0;
13903- atomic_set(&mce_fake_paniced, 0);
13904+ atomic_set_unchecked(&mce_fake_paniced, 0);
13905 atomic_set(&mce_executing, 0);
13906 atomic_set(&mce_callin, 0);
13907 atomic_set(&global_nwo, 0);
13908diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13909index ef3cd31..9d2f6ab 100644
13910--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13911+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13912@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13913 return ret;
13914 }
13915
13916-static struct sysfs_ops threshold_ops = {
13917+static const struct sysfs_ops threshold_ops = {
13918 .show = show,
13919 .store = store,
13920 };
13921diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13922index 5c0e653..1e82c7c 100644
13923--- a/arch/x86/kernel/cpu/mcheck/p5.c
13924+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13925@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13926 if (!cpu_has(c, X86_FEATURE_MCE))
13927 return;
13928
13929+ pax_open_kernel();
13930 machine_check_vector = pentium_machine_check;
13931+ pax_close_kernel();
13932 /* Make sure the vector pointer is visible before we enable MCEs: */
13933 wmb();
13934
13935diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13936index 54060f5..e6ba93d 100644
13937--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13938+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13939@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13940 {
13941 u32 lo, hi;
13942
13943+ pax_open_kernel();
13944 machine_check_vector = winchip_machine_check;
13945+ pax_close_kernel();
13946 /* Make sure the vector pointer is visible before we enable MCEs: */
13947 wmb();
13948
13949diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13950index 33af141..92ba9cd 100644
13951--- a/arch/x86/kernel/cpu/mtrr/amd.c
13952+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13953@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13954 return 0;
13955 }
13956
13957-static struct mtrr_ops amd_mtrr_ops = {
13958+static const struct mtrr_ops amd_mtrr_ops = {
13959 .vendor = X86_VENDOR_AMD,
13960 .set = amd_set_mtrr,
13961 .get = amd_get_mtrr,
13962diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13963index de89f14..316fe3e 100644
13964--- a/arch/x86/kernel/cpu/mtrr/centaur.c
13965+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13966@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13967 return 0;
13968 }
13969
13970-static struct mtrr_ops centaur_mtrr_ops = {
13971+static const struct mtrr_ops centaur_mtrr_ops = {
13972 .vendor = X86_VENDOR_CENTAUR,
13973 .set = centaur_set_mcr,
13974 .get = centaur_get_mcr,
13975diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
13976index 228d982..68a3343 100644
13977--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
13978+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
13979@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
13980 post_set();
13981 }
13982
13983-static struct mtrr_ops cyrix_mtrr_ops = {
13984+static const struct mtrr_ops cyrix_mtrr_ops = {
13985 .vendor = X86_VENDOR_CYRIX,
13986 .set_all = cyrix_set_all,
13987 .set = cyrix_set_arr,
13988diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
13989index 55da0c5..4d75584 100644
13990--- a/arch/x86/kernel/cpu/mtrr/generic.c
13991+++ b/arch/x86/kernel/cpu/mtrr/generic.c
13992@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
13993 /*
13994 * Generic structure...
13995 */
13996-struct mtrr_ops generic_mtrr_ops = {
13997+const struct mtrr_ops generic_mtrr_ops = {
13998 .use_intel_if = 1,
13999 .set_all = generic_set_all,
14000 .get = generic_get_mtrr,
14001diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14002index fd60f09..c94ef52 100644
14003--- a/arch/x86/kernel/cpu/mtrr/main.c
14004+++ b/arch/x86/kernel/cpu/mtrr/main.c
14005@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14006 u64 size_or_mask, size_and_mask;
14007 static bool mtrr_aps_delayed_init;
14008
14009-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14010+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14011
14012-struct mtrr_ops *mtrr_if;
14013+const struct mtrr_ops *mtrr_if;
14014
14015 static void set_mtrr(unsigned int reg, unsigned long base,
14016 unsigned long size, mtrr_type type);
14017
14018-void set_mtrr_ops(struct mtrr_ops *ops)
14019+void set_mtrr_ops(const struct mtrr_ops *ops)
14020 {
14021 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14022 mtrr_ops[ops->vendor] = ops;
14023diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14024index a501dee..816c719 100644
14025--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14026+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14027@@ -25,14 +25,14 @@ struct mtrr_ops {
14028 int (*validate_add_page)(unsigned long base, unsigned long size,
14029 unsigned int type);
14030 int (*have_wrcomb)(void);
14031-};
14032+} __do_const;
14033
14034 extern int generic_get_free_region(unsigned long base, unsigned long size,
14035 int replace_reg);
14036 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14037 unsigned int type);
14038
14039-extern struct mtrr_ops generic_mtrr_ops;
14040+extern const struct mtrr_ops generic_mtrr_ops;
14041
14042 extern int positive_have_wrcomb(void);
14043
14044@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14045 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14046 void get_mtrr_state(void);
14047
14048-extern void set_mtrr_ops(struct mtrr_ops *ops);
14049+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14050
14051 extern u64 size_or_mask, size_and_mask;
14052-extern struct mtrr_ops *mtrr_if;
14053+extern const struct mtrr_ops *mtrr_if;
14054
14055 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14056 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14057diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14058index 0ff02ca..fc49a60 100644
14059--- a/arch/x86/kernel/cpu/perf_event.c
14060+++ b/arch/x86/kernel/cpu/perf_event.c
14061@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14062 * count to the generic event atomically:
14063 */
14064 again:
14065- prev_raw_count = atomic64_read(&hwc->prev_count);
14066+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14067 rdmsrl(hwc->event_base + idx, new_raw_count);
14068
14069- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14070+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14071 new_raw_count) != prev_raw_count)
14072 goto again;
14073
14074@@ -741,7 +741,7 @@ again:
14075 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14076 delta >>= shift;
14077
14078- atomic64_add(delta, &event->count);
14079+ atomic64_add_unchecked(delta, &event->count);
14080 atomic64_sub(delta, &hwc->period_left);
14081
14082 return new_raw_count;
14083@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14084 * The hw event starts counting from this event offset,
14085 * mark it to be able to extra future deltas:
14086 */
14087- atomic64_set(&hwc->prev_count, (u64)-left);
14088+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14089
14090 err = checking_wrmsrl(hwc->event_base + idx,
14091 (u64)(-left) & x86_pmu.event_mask);
14092@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14093 break;
14094
14095 callchain_store(entry, frame.return_address);
14096- fp = frame.next_frame;
14097+ fp = (__force const void __user *)frame.next_frame;
14098 }
14099 }
14100
14101diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14102index 898df97..9e82503 100644
14103--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14104+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14105@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14106
14107 /* Interface defining a CPU specific perfctr watchdog */
14108 struct wd_ops {
14109- int (*reserve)(void);
14110- void (*unreserve)(void);
14111- int (*setup)(unsigned nmi_hz);
14112- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14113- void (*stop)(void);
14114+ int (* const reserve)(void);
14115+ void (* const unreserve)(void);
14116+ int (* const setup)(unsigned nmi_hz);
14117+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14118+ void (* const stop)(void);
14119 unsigned perfctr;
14120 unsigned evntsel;
14121 u64 checkbit;
14122@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14123 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14124 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14125
14126+/* cannot be const */
14127 static struct wd_ops intel_arch_wd_ops;
14128
14129 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14130@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14131 return 1;
14132 }
14133
14134+/* cannot be const */
14135 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14136 .reserve = single_msr_reserve,
14137 .unreserve = single_msr_unreserve,
14138diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14139index ff95824..2ffdcb5 100644
14140--- a/arch/x86/kernel/crash.c
14141+++ b/arch/x86/kernel/crash.c
14142@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14143 regs = args->regs;
14144
14145 #ifdef CONFIG_X86_32
14146- if (!user_mode_vm(regs)) {
14147+ if (!user_mode(regs)) {
14148 crash_fixup_ss_esp(&fixed_regs, regs);
14149 regs = &fixed_regs;
14150 }
14151diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14152index 37250fe..bf2ec74 100644
14153--- a/arch/x86/kernel/doublefault_32.c
14154+++ b/arch/x86/kernel/doublefault_32.c
14155@@ -11,7 +11,7 @@
14156
14157 #define DOUBLEFAULT_STACKSIZE (1024)
14158 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14159-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14160+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14161
14162 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14163
14164@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14165 unsigned long gdt, tss;
14166
14167 store_gdt(&gdt_desc);
14168- gdt = gdt_desc.address;
14169+ gdt = (unsigned long)gdt_desc.address;
14170
14171 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14172
14173@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14174 /* 0x2 bit is always set */
14175 .flags = X86_EFLAGS_SF | 0x2,
14176 .sp = STACK_START,
14177- .es = __USER_DS,
14178+ .es = __KERNEL_DS,
14179 .cs = __KERNEL_CS,
14180 .ss = __KERNEL_DS,
14181- .ds = __USER_DS,
14182+ .ds = __KERNEL_DS,
14183 .fs = __KERNEL_PERCPU,
14184
14185 .__cr3 = __pa_nodebug(swapper_pg_dir),
14186diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14187index 2d8a371..4fa6ae6 100644
14188--- a/arch/x86/kernel/dumpstack.c
14189+++ b/arch/x86/kernel/dumpstack.c
14190@@ -2,6 +2,9 @@
14191 * Copyright (C) 1991, 1992 Linus Torvalds
14192 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14193 */
14194+#ifdef CONFIG_GRKERNSEC_HIDESYM
14195+#define __INCLUDED_BY_HIDESYM 1
14196+#endif
14197 #include <linux/kallsyms.h>
14198 #include <linux/kprobes.h>
14199 #include <linux/uaccess.h>
14200@@ -28,7 +31,7 @@ static int die_counter;
14201
14202 void printk_address(unsigned long address, int reliable)
14203 {
14204- printk(" [<%p>] %s%pS\n", (void *) address,
14205+ printk(" [<%p>] %s%pA\n", (void *) address,
14206 reliable ? "" : "? ", (void *) address);
14207 }
14208
14209@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14210 static void
14211 print_ftrace_graph_addr(unsigned long addr, void *data,
14212 const struct stacktrace_ops *ops,
14213- struct thread_info *tinfo, int *graph)
14214+ struct task_struct *task, int *graph)
14215 {
14216- struct task_struct *task = tinfo->task;
14217 unsigned long ret_addr;
14218 int index = task->curr_ret_stack;
14219
14220@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14221 static inline void
14222 print_ftrace_graph_addr(unsigned long addr, void *data,
14223 const struct stacktrace_ops *ops,
14224- struct thread_info *tinfo, int *graph)
14225+ struct task_struct *task, int *graph)
14226 { }
14227 #endif
14228
14229@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14230 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14231 */
14232
14233-static inline int valid_stack_ptr(struct thread_info *tinfo,
14234- void *p, unsigned int size, void *end)
14235+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14236 {
14237- void *t = tinfo;
14238 if (end) {
14239 if (p < end && p >= (end-THREAD_SIZE))
14240 return 1;
14241@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14242 }
14243
14244 unsigned long
14245-print_context_stack(struct thread_info *tinfo,
14246+print_context_stack(struct task_struct *task, void *stack_start,
14247 unsigned long *stack, unsigned long bp,
14248 const struct stacktrace_ops *ops, void *data,
14249 unsigned long *end, int *graph)
14250 {
14251 struct stack_frame *frame = (struct stack_frame *)bp;
14252
14253- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14254+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14255 unsigned long addr;
14256
14257 addr = *stack;
14258@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14259 } else {
14260 ops->address(data, addr, 0);
14261 }
14262- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14263+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14264 }
14265 stack++;
14266 }
14267@@ -180,7 +180,7 @@ void dump_stack(void)
14268 #endif
14269
14270 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14271- current->pid, current->comm, print_tainted(),
14272+ task_pid_nr(current), current->comm, print_tainted(),
14273 init_utsname()->release,
14274 (int)strcspn(init_utsname()->version, " "),
14275 init_utsname()->version);
14276@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14277 return flags;
14278 }
14279
14280+extern void gr_handle_kernel_exploit(void);
14281+
14282 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14283 {
14284 if (regs && kexec_should_crash(current))
14285@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14286 panic("Fatal exception in interrupt");
14287 if (panic_on_oops)
14288 panic("Fatal exception");
14289- do_exit(signr);
14290+
14291+ gr_handle_kernel_exploit();
14292+
14293+ do_group_exit(signr);
14294 }
14295
14296 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14297@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14298 unsigned long flags = oops_begin();
14299 int sig = SIGSEGV;
14300
14301- if (!user_mode_vm(regs))
14302+ if (!user_mode(regs))
14303 report_bug(regs->ip, regs);
14304
14305 if (__die(str, regs, err))
14306diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14307index 81086c2..13e8b17 100644
14308--- a/arch/x86/kernel/dumpstack.h
14309+++ b/arch/x86/kernel/dumpstack.h
14310@@ -15,7 +15,7 @@
14311 #endif
14312
14313 extern unsigned long
14314-print_context_stack(struct thread_info *tinfo,
14315+print_context_stack(struct task_struct *task, void *stack_start,
14316 unsigned long *stack, unsigned long bp,
14317 const struct stacktrace_ops *ops, void *data,
14318 unsigned long *end, int *graph);
14319diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14320index f7dd2a7..504f53b 100644
14321--- a/arch/x86/kernel/dumpstack_32.c
14322+++ b/arch/x86/kernel/dumpstack_32.c
14323@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14324 #endif
14325
14326 for (;;) {
14327- struct thread_info *context;
14328+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14329+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14330
14331- context = (struct thread_info *)
14332- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14333- bp = print_context_stack(context, stack, bp, ops,
14334- data, NULL, &graph);
14335-
14336- stack = (unsigned long *)context->previous_esp;
14337- if (!stack)
14338+ if (stack_start == task_stack_page(task))
14339 break;
14340+ stack = *(unsigned long **)stack_start;
14341 if (ops->stack(data, "IRQ") < 0)
14342 break;
14343 touch_nmi_watchdog();
14344@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14345 * When in-kernel, we also print out the stack and code at the
14346 * time of the fault..
14347 */
14348- if (!user_mode_vm(regs)) {
14349+ if (!user_mode(regs)) {
14350 unsigned int code_prologue = code_bytes * 43 / 64;
14351 unsigned int code_len = code_bytes;
14352 unsigned char c;
14353 u8 *ip;
14354+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14355
14356 printk(KERN_EMERG "Stack:\n");
14357 show_stack_log_lvl(NULL, regs, &regs->sp,
14358@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14359
14360 printk(KERN_EMERG "Code: ");
14361
14362- ip = (u8 *)regs->ip - code_prologue;
14363+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14364 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14365 /* try starting at IP */
14366- ip = (u8 *)regs->ip;
14367+ ip = (u8 *)regs->ip + cs_base;
14368 code_len = code_len - code_prologue + 1;
14369 }
14370 for (i = 0; i < code_len; i++, ip++) {
14371@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14372 printk(" Bad EIP value.");
14373 break;
14374 }
14375- if (ip == (u8 *)regs->ip)
14376+ if (ip == (u8 *)regs->ip + cs_base)
14377 printk("<%02x> ", c);
14378 else
14379 printk("%02x ", c);
14380@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14381 printk("\n");
14382 }
14383
14384+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14385+void pax_check_alloca(unsigned long size)
14386+{
14387+ unsigned long sp = (unsigned long)&sp, stack_left;
14388+
14389+ /* all kernel stacks are of the same size */
14390+ stack_left = sp & (THREAD_SIZE - 1);
14391+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14392+}
14393+EXPORT_SYMBOL(pax_check_alloca);
14394+#endif
14395+
14396 int is_valid_bugaddr(unsigned long ip)
14397 {
14398 unsigned short ud2;
14399
14400+ ip = ktla_ktva(ip);
14401 if (ip < PAGE_OFFSET)
14402 return 0;
14403 if (probe_kernel_address((unsigned short *)ip, ud2))
14404diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14405index a071e6b..36cd585 100644
14406--- a/arch/x86/kernel/dumpstack_64.c
14407+++ b/arch/x86/kernel/dumpstack_64.c
14408@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14409 unsigned long *irq_stack_end =
14410 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14411 unsigned used = 0;
14412- struct thread_info *tinfo;
14413 int graph = 0;
14414+ void *stack_start;
14415
14416 if (!task)
14417 task = current;
14418@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14419 * current stack address. If the stacks consist of nested
14420 * exceptions
14421 */
14422- tinfo = task_thread_info(task);
14423 for (;;) {
14424 char *id;
14425 unsigned long *estack_end;
14426+
14427 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14428 &used, &id);
14429
14430@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14431 if (ops->stack(data, id) < 0)
14432 break;
14433
14434- bp = print_context_stack(tinfo, stack, bp, ops,
14435+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14436 data, estack_end, &graph);
14437 ops->stack(data, "<EOE>");
14438 /*
14439@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14440 if (stack >= irq_stack && stack < irq_stack_end) {
14441 if (ops->stack(data, "IRQ") < 0)
14442 break;
14443- bp = print_context_stack(tinfo, stack, bp,
14444+ bp = print_context_stack(task, irq_stack, stack, bp,
14445 ops, data, irq_stack_end, &graph);
14446 /*
14447 * We link to the next stack (which would be
14448@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14449 /*
14450 * This handles the process stack:
14451 */
14452- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14453+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14454+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14455 put_cpu();
14456 }
14457 EXPORT_SYMBOL(dump_trace);
14458@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14459 return ud2 == 0x0b0f;
14460 }
14461
14462+
14463+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14464+void pax_check_alloca(unsigned long size)
14465+{
14466+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14467+ unsigned cpu, used;
14468+ char *id;
14469+
14470+ /* check the process stack first */
14471+ stack_start = (unsigned long)task_stack_page(current);
14472+ stack_end = stack_start + THREAD_SIZE;
14473+ if (likely(stack_start <= sp && sp < stack_end)) {
14474+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14475+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14476+ return;
14477+ }
14478+
14479+ cpu = get_cpu();
14480+
14481+ /* check the irq stacks */
14482+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14483+ stack_start = stack_end - IRQ_STACK_SIZE;
14484+ if (stack_start <= sp && sp < stack_end) {
14485+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14486+ put_cpu();
14487+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14488+ return;
14489+ }
14490+
14491+ /* check the exception stacks */
14492+ used = 0;
14493+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14494+ stack_start = stack_end - EXCEPTION_STKSZ;
14495+ if (stack_end && stack_start <= sp && sp < stack_end) {
14496+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14497+ put_cpu();
14498+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14499+ return;
14500+ }
14501+
14502+ put_cpu();
14503+
14504+ /* unknown stack */
14505+ BUG();
14506+}
14507+EXPORT_SYMBOL(pax_check_alloca);
14508+#endif
14509diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14510index a89739a..95e0c48 100644
14511--- a/arch/x86/kernel/e820.c
14512+++ b/arch/x86/kernel/e820.c
14513@@ -733,7 +733,7 @@ struct early_res {
14514 };
14515 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14516 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14517- {}
14518+ { 0, 0, {0}, 0 }
14519 };
14520
14521 static int __init find_overlapped_early(u64 start, u64 end)
14522diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14523index b9c830c..1e41a96 100644
14524--- a/arch/x86/kernel/early_printk.c
14525+++ b/arch/x86/kernel/early_printk.c
14526@@ -7,6 +7,7 @@
14527 #include <linux/pci_regs.h>
14528 #include <linux/pci_ids.h>
14529 #include <linux/errno.h>
14530+#include <linux/sched.h>
14531 #include <asm/io.h>
14532 #include <asm/processor.h>
14533 #include <asm/fcntl.h>
14534@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14535 int n;
14536 va_list ap;
14537
14538+ pax_track_stack();
14539+
14540 va_start(ap, fmt);
14541 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14542 early_console->write(early_console, buf, n);
14543diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14544index 5cab48e..b025f9b 100644
14545--- a/arch/x86/kernel/efi_32.c
14546+++ b/arch/x86/kernel/efi_32.c
14547@@ -38,70 +38,56 @@
14548 */
14549
14550 static unsigned long efi_rt_eflags;
14551-static pgd_t efi_bak_pg_dir_pointer[2];
14552+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14553
14554-void efi_call_phys_prelog(void)
14555+void __init efi_call_phys_prelog(void)
14556 {
14557- unsigned long cr4;
14558- unsigned long temp;
14559 struct desc_ptr gdt_descr;
14560
14561+#ifdef CONFIG_PAX_KERNEXEC
14562+ struct desc_struct d;
14563+#endif
14564+
14565 local_irq_save(efi_rt_eflags);
14566
14567- /*
14568- * If I don't have PAE, I should just duplicate two entries in page
14569- * directory. If I have PAE, I just need to duplicate one entry in
14570- * page directory.
14571- */
14572- cr4 = read_cr4_safe();
14573-
14574- if (cr4 & X86_CR4_PAE) {
14575- efi_bak_pg_dir_pointer[0].pgd =
14576- swapper_pg_dir[pgd_index(0)].pgd;
14577- swapper_pg_dir[0].pgd =
14578- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14579- } else {
14580- efi_bak_pg_dir_pointer[0].pgd =
14581- swapper_pg_dir[pgd_index(0)].pgd;
14582- efi_bak_pg_dir_pointer[1].pgd =
14583- swapper_pg_dir[pgd_index(0x400000)].pgd;
14584- swapper_pg_dir[pgd_index(0)].pgd =
14585- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14586- temp = PAGE_OFFSET + 0x400000;
14587- swapper_pg_dir[pgd_index(0x400000)].pgd =
14588- swapper_pg_dir[pgd_index(temp)].pgd;
14589- }
14590+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14591+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14592+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14593
14594 /*
14595 * After the lock is released, the original page table is restored.
14596 */
14597 __flush_tlb_all();
14598
14599+#ifdef CONFIG_PAX_KERNEXEC
14600+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14601+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14602+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14603+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14604+#endif
14605+
14606 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14607 gdt_descr.size = GDT_SIZE - 1;
14608 load_gdt(&gdt_descr);
14609 }
14610
14611-void efi_call_phys_epilog(void)
14612+void __init efi_call_phys_epilog(void)
14613 {
14614- unsigned long cr4;
14615 struct desc_ptr gdt_descr;
14616
14617+#ifdef CONFIG_PAX_KERNEXEC
14618+ struct desc_struct d;
14619+
14620+ memset(&d, 0, sizeof d);
14621+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14622+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14623+#endif
14624+
14625 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14626 gdt_descr.size = GDT_SIZE - 1;
14627 load_gdt(&gdt_descr);
14628
14629- cr4 = read_cr4_safe();
14630-
14631- if (cr4 & X86_CR4_PAE) {
14632- swapper_pg_dir[pgd_index(0)].pgd =
14633- efi_bak_pg_dir_pointer[0].pgd;
14634- } else {
14635- swapper_pg_dir[pgd_index(0)].pgd =
14636- efi_bak_pg_dir_pointer[0].pgd;
14637- swapper_pg_dir[pgd_index(0x400000)].pgd =
14638- efi_bak_pg_dir_pointer[1].pgd;
14639- }
14640+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14641
14642 /*
14643 * After the lock is released, the original page table is restored.
14644diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14645index fbe66e6..c5c0dd2 100644
14646--- a/arch/x86/kernel/efi_stub_32.S
14647+++ b/arch/x86/kernel/efi_stub_32.S
14648@@ -6,7 +6,9 @@
14649 */
14650
14651 #include <linux/linkage.h>
14652+#include <linux/init.h>
14653 #include <asm/page_types.h>
14654+#include <asm/segment.h>
14655
14656 /*
14657 * efi_call_phys(void *, ...) is a function with variable parameters.
14658@@ -20,7 +22,7 @@
14659 * service functions will comply with gcc calling convention, too.
14660 */
14661
14662-.text
14663+__INIT
14664 ENTRY(efi_call_phys)
14665 /*
14666 * 0. The function can only be called in Linux kernel. So CS has been
14667@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14668 * The mapping of lower virtual memory has been created in prelog and
14669 * epilog.
14670 */
14671- movl $1f, %edx
14672- subl $__PAGE_OFFSET, %edx
14673- jmp *%edx
14674+ movl $(__KERNEXEC_EFI_DS), %edx
14675+ mov %edx, %ds
14676+ mov %edx, %es
14677+ mov %edx, %ss
14678+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14679 1:
14680
14681 /*
14682@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14683 * parameter 2, ..., param n. To make things easy, we save the return
14684 * address of efi_call_phys in a global variable.
14685 */
14686- popl %edx
14687- movl %edx, saved_return_addr
14688- /* get the function pointer into ECX*/
14689- popl %ecx
14690- movl %ecx, efi_rt_function_ptr
14691- movl $2f, %edx
14692- subl $__PAGE_OFFSET, %edx
14693- pushl %edx
14694+ popl (saved_return_addr)
14695+ popl (efi_rt_function_ptr)
14696
14697 /*
14698 * 3. Clear PG bit in %CR0.
14699@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14700 /*
14701 * 5. Call the physical function.
14702 */
14703- jmp *%ecx
14704+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14705
14706-2:
14707 /*
14708 * 6. After EFI runtime service returns, control will return to
14709 * following instruction. We'd better readjust stack pointer first.
14710@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14711 movl %cr0, %edx
14712 orl $0x80000000, %edx
14713 movl %edx, %cr0
14714- jmp 1f
14715-1:
14716+
14717 /*
14718 * 8. Now restore the virtual mode from flat mode by
14719 * adding EIP with PAGE_OFFSET.
14720 */
14721- movl $1f, %edx
14722- jmp *%edx
14723+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14724 1:
14725+ movl $(__KERNEL_DS), %edx
14726+ mov %edx, %ds
14727+ mov %edx, %es
14728+ mov %edx, %ss
14729
14730 /*
14731 * 9. Balance the stack. And because EAX contain the return value,
14732 * we'd better not clobber it.
14733 */
14734- leal efi_rt_function_ptr, %edx
14735- movl (%edx), %ecx
14736- pushl %ecx
14737+ pushl (efi_rt_function_ptr)
14738
14739 /*
14740- * 10. Push the saved return address onto the stack and return.
14741+ * 10. Return to the saved return address.
14742 */
14743- leal saved_return_addr, %edx
14744- movl (%edx), %ecx
14745- pushl %ecx
14746- ret
14747+ jmpl *(saved_return_addr)
14748 ENDPROC(efi_call_phys)
14749 .previous
14750
14751-.data
14752+__INITDATA
14753 saved_return_addr:
14754 .long 0
14755 efi_rt_function_ptr:
14756diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14757index 4c07cca..2c8427d 100644
14758--- a/arch/x86/kernel/efi_stub_64.S
14759+++ b/arch/x86/kernel/efi_stub_64.S
14760@@ -7,6 +7,7 @@
14761 */
14762
14763 #include <linux/linkage.h>
14764+#include <asm/alternative-asm.h>
14765
14766 #define SAVE_XMM \
14767 mov %rsp, %rax; \
14768@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14769 call *%rdi
14770 addq $32, %rsp
14771 RESTORE_XMM
14772+ pax_force_retaddr 0, 1
14773 ret
14774 ENDPROC(efi_call0)
14775
14776@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14777 call *%rdi
14778 addq $32, %rsp
14779 RESTORE_XMM
14780+ pax_force_retaddr 0, 1
14781 ret
14782 ENDPROC(efi_call1)
14783
14784@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14785 call *%rdi
14786 addq $32, %rsp
14787 RESTORE_XMM
14788+ pax_force_retaddr 0, 1
14789 ret
14790 ENDPROC(efi_call2)
14791
14792@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14793 call *%rdi
14794 addq $32, %rsp
14795 RESTORE_XMM
14796+ pax_force_retaddr 0, 1
14797 ret
14798 ENDPROC(efi_call3)
14799
14800@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14801 call *%rdi
14802 addq $32, %rsp
14803 RESTORE_XMM
14804+ pax_force_retaddr 0, 1
14805 ret
14806 ENDPROC(efi_call4)
14807
14808@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14809 call *%rdi
14810 addq $48, %rsp
14811 RESTORE_XMM
14812+ pax_force_retaddr 0, 1
14813 ret
14814 ENDPROC(efi_call5)
14815
14816@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14817 call *%rdi
14818 addq $48, %rsp
14819 RESTORE_XMM
14820+ pax_force_retaddr 0, 1
14821 ret
14822 ENDPROC(efi_call6)
14823diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14824index c097e7d..c689cf4 100644
14825--- a/arch/x86/kernel/entry_32.S
14826+++ b/arch/x86/kernel/entry_32.S
14827@@ -185,13 +185,146 @@
14828 /*CFI_REL_OFFSET gs, PT_GS*/
14829 .endm
14830 .macro SET_KERNEL_GS reg
14831+
14832+#ifdef CONFIG_CC_STACKPROTECTOR
14833 movl $(__KERNEL_STACK_CANARY), \reg
14834+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14835+ movl $(__USER_DS), \reg
14836+#else
14837+ xorl \reg, \reg
14838+#endif
14839+
14840 movl \reg, %gs
14841 .endm
14842
14843 #endif /* CONFIG_X86_32_LAZY_GS */
14844
14845-.macro SAVE_ALL
14846+.macro pax_enter_kernel
14847+#ifdef CONFIG_PAX_KERNEXEC
14848+ call pax_enter_kernel
14849+#endif
14850+.endm
14851+
14852+.macro pax_exit_kernel
14853+#ifdef CONFIG_PAX_KERNEXEC
14854+ call pax_exit_kernel
14855+#endif
14856+.endm
14857+
14858+#ifdef CONFIG_PAX_KERNEXEC
14859+ENTRY(pax_enter_kernel)
14860+#ifdef CONFIG_PARAVIRT
14861+ pushl %eax
14862+ pushl %ecx
14863+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14864+ mov %eax, %esi
14865+#else
14866+ mov %cr0, %esi
14867+#endif
14868+ bts $16, %esi
14869+ jnc 1f
14870+ mov %cs, %esi
14871+ cmp $__KERNEL_CS, %esi
14872+ jz 3f
14873+ ljmp $__KERNEL_CS, $3f
14874+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14875+2:
14876+#ifdef CONFIG_PARAVIRT
14877+ mov %esi, %eax
14878+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14879+#else
14880+ mov %esi, %cr0
14881+#endif
14882+3:
14883+#ifdef CONFIG_PARAVIRT
14884+ popl %ecx
14885+ popl %eax
14886+#endif
14887+ ret
14888+ENDPROC(pax_enter_kernel)
14889+
14890+ENTRY(pax_exit_kernel)
14891+#ifdef CONFIG_PARAVIRT
14892+ pushl %eax
14893+ pushl %ecx
14894+#endif
14895+ mov %cs, %esi
14896+ cmp $__KERNEXEC_KERNEL_CS, %esi
14897+ jnz 2f
14898+#ifdef CONFIG_PARAVIRT
14899+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14900+ mov %eax, %esi
14901+#else
14902+ mov %cr0, %esi
14903+#endif
14904+ btr $16, %esi
14905+ ljmp $__KERNEL_CS, $1f
14906+1:
14907+#ifdef CONFIG_PARAVIRT
14908+ mov %esi, %eax
14909+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14910+#else
14911+ mov %esi, %cr0
14912+#endif
14913+2:
14914+#ifdef CONFIG_PARAVIRT
14915+ popl %ecx
14916+ popl %eax
14917+#endif
14918+ ret
14919+ENDPROC(pax_exit_kernel)
14920+#endif
14921+
14922+.macro pax_erase_kstack
14923+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14924+ call pax_erase_kstack
14925+#endif
14926+.endm
14927+
14928+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14929+/*
14930+ * ebp: thread_info
14931+ * ecx, edx: can be clobbered
14932+ */
14933+ENTRY(pax_erase_kstack)
14934+ pushl %edi
14935+ pushl %eax
14936+
14937+ mov TI_lowest_stack(%ebp), %edi
14938+ mov $-0xBEEF, %eax
14939+ std
14940+
14941+1: mov %edi, %ecx
14942+ and $THREAD_SIZE_asm - 1, %ecx
14943+ shr $2, %ecx
14944+ repne scasl
14945+ jecxz 2f
14946+
14947+ cmp $2*16, %ecx
14948+ jc 2f
14949+
14950+ mov $2*16, %ecx
14951+ repe scasl
14952+ jecxz 2f
14953+ jne 1b
14954+
14955+2: cld
14956+ mov %esp, %ecx
14957+ sub %edi, %ecx
14958+ shr $2, %ecx
14959+ rep stosl
14960+
14961+ mov TI_task_thread_sp0(%ebp), %edi
14962+ sub $128, %edi
14963+ mov %edi, TI_lowest_stack(%ebp)
14964+
14965+ popl %eax
14966+ popl %edi
14967+ ret
14968+ENDPROC(pax_erase_kstack)
14969+#endif
14970+
14971+.macro __SAVE_ALL _DS
14972 cld
14973 PUSH_GS
14974 pushl %fs
14975@@ -224,7 +357,7 @@
14976 pushl %ebx
14977 CFI_ADJUST_CFA_OFFSET 4
14978 CFI_REL_OFFSET ebx, 0
14979- movl $(__USER_DS), %edx
14980+ movl $\_DS, %edx
14981 movl %edx, %ds
14982 movl %edx, %es
14983 movl $(__KERNEL_PERCPU), %edx
14984@@ -232,6 +365,15 @@
14985 SET_KERNEL_GS %edx
14986 .endm
14987
14988+.macro SAVE_ALL
14989+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14990+ __SAVE_ALL __KERNEL_DS
14991+ pax_enter_kernel
14992+#else
14993+ __SAVE_ALL __USER_DS
14994+#endif
14995+.endm
14996+
14997 .macro RESTORE_INT_REGS
14998 popl %ebx
14999 CFI_ADJUST_CFA_OFFSET -4
15000@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15001 CFI_ADJUST_CFA_OFFSET -4
15002 jmp syscall_exit
15003 CFI_ENDPROC
15004-END(ret_from_fork)
15005+ENDPROC(ret_from_fork)
15006
15007 /*
15008 * Return to user mode is not as complex as all this looks,
15009@@ -352,7 +494,15 @@ check_userspace:
15010 movb PT_CS(%esp), %al
15011 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15012 cmpl $USER_RPL, %eax
15013+
15014+#ifdef CONFIG_PAX_KERNEXEC
15015+ jae resume_userspace
15016+
15017+ PAX_EXIT_KERNEL
15018+ jmp resume_kernel
15019+#else
15020 jb resume_kernel # not returning to v8086 or userspace
15021+#endif
15022
15023 ENTRY(resume_userspace)
15024 LOCKDEP_SYS_EXIT
15025@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15026 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15027 # int/exception return?
15028 jne work_pending
15029- jmp restore_all
15030-END(ret_from_exception)
15031+ jmp restore_all_pax
15032+ENDPROC(ret_from_exception)
15033
15034 #ifdef CONFIG_PREEMPT
15035 ENTRY(resume_kernel)
15036@@ -380,7 +530,7 @@ need_resched:
15037 jz restore_all
15038 call preempt_schedule_irq
15039 jmp need_resched
15040-END(resume_kernel)
15041+ENDPROC(resume_kernel)
15042 #endif
15043 CFI_ENDPROC
15044
15045@@ -414,25 +564,36 @@ sysenter_past_esp:
15046 /*CFI_REL_OFFSET cs, 0*/
15047 /*
15048 * Push current_thread_info()->sysenter_return to the stack.
15049- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15050- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15051 */
15052- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15053+ pushl $0
15054 CFI_ADJUST_CFA_OFFSET 4
15055 CFI_REL_OFFSET eip, 0
15056
15057 pushl %eax
15058 CFI_ADJUST_CFA_OFFSET 4
15059 SAVE_ALL
15060+ GET_THREAD_INFO(%ebp)
15061+ movl TI_sysenter_return(%ebp),%ebp
15062+ movl %ebp,PT_EIP(%esp)
15063 ENABLE_INTERRUPTS(CLBR_NONE)
15064
15065 /*
15066 * Load the potential sixth argument from user stack.
15067 * Careful about security.
15068 */
15069+ movl PT_OLDESP(%esp),%ebp
15070+
15071+#ifdef CONFIG_PAX_MEMORY_UDEREF
15072+ mov PT_OLDSS(%esp),%ds
15073+1: movl %ds:(%ebp),%ebp
15074+ push %ss
15075+ pop %ds
15076+#else
15077 cmpl $__PAGE_OFFSET-3,%ebp
15078 jae syscall_fault
15079 1: movl (%ebp),%ebp
15080+#endif
15081+
15082 movl %ebp,PT_EBP(%esp)
15083 .section __ex_table,"a"
15084 .align 4
15085@@ -455,12 +616,24 @@ sysenter_do_call:
15086 testl $_TIF_ALLWORK_MASK, %ecx
15087 jne sysexit_audit
15088 sysenter_exit:
15089+
15090+#ifdef CONFIG_PAX_RANDKSTACK
15091+ pushl_cfi %eax
15092+ movl %esp, %eax
15093+ call pax_randomize_kstack
15094+ popl_cfi %eax
15095+#endif
15096+
15097+ pax_erase_kstack
15098+
15099 /* if something modifies registers it must also disable sysexit */
15100 movl PT_EIP(%esp), %edx
15101 movl PT_OLDESP(%esp), %ecx
15102 xorl %ebp,%ebp
15103 TRACE_IRQS_ON
15104 1: mov PT_FS(%esp), %fs
15105+2: mov PT_DS(%esp), %ds
15106+3: mov PT_ES(%esp), %es
15107 PTGS_TO_GS
15108 ENABLE_INTERRUPTS_SYSEXIT
15109
15110@@ -477,6 +650,9 @@ sysenter_audit:
15111 movl %eax,%edx /* 2nd arg: syscall number */
15112 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15113 call audit_syscall_entry
15114+
15115+ pax_erase_kstack
15116+
15117 pushl %ebx
15118 CFI_ADJUST_CFA_OFFSET 4
15119 movl PT_EAX(%esp),%eax /* reload syscall number */
15120@@ -504,11 +680,17 @@ sysexit_audit:
15121
15122 CFI_ENDPROC
15123 .pushsection .fixup,"ax"
15124-2: movl $0,PT_FS(%esp)
15125+4: movl $0,PT_FS(%esp)
15126+ jmp 1b
15127+5: movl $0,PT_DS(%esp)
15128+ jmp 1b
15129+6: movl $0,PT_ES(%esp)
15130 jmp 1b
15131 .section __ex_table,"a"
15132 .align 4
15133- .long 1b,2b
15134+ .long 1b,4b
15135+ .long 2b,5b
15136+ .long 3b,6b
15137 .popsection
15138 PTGS_TO_GS_EX
15139 ENDPROC(ia32_sysenter_target)
15140@@ -538,6 +720,15 @@ syscall_exit:
15141 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15142 jne syscall_exit_work
15143
15144+restore_all_pax:
15145+
15146+#ifdef CONFIG_PAX_RANDKSTACK
15147+ movl %esp, %eax
15148+ call pax_randomize_kstack
15149+#endif
15150+
15151+ pax_erase_kstack
15152+
15153 restore_all:
15154 TRACE_IRQS_IRET
15155 restore_all_notrace:
15156@@ -602,10 +793,29 @@ ldt_ss:
15157 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15158 mov %dx, %ax /* eax: new kernel esp */
15159 sub %eax, %edx /* offset (low word is 0) */
15160- PER_CPU(gdt_page, %ebx)
15161+#ifdef CONFIG_SMP
15162+ movl PER_CPU_VAR(cpu_number), %ebx
15163+ shll $PAGE_SHIFT_asm, %ebx
15164+ addl $cpu_gdt_table, %ebx
15165+#else
15166+ movl $cpu_gdt_table, %ebx
15167+#endif
15168 shr $16, %edx
15169+
15170+#ifdef CONFIG_PAX_KERNEXEC
15171+ mov %cr0, %esi
15172+ btr $16, %esi
15173+ mov %esi, %cr0
15174+#endif
15175+
15176 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15177 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15178+
15179+#ifdef CONFIG_PAX_KERNEXEC
15180+ bts $16, %esi
15181+ mov %esi, %cr0
15182+#endif
15183+
15184 pushl $__ESPFIX_SS
15185 CFI_ADJUST_CFA_OFFSET 4
15186 push %eax /* new kernel esp */
15187@@ -636,36 +846,30 @@ work_resched:
15188 movl TI_flags(%ebp), %ecx
15189 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15190 # than syscall tracing?
15191- jz restore_all
15192+ jz restore_all_pax
15193 testb $_TIF_NEED_RESCHED, %cl
15194 jnz work_resched
15195
15196 work_notifysig: # deal with pending signals and
15197 # notify-resume requests
15198+ movl %esp, %eax
15199 #ifdef CONFIG_VM86
15200 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15201- movl %esp, %eax
15202- jne work_notifysig_v86 # returning to kernel-space or
15203+ jz 1f # returning to kernel-space or
15204 # vm86-space
15205- xorl %edx, %edx
15206- call do_notify_resume
15207- jmp resume_userspace_sig
15208
15209- ALIGN
15210-work_notifysig_v86:
15211 pushl %ecx # save ti_flags for do_notify_resume
15212 CFI_ADJUST_CFA_OFFSET 4
15213 call save_v86_state # %eax contains pt_regs pointer
15214 popl %ecx
15215 CFI_ADJUST_CFA_OFFSET -4
15216 movl %eax, %esp
15217-#else
15218- movl %esp, %eax
15219+1:
15220 #endif
15221 xorl %edx, %edx
15222 call do_notify_resume
15223 jmp resume_userspace_sig
15224-END(work_pending)
15225+ENDPROC(work_pending)
15226
15227 # perform syscall exit tracing
15228 ALIGN
15229@@ -673,11 +877,14 @@ syscall_trace_entry:
15230 movl $-ENOSYS,PT_EAX(%esp)
15231 movl %esp, %eax
15232 call syscall_trace_enter
15233+
15234+ pax_erase_kstack
15235+
15236 /* What it returned is what we'll actually use. */
15237 cmpl $(nr_syscalls), %eax
15238 jnae syscall_call
15239 jmp syscall_exit
15240-END(syscall_trace_entry)
15241+ENDPROC(syscall_trace_entry)
15242
15243 # perform syscall exit tracing
15244 ALIGN
15245@@ -690,20 +897,24 @@ syscall_exit_work:
15246 movl %esp, %eax
15247 call syscall_trace_leave
15248 jmp resume_userspace
15249-END(syscall_exit_work)
15250+ENDPROC(syscall_exit_work)
15251 CFI_ENDPROC
15252
15253 RING0_INT_FRAME # can't unwind into user space anyway
15254 syscall_fault:
15255+#ifdef CONFIG_PAX_MEMORY_UDEREF
15256+ push %ss
15257+ pop %ds
15258+#endif
15259 GET_THREAD_INFO(%ebp)
15260 movl $-EFAULT,PT_EAX(%esp)
15261 jmp resume_userspace
15262-END(syscall_fault)
15263+ENDPROC(syscall_fault)
15264
15265 syscall_badsys:
15266 movl $-ENOSYS,PT_EAX(%esp)
15267 jmp resume_userspace
15268-END(syscall_badsys)
15269+ENDPROC(syscall_badsys)
15270 CFI_ENDPROC
15271
15272 /*
15273@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15274 PTREGSCALL(vm86)
15275 PTREGSCALL(vm86old)
15276
15277+ ALIGN;
15278+ENTRY(kernel_execve)
15279+ push %ebp
15280+ sub $PT_OLDSS+4,%esp
15281+ push %edi
15282+ push %ecx
15283+ push %eax
15284+ lea 3*4(%esp),%edi
15285+ mov $PT_OLDSS/4+1,%ecx
15286+ xorl %eax,%eax
15287+ rep stosl
15288+ pop %eax
15289+ pop %ecx
15290+ pop %edi
15291+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15292+ mov %eax,PT_EBX(%esp)
15293+ mov %edx,PT_ECX(%esp)
15294+ mov %ecx,PT_EDX(%esp)
15295+ mov %esp,%eax
15296+ call sys_execve
15297+ GET_THREAD_INFO(%ebp)
15298+ test %eax,%eax
15299+ jz syscall_exit
15300+ add $PT_OLDSS+4,%esp
15301+ pop %ebp
15302+ ret
15303+
15304 .macro FIXUP_ESPFIX_STACK
15305 /*
15306 * Switch back for ESPFIX stack to the normal zerobased stack
15307@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15308 * normal stack and adjusts ESP with the matching offset.
15309 */
15310 /* fixup the stack */
15311- PER_CPU(gdt_page, %ebx)
15312+#ifdef CONFIG_SMP
15313+ movl PER_CPU_VAR(cpu_number), %ebx
15314+ shll $PAGE_SHIFT_asm, %ebx
15315+ addl $cpu_gdt_table, %ebx
15316+#else
15317+ movl $cpu_gdt_table, %ebx
15318+#endif
15319 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15320 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15321 shl $16, %eax
15322@@ -793,7 +1037,7 @@ vector=vector+1
15323 .endr
15324 2: jmp common_interrupt
15325 .endr
15326-END(irq_entries_start)
15327+ENDPROC(irq_entries_start)
15328
15329 .previous
15330 END(interrupt)
15331@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15332 CFI_ADJUST_CFA_OFFSET 4
15333 jmp error_code
15334 CFI_ENDPROC
15335-END(coprocessor_error)
15336+ENDPROC(coprocessor_error)
15337
15338 ENTRY(simd_coprocessor_error)
15339 RING0_INT_FRAME
15340@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15341 CFI_ADJUST_CFA_OFFSET 4
15342 jmp error_code
15343 CFI_ENDPROC
15344-END(simd_coprocessor_error)
15345+ENDPROC(simd_coprocessor_error)
15346
15347 ENTRY(device_not_available)
15348 RING0_INT_FRAME
15349@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15350 CFI_ADJUST_CFA_OFFSET 4
15351 jmp error_code
15352 CFI_ENDPROC
15353-END(device_not_available)
15354+ENDPROC(device_not_available)
15355
15356 #ifdef CONFIG_PARAVIRT
15357 ENTRY(native_iret)
15358@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15359 .align 4
15360 .long native_iret, iret_exc
15361 .previous
15362-END(native_iret)
15363+ENDPROC(native_iret)
15364
15365 ENTRY(native_irq_enable_sysexit)
15366 sti
15367 sysexit
15368-END(native_irq_enable_sysexit)
15369+ENDPROC(native_irq_enable_sysexit)
15370 #endif
15371
15372 ENTRY(overflow)
15373@@ -885,7 +1129,7 @@ ENTRY(overflow)
15374 CFI_ADJUST_CFA_OFFSET 4
15375 jmp error_code
15376 CFI_ENDPROC
15377-END(overflow)
15378+ENDPROC(overflow)
15379
15380 ENTRY(bounds)
15381 RING0_INT_FRAME
15382@@ -895,7 +1139,7 @@ ENTRY(bounds)
15383 CFI_ADJUST_CFA_OFFSET 4
15384 jmp error_code
15385 CFI_ENDPROC
15386-END(bounds)
15387+ENDPROC(bounds)
15388
15389 ENTRY(invalid_op)
15390 RING0_INT_FRAME
15391@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15392 CFI_ADJUST_CFA_OFFSET 4
15393 jmp error_code
15394 CFI_ENDPROC
15395-END(invalid_op)
15396+ENDPROC(invalid_op)
15397
15398 ENTRY(coprocessor_segment_overrun)
15399 RING0_INT_FRAME
15400@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15401 CFI_ADJUST_CFA_OFFSET 4
15402 jmp error_code
15403 CFI_ENDPROC
15404-END(coprocessor_segment_overrun)
15405+ENDPROC(coprocessor_segment_overrun)
15406
15407 ENTRY(invalid_TSS)
15408 RING0_EC_FRAME
15409@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15410 CFI_ADJUST_CFA_OFFSET 4
15411 jmp error_code
15412 CFI_ENDPROC
15413-END(invalid_TSS)
15414+ENDPROC(invalid_TSS)
15415
15416 ENTRY(segment_not_present)
15417 RING0_EC_FRAME
15418@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15419 CFI_ADJUST_CFA_OFFSET 4
15420 jmp error_code
15421 CFI_ENDPROC
15422-END(segment_not_present)
15423+ENDPROC(segment_not_present)
15424
15425 ENTRY(stack_segment)
15426 RING0_EC_FRAME
15427@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15428 CFI_ADJUST_CFA_OFFSET 4
15429 jmp error_code
15430 CFI_ENDPROC
15431-END(stack_segment)
15432+ENDPROC(stack_segment)
15433
15434 ENTRY(alignment_check)
15435 RING0_EC_FRAME
15436@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15437 CFI_ADJUST_CFA_OFFSET 4
15438 jmp error_code
15439 CFI_ENDPROC
15440-END(alignment_check)
15441+ENDPROC(alignment_check)
15442
15443 ENTRY(divide_error)
15444 RING0_INT_FRAME
15445@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15446 CFI_ADJUST_CFA_OFFSET 4
15447 jmp error_code
15448 CFI_ENDPROC
15449-END(divide_error)
15450+ENDPROC(divide_error)
15451
15452 #ifdef CONFIG_X86_MCE
15453 ENTRY(machine_check)
15454@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15455 CFI_ADJUST_CFA_OFFSET 4
15456 jmp error_code
15457 CFI_ENDPROC
15458-END(machine_check)
15459+ENDPROC(machine_check)
15460 #endif
15461
15462 ENTRY(spurious_interrupt_bug)
15463@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15464 CFI_ADJUST_CFA_OFFSET 4
15465 jmp error_code
15466 CFI_ENDPROC
15467-END(spurious_interrupt_bug)
15468+ENDPROC(spurious_interrupt_bug)
15469
15470 ENTRY(kernel_thread_helper)
15471 pushl $0 # fake return address for unwinder
15472@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15473
15474 ENTRY(mcount)
15475 ret
15476-END(mcount)
15477+ENDPROC(mcount)
15478
15479 ENTRY(ftrace_caller)
15480 cmpl $0, function_trace_stop
15481@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15482 .globl ftrace_stub
15483 ftrace_stub:
15484 ret
15485-END(ftrace_caller)
15486+ENDPROC(ftrace_caller)
15487
15488 #else /* ! CONFIG_DYNAMIC_FTRACE */
15489
15490@@ -1160,7 +1404,7 @@ trace:
15491 popl %ecx
15492 popl %eax
15493 jmp ftrace_stub
15494-END(mcount)
15495+ENDPROC(mcount)
15496 #endif /* CONFIG_DYNAMIC_FTRACE */
15497 #endif /* CONFIG_FUNCTION_TRACER */
15498
15499@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15500 popl %ecx
15501 popl %eax
15502 ret
15503-END(ftrace_graph_caller)
15504+ENDPROC(ftrace_graph_caller)
15505
15506 .globl return_to_handler
15507 return_to_handler:
15508@@ -1198,7 +1442,6 @@ return_to_handler:
15509 ret
15510 #endif
15511
15512-.section .rodata,"a"
15513 #include "syscall_table_32.S"
15514
15515 syscall_table_size=(.-sys_call_table)
15516@@ -1255,15 +1498,18 @@ error_code:
15517 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15518 REG_TO_PTGS %ecx
15519 SET_KERNEL_GS %ecx
15520- movl $(__USER_DS), %ecx
15521+ movl $(__KERNEL_DS), %ecx
15522 movl %ecx, %ds
15523 movl %ecx, %es
15524+
15525+ pax_enter_kernel
15526+
15527 TRACE_IRQS_OFF
15528 movl %esp,%eax # pt_regs pointer
15529 call *%edi
15530 jmp ret_from_exception
15531 CFI_ENDPROC
15532-END(page_fault)
15533+ENDPROC(page_fault)
15534
15535 /*
15536 * Debug traps and NMI can happen at the one SYSENTER instruction
15537@@ -1309,7 +1555,7 @@ debug_stack_correct:
15538 call do_debug
15539 jmp ret_from_exception
15540 CFI_ENDPROC
15541-END(debug)
15542+ENDPROC(debug)
15543
15544 /*
15545 * NMI is doubly nasty. It can happen _while_ we're handling
15546@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15547 xorl %edx,%edx # zero error code
15548 movl %esp,%eax # pt_regs pointer
15549 call do_nmi
15550+
15551+ pax_exit_kernel
15552+
15553 jmp restore_all_notrace
15554 CFI_ENDPROC
15555
15556@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15557 FIXUP_ESPFIX_STACK # %eax == %esp
15558 xorl %edx,%edx # zero error code
15559 call do_nmi
15560+
15561+ pax_exit_kernel
15562+
15563 RESTORE_REGS
15564 lss 12+4(%esp), %esp # back to espfix stack
15565 CFI_ADJUST_CFA_OFFSET -24
15566 jmp irq_return
15567 CFI_ENDPROC
15568-END(nmi)
15569+ENDPROC(nmi)
15570
15571 ENTRY(int3)
15572 RING0_INT_FRAME
15573@@ -1409,7 +1661,7 @@ ENTRY(int3)
15574 call do_int3
15575 jmp ret_from_exception
15576 CFI_ENDPROC
15577-END(int3)
15578+ENDPROC(int3)
15579
15580 ENTRY(general_protection)
15581 RING0_EC_FRAME
15582@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15583 CFI_ADJUST_CFA_OFFSET 4
15584 jmp error_code
15585 CFI_ENDPROC
15586-END(general_protection)
15587+ENDPROC(general_protection)
15588
15589 /*
15590 * End of kprobes section
15591diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15592index 34a56a9..a98c643 100644
15593--- a/arch/x86/kernel/entry_64.S
15594+++ b/arch/x86/kernel/entry_64.S
15595@@ -53,6 +53,8 @@
15596 #include <asm/paravirt.h>
15597 #include <asm/ftrace.h>
15598 #include <asm/percpu.h>
15599+#include <asm/pgtable.h>
15600+#include <asm/alternative-asm.h>
15601
15602 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15603 #include <linux/elf-em.h>
15604@@ -64,8 +66,9 @@
15605 #ifdef CONFIG_FUNCTION_TRACER
15606 #ifdef CONFIG_DYNAMIC_FTRACE
15607 ENTRY(mcount)
15608+ pax_force_retaddr
15609 retq
15610-END(mcount)
15611+ENDPROC(mcount)
15612
15613 ENTRY(ftrace_caller)
15614 cmpl $0, function_trace_stop
15615@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15616 #endif
15617
15618 GLOBAL(ftrace_stub)
15619+ pax_force_retaddr
15620 retq
15621-END(ftrace_caller)
15622+ENDPROC(ftrace_caller)
15623
15624 #else /* ! CONFIG_DYNAMIC_FTRACE */
15625 ENTRY(mcount)
15626@@ -108,6 +112,7 @@ ENTRY(mcount)
15627 #endif
15628
15629 GLOBAL(ftrace_stub)
15630+ pax_force_retaddr
15631 retq
15632
15633 trace:
15634@@ -117,12 +122,13 @@ trace:
15635 movq 8(%rbp), %rsi
15636 subq $MCOUNT_INSN_SIZE, %rdi
15637
15638+ pax_force_fptr ftrace_trace_function
15639 call *ftrace_trace_function
15640
15641 MCOUNT_RESTORE_FRAME
15642
15643 jmp ftrace_stub
15644-END(mcount)
15645+ENDPROC(mcount)
15646 #endif /* CONFIG_DYNAMIC_FTRACE */
15647 #endif /* CONFIG_FUNCTION_TRACER */
15648
15649@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15650
15651 MCOUNT_RESTORE_FRAME
15652
15653+ pax_force_retaddr
15654 retq
15655-END(ftrace_graph_caller)
15656+ENDPROC(ftrace_graph_caller)
15657
15658 GLOBAL(return_to_handler)
15659 subq $24, %rsp
15660@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15661 movq 8(%rsp), %rdx
15662 movq (%rsp), %rax
15663 addq $16, %rsp
15664+ pax_force_retaddr
15665 retq
15666 #endif
15667
15668@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15669 ENDPROC(native_usergs_sysret64)
15670 #endif /* CONFIG_PARAVIRT */
15671
15672+ .macro ljmpq sel, off
15673+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15674+ .byte 0x48; ljmp *1234f(%rip)
15675+ .pushsection .rodata
15676+ .align 16
15677+ 1234: .quad \off; .word \sel
15678+ .popsection
15679+#else
15680+ pushq $\sel
15681+ pushq $\off
15682+ lretq
15683+#endif
15684+ .endm
15685+
15686+ .macro pax_enter_kernel
15687+ pax_set_fptr_mask
15688+#ifdef CONFIG_PAX_KERNEXEC
15689+ call pax_enter_kernel
15690+#endif
15691+ .endm
15692+
15693+ .macro pax_exit_kernel
15694+#ifdef CONFIG_PAX_KERNEXEC
15695+ call pax_exit_kernel
15696+#endif
15697+ .endm
15698+
15699+#ifdef CONFIG_PAX_KERNEXEC
15700+ENTRY(pax_enter_kernel)
15701+ pushq %rdi
15702+
15703+#ifdef CONFIG_PARAVIRT
15704+ PV_SAVE_REGS(CLBR_RDI)
15705+#endif
15706+
15707+ GET_CR0_INTO_RDI
15708+ bts $16,%rdi
15709+ jnc 3f
15710+ mov %cs,%edi
15711+ cmp $__KERNEL_CS,%edi
15712+ jnz 2f
15713+1:
15714+
15715+#ifdef CONFIG_PARAVIRT
15716+ PV_RESTORE_REGS(CLBR_RDI)
15717+#endif
15718+
15719+ popq %rdi
15720+ pax_force_retaddr
15721+ retq
15722+
15723+2: ljmpq __KERNEL_CS,1f
15724+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15725+4: SET_RDI_INTO_CR0
15726+ jmp 1b
15727+ENDPROC(pax_enter_kernel)
15728+
15729+ENTRY(pax_exit_kernel)
15730+ pushq %rdi
15731+
15732+#ifdef CONFIG_PARAVIRT
15733+ PV_SAVE_REGS(CLBR_RDI)
15734+#endif
15735+
15736+ mov %cs,%rdi
15737+ cmp $__KERNEXEC_KERNEL_CS,%edi
15738+ jz 2f
15739+1:
15740+
15741+#ifdef CONFIG_PARAVIRT
15742+ PV_RESTORE_REGS(CLBR_RDI);
15743+#endif
15744+
15745+ popq %rdi
15746+ pax_force_retaddr
15747+ retq
15748+
15749+2: GET_CR0_INTO_RDI
15750+ btr $16,%rdi
15751+ ljmpq __KERNEL_CS,3f
15752+3: SET_RDI_INTO_CR0
15753+ jmp 1b
15754+#ifdef CONFIG_PARAVIRT
15755+ PV_RESTORE_REGS(CLBR_RDI);
15756+#endif
15757+
15758+ popq %rdi
15759+ pax_force_retaddr
15760+ retq
15761+ENDPROC(pax_exit_kernel)
15762+#endif
15763+
15764+ .macro pax_enter_kernel_user
15765+ pax_set_fptr_mask
15766+#ifdef CONFIG_PAX_MEMORY_UDEREF
15767+ call pax_enter_kernel_user
15768+#endif
15769+ .endm
15770+
15771+ .macro pax_exit_kernel_user
15772+#ifdef CONFIG_PAX_MEMORY_UDEREF
15773+ call pax_exit_kernel_user
15774+#endif
15775+#ifdef CONFIG_PAX_RANDKSTACK
15776+ push %rax
15777+ call pax_randomize_kstack
15778+ pop %rax
15779+#endif
15780+ .endm
15781+
15782+#ifdef CONFIG_PAX_MEMORY_UDEREF
15783+ENTRY(pax_enter_kernel_user)
15784+ pushq %rdi
15785+ pushq %rbx
15786+
15787+#ifdef CONFIG_PARAVIRT
15788+ PV_SAVE_REGS(CLBR_RDI)
15789+#endif
15790+
15791+ GET_CR3_INTO_RDI
15792+ mov %rdi,%rbx
15793+ add $__START_KERNEL_map,%rbx
15794+ sub phys_base(%rip),%rbx
15795+
15796+#ifdef CONFIG_PARAVIRT
15797+ pushq %rdi
15798+ cmpl $0, pv_info+PARAVIRT_enabled
15799+ jz 1f
15800+ i = 0
15801+ .rept USER_PGD_PTRS
15802+ mov i*8(%rbx),%rsi
15803+ mov $0,%sil
15804+ lea i*8(%rbx),%rdi
15805+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15806+ i = i + 1
15807+ .endr
15808+ jmp 2f
15809+1:
15810+#endif
15811+
15812+ i = 0
15813+ .rept USER_PGD_PTRS
15814+ movb $0,i*8(%rbx)
15815+ i = i + 1
15816+ .endr
15817+
15818+#ifdef CONFIG_PARAVIRT
15819+2: popq %rdi
15820+#endif
15821+ SET_RDI_INTO_CR3
15822+
15823+#ifdef CONFIG_PAX_KERNEXEC
15824+ GET_CR0_INTO_RDI
15825+ bts $16,%rdi
15826+ SET_RDI_INTO_CR0
15827+#endif
15828+
15829+#ifdef CONFIG_PARAVIRT
15830+ PV_RESTORE_REGS(CLBR_RDI)
15831+#endif
15832+
15833+ popq %rbx
15834+ popq %rdi
15835+ pax_force_retaddr
15836+ retq
15837+ENDPROC(pax_enter_kernel_user)
15838+
15839+ENTRY(pax_exit_kernel_user)
15840+ push %rdi
15841+
15842+#ifdef CONFIG_PARAVIRT
15843+ pushq %rbx
15844+ PV_SAVE_REGS(CLBR_RDI)
15845+#endif
15846+
15847+#ifdef CONFIG_PAX_KERNEXEC
15848+ GET_CR0_INTO_RDI
15849+ btr $16,%rdi
15850+ SET_RDI_INTO_CR0
15851+#endif
15852+
15853+ GET_CR3_INTO_RDI
15854+ add $__START_KERNEL_map,%rdi
15855+ sub phys_base(%rip),%rdi
15856+
15857+#ifdef CONFIG_PARAVIRT
15858+ cmpl $0, pv_info+PARAVIRT_enabled
15859+ jz 1f
15860+ mov %rdi,%rbx
15861+ i = 0
15862+ .rept USER_PGD_PTRS
15863+ mov i*8(%rbx),%rsi
15864+ mov $0x67,%sil
15865+ lea i*8(%rbx),%rdi
15866+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15867+ i = i + 1
15868+ .endr
15869+ jmp 2f
15870+1:
15871+#endif
15872+
15873+ i = 0
15874+ .rept USER_PGD_PTRS
15875+ movb $0x67,i*8(%rdi)
15876+ i = i + 1
15877+ .endr
15878+
15879+#ifdef CONFIG_PARAVIRT
15880+2: PV_RESTORE_REGS(CLBR_RDI)
15881+ popq %rbx
15882+#endif
15883+
15884+ popq %rdi
15885+ pax_force_retaddr
15886+ retq
15887+ENDPROC(pax_exit_kernel_user)
15888+#endif
15889+
15890+.macro pax_erase_kstack
15891+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15892+ call pax_erase_kstack
15893+#endif
15894+.endm
15895+
15896+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15897+/*
15898+ * r11: thread_info
15899+ * rcx, rdx: can be clobbered
15900+ */
15901+ENTRY(pax_erase_kstack)
15902+ pushq %rdi
15903+ pushq %rax
15904+ pushq %r11
15905+
15906+ GET_THREAD_INFO(%r11)
15907+ mov TI_lowest_stack(%r11), %rdi
15908+ mov $-0xBEEF, %rax
15909+ std
15910+
15911+1: mov %edi, %ecx
15912+ and $THREAD_SIZE_asm - 1, %ecx
15913+ shr $3, %ecx
15914+ repne scasq
15915+ jecxz 2f
15916+
15917+ cmp $2*8, %ecx
15918+ jc 2f
15919+
15920+ mov $2*8, %ecx
15921+ repe scasq
15922+ jecxz 2f
15923+ jne 1b
15924+
15925+2: cld
15926+ mov %esp, %ecx
15927+ sub %edi, %ecx
15928+
15929+ cmp $THREAD_SIZE_asm, %rcx
15930+ jb 3f
15931+ ud2
15932+3:
15933+
15934+ shr $3, %ecx
15935+ rep stosq
15936+
15937+ mov TI_task_thread_sp0(%r11), %rdi
15938+ sub $256, %rdi
15939+ mov %rdi, TI_lowest_stack(%r11)
15940+
15941+ popq %r11
15942+ popq %rax
15943+ popq %rdi
15944+ pax_force_retaddr
15945+ ret
15946+ENDPROC(pax_erase_kstack)
15947+#endif
15948
15949 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15950 #ifdef CONFIG_TRACE_IRQFLAGS
15951@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15952 .endm
15953
15954 .macro UNFAKE_STACK_FRAME
15955- addq $8*6, %rsp
15956- CFI_ADJUST_CFA_OFFSET -(6*8)
15957+ addq $8*6 + ARG_SKIP, %rsp
15958+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15959 .endm
15960
15961 /*
15962@@ -317,7 +601,7 @@ ENTRY(save_args)
15963 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15964 movq_cfi rbp, 8 /* push %rbp */
15965 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15966- testl $3, CS(%rdi)
15967+ testb $3, CS(%rdi)
15968 je 1f
15969 SWAPGS
15970 /*
15971@@ -337,9 +621,10 @@ ENTRY(save_args)
15972 * We entered an interrupt context - irqs are off:
15973 */
15974 2: TRACE_IRQS_OFF
15975+ pax_force_retaddr
15976 ret
15977 CFI_ENDPROC
15978-END(save_args)
15979+ENDPROC(save_args)
15980
15981 ENTRY(save_rest)
15982 PARTIAL_FRAME 1 REST_SKIP+8
15983@@ -352,9 +637,10 @@ ENTRY(save_rest)
15984 movq_cfi r15, R15+16
15985 movq %r11, 8(%rsp) /* return address */
15986 FIXUP_TOP_OF_STACK %r11, 16
15987+ pax_force_retaddr
15988 ret
15989 CFI_ENDPROC
15990-END(save_rest)
15991+ENDPROC(save_rest)
15992
15993 /* save complete stack frame */
15994 .pushsection .kprobes.text, "ax"
15995@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
15996 js 1f /* negative -> in kernel */
15997 SWAPGS
15998 xorl %ebx,%ebx
15999-1: ret
16000+1: pax_force_retaddr_bts
16001+ ret
16002 CFI_ENDPROC
16003-END(save_paranoid)
16004+ENDPROC(save_paranoid)
16005 .popsection
16006
16007 /*
16008@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16009
16010 RESTORE_REST
16011
16012- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16013+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16014 je int_ret_from_sys_call
16015
16016 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16017@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16018 jmp ret_from_sys_call # go to the SYSRET fastpath
16019
16020 CFI_ENDPROC
16021-END(ret_from_fork)
16022+ENDPROC(ret_from_fork)
16023
16024 /*
16025 * System call entry. Upto 6 arguments in registers are supported.
16026@@ -455,7 +742,7 @@ END(ret_from_fork)
16027 ENTRY(system_call)
16028 CFI_STARTPROC simple
16029 CFI_SIGNAL_FRAME
16030- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16031+ CFI_DEF_CFA rsp,0
16032 CFI_REGISTER rip,rcx
16033 /*CFI_REGISTER rflags,r11*/
16034 SWAPGS_UNSAFE_STACK
16035@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16036
16037 movq %rsp,PER_CPU_VAR(old_rsp)
16038 movq PER_CPU_VAR(kernel_stack),%rsp
16039+ SAVE_ARGS 8*6,1
16040+ pax_enter_kernel_user
16041 /*
16042 * No need to follow this irqs off/on section - it's straight
16043 * and short:
16044 */
16045 ENABLE_INTERRUPTS(CLBR_NONE)
16046- SAVE_ARGS 8,1
16047 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16048 movq %rcx,RIP-ARGOFFSET(%rsp)
16049 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16050@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16051 system_call_fastpath:
16052 cmpq $__NR_syscall_max,%rax
16053 ja badsys
16054- movq %r10,%rcx
16055+ movq R10-ARGOFFSET(%rsp),%rcx
16056 call *sys_call_table(,%rax,8) # XXX: rip relative
16057 movq %rax,RAX-ARGOFFSET(%rsp)
16058 /*
16059@@ -502,6 +790,8 @@ sysret_check:
16060 andl %edi,%edx
16061 jnz sysret_careful
16062 CFI_REMEMBER_STATE
16063+ pax_exit_kernel_user
16064+ pax_erase_kstack
16065 /*
16066 * sysretq will re-enable interrupts:
16067 */
16068@@ -555,14 +845,18 @@ badsys:
16069 * jump back to the normal fast path.
16070 */
16071 auditsys:
16072- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16073+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16074 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16075 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16076 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16077 movq %rax,%rsi /* 2nd arg: syscall number */
16078 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16079 call audit_syscall_entry
16080+
16081+ pax_erase_kstack
16082+
16083 LOAD_ARGS 0 /* reload call-clobbered registers */
16084+ pax_set_fptr_mask
16085 jmp system_call_fastpath
16086
16087 /*
16088@@ -592,16 +886,20 @@ tracesys:
16089 FIXUP_TOP_OF_STACK %rdi
16090 movq %rsp,%rdi
16091 call syscall_trace_enter
16092+
16093+ pax_erase_kstack
16094+
16095 /*
16096 * Reload arg registers from stack in case ptrace changed them.
16097 * We don't reload %rax because syscall_trace_enter() returned
16098 * the value it wants us to use in the table lookup.
16099 */
16100 LOAD_ARGS ARGOFFSET, 1
16101+ pax_set_fptr_mask
16102 RESTORE_REST
16103 cmpq $__NR_syscall_max,%rax
16104 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16105- movq %r10,%rcx /* fixup for C */
16106+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16107 call *sys_call_table(,%rax,8)
16108 movq %rax,RAX-ARGOFFSET(%rsp)
16109 /* Use IRET because user could have changed frame */
16110@@ -613,7 +911,7 @@ tracesys:
16111 GLOBAL(int_ret_from_sys_call)
16112 DISABLE_INTERRUPTS(CLBR_NONE)
16113 TRACE_IRQS_OFF
16114- testl $3,CS-ARGOFFSET(%rsp)
16115+ testb $3,CS-ARGOFFSET(%rsp)
16116 je retint_restore_args
16117 movl $_TIF_ALLWORK_MASK,%edi
16118 /* edi: mask to check */
16119@@ -674,7 +972,7 @@ int_restore_rest:
16120 TRACE_IRQS_OFF
16121 jmp int_with_check
16122 CFI_ENDPROC
16123-END(system_call)
16124+ENDPROC(system_call)
16125
16126 /*
16127 * Certain special system calls that need to save a complete full stack frame.
16128@@ -690,7 +988,7 @@ ENTRY(\label)
16129 call \func
16130 jmp ptregscall_common
16131 CFI_ENDPROC
16132-END(\label)
16133+ENDPROC(\label)
16134 .endm
16135
16136 PTREGSCALL stub_clone, sys_clone, %r8
16137@@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16138 movq_cfi_restore R12+8, r12
16139 movq_cfi_restore RBP+8, rbp
16140 movq_cfi_restore RBX+8, rbx
16141+ pax_force_retaddr
16142 ret $REST_SKIP /* pop extended registers */
16143 CFI_ENDPROC
16144-END(ptregscall_common)
16145+ENDPROC(ptregscall_common)
16146
16147 ENTRY(stub_execve)
16148 CFI_STARTPROC
16149@@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16150 RESTORE_REST
16151 jmp int_ret_from_sys_call
16152 CFI_ENDPROC
16153-END(stub_execve)
16154+ENDPROC(stub_execve)
16155
16156 /*
16157 * sigreturn is special because it needs to restore all registers on return.
16158@@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16159 RESTORE_REST
16160 jmp int_ret_from_sys_call
16161 CFI_ENDPROC
16162-END(stub_rt_sigreturn)
16163+ENDPROC(stub_rt_sigreturn)
16164
16165 /*
16166 * Build the entry stubs and pointer table with some assembler magic.
16167@@ -780,7 +1079,7 @@ vector=vector+1
16168 2: jmp common_interrupt
16169 .endr
16170 CFI_ENDPROC
16171-END(irq_entries_start)
16172+ENDPROC(irq_entries_start)
16173
16174 .previous
16175 END(interrupt)
16176@@ -800,6 +1099,16 @@ END(interrupt)
16177 CFI_ADJUST_CFA_OFFSET 10*8
16178 call save_args
16179 PARTIAL_FRAME 0
16180+#ifdef CONFIG_PAX_MEMORY_UDEREF
16181+ testb $3, CS(%rdi)
16182+ jnz 1f
16183+ pax_enter_kernel
16184+ jmp 2f
16185+1: pax_enter_kernel_user
16186+2:
16187+#else
16188+ pax_enter_kernel
16189+#endif
16190 call \func
16191 .endm
16192
16193@@ -822,7 +1131,7 @@ ret_from_intr:
16194 CFI_ADJUST_CFA_OFFSET -8
16195 exit_intr:
16196 GET_THREAD_INFO(%rcx)
16197- testl $3,CS-ARGOFFSET(%rsp)
16198+ testb $3,CS-ARGOFFSET(%rsp)
16199 je retint_kernel
16200
16201 /* Interrupt came from user space */
16202@@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16203 * The iretq could re-enable interrupts:
16204 */
16205 DISABLE_INTERRUPTS(CLBR_ANY)
16206+ pax_exit_kernel_user
16207+ pax_erase_kstack
16208 TRACE_IRQS_IRETQ
16209 SWAPGS
16210 jmp restore_args
16211
16212 retint_restore_args: /* return to kernel space */
16213 DISABLE_INTERRUPTS(CLBR_ANY)
16214+ pax_exit_kernel
16215+ pax_force_retaddr RIP-ARGOFFSET
16216 /*
16217 * The iretq could re-enable interrupts:
16218 */
16219@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16220 #endif
16221
16222 CFI_ENDPROC
16223-END(common_interrupt)
16224+ENDPROC(common_interrupt)
16225
16226 /*
16227 * APIC interrupts.
16228@@ -953,7 +1266,7 @@ ENTRY(\sym)
16229 interrupt \do_sym
16230 jmp ret_from_intr
16231 CFI_ENDPROC
16232-END(\sym)
16233+ENDPROC(\sym)
16234 .endm
16235
16236 #ifdef CONFIG_SMP
16237@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16238 CFI_ADJUST_CFA_OFFSET 15*8
16239 call error_entry
16240 DEFAULT_FRAME 0
16241+#ifdef CONFIG_PAX_MEMORY_UDEREF
16242+ testb $3, CS(%rsp)
16243+ jnz 1f
16244+ pax_enter_kernel
16245+ jmp 2f
16246+1: pax_enter_kernel_user
16247+2:
16248+#else
16249+ pax_enter_kernel
16250+#endif
16251 movq %rsp,%rdi /* pt_regs pointer */
16252 xorl %esi,%esi /* no error code */
16253 call \do_sym
16254 jmp error_exit /* %ebx: no swapgs flag */
16255 CFI_ENDPROC
16256-END(\sym)
16257+ENDPROC(\sym)
16258 .endm
16259
16260 .macro paranoidzeroentry sym do_sym
16261@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16262 subq $15*8, %rsp
16263 call save_paranoid
16264 TRACE_IRQS_OFF
16265+#ifdef CONFIG_PAX_MEMORY_UDEREF
16266+ testb $3, CS(%rsp)
16267+ jnz 1f
16268+ pax_enter_kernel
16269+ jmp 2f
16270+1: pax_enter_kernel_user
16271+2:
16272+#else
16273+ pax_enter_kernel
16274+#endif
16275 movq %rsp,%rdi /* pt_regs pointer */
16276 xorl %esi,%esi /* no error code */
16277 call \do_sym
16278 jmp paranoid_exit /* %ebx: no swapgs flag */
16279 CFI_ENDPROC
16280-END(\sym)
16281+ENDPROC(\sym)
16282 .endm
16283
16284 .macro paranoidzeroentry_ist sym do_sym ist
16285@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16286 subq $15*8, %rsp
16287 call save_paranoid
16288 TRACE_IRQS_OFF
16289+#ifdef CONFIG_PAX_MEMORY_UDEREF
16290+ testb $3, CS(%rsp)
16291+ jnz 1f
16292+ pax_enter_kernel
16293+ jmp 2f
16294+1: pax_enter_kernel_user
16295+2:
16296+#else
16297+ pax_enter_kernel
16298+#endif
16299 movq %rsp,%rdi /* pt_regs pointer */
16300 xorl %esi,%esi /* no error code */
16301- PER_CPU(init_tss, %rbp)
16302+#ifdef CONFIG_SMP
16303+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16304+ lea init_tss(%rbp), %rbp
16305+#else
16306+ lea init_tss(%rip), %rbp
16307+#endif
16308 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16309 call \do_sym
16310 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16311 jmp paranoid_exit /* %ebx: no swapgs flag */
16312 CFI_ENDPROC
16313-END(\sym)
16314+ENDPROC(\sym)
16315 .endm
16316
16317 .macro errorentry sym do_sym
16318@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16319 CFI_ADJUST_CFA_OFFSET 15*8
16320 call error_entry
16321 DEFAULT_FRAME 0
16322+#ifdef CONFIG_PAX_MEMORY_UDEREF
16323+ testb $3, CS(%rsp)
16324+ jnz 1f
16325+ pax_enter_kernel
16326+ jmp 2f
16327+1: pax_enter_kernel_user
16328+2:
16329+#else
16330+ pax_enter_kernel
16331+#endif
16332 movq %rsp,%rdi /* pt_regs pointer */
16333 movq ORIG_RAX(%rsp),%rsi /* get error code */
16334 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16335 call \do_sym
16336 jmp error_exit /* %ebx: no swapgs flag */
16337 CFI_ENDPROC
16338-END(\sym)
16339+ENDPROC(\sym)
16340 .endm
16341
16342 /* error code is on the stack already */
16343@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16344 call save_paranoid
16345 DEFAULT_FRAME 0
16346 TRACE_IRQS_OFF
16347+#ifdef CONFIG_PAX_MEMORY_UDEREF
16348+ testb $3, CS(%rsp)
16349+ jnz 1f
16350+ pax_enter_kernel
16351+ jmp 2f
16352+1: pax_enter_kernel_user
16353+2:
16354+#else
16355+ pax_enter_kernel
16356+#endif
16357 movq %rsp,%rdi /* pt_regs pointer */
16358 movq ORIG_RAX(%rsp),%rsi /* get error code */
16359 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16360 call \do_sym
16361 jmp paranoid_exit /* %ebx: no swapgs flag */
16362 CFI_ENDPROC
16363-END(\sym)
16364+ENDPROC(\sym)
16365 .endm
16366
16367 zeroentry divide_error do_divide_error
16368@@ -1141,9 +1509,10 @@ gs_change:
16369 SWAPGS
16370 popf
16371 CFI_ADJUST_CFA_OFFSET -8
16372+ pax_force_retaddr
16373 ret
16374 CFI_ENDPROC
16375-END(native_load_gs_index)
16376+ENDPROC(native_load_gs_index)
16377
16378 .section __ex_table,"a"
16379 .align 8
16380@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16381 * of hacks for example to fork off the per-CPU idle tasks.
16382 * [Hopefully no generic code relies on the reschedule -AK]
16383 */
16384- RESTORE_ALL
16385+ RESTORE_REST
16386 UNFAKE_STACK_FRAME
16387+ pax_force_retaddr
16388 ret
16389 CFI_ENDPROC
16390-END(kernel_thread)
16391+ENDPROC(kernel_thread)
16392
16393 ENTRY(child_rip)
16394 pushq $0 # fake return address
16395@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16396 */
16397 movq %rdi, %rax
16398 movq %rsi, %rdi
16399+ pax_force_fptr %rax
16400 call *%rax
16401 # exit
16402 mov %eax, %edi
16403 call do_exit
16404 ud2 # padding for call trace
16405 CFI_ENDPROC
16406-END(child_rip)
16407+ENDPROC(child_rip)
16408
16409 /*
16410 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16411@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16412 RESTORE_REST
16413 testq %rax,%rax
16414 je int_ret_from_sys_call
16415- RESTORE_ARGS
16416 UNFAKE_STACK_FRAME
16417+ pax_force_retaddr
16418 ret
16419 CFI_ENDPROC
16420-END(kernel_execve)
16421+ENDPROC(kernel_execve)
16422
16423 /* Call softirq on interrupt stack. Interrupts are off. */
16424 ENTRY(call_softirq)
16425@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16426 CFI_DEF_CFA_REGISTER rsp
16427 CFI_ADJUST_CFA_OFFSET -8
16428 decl PER_CPU_VAR(irq_count)
16429+ pax_force_retaddr
16430 ret
16431 CFI_ENDPROC
16432-END(call_softirq)
16433+ENDPROC(call_softirq)
16434
16435 #ifdef CONFIG_XEN
16436 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16437@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16438 decl PER_CPU_VAR(irq_count)
16439 jmp error_exit
16440 CFI_ENDPROC
16441-END(xen_do_hypervisor_callback)
16442+ENDPROC(xen_do_hypervisor_callback)
16443
16444 /*
16445 * Hypervisor uses this for application faults while it executes.
16446@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16447 SAVE_ALL
16448 jmp error_exit
16449 CFI_ENDPROC
16450-END(xen_failsafe_callback)
16451+ENDPROC(xen_failsafe_callback)
16452
16453 #endif /* CONFIG_XEN */
16454
16455@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16456 TRACE_IRQS_OFF
16457 testl %ebx,%ebx /* swapgs needed? */
16458 jnz paranoid_restore
16459- testl $3,CS(%rsp)
16460+ testb $3,CS(%rsp)
16461 jnz paranoid_userspace
16462+#ifdef CONFIG_PAX_MEMORY_UDEREF
16463+ pax_exit_kernel
16464+ TRACE_IRQS_IRETQ 0
16465+ SWAPGS_UNSAFE_STACK
16466+ RESTORE_ALL 8
16467+ pax_force_retaddr_bts
16468+ jmp irq_return
16469+#endif
16470 paranoid_swapgs:
16471+#ifdef CONFIG_PAX_MEMORY_UDEREF
16472+ pax_exit_kernel_user
16473+#else
16474+ pax_exit_kernel
16475+#endif
16476 TRACE_IRQS_IRETQ 0
16477 SWAPGS_UNSAFE_STACK
16478 RESTORE_ALL 8
16479 jmp irq_return
16480 paranoid_restore:
16481+ pax_exit_kernel
16482 TRACE_IRQS_IRETQ 0
16483 RESTORE_ALL 8
16484+ pax_force_retaddr_bts
16485 jmp irq_return
16486 paranoid_userspace:
16487 GET_THREAD_INFO(%rcx)
16488@@ -1443,7 +1830,7 @@ paranoid_schedule:
16489 TRACE_IRQS_OFF
16490 jmp paranoid_userspace
16491 CFI_ENDPROC
16492-END(paranoid_exit)
16493+ENDPROC(paranoid_exit)
16494
16495 /*
16496 * Exception entry point. This expects an error code/orig_rax on the stack.
16497@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16498 movq_cfi r14, R14+8
16499 movq_cfi r15, R15+8
16500 xorl %ebx,%ebx
16501- testl $3,CS+8(%rsp)
16502+ testb $3,CS+8(%rsp)
16503 je error_kernelspace
16504 error_swapgs:
16505 SWAPGS
16506 error_sti:
16507 TRACE_IRQS_OFF
16508+ pax_force_retaddr_bts
16509 ret
16510 CFI_ENDPROC
16511
16512@@ -1497,7 +1885,7 @@ error_kernelspace:
16513 cmpq $gs_change,RIP+8(%rsp)
16514 je error_swapgs
16515 jmp error_sti
16516-END(error_entry)
16517+ENDPROC(error_entry)
16518
16519
16520 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16521@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16522 jnz retint_careful
16523 jmp retint_swapgs
16524 CFI_ENDPROC
16525-END(error_exit)
16526+ENDPROC(error_exit)
16527
16528
16529 /* runs on exception stack */
16530@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16531 CFI_ADJUST_CFA_OFFSET 15*8
16532 call save_paranoid
16533 DEFAULT_FRAME 0
16534+#ifdef CONFIG_PAX_MEMORY_UDEREF
16535+ testb $3, CS(%rsp)
16536+ jnz 1f
16537+ pax_enter_kernel
16538+ jmp 2f
16539+1: pax_enter_kernel_user
16540+2:
16541+#else
16542+ pax_enter_kernel
16543+#endif
16544 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16545 movq %rsp,%rdi
16546 movq $-1,%rsi
16547@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16548 DISABLE_INTERRUPTS(CLBR_NONE)
16549 testl %ebx,%ebx /* swapgs needed? */
16550 jnz nmi_restore
16551- testl $3,CS(%rsp)
16552+ testb $3,CS(%rsp)
16553 jnz nmi_userspace
16554+#ifdef CONFIG_PAX_MEMORY_UDEREF
16555+ pax_exit_kernel
16556+ SWAPGS_UNSAFE_STACK
16557+ RESTORE_ALL 8
16558+ pax_force_retaddr_bts
16559+ jmp irq_return
16560+#endif
16561 nmi_swapgs:
16562+#ifdef CONFIG_PAX_MEMORY_UDEREF
16563+ pax_exit_kernel_user
16564+#else
16565+ pax_exit_kernel
16566+#endif
16567 SWAPGS_UNSAFE_STACK
16568+ RESTORE_ALL 8
16569+ jmp irq_return
16570 nmi_restore:
16571+ pax_exit_kernel
16572 RESTORE_ALL 8
16573+ pax_force_retaddr_bts
16574 jmp irq_return
16575 nmi_userspace:
16576 GET_THREAD_INFO(%rcx)
16577@@ -1573,14 +1987,14 @@ nmi_schedule:
16578 jmp paranoid_exit
16579 CFI_ENDPROC
16580 #endif
16581-END(nmi)
16582+ENDPROC(nmi)
16583
16584 ENTRY(ignore_sysret)
16585 CFI_STARTPROC
16586 mov $-ENOSYS,%eax
16587 sysret
16588 CFI_ENDPROC
16589-END(ignore_sysret)
16590+ENDPROC(ignore_sysret)
16591
16592 /*
16593 * End of kprobes section
16594diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16595index 9dbb527..7b3615a 100644
16596--- a/arch/x86/kernel/ftrace.c
16597+++ b/arch/x86/kernel/ftrace.c
16598@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16599 static void *mod_code_newcode; /* holds the text to write to the IP */
16600
16601 static unsigned nmi_wait_count;
16602-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16603+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16604
16605 int ftrace_arch_read_dyn_info(char *buf, int size)
16606 {
16607@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16608
16609 r = snprintf(buf, size, "%u %u",
16610 nmi_wait_count,
16611- atomic_read(&nmi_update_count));
16612+ atomic_read_unchecked(&nmi_update_count));
16613 return r;
16614 }
16615
16616@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16617 {
16618 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16619 smp_rmb();
16620+ pax_open_kernel();
16621 ftrace_mod_code();
16622- atomic_inc(&nmi_update_count);
16623+ pax_close_kernel();
16624+ atomic_inc_unchecked(&nmi_update_count);
16625 }
16626 /* Must have previous changes seen before executions */
16627 smp_mb();
16628@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16629
16630
16631
16632-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16633+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16634
16635 static unsigned char *ftrace_nop_replace(void)
16636 {
16637@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16638 {
16639 unsigned char replaced[MCOUNT_INSN_SIZE];
16640
16641+ ip = ktla_ktva(ip);
16642+
16643 /*
16644 * Note: Due to modules and __init, code can
16645 * disappear and change, we need to protect against faulting
16646@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16647 unsigned char old[MCOUNT_INSN_SIZE], *new;
16648 int ret;
16649
16650- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16651+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16652 new = ftrace_call_replace(ip, (unsigned long)func);
16653 ret = ftrace_modify_code(ip, old, new);
16654
16655@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16656 switch (faulted) {
16657 case 0:
16658 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16659- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16660+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16661 break;
16662 case 1:
16663 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16664- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16665+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16666 break;
16667 case 2:
16668 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16669- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16670+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16671 break;
16672 }
16673
16674@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16675 {
16676 unsigned char code[MCOUNT_INSN_SIZE];
16677
16678+ ip = ktla_ktva(ip);
16679+
16680 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16681 return -EFAULT;
16682
16683diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16684index 4f8e250..df24706 100644
16685--- a/arch/x86/kernel/head32.c
16686+++ b/arch/x86/kernel/head32.c
16687@@ -16,6 +16,7 @@
16688 #include <asm/apic.h>
16689 #include <asm/io_apic.h>
16690 #include <asm/bios_ebda.h>
16691+#include <asm/boot.h>
16692
16693 static void __init i386_default_early_setup(void)
16694 {
16695@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16696 {
16697 reserve_trampoline_memory();
16698
16699- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16700+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16701
16702 #ifdef CONFIG_BLK_DEV_INITRD
16703 /* Reserve INITRD */
16704diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16705index 34c3308..6fc4e76 100644
16706--- a/arch/x86/kernel/head_32.S
16707+++ b/arch/x86/kernel/head_32.S
16708@@ -19,10 +19,17 @@
16709 #include <asm/setup.h>
16710 #include <asm/processor-flags.h>
16711 #include <asm/percpu.h>
16712+#include <asm/msr-index.h>
16713
16714 /* Physical address */
16715 #define pa(X) ((X) - __PAGE_OFFSET)
16716
16717+#ifdef CONFIG_PAX_KERNEXEC
16718+#define ta(X) (X)
16719+#else
16720+#define ta(X) ((X) - __PAGE_OFFSET)
16721+#endif
16722+
16723 /*
16724 * References to members of the new_cpu_data structure.
16725 */
16726@@ -52,11 +59,7 @@
16727 * and small than max_low_pfn, otherwise will waste some page table entries
16728 */
16729
16730-#if PTRS_PER_PMD > 1
16731-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16732-#else
16733-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16734-#endif
16735+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16736
16737 /* Enough space to fit pagetables for the low memory linear map */
16738 MAPPING_BEYOND_END = \
16739@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16740 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16741
16742 /*
16743+ * Real beginning of normal "text" segment
16744+ */
16745+ENTRY(stext)
16746+ENTRY(_stext)
16747+
16748+/*
16749 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16750 * %esi points to the real-mode code as a 32-bit pointer.
16751 * CS and DS must be 4 GB flat segments, but we don't depend on
16752@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16753 * can.
16754 */
16755 __HEAD
16756+
16757+#ifdef CONFIG_PAX_KERNEXEC
16758+ jmp startup_32
16759+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16760+.fill PAGE_SIZE-5,1,0xcc
16761+#endif
16762+
16763 ENTRY(startup_32)
16764+ movl pa(stack_start),%ecx
16765+
16766 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16767 us to not reload segments */
16768 testb $(1<<6), BP_loadflags(%esi)
16769@@ -95,7 +113,60 @@ ENTRY(startup_32)
16770 movl %eax,%es
16771 movl %eax,%fs
16772 movl %eax,%gs
16773+ movl %eax,%ss
16774 2:
16775+ leal -__PAGE_OFFSET(%ecx),%esp
16776+
16777+#ifdef CONFIG_SMP
16778+ movl $pa(cpu_gdt_table),%edi
16779+ movl $__per_cpu_load,%eax
16780+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16781+ rorl $16,%eax
16782+ movb %al,__KERNEL_PERCPU + 4(%edi)
16783+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16784+ movl $__per_cpu_end - 1,%eax
16785+ subl $__per_cpu_start,%eax
16786+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_MEMORY_UDEREF
16790+ movl $NR_CPUS,%ecx
16791+ movl $pa(cpu_gdt_table),%edi
16792+1:
16793+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16794+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16795+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16796+ addl $PAGE_SIZE_asm,%edi
16797+ loop 1b
16798+#endif
16799+
16800+#ifdef CONFIG_PAX_KERNEXEC
16801+ movl $pa(boot_gdt),%edi
16802+ movl $__LOAD_PHYSICAL_ADDR,%eax
16803+ movw %ax,__BOOT_CS + 2(%edi)
16804+ rorl $16,%eax
16805+ movb %al,__BOOT_CS + 4(%edi)
16806+ movb %ah,__BOOT_CS + 7(%edi)
16807+ rorl $16,%eax
16808+
16809+ ljmp $(__BOOT_CS),$1f
16810+1:
16811+
16812+ movl $NR_CPUS,%ecx
16813+ movl $pa(cpu_gdt_table),%edi
16814+ addl $__PAGE_OFFSET,%eax
16815+1:
16816+ movw %ax,__KERNEL_CS + 2(%edi)
16817+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16818+ rorl $16,%eax
16819+ movb %al,__KERNEL_CS + 4(%edi)
16820+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16821+ movb %ah,__KERNEL_CS + 7(%edi)
16822+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16823+ rorl $16,%eax
16824+ addl $PAGE_SIZE_asm,%edi
16825+ loop 1b
16826+#endif
16827
16828 /*
16829 * Clear BSS first so that there are no surprises...
16830@@ -140,9 +211,7 @@ ENTRY(startup_32)
16831 cmpl $num_subarch_entries, %eax
16832 jae bad_subarch
16833
16834- movl pa(subarch_entries)(,%eax,4), %eax
16835- subl $__PAGE_OFFSET, %eax
16836- jmp *%eax
16837+ jmp *pa(subarch_entries)(,%eax,4)
16838
16839 bad_subarch:
16840 WEAK(lguest_entry)
16841@@ -154,10 +223,10 @@ WEAK(xen_entry)
16842 __INITDATA
16843
16844 subarch_entries:
16845- .long default_entry /* normal x86/PC */
16846- .long lguest_entry /* lguest hypervisor */
16847- .long xen_entry /* Xen hypervisor */
16848- .long default_entry /* Moorestown MID */
16849+ .long ta(default_entry) /* normal x86/PC */
16850+ .long ta(lguest_entry) /* lguest hypervisor */
16851+ .long ta(xen_entry) /* Xen hypervisor */
16852+ .long ta(default_entry) /* Moorestown MID */
16853 num_subarch_entries = (. - subarch_entries) / 4
16854 .previous
16855 #endif /* CONFIG_PARAVIRT */
16856@@ -218,8 +287,11 @@ default_entry:
16857 movl %eax, pa(max_pfn_mapped)
16858
16859 /* Do early initialization of the fixmap area */
16860- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16861- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16862+#ifdef CONFIG_COMPAT_VDSO
16863+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16864+#else
16865+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16866+#endif
16867 #else /* Not PAE */
16868
16869 page_pde_offset = (__PAGE_OFFSET >> 20);
16870@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16871 movl %eax, pa(max_pfn_mapped)
16872
16873 /* Do early initialization of the fixmap area */
16874- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16875- movl %eax,pa(swapper_pg_dir+0xffc)
16876+#ifdef CONFIG_COMPAT_VDSO
16877+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16878+#else
16879+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16880+#endif
16881 #endif
16882 jmp 3f
16883 /*
16884@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16885 movl %eax,%es
16886 movl %eax,%fs
16887 movl %eax,%gs
16888+ movl pa(stack_start),%ecx
16889+ movl %eax,%ss
16890+ leal -__PAGE_OFFSET(%ecx),%esp
16891 #endif /* CONFIG_SMP */
16892 3:
16893
16894@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16895 orl %edx,%eax
16896 movl %eax,%cr4
16897
16898+#ifdef CONFIG_X86_PAE
16899 btl $5, %eax # check if PAE is enabled
16900 jnc 6f
16901
16902@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16903 cpuid
16904 cmpl $0x80000000, %eax
16905 jbe 6f
16906+
16907+ /* Clear bogus XD_DISABLE bits */
16908+ call verify_cpu
16909+
16910 mov $0x80000001, %eax
16911 cpuid
16912 /* Execute Disable bit supported? */
16913@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16914 jnc 6f
16915
16916 /* Setup EFER (Extended Feature Enable Register) */
16917- movl $0xc0000080, %ecx
16918+ movl $MSR_EFER, %ecx
16919 rdmsr
16920
16921 btsl $11, %eax
16922 /* Make changes effective */
16923 wrmsr
16924
16925+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16926+ movl $1,pa(nx_enabled)
16927+#endif
16928+
16929 6:
16930
16931 /*
16932@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16933 movl %eax,%cr0 /* ..and set paging (PG) bit */
16934 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16935 1:
16936- /* Set up the stack pointer */
16937- lss stack_start,%esp
16938+ /* Shift the stack pointer to a virtual address */
16939+ addl $__PAGE_OFFSET, %esp
16940
16941 /*
16942 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16943@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16944
16945 #ifdef CONFIG_SMP
16946 cmpb $0, ready
16947- jz 1f /* Initial CPU cleans BSS */
16948- jmp checkCPUtype
16949-1:
16950+ jnz checkCPUtype
16951 #endif /* CONFIG_SMP */
16952
16953 /*
16954@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16955 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16956 movl %eax,%ss # after changing gdt.
16957
16958- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16959+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16960 movl %eax,%ds
16961 movl %eax,%es
16962
16963@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16964 */
16965 cmpb $0,ready
16966 jne 1f
16967- movl $per_cpu__gdt_page,%eax
16968+ movl $cpu_gdt_table,%eax
16969 movl $per_cpu__stack_canary,%ecx
16970+#ifdef CONFIG_SMP
16971+ addl $__per_cpu_load,%ecx
16972+#endif
16973 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16974 shrl $16, %ecx
16975 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16976 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16977 1:
16978-#endif
16979 movl $(__KERNEL_STACK_CANARY),%eax
16980+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16981+ movl $(__USER_DS),%eax
16982+#else
16983+ xorl %eax,%eax
16984+#endif
16985 movl %eax,%gs
16986
16987 xorl %eax,%eax # Clear LDT
16988@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
16989
16990 cld # gcc2 wants the direction flag cleared at all times
16991 pushl $0 # fake return address for unwinder
16992-#ifdef CONFIG_SMP
16993- movb ready, %cl
16994 movb $1, ready
16995- cmpb $0,%cl # the first CPU calls start_kernel
16996- je 1f
16997- movl (stack_start), %esp
16998-1:
16999-#endif /* CONFIG_SMP */
17000 jmp *(initial_code)
17001
17002 /*
17003@@ -546,22 +631,22 @@ early_page_fault:
17004 jmp early_fault
17005
17006 early_fault:
17007- cld
17008 #ifdef CONFIG_PRINTK
17009+ cmpl $1,%ss:early_recursion_flag
17010+ je hlt_loop
17011+ incl %ss:early_recursion_flag
17012+ cld
17013 pusha
17014 movl $(__KERNEL_DS),%eax
17015 movl %eax,%ds
17016 movl %eax,%es
17017- cmpl $2,early_recursion_flag
17018- je hlt_loop
17019- incl early_recursion_flag
17020 movl %cr2,%eax
17021 pushl %eax
17022 pushl %edx /* trapno */
17023 pushl $fault_msg
17024 call printk
17025+; call dump_stack
17026 #endif
17027- call dump_stack
17028 hlt_loop:
17029 hlt
17030 jmp hlt_loop
17031@@ -569,8 +654,11 @@ hlt_loop:
17032 /* This is the default interrupt "handler" :-) */
17033 ALIGN
17034 ignore_int:
17035- cld
17036 #ifdef CONFIG_PRINTK
17037+ cmpl $2,%ss:early_recursion_flag
17038+ je hlt_loop
17039+ incl %ss:early_recursion_flag
17040+ cld
17041 pushl %eax
17042 pushl %ecx
17043 pushl %edx
17044@@ -579,9 +667,6 @@ ignore_int:
17045 movl $(__KERNEL_DS),%eax
17046 movl %eax,%ds
17047 movl %eax,%es
17048- cmpl $2,early_recursion_flag
17049- je hlt_loop
17050- incl early_recursion_flag
17051 pushl 16(%esp)
17052 pushl 24(%esp)
17053 pushl 32(%esp)
17054@@ -600,6 +685,8 @@ ignore_int:
17055 #endif
17056 iret
17057
17058+#include "verify_cpu.S"
17059+
17060 __REFDATA
17061 .align 4
17062 ENTRY(initial_code)
17063@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17064 /*
17065 * BSS section
17066 */
17067-__PAGE_ALIGNED_BSS
17068- .align PAGE_SIZE_asm
17069 #ifdef CONFIG_X86_PAE
17070+.section .swapper_pg_pmd,"a",@progbits
17071 swapper_pg_pmd:
17072 .fill 1024*KPMDS,4,0
17073 #else
17074+.section .swapper_pg_dir,"a",@progbits
17075 ENTRY(swapper_pg_dir)
17076 .fill 1024,4,0
17077 #endif
17078+.section .swapper_pg_fixmap,"a",@progbits
17079 swapper_pg_fixmap:
17080 .fill 1024,4,0
17081 #ifdef CONFIG_X86_TRAMPOLINE
17082+.section .trampoline_pg_dir,"a",@progbits
17083 ENTRY(trampoline_pg_dir)
17084+#ifdef CONFIG_X86_PAE
17085+ .fill 4,8,0
17086+#else
17087 .fill 1024,4,0
17088 #endif
17089+#endif
17090+
17091+.section .empty_zero_page,"a",@progbits
17092 ENTRY(empty_zero_page)
17093 .fill 4096,1,0
17094
17095 /*
17096+ * The IDT has to be page-aligned to simplify the Pentium
17097+ * F0 0F bug workaround.. We have a special link segment
17098+ * for this.
17099+ */
17100+.section .idt,"a",@progbits
17101+ENTRY(idt_table)
17102+ .fill 256,8,0
17103+
17104+/*
17105 * This starts the data section.
17106 */
17107 #ifdef CONFIG_X86_PAE
17108-__PAGE_ALIGNED_DATA
17109- /* Page-aligned for the benefit of paravirt? */
17110- .align PAGE_SIZE_asm
17111+.section .swapper_pg_dir,"a",@progbits
17112+
17113 ENTRY(swapper_pg_dir)
17114 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17115 # if KPMDS == 3
17116@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17117 # error "Kernel PMDs should be 1, 2 or 3"
17118 # endif
17119 .align PAGE_SIZE_asm /* needs to be page-sized too */
17120+
17121+#ifdef CONFIG_PAX_PER_CPU_PGD
17122+ENTRY(cpu_pgd)
17123+ .rept NR_CPUS
17124+ .fill 4,8,0
17125+ .endr
17126+#endif
17127+
17128 #endif
17129
17130 .data
17131+.balign 4
17132 ENTRY(stack_start)
17133- .long init_thread_union+THREAD_SIZE
17134- .long __BOOT_DS
17135+ .long init_thread_union+THREAD_SIZE-8
17136
17137 ready: .byte 0
17138
17139+.section .rodata,"a",@progbits
17140 early_recursion_flag:
17141 .long 0
17142
17143@@ -697,7 +809,7 @@ fault_msg:
17144 .word 0 # 32 bit align gdt_desc.address
17145 boot_gdt_descr:
17146 .word __BOOT_DS+7
17147- .long boot_gdt - __PAGE_OFFSET
17148+ .long pa(boot_gdt)
17149
17150 .word 0 # 32-bit align idt_desc.address
17151 idt_descr:
17152@@ -708,7 +820,7 @@ idt_descr:
17153 .word 0 # 32 bit align gdt_desc.address
17154 ENTRY(early_gdt_descr)
17155 .word GDT_ENTRIES*8-1
17156- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17157+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17158
17159 /*
17160 * The boot_gdt must mirror the equivalent in setup.S and is
17161@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17162 .align L1_CACHE_BYTES
17163 ENTRY(boot_gdt)
17164 .fill GDT_ENTRY_BOOT_CS,8,0
17165- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17166- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17167+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17168+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17169+
17170+ .align PAGE_SIZE_asm
17171+ENTRY(cpu_gdt_table)
17172+ .rept NR_CPUS
17173+ .quad 0x0000000000000000 /* NULL descriptor */
17174+ .quad 0x0000000000000000 /* 0x0b reserved */
17175+ .quad 0x0000000000000000 /* 0x13 reserved */
17176+ .quad 0x0000000000000000 /* 0x1b reserved */
17177+
17178+#ifdef CONFIG_PAX_KERNEXEC
17179+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17180+#else
17181+ .quad 0x0000000000000000 /* 0x20 unused */
17182+#endif
17183+
17184+ .quad 0x0000000000000000 /* 0x28 unused */
17185+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17186+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17187+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17188+ .quad 0x0000000000000000 /* 0x4b reserved */
17189+ .quad 0x0000000000000000 /* 0x53 reserved */
17190+ .quad 0x0000000000000000 /* 0x5b reserved */
17191+
17192+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17193+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17194+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17195+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17196+
17197+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17198+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17199+
17200+ /*
17201+ * Segments used for calling PnP BIOS have byte granularity.
17202+ * The code segments and data segments have fixed 64k limits,
17203+ * the transfer segment sizes are set at run time.
17204+ */
17205+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17206+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17207+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17208+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17209+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17210+
17211+ /*
17212+ * The APM segments have byte granularity and their bases
17213+ * are set at run time. All have 64k limits.
17214+ */
17215+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17216+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17217+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17218+
17219+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17220+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17221+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17222+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17223+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17224+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17225+
17226+ /* Be sure this is zeroed to avoid false validations in Xen */
17227+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17228+ .endr
17229diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17230index 780cd92..758b2a6 100644
17231--- a/arch/x86/kernel/head_64.S
17232+++ b/arch/x86/kernel/head_64.S
17233@@ -19,6 +19,8 @@
17234 #include <asm/cache.h>
17235 #include <asm/processor-flags.h>
17236 #include <asm/percpu.h>
17237+#include <asm/cpufeature.h>
17238+#include <asm/alternative-asm.h>
17239
17240 #ifdef CONFIG_PARAVIRT
17241 #include <asm/asm-offsets.h>
17242@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17243 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17244 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17245 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17246+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17247+L3_VMALLOC_START = pud_index(VMALLOC_START)
17248+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17249+L3_VMALLOC_END = pud_index(VMALLOC_END)
17250+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17251+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17252
17253 .text
17254 __HEAD
17255@@ -85,35 +93,23 @@ startup_64:
17256 */
17257 addq %rbp, init_level4_pgt + 0(%rip)
17258 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17259+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17260+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17261+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17262 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17263
17264 addq %rbp, level3_ident_pgt + 0(%rip)
17265+#ifndef CONFIG_XEN
17266+ addq %rbp, level3_ident_pgt + 8(%rip)
17267+#endif
17268
17269- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17270- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17271+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17272+
17273+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17274+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17275
17276 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17277-
17278- /* Add an Identity mapping if I am above 1G */
17279- leaq _text(%rip), %rdi
17280- andq $PMD_PAGE_MASK, %rdi
17281-
17282- movq %rdi, %rax
17283- shrq $PUD_SHIFT, %rax
17284- andq $(PTRS_PER_PUD - 1), %rax
17285- jz ident_complete
17286-
17287- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17288- leaq level3_ident_pgt(%rip), %rbx
17289- movq %rdx, 0(%rbx, %rax, 8)
17290-
17291- movq %rdi, %rax
17292- shrq $PMD_SHIFT, %rax
17293- andq $(PTRS_PER_PMD - 1), %rax
17294- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17295- leaq level2_spare_pgt(%rip), %rbx
17296- movq %rdx, 0(%rbx, %rax, 8)
17297-ident_complete:
17298+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17299
17300 /*
17301 * Fixup the kernel text+data virtual addresses. Note that
17302@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17303 * after the boot processor executes this code.
17304 */
17305
17306- /* Enable PAE mode and PGE */
17307- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17308+ /* Enable PAE mode and PSE/PGE */
17309+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17310 movq %rax, %cr4
17311
17312 /* Setup early boot stage 4 level pagetables. */
17313@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17314 movl $MSR_EFER, %ecx
17315 rdmsr
17316 btsl $_EFER_SCE, %eax /* Enable System Call */
17317- btl $20,%edi /* No Execute supported? */
17318+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17319 jnc 1f
17320 btsl $_EFER_NX, %eax
17321+ leaq init_level4_pgt(%rip), %rdi
17322+#ifndef CONFIG_EFI
17323+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17324+#endif
17325+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17326+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17327+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17328 1: wrmsr /* Make changes effective */
17329
17330 /* Setup cr0 */
17331@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17332 * jump. In addition we need to ensure %cs is set so we make this
17333 * a far return.
17334 */
17335+ pax_set_fptr_mask
17336 movq initial_code(%rip),%rax
17337 pushq $0 # fake return address to stop unwinder
17338 pushq $__KERNEL_CS # set correct cs
17339@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17340 .quad x86_64_start_kernel
17341 ENTRY(initial_gs)
17342 .quad INIT_PER_CPU_VAR(irq_stack_union)
17343- __FINITDATA
17344
17345 ENTRY(stack_start)
17346 .quad init_thread_union+THREAD_SIZE-8
17347 .word 0
17348+ __FINITDATA
17349
17350 bad_address:
17351 jmp bad_address
17352
17353- .section ".init.text","ax"
17354+ __INIT
17355 #ifdef CONFIG_EARLY_PRINTK
17356 .globl early_idt_handlers
17357 early_idt_handlers:
17358@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17359 #endif /* EARLY_PRINTK */
17360 1: hlt
17361 jmp 1b
17362+ .previous
17363
17364 #ifdef CONFIG_EARLY_PRINTK
17365+ __INITDATA
17366 early_recursion_flag:
17367 .long 0
17368+ .previous
17369
17370+ .section .rodata,"a",@progbits
17371 early_idt_msg:
17372 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17373 early_idt_ripmsg:
17374 .asciz "RIP %s\n"
17375+ .previous
17376 #endif /* CONFIG_EARLY_PRINTK */
17377- .previous
17378
17379+ .section .rodata,"a",@progbits
17380 #define NEXT_PAGE(name) \
17381 .balign PAGE_SIZE; \
17382 ENTRY(name)
17383@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17384 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17385 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17386 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17387+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17388+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17389+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17390+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17391+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17392+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17393 .org init_level4_pgt + L4_START_KERNEL*8, 0
17394 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17395 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17396
17397+#ifdef CONFIG_PAX_PER_CPU_PGD
17398+NEXT_PAGE(cpu_pgd)
17399+ .rept NR_CPUS
17400+ .fill 512,8,0
17401+ .endr
17402+#endif
17403+
17404 NEXT_PAGE(level3_ident_pgt)
17405 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17406+#ifdef CONFIG_XEN
17407 .fill 511,8,0
17408+#else
17409+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17410+ .fill 510,8,0
17411+#endif
17412+
17413+NEXT_PAGE(level3_vmalloc_start_pgt)
17414+ .fill 512,8,0
17415+
17416+NEXT_PAGE(level3_vmalloc_end_pgt)
17417+ .fill 512,8,0
17418+
17419+NEXT_PAGE(level3_vmemmap_pgt)
17420+ .fill L3_VMEMMAP_START,8,0
17421+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17422
17423 NEXT_PAGE(level3_kernel_pgt)
17424 .fill L3_START_KERNEL,8,0
17425@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17426 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17427 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17428
17429+NEXT_PAGE(level2_vmemmap_pgt)
17430+ .fill 512,8,0
17431+
17432 NEXT_PAGE(level2_fixmap_pgt)
17433- .fill 506,8,0
17434- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17435- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17436- .fill 5,8,0
17437+ .fill 507,8,0
17438+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17439+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17440+ .fill 4,8,0
17441
17442-NEXT_PAGE(level1_fixmap_pgt)
17443+NEXT_PAGE(level1_vsyscall_pgt)
17444 .fill 512,8,0
17445
17446-NEXT_PAGE(level2_ident_pgt)
17447- /* Since I easily can, map the first 1G.
17448+ /* Since I easily can, map the first 2G.
17449 * Don't set NX because code runs from these pages.
17450 */
17451- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17452+NEXT_PAGE(level2_ident_pgt)
17453+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17454
17455 NEXT_PAGE(level2_kernel_pgt)
17456 /*
17457@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17458 * If you want to increase this then increase MODULES_VADDR
17459 * too.)
17460 */
17461- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17462- KERNEL_IMAGE_SIZE/PMD_SIZE)
17463-
17464-NEXT_PAGE(level2_spare_pgt)
17465- .fill 512, 8, 0
17466+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17467
17468 #undef PMDS
17469 #undef NEXT_PAGE
17470
17471- .data
17472+ .align PAGE_SIZE
17473+ENTRY(cpu_gdt_table)
17474+ .rept NR_CPUS
17475+ .quad 0x0000000000000000 /* NULL descriptor */
17476+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17477+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17478+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17479+ .quad 0x00cffb000000ffff /* __USER32_CS */
17480+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17481+ .quad 0x00affb000000ffff /* __USER_CS */
17482+
17483+#ifdef CONFIG_PAX_KERNEXEC
17484+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17485+#else
17486+ .quad 0x0 /* unused */
17487+#endif
17488+
17489+ .quad 0,0 /* TSS */
17490+ .quad 0,0 /* LDT */
17491+ .quad 0,0,0 /* three TLS descriptors */
17492+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17493+ /* asm/segment.h:GDT_ENTRIES must match this */
17494+
17495+ /* zero the remaining page */
17496+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17497+ .endr
17498+
17499 .align 16
17500 .globl early_gdt_descr
17501 early_gdt_descr:
17502 .word GDT_ENTRIES*8-1
17503 early_gdt_descr_base:
17504- .quad INIT_PER_CPU_VAR(gdt_page)
17505+ .quad cpu_gdt_table
17506
17507 ENTRY(phys_base)
17508 /* This must match the first entry in level2_kernel_pgt */
17509 .quad 0x0000000000000000
17510
17511 #include "../../x86/xen/xen-head.S"
17512-
17513- .section .bss, "aw", @nobits
17514+
17515+ .section .rodata,"a",@progbits
17516 .align L1_CACHE_BYTES
17517 ENTRY(idt_table)
17518- .skip IDT_ENTRIES * 16
17519+ .fill 512,8,0
17520
17521 __PAGE_ALIGNED_BSS
17522 .align PAGE_SIZE
17523diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17524index 9c3bd4a..e1d9b35 100644
17525--- a/arch/x86/kernel/i386_ksyms_32.c
17526+++ b/arch/x86/kernel/i386_ksyms_32.c
17527@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17528 EXPORT_SYMBOL(cmpxchg8b_emu);
17529 #endif
17530
17531+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17532+
17533 /* Networking helper routines. */
17534 EXPORT_SYMBOL(csum_partial_copy_generic);
17535+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17536+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17537
17538 EXPORT_SYMBOL(__get_user_1);
17539 EXPORT_SYMBOL(__get_user_2);
17540@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17541
17542 EXPORT_SYMBOL(csum_partial);
17543 EXPORT_SYMBOL(empty_zero_page);
17544+
17545+#ifdef CONFIG_PAX_KERNEXEC
17546+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17547+#endif
17548diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17549index df89102..a244320 100644
17550--- a/arch/x86/kernel/i8259.c
17551+++ b/arch/x86/kernel/i8259.c
17552@@ -208,7 +208,7 @@ spurious_8259A_irq:
17553 "spurious 8259A interrupt: IRQ%d.\n", irq);
17554 spurious_irq_mask |= irqmask;
17555 }
17556- atomic_inc(&irq_err_count);
17557+ atomic_inc_unchecked(&irq_err_count);
17558 /*
17559 * Theoretically we do not have to handle this IRQ,
17560 * but in Linux this does not cause problems and is
17561diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17562index 3a54dcb..1c22348 100644
17563--- a/arch/x86/kernel/init_task.c
17564+++ b/arch/x86/kernel/init_task.c
17565@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17566 * way process stacks are handled. This is done by having a special
17567 * "init_task" linker map entry..
17568 */
17569-union thread_union init_thread_union __init_task_data =
17570- { INIT_THREAD_INFO(init_task) };
17571+union thread_union init_thread_union __init_task_data;
17572
17573 /*
17574 * Initial task structure.
17575@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17576 * section. Since TSS's are completely CPU-local, we want them
17577 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17578 */
17579-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17580-
17581+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17582+EXPORT_SYMBOL(init_tss);
17583diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17584index 99c4d30..74c84e9 100644
17585--- a/arch/x86/kernel/ioport.c
17586+++ b/arch/x86/kernel/ioport.c
17587@@ -6,6 +6,7 @@
17588 #include <linux/sched.h>
17589 #include <linux/kernel.h>
17590 #include <linux/capability.h>
17591+#include <linux/security.h>
17592 #include <linux/errno.h>
17593 #include <linux/types.h>
17594 #include <linux/ioport.h>
17595@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17596
17597 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17598 return -EINVAL;
17599+#ifdef CONFIG_GRKERNSEC_IO
17600+ if (turn_on && grsec_disable_privio) {
17601+ gr_handle_ioperm();
17602+ return -EPERM;
17603+ }
17604+#endif
17605 if (turn_on && !capable(CAP_SYS_RAWIO))
17606 return -EPERM;
17607
17608@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17609 * because the ->io_bitmap_max value must match the bitmap
17610 * contents:
17611 */
17612- tss = &per_cpu(init_tss, get_cpu());
17613+ tss = init_tss + get_cpu();
17614
17615 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17616
17617@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17618 return -EINVAL;
17619 /* Trying to gain more privileges? */
17620 if (level > old) {
17621+#ifdef CONFIG_GRKERNSEC_IO
17622+ if (grsec_disable_privio) {
17623+ gr_handle_iopl();
17624+ return -EPERM;
17625+ }
17626+#endif
17627 if (!capable(CAP_SYS_RAWIO))
17628 return -EPERM;
17629 }
17630diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17631index 04bbd52..83a07d9 100644
17632--- a/arch/x86/kernel/irq.c
17633+++ b/arch/x86/kernel/irq.c
17634@@ -15,7 +15,7 @@
17635 #include <asm/mce.h>
17636 #include <asm/hw_irq.h>
17637
17638-atomic_t irq_err_count;
17639+atomic_unchecked_t irq_err_count;
17640
17641 /* Function pointer for generic interrupt vector handling */
17642 void (*generic_interrupt_extension)(void) = NULL;
17643@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17644 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17645 seq_printf(p, " Machine check polls\n");
17646 #endif
17647- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17648+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17649 #if defined(CONFIG_X86_IO_APIC)
17650- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17651+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17652 #endif
17653 return 0;
17654 }
17655@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17656
17657 u64 arch_irq_stat(void)
17658 {
17659- u64 sum = atomic_read(&irq_err_count);
17660+ u64 sum = atomic_read_unchecked(&irq_err_count);
17661
17662 #ifdef CONFIG_X86_IO_APIC
17663- sum += atomic_read(&irq_mis_count);
17664+ sum += atomic_read_unchecked(&irq_mis_count);
17665 #endif
17666 return sum;
17667 }
17668diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17669index 7d35d0f..03f1d52 100644
17670--- a/arch/x86/kernel/irq_32.c
17671+++ b/arch/x86/kernel/irq_32.c
17672@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17673 __asm__ __volatile__("andl %%esp,%0" :
17674 "=r" (sp) : "0" (THREAD_SIZE - 1));
17675
17676- return sp < (sizeof(struct thread_info) + STACK_WARN);
17677+ return sp < STACK_WARN;
17678 }
17679
17680 static void print_stack_overflow(void)
17681@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17682 * per-CPU IRQ handling contexts (thread information and stack)
17683 */
17684 union irq_ctx {
17685- struct thread_info tinfo;
17686- u32 stack[THREAD_SIZE/sizeof(u32)];
17687-} __attribute__((aligned(PAGE_SIZE)));
17688+ unsigned long previous_esp;
17689+ u32 stack[THREAD_SIZE/sizeof(u32)];
17690+} __attribute__((aligned(THREAD_SIZE)));
17691
17692 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17693 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17694@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17695 static inline int
17696 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17697 {
17698- union irq_ctx *curctx, *irqctx;
17699+ union irq_ctx *irqctx;
17700 u32 *isp, arg1, arg2;
17701
17702- curctx = (union irq_ctx *) current_thread_info();
17703 irqctx = __get_cpu_var(hardirq_ctx);
17704
17705 /*
17706@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17707 * handler) we can't do that and just have to keep using the
17708 * current stack (which is the irq stack already after all)
17709 */
17710- if (unlikely(curctx == irqctx))
17711+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17712 return 0;
17713
17714 /* build the stack frame on the IRQ stack */
17715- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17716- irqctx->tinfo.task = curctx->tinfo.task;
17717- irqctx->tinfo.previous_esp = current_stack_pointer;
17718+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17719+ irqctx->previous_esp = current_stack_pointer;
17720
17721- /*
17722- * Copy the softirq bits in preempt_count so that the
17723- * softirq checks work in the hardirq context.
17724- */
17725- irqctx->tinfo.preempt_count =
17726- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17727- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17728+#ifdef CONFIG_PAX_MEMORY_UDEREF
17729+ __set_fs(MAKE_MM_SEG(0));
17730+#endif
17731
17732 if (unlikely(overflow))
17733 call_on_stack(print_stack_overflow, isp);
17734@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17735 : "0" (irq), "1" (desc), "2" (isp),
17736 "D" (desc->handle_irq)
17737 : "memory", "cc", "ecx");
17738+
17739+#ifdef CONFIG_PAX_MEMORY_UDEREF
17740+ __set_fs(current_thread_info()->addr_limit);
17741+#endif
17742+
17743 return 1;
17744 }
17745
17746@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17747 */
17748 void __cpuinit irq_ctx_init(int cpu)
17749 {
17750- union irq_ctx *irqctx;
17751-
17752 if (per_cpu(hardirq_ctx, cpu))
17753 return;
17754
17755- irqctx = &per_cpu(hardirq_stack, cpu);
17756- irqctx->tinfo.task = NULL;
17757- irqctx->tinfo.exec_domain = NULL;
17758- irqctx->tinfo.cpu = cpu;
17759- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17760- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17761-
17762- per_cpu(hardirq_ctx, cpu) = irqctx;
17763-
17764- irqctx = &per_cpu(softirq_stack, cpu);
17765- irqctx->tinfo.task = NULL;
17766- irqctx->tinfo.exec_domain = NULL;
17767- irqctx->tinfo.cpu = cpu;
17768- irqctx->tinfo.preempt_count = 0;
17769- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17770-
17771- per_cpu(softirq_ctx, cpu) = irqctx;
17772+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17773+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17774
17775 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17776 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17777@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17778 asmlinkage void do_softirq(void)
17779 {
17780 unsigned long flags;
17781- struct thread_info *curctx;
17782 union irq_ctx *irqctx;
17783 u32 *isp;
17784
17785@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17786 local_irq_save(flags);
17787
17788 if (local_softirq_pending()) {
17789- curctx = current_thread_info();
17790 irqctx = __get_cpu_var(softirq_ctx);
17791- irqctx->tinfo.task = curctx->task;
17792- irqctx->tinfo.previous_esp = current_stack_pointer;
17793+ irqctx->previous_esp = current_stack_pointer;
17794
17795 /* build the stack frame on the softirq stack */
17796- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17797+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17798+
17799+#ifdef CONFIG_PAX_MEMORY_UDEREF
17800+ __set_fs(MAKE_MM_SEG(0));
17801+#endif
17802
17803 call_on_stack(__do_softirq, isp);
17804+
17805+#ifdef CONFIG_PAX_MEMORY_UDEREF
17806+ __set_fs(current_thread_info()->addr_limit);
17807+#endif
17808+
17809 /*
17810 * Shouldnt happen, we returned above if in_interrupt():
17811 */
17812diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17813index 8d82a77..0baf312 100644
17814--- a/arch/x86/kernel/kgdb.c
17815+++ b/arch/x86/kernel/kgdb.c
17816@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17817
17818 /* clear the trace bit */
17819 linux_regs->flags &= ~X86_EFLAGS_TF;
17820- atomic_set(&kgdb_cpu_doing_single_step, -1);
17821+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17822
17823 /* set the trace bit if we're stepping */
17824 if (remcomInBuffer[0] == 's') {
17825 linux_regs->flags |= X86_EFLAGS_TF;
17826 kgdb_single_step = 1;
17827- atomic_set(&kgdb_cpu_doing_single_step,
17828+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17829 raw_smp_processor_id());
17830 }
17831
17832@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17833 break;
17834
17835 case DIE_DEBUG:
17836- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17837+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17838 raw_smp_processor_id()) {
17839 if (user_mode(regs))
17840 return single_step_cont(regs, args);
17841@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17842 return instruction_pointer(regs);
17843 }
17844
17845-struct kgdb_arch arch_kgdb_ops = {
17846+const struct kgdb_arch arch_kgdb_ops = {
17847 /* Breakpoint instruction: */
17848 .gdb_bpt_instr = { 0xcc },
17849 .flags = KGDB_HW_BREAKPOINT,
17850diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17851index 7a67820..8d15b75 100644
17852--- a/arch/x86/kernel/kprobes.c
17853+++ b/arch/x86/kernel/kprobes.c
17854@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17855 char op;
17856 s32 raddr;
17857 } __attribute__((packed)) * jop;
17858- jop = (struct __arch_jmp_op *)from;
17859+
17860+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17861+
17862+ pax_open_kernel();
17863 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17864 jop->op = RELATIVEJUMP_INSTRUCTION;
17865+ pax_close_kernel();
17866 }
17867
17868 /*
17869@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17870 kprobe_opcode_t opcode;
17871 kprobe_opcode_t *orig_opcodes = opcodes;
17872
17873- if (search_exception_tables((unsigned long)opcodes))
17874+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17875 return 0; /* Page fault may occur on this address. */
17876
17877 retry:
17878@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17879 disp = (u8 *) p->addr + *((s32 *) insn) -
17880 (u8 *) p->ainsn.insn;
17881 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17882+ pax_open_kernel();
17883 *(s32 *)insn = (s32) disp;
17884+ pax_close_kernel();
17885 }
17886 }
17887 #endif
17888@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17889
17890 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17891 {
17892- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17893+ pax_open_kernel();
17894+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17895+ pax_close_kernel();
17896
17897 fix_riprel(p);
17898
17899- if (can_boost(p->addr))
17900+ if (can_boost(ktla_ktva(p->addr)))
17901 p->ainsn.boostable = 0;
17902 else
17903 p->ainsn.boostable = -1;
17904
17905- p->opcode = *p->addr;
17906+ p->opcode = *(ktla_ktva(p->addr));
17907 }
17908
17909 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17910@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17911 if (p->opcode == BREAKPOINT_INSTRUCTION)
17912 regs->ip = (unsigned long)p->addr;
17913 else
17914- regs->ip = (unsigned long)p->ainsn.insn;
17915+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17916 }
17917
17918 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17919@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17920 if (p->ainsn.boostable == 1 && !p->post_handler) {
17921 /* Boost up -- we can execute copied instructions directly */
17922 reset_current_kprobe();
17923- regs->ip = (unsigned long)p->ainsn.insn;
17924+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17925 preempt_enable_no_resched();
17926 return;
17927 }
17928@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17929 struct kprobe_ctlblk *kcb;
17930
17931 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17932- if (*addr != BREAKPOINT_INSTRUCTION) {
17933+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17934 /*
17935 * The breakpoint instruction was removed right
17936 * after we hit it. Another cpu has removed
17937@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17938 /* Skip orig_ax, ip, cs */
17939 " addq $24, %rsp\n"
17940 " popfq\n"
17941+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17942+ " btsq $63,(%rsp)\n"
17943+#endif
17944 #else
17945 " pushf\n"
17946 /*
17947@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17948 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17949 {
17950 unsigned long *tos = stack_addr(regs);
17951- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17952+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17953 unsigned long orig_ip = (unsigned long)p->addr;
17954 kprobe_opcode_t *insn = p->ainsn.insn;
17955
17956@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17957 struct die_args *args = data;
17958 int ret = NOTIFY_DONE;
17959
17960- if (args->regs && user_mode_vm(args->regs))
17961+ if (args->regs && user_mode(args->regs))
17962 return ret;
17963
17964 switch (val) {
17965diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17966index 63b0ec8..6d92227 100644
17967--- a/arch/x86/kernel/kvm.c
17968+++ b/arch/x86/kernel/kvm.c
17969@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
17970 pv_mmu_ops.set_pud = kvm_set_pud;
17971 #if PAGETABLE_LEVELS == 4
17972 pv_mmu_ops.set_pgd = kvm_set_pgd;
17973+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17974 #endif
17975 #endif
17976 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17977diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17978index ec6ef60..ab2c824 100644
17979--- a/arch/x86/kernel/ldt.c
17980+++ b/arch/x86/kernel/ldt.c
17981@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17982 if (reload) {
17983 #ifdef CONFIG_SMP
17984 preempt_disable();
17985- load_LDT(pc);
17986+ load_LDT_nolock(pc);
17987 if (!cpumask_equal(mm_cpumask(current->mm),
17988 cpumask_of(smp_processor_id())))
17989 smp_call_function(flush_ldt, current->mm, 1);
17990 preempt_enable();
17991 #else
17992- load_LDT(pc);
17993+ load_LDT_nolock(pc);
17994 #endif
17995 }
17996 if (oldsize) {
17997@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17998 return err;
17999
18000 for (i = 0; i < old->size; i++)
18001- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18002+ write_ldt_entry(new->ldt, i, old->ldt + i);
18003 return 0;
18004 }
18005
18006@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18007 retval = copy_ldt(&mm->context, &old_mm->context);
18008 mutex_unlock(&old_mm->context.lock);
18009 }
18010+
18011+ if (tsk == current) {
18012+ mm->context.vdso = 0;
18013+
18014+#ifdef CONFIG_X86_32
18015+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18016+ mm->context.user_cs_base = 0UL;
18017+ mm->context.user_cs_limit = ~0UL;
18018+
18019+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18020+ cpus_clear(mm->context.cpu_user_cs_mask);
18021+#endif
18022+
18023+#endif
18024+#endif
18025+
18026+ }
18027+
18028 return retval;
18029 }
18030
18031@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18032 }
18033 }
18034
18035+#ifdef CONFIG_PAX_SEGMEXEC
18036+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18037+ error = -EINVAL;
18038+ goto out_unlock;
18039+ }
18040+#endif
18041+
18042 fill_ldt(&ldt, &ldt_info);
18043 if (oldmode)
18044 ldt.avl = 0;
18045diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18046index c1c429d..f02eaf9 100644
18047--- a/arch/x86/kernel/machine_kexec_32.c
18048+++ b/arch/x86/kernel/machine_kexec_32.c
18049@@ -26,7 +26,7 @@
18050 #include <asm/system.h>
18051 #include <asm/cacheflush.h>
18052
18053-static void set_idt(void *newidt, __u16 limit)
18054+static void set_idt(struct desc_struct *newidt, __u16 limit)
18055 {
18056 struct desc_ptr curidt;
18057
18058@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18059 }
18060
18061
18062-static void set_gdt(void *newgdt, __u16 limit)
18063+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18064 {
18065 struct desc_ptr curgdt;
18066
18067@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18068 }
18069
18070 control_page = page_address(image->control_code_page);
18071- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18072+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18073
18074 relocate_kernel_ptr = control_page;
18075 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18076diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18077index 1e47679..e73449d 100644
18078--- a/arch/x86/kernel/microcode_amd.c
18079+++ b/arch/x86/kernel/microcode_amd.c
18080@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18081 uci->mc = NULL;
18082 }
18083
18084-static struct microcode_ops microcode_amd_ops = {
18085+static const struct microcode_ops microcode_amd_ops = {
18086 .request_microcode_user = request_microcode_user,
18087 .request_microcode_fw = request_microcode_fw,
18088 .collect_cpu_info = collect_cpu_info_amd,
18089@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18090 .microcode_fini_cpu = microcode_fini_cpu_amd,
18091 };
18092
18093-struct microcode_ops * __init init_amd_microcode(void)
18094+const struct microcode_ops * __init init_amd_microcode(void)
18095 {
18096 return &microcode_amd_ops;
18097 }
18098diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18099index 378e9a8..b5a6ea9 100644
18100--- a/arch/x86/kernel/microcode_core.c
18101+++ b/arch/x86/kernel/microcode_core.c
18102@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18103
18104 #define MICROCODE_VERSION "2.00"
18105
18106-static struct microcode_ops *microcode_ops;
18107+static const struct microcode_ops *microcode_ops;
18108
18109 /*
18110 * Synchronization.
18111diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18112index 0d334dd..14cedaf 100644
18113--- a/arch/x86/kernel/microcode_intel.c
18114+++ b/arch/x86/kernel/microcode_intel.c
18115@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18116
18117 static int get_ucode_user(void *to, const void *from, size_t n)
18118 {
18119- return copy_from_user(to, from, n);
18120+ return copy_from_user(to, (const void __force_user *)from, n);
18121 }
18122
18123 static enum ucode_state
18124 request_microcode_user(int cpu, const void __user *buf, size_t size)
18125 {
18126- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18127+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18128 }
18129
18130 static void microcode_fini_cpu(int cpu)
18131@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18132 uci->mc = NULL;
18133 }
18134
18135-static struct microcode_ops microcode_intel_ops = {
18136+static const struct microcode_ops microcode_intel_ops = {
18137 .request_microcode_user = request_microcode_user,
18138 .request_microcode_fw = request_microcode_fw,
18139 .collect_cpu_info = collect_cpu_info,
18140@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18141 .microcode_fini_cpu = microcode_fini_cpu,
18142 };
18143
18144-struct microcode_ops * __init init_intel_microcode(void)
18145+const struct microcode_ops * __init init_intel_microcode(void)
18146 {
18147 return &microcode_intel_ops;
18148 }
18149diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18150index 89f386f..9028f51 100644
18151--- a/arch/x86/kernel/module.c
18152+++ b/arch/x86/kernel/module.c
18153@@ -34,7 +34,7 @@
18154 #define DEBUGP(fmt...)
18155 #endif
18156
18157-void *module_alloc(unsigned long size)
18158+static void *__module_alloc(unsigned long size, pgprot_t prot)
18159 {
18160 struct vm_struct *area;
18161
18162@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18163 if (!area)
18164 return NULL;
18165
18166- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18167- PAGE_KERNEL_EXEC);
18168+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18169+}
18170+
18171+void *module_alloc(unsigned long size)
18172+{
18173+
18174+#ifdef CONFIG_PAX_KERNEXEC
18175+ return __module_alloc(size, PAGE_KERNEL);
18176+#else
18177+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18178+#endif
18179+
18180 }
18181
18182 /* Free memory returned from module_alloc */
18183@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18184 vfree(module_region);
18185 }
18186
18187+#ifdef CONFIG_PAX_KERNEXEC
18188+#ifdef CONFIG_X86_32
18189+void *module_alloc_exec(unsigned long size)
18190+{
18191+ struct vm_struct *area;
18192+
18193+ if (size == 0)
18194+ return NULL;
18195+
18196+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18197+ return area ? area->addr : NULL;
18198+}
18199+EXPORT_SYMBOL(module_alloc_exec);
18200+
18201+void module_free_exec(struct module *mod, void *module_region)
18202+{
18203+ vunmap(module_region);
18204+}
18205+EXPORT_SYMBOL(module_free_exec);
18206+#else
18207+void module_free_exec(struct module *mod, void *module_region)
18208+{
18209+ module_free(mod, module_region);
18210+}
18211+EXPORT_SYMBOL(module_free_exec);
18212+
18213+void *module_alloc_exec(unsigned long size)
18214+{
18215+ return __module_alloc(size, PAGE_KERNEL_RX);
18216+}
18217+EXPORT_SYMBOL(module_alloc_exec);
18218+#endif
18219+#endif
18220+
18221 /* We don't need anything special. */
18222 int module_frob_arch_sections(Elf_Ehdr *hdr,
18223 Elf_Shdr *sechdrs,
18224@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18225 unsigned int i;
18226 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18227 Elf32_Sym *sym;
18228- uint32_t *location;
18229+ uint32_t *plocation, location;
18230
18231 DEBUGP("Applying relocate section %u to %u\n", relsec,
18232 sechdrs[relsec].sh_info);
18233 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18234 /* This is where to make the change */
18235- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18236- + rel[i].r_offset;
18237+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18238+ location = (uint32_t)plocation;
18239+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18240+ plocation = ktla_ktva((void *)plocation);
18241 /* This is the symbol it is referring to. Note that all
18242 undefined symbols have been resolved. */
18243 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18244@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18245 switch (ELF32_R_TYPE(rel[i].r_info)) {
18246 case R_386_32:
18247 /* We add the value into the location given */
18248- *location += sym->st_value;
18249+ pax_open_kernel();
18250+ *plocation += sym->st_value;
18251+ pax_close_kernel();
18252 break;
18253 case R_386_PC32:
18254 /* Add the value, subtract its postition */
18255- *location += sym->st_value - (uint32_t)location;
18256+ pax_open_kernel();
18257+ *plocation += sym->st_value - location;
18258+ pax_close_kernel();
18259 break;
18260 default:
18261 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18262@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18263 case R_X86_64_NONE:
18264 break;
18265 case R_X86_64_64:
18266+ pax_open_kernel();
18267 *(u64 *)loc = val;
18268+ pax_close_kernel();
18269 break;
18270 case R_X86_64_32:
18271+ pax_open_kernel();
18272 *(u32 *)loc = val;
18273+ pax_close_kernel();
18274 if (val != *(u32 *)loc)
18275 goto overflow;
18276 break;
18277 case R_X86_64_32S:
18278+ pax_open_kernel();
18279 *(s32 *)loc = val;
18280+ pax_close_kernel();
18281 if ((s64)val != *(s32 *)loc)
18282 goto overflow;
18283 break;
18284 case R_X86_64_PC32:
18285 val -= (u64)loc;
18286+ pax_open_kernel();
18287 *(u32 *)loc = val;
18288+ pax_close_kernel();
18289+
18290 #if 0
18291 if ((s64)val != *(s32 *)loc)
18292 goto overflow;
18293diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18294index 3a7c5a4..9191528 100644
18295--- a/arch/x86/kernel/paravirt-spinlocks.c
18296+++ b/arch/x86/kernel/paravirt-spinlocks.c
18297@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18298 __raw_spin_lock(lock);
18299 }
18300
18301-struct pv_lock_ops pv_lock_ops = {
18302+struct pv_lock_ops pv_lock_ops __read_only = {
18303 #ifdef CONFIG_SMP
18304 .spin_is_locked = __ticket_spin_is_locked,
18305 .spin_is_contended = __ticket_spin_is_contended,
18306diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18307index 1b1739d..dea6077 100644
18308--- a/arch/x86/kernel/paravirt.c
18309+++ b/arch/x86/kernel/paravirt.c
18310@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18311 {
18312 return x;
18313 }
18314+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18315+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18316+#endif
18317
18318 void __init default_banner(void)
18319 {
18320@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18321 * corresponding structure. */
18322 static void *get_call_destination(u8 type)
18323 {
18324- struct paravirt_patch_template tmpl = {
18325+ const struct paravirt_patch_template tmpl = {
18326 .pv_init_ops = pv_init_ops,
18327 .pv_time_ops = pv_time_ops,
18328 .pv_cpu_ops = pv_cpu_ops,
18329@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18330 .pv_lock_ops = pv_lock_ops,
18331 #endif
18332 };
18333+
18334+ pax_track_stack();
18335 return *((void **)&tmpl + type);
18336 }
18337
18338@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18339 if (opfunc == NULL)
18340 /* If there's no function, patch it with a ud2a (BUG) */
18341 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18342- else if (opfunc == _paravirt_nop)
18343+ else if (opfunc == (void *)_paravirt_nop)
18344 /* If the operation is a nop, then nop the callsite */
18345 ret = paravirt_patch_nop();
18346
18347 /* identity functions just return their single argument */
18348- else if (opfunc == _paravirt_ident_32)
18349+ else if (opfunc == (void *)_paravirt_ident_32)
18350 ret = paravirt_patch_ident_32(insnbuf, len);
18351- else if (opfunc == _paravirt_ident_64)
18352+ else if (opfunc == (void *)_paravirt_ident_64)
18353 ret = paravirt_patch_ident_64(insnbuf, len);
18354+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18355+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18356+ ret = paravirt_patch_ident_64(insnbuf, len);
18357+#endif
18358
18359 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18360 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18361@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18362 if (insn_len > len || start == NULL)
18363 insn_len = len;
18364 else
18365- memcpy(insnbuf, start, insn_len);
18366+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18367
18368 return insn_len;
18369 }
18370@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18371 preempt_enable();
18372 }
18373
18374-struct pv_info pv_info = {
18375+struct pv_info pv_info __read_only = {
18376 .name = "bare hardware",
18377 .paravirt_enabled = 0,
18378 .kernel_rpl = 0,
18379 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18380 };
18381
18382-struct pv_init_ops pv_init_ops = {
18383+struct pv_init_ops pv_init_ops __read_only = {
18384 .patch = native_patch,
18385 };
18386
18387-struct pv_time_ops pv_time_ops = {
18388+struct pv_time_ops pv_time_ops __read_only = {
18389 .sched_clock = native_sched_clock,
18390 };
18391
18392-struct pv_irq_ops pv_irq_ops = {
18393+struct pv_irq_ops pv_irq_ops __read_only = {
18394 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18395 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18396 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18397@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18398 #endif
18399 };
18400
18401-struct pv_cpu_ops pv_cpu_ops = {
18402+struct pv_cpu_ops pv_cpu_ops __read_only = {
18403 .cpuid = native_cpuid,
18404 .get_debugreg = native_get_debugreg,
18405 .set_debugreg = native_set_debugreg,
18406@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18407 .end_context_switch = paravirt_nop,
18408 };
18409
18410-struct pv_apic_ops pv_apic_ops = {
18411+struct pv_apic_ops pv_apic_ops __read_only = {
18412 #ifdef CONFIG_X86_LOCAL_APIC
18413 .startup_ipi_hook = paravirt_nop,
18414 #endif
18415 };
18416
18417-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18418+#ifdef CONFIG_X86_32
18419+#ifdef CONFIG_X86_PAE
18420+/* 64-bit pagetable entries */
18421+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18422+#else
18423 /* 32-bit pagetable entries */
18424 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18425+#endif
18426 #else
18427 /* 64-bit pagetable entries */
18428 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18429 #endif
18430
18431-struct pv_mmu_ops pv_mmu_ops = {
18432+struct pv_mmu_ops pv_mmu_ops __read_only = {
18433
18434 .read_cr2 = native_read_cr2,
18435 .write_cr2 = native_write_cr2,
18436@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18437 .make_pud = PTE_IDENT,
18438
18439 .set_pgd = native_set_pgd,
18440+ .set_pgd_batched = native_set_pgd_batched,
18441 #endif
18442 #endif /* PAGETABLE_LEVELS >= 3 */
18443
18444@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18445 },
18446
18447 .set_fixmap = native_set_fixmap,
18448+
18449+#ifdef CONFIG_PAX_KERNEXEC
18450+ .pax_open_kernel = native_pax_open_kernel,
18451+ .pax_close_kernel = native_pax_close_kernel,
18452+#endif
18453+
18454 };
18455
18456 EXPORT_SYMBOL_GPL(pv_time_ops);
18457diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18458index 1a2d4b1..6a0dd55 100644
18459--- a/arch/x86/kernel/pci-calgary_64.c
18460+++ b/arch/x86/kernel/pci-calgary_64.c
18461@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18462 free_pages((unsigned long)vaddr, get_order(size));
18463 }
18464
18465-static struct dma_map_ops calgary_dma_ops = {
18466+static const struct dma_map_ops calgary_dma_ops = {
18467 .alloc_coherent = calgary_alloc_coherent,
18468 .free_coherent = calgary_free_coherent,
18469 .map_sg = calgary_map_sg,
18470diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18471index 6ac3931..42b4414 100644
18472--- a/arch/x86/kernel/pci-dma.c
18473+++ b/arch/x86/kernel/pci-dma.c
18474@@ -14,7 +14,7 @@
18475
18476 static int forbid_dac __read_mostly;
18477
18478-struct dma_map_ops *dma_ops;
18479+const struct dma_map_ops *dma_ops;
18480 EXPORT_SYMBOL(dma_ops);
18481
18482 static int iommu_sac_force __read_mostly;
18483@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18484
18485 int dma_supported(struct device *dev, u64 mask)
18486 {
18487- struct dma_map_ops *ops = get_dma_ops(dev);
18488+ const struct dma_map_ops *ops = get_dma_ops(dev);
18489
18490 #ifdef CONFIG_PCI
18491 if (mask > 0xffffffff && forbid_dac > 0) {
18492diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18493index 1c76691..e3632db 100644
18494--- a/arch/x86/kernel/pci-gart_64.c
18495+++ b/arch/x86/kernel/pci-gart_64.c
18496@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18497 return -1;
18498 }
18499
18500-static struct dma_map_ops gart_dma_ops = {
18501+static const struct dma_map_ops gart_dma_ops = {
18502 .map_sg = gart_map_sg,
18503 .unmap_sg = gart_unmap_sg,
18504 .map_page = gart_map_page,
18505diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18506index a3933d4..c898869 100644
18507--- a/arch/x86/kernel/pci-nommu.c
18508+++ b/arch/x86/kernel/pci-nommu.c
18509@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18510 flush_write_buffers();
18511 }
18512
18513-struct dma_map_ops nommu_dma_ops = {
18514+const struct dma_map_ops nommu_dma_ops = {
18515 .alloc_coherent = dma_generic_alloc_coherent,
18516 .free_coherent = nommu_free_coherent,
18517 .map_sg = nommu_map_sg,
18518diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18519index aaa6b78..4de1881 100644
18520--- a/arch/x86/kernel/pci-swiotlb.c
18521+++ b/arch/x86/kernel/pci-swiotlb.c
18522@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18523 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18524 }
18525
18526-static struct dma_map_ops swiotlb_dma_ops = {
18527+static const struct dma_map_ops swiotlb_dma_ops = {
18528 .mapping_error = swiotlb_dma_mapping_error,
18529 .alloc_coherent = x86_swiotlb_alloc_coherent,
18530 .free_coherent = swiotlb_free_coherent,
18531diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18532index fc6c84d..0312ca2 100644
18533--- a/arch/x86/kernel/process.c
18534+++ b/arch/x86/kernel/process.c
18535@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18536
18537 void free_thread_info(struct thread_info *ti)
18538 {
18539- free_thread_xstate(ti->task);
18540 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18541 }
18542
18543+static struct kmem_cache *task_struct_cachep;
18544+
18545 void arch_task_cache_init(void)
18546 {
18547- task_xstate_cachep =
18548- kmem_cache_create("task_xstate", xstate_size,
18549+ /* create a slab on which task_structs can be allocated */
18550+ task_struct_cachep =
18551+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18552+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18553+
18554+ task_xstate_cachep =
18555+ kmem_cache_create("task_xstate", xstate_size,
18556 __alignof__(union thread_xstate),
18557- SLAB_PANIC | SLAB_NOTRACK, NULL);
18558+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18559+}
18560+
18561+struct task_struct *alloc_task_struct(void)
18562+{
18563+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18564+}
18565+
18566+void free_task_struct(struct task_struct *task)
18567+{
18568+ free_thread_xstate(task);
18569+ kmem_cache_free(task_struct_cachep, task);
18570 }
18571
18572 /*
18573@@ -73,7 +90,7 @@ void exit_thread(void)
18574 unsigned long *bp = t->io_bitmap_ptr;
18575
18576 if (bp) {
18577- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18578+ struct tss_struct *tss = init_tss + get_cpu();
18579
18580 t->io_bitmap_ptr = NULL;
18581 clear_thread_flag(TIF_IO_BITMAP);
18582@@ -93,6 +110,9 @@ void flush_thread(void)
18583
18584 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18585
18586+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18587+ loadsegment(gs, 0);
18588+#endif
18589 tsk->thread.debugreg0 = 0;
18590 tsk->thread.debugreg1 = 0;
18591 tsk->thread.debugreg2 = 0;
18592@@ -307,7 +327,7 @@ void default_idle(void)
18593 EXPORT_SYMBOL(default_idle);
18594 #endif
18595
18596-void stop_this_cpu(void *dummy)
18597+__noreturn void stop_this_cpu(void *dummy)
18598 {
18599 local_irq_disable();
18600 /*
18601@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18602 }
18603 early_param("idle", idle_setup);
18604
18605-unsigned long arch_align_stack(unsigned long sp)
18606+#ifdef CONFIG_PAX_RANDKSTACK
18607+void pax_randomize_kstack(struct pt_regs *regs)
18608 {
18609- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18610- sp -= get_random_int() % 8192;
18611- return sp & ~0xf;
18612-}
18613+ struct thread_struct *thread = &current->thread;
18614+ unsigned long time;
18615
18616-unsigned long arch_randomize_brk(struct mm_struct *mm)
18617-{
18618- unsigned long range_end = mm->brk + 0x02000000;
18619- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18620+ if (!randomize_va_space)
18621+ return;
18622+
18623+ if (v8086_mode(regs))
18624+ return;
18625+
18626+ rdtscl(time);
18627+
18628+ /* P4 seems to return a 0 LSB, ignore it */
18629+#ifdef CONFIG_MPENTIUM4
18630+ time &= 0x3EUL;
18631+ time <<= 2;
18632+#elif defined(CONFIG_X86_64)
18633+ time &= 0xFUL;
18634+ time <<= 4;
18635+#else
18636+ time &= 0x1FUL;
18637+ time <<= 3;
18638+#endif
18639+
18640+ thread->sp0 ^= time;
18641+ load_sp0(init_tss + smp_processor_id(), thread);
18642+
18643+#ifdef CONFIG_X86_64
18644+ percpu_write(kernel_stack, thread->sp0);
18645+#endif
18646 }
18647+#endif
18648
18649diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18650index c40c432..6e1df72 100644
18651--- a/arch/x86/kernel/process_32.c
18652+++ b/arch/x86/kernel/process_32.c
18653@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18654 unsigned long thread_saved_pc(struct task_struct *tsk)
18655 {
18656 return ((unsigned long *)tsk->thread.sp)[3];
18657+//XXX return tsk->thread.eip;
18658 }
18659
18660 #ifndef CONFIG_SMP
18661@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18662 unsigned short ss, gs;
18663 const char *board;
18664
18665- if (user_mode_vm(regs)) {
18666+ if (user_mode(regs)) {
18667 sp = regs->sp;
18668 ss = regs->ss & 0xffff;
18669- gs = get_user_gs(regs);
18670 } else {
18671 sp = (unsigned long) (&regs->sp);
18672 savesegment(ss, ss);
18673- savesegment(gs, gs);
18674 }
18675+ gs = get_user_gs(regs);
18676
18677 printk("\n");
18678
18679@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18680 regs.bx = (unsigned long) fn;
18681 regs.dx = (unsigned long) arg;
18682
18683- regs.ds = __USER_DS;
18684- regs.es = __USER_DS;
18685+ regs.ds = __KERNEL_DS;
18686+ regs.es = __KERNEL_DS;
18687 regs.fs = __KERNEL_PERCPU;
18688- regs.gs = __KERNEL_STACK_CANARY;
18689+ savesegment(gs, regs.gs);
18690 regs.orig_ax = -1;
18691 regs.ip = (unsigned long) kernel_thread_helper;
18692 regs.cs = __KERNEL_CS | get_kernel_rpl();
18693@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18694 struct task_struct *tsk;
18695 int err;
18696
18697- childregs = task_pt_regs(p);
18698+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18699 *childregs = *regs;
18700 childregs->ax = 0;
18701 childregs->sp = sp;
18702
18703 p->thread.sp = (unsigned long) childregs;
18704 p->thread.sp0 = (unsigned long) (childregs+1);
18705+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18706
18707 p->thread.ip = (unsigned long) ret_from_fork;
18708
18709@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18710 struct thread_struct *prev = &prev_p->thread,
18711 *next = &next_p->thread;
18712 int cpu = smp_processor_id();
18713- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18714+ struct tss_struct *tss = init_tss + cpu;
18715 bool preload_fpu;
18716
18717 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18718@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18719 */
18720 lazy_save_gs(prev->gs);
18721
18722+#ifdef CONFIG_PAX_MEMORY_UDEREF
18723+ __set_fs(task_thread_info(next_p)->addr_limit);
18724+#endif
18725+
18726 /*
18727 * Load the per-thread Thread-Local Storage descriptor.
18728 */
18729@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18730 */
18731 arch_end_context_switch(next_p);
18732
18733+ percpu_write(current_task, next_p);
18734+ percpu_write(current_tinfo, &next_p->tinfo);
18735+
18736 if (preload_fpu)
18737 __math_state_restore();
18738
18739@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18740 if (prev->gs | next->gs)
18741 lazy_load_gs(next->gs);
18742
18743- percpu_write(current_task, next_p);
18744-
18745 return prev_p;
18746 }
18747
18748@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18749 } while (count++ < 16);
18750 return 0;
18751 }
18752-
18753diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18754index 39493bc..196816d 100644
18755--- a/arch/x86/kernel/process_64.c
18756+++ b/arch/x86/kernel/process_64.c
18757@@ -91,7 +91,7 @@ static void __exit_idle(void)
18758 void exit_idle(void)
18759 {
18760 /* idle loop has pid 0 */
18761- if (current->pid)
18762+ if (task_pid_nr(current))
18763 return;
18764 __exit_idle();
18765 }
18766@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18767 if (!board)
18768 board = "";
18769 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18770- current->pid, current->comm, print_tainted(),
18771+ task_pid_nr(current), current->comm, print_tainted(),
18772 init_utsname()->release,
18773 (int)strcspn(init_utsname()->version, " "),
18774 init_utsname()->version, board);
18775@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18776 struct pt_regs *childregs;
18777 struct task_struct *me = current;
18778
18779- childregs = ((struct pt_regs *)
18780- (THREAD_SIZE + task_stack_page(p))) - 1;
18781+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18782 *childregs = *regs;
18783
18784 childregs->ax = 0;
18785@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18786 p->thread.sp = (unsigned long) childregs;
18787 p->thread.sp0 = (unsigned long) (childregs+1);
18788 p->thread.usersp = me->thread.usersp;
18789+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18790
18791 set_tsk_thread_flag(p, TIF_FORK);
18792
18793@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18794 struct thread_struct *prev = &prev_p->thread;
18795 struct thread_struct *next = &next_p->thread;
18796 int cpu = smp_processor_id();
18797- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18798+ struct tss_struct *tss = init_tss + cpu;
18799 unsigned fsindex, gsindex;
18800 bool preload_fpu;
18801
18802@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18803 prev->usersp = percpu_read(old_rsp);
18804 percpu_write(old_rsp, next->usersp);
18805 percpu_write(current_task, next_p);
18806+ percpu_write(current_tinfo, &next_p->tinfo);
18807
18808- percpu_write(kernel_stack,
18809- (unsigned long)task_stack_page(next_p) +
18810- THREAD_SIZE - KERNEL_STACK_OFFSET);
18811+ percpu_write(kernel_stack, next->sp0);
18812
18813 /*
18814 * Now maybe reload the debug registers and handle I/O bitmaps
18815@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18816 if (!p || p == current || p->state == TASK_RUNNING)
18817 return 0;
18818 stack = (unsigned long)task_stack_page(p);
18819- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18820+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18821 return 0;
18822 fp = *(u64 *)(p->thread.sp);
18823 do {
18824- if (fp < (unsigned long)stack ||
18825- fp >= (unsigned long)stack+THREAD_SIZE)
18826+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18827 return 0;
18828 ip = *(u64 *)(fp+8);
18829 if (!in_sched_functions(ip))
18830diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18831index c06acdd..3f5fff5 100644
18832--- a/arch/x86/kernel/ptrace.c
18833+++ b/arch/x86/kernel/ptrace.c
18834@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18835 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18836 {
18837 int ret;
18838- unsigned long __user *datap = (unsigned long __user *)data;
18839+ unsigned long __user *datap = (__force unsigned long __user *)data;
18840
18841 switch (request) {
18842 /* read the word at location addr in the USER area. */
18843@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18844 if (addr < 0)
18845 return -EIO;
18846 ret = do_get_thread_area(child, addr,
18847- (struct user_desc __user *) data);
18848+ (__force struct user_desc __user *) data);
18849 break;
18850
18851 case PTRACE_SET_THREAD_AREA:
18852 if (addr < 0)
18853 return -EIO;
18854 ret = do_set_thread_area(child, addr,
18855- (struct user_desc __user *) data, 0);
18856+ (__force struct user_desc __user *) data, 0);
18857 break;
18858 #endif
18859
18860@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18861 #ifdef CONFIG_X86_PTRACE_BTS
18862 case PTRACE_BTS_CONFIG:
18863 ret = ptrace_bts_config
18864- (child, data, (struct ptrace_bts_config __user *)addr);
18865+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18866 break;
18867
18868 case PTRACE_BTS_STATUS:
18869 ret = ptrace_bts_status
18870- (child, data, (struct ptrace_bts_config __user *)addr);
18871+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18872 break;
18873
18874 case PTRACE_BTS_SIZE:
18875@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18876
18877 case PTRACE_BTS_GET:
18878 ret = ptrace_bts_read_record
18879- (child, data, (struct bts_struct __user *) addr);
18880+ (child, data, (__force struct bts_struct __user *) addr);
18881 break;
18882
18883 case PTRACE_BTS_CLEAR:
18884@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18885
18886 case PTRACE_BTS_DRAIN:
18887 ret = ptrace_bts_drain
18888- (child, data, (struct bts_struct __user *) addr);
18889+ (child, data, (__force struct bts_struct __user *) addr);
18890 break;
18891 #endif /* CONFIG_X86_PTRACE_BTS */
18892
18893@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18894 info.si_code = si_code;
18895
18896 /* User-mode ip? */
18897- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18898+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18899
18900 /* Send us the fake SIGTRAP */
18901 force_sig_info(SIGTRAP, &info, tsk);
18902@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18903 * We must return the syscall number to actually look up in the table.
18904 * This can be -1L to skip running any syscall at all.
18905 */
18906-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18907+long syscall_trace_enter(struct pt_regs *regs)
18908 {
18909 long ret = 0;
18910
18911@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18912 return ret ?: regs->orig_ax;
18913 }
18914
18915-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18916+void syscall_trace_leave(struct pt_regs *regs)
18917 {
18918 if (unlikely(current->audit_context))
18919 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18920diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18921index cf98100..e76e03d 100644
18922--- a/arch/x86/kernel/reboot.c
18923+++ b/arch/x86/kernel/reboot.c
18924@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18925 EXPORT_SYMBOL(pm_power_off);
18926
18927 static const struct desc_ptr no_idt = {};
18928-static int reboot_mode;
18929+static unsigned short reboot_mode;
18930 enum reboot_type reboot_type = BOOT_KBD;
18931 int reboot_force;
18932
18933@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18934 controller to pulse the CPU reset line, which is more thorough, but
18935 doesn't work with at least one type of 486 motherboard. It is easy
18936 to stop this code working; hence the copious comments. */
18937-static const unsigned long long
18938-real_mode_gdt_entries [3] =
18939+static struct desc_struct
18940+real_mode_gdt_entries [3] __read_only =
18941 {
18942- 0x0000000000000000ULL, /* Null descriptor */
18943- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18944- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18945+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18946+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18947+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18948 };
18949
18950 static const struct desc_ptr
18951@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18952 * specified by the code and length parameters.
18953 * We assume that length will aways be less that 100!
18954 */
18955-void machine_real_restart(const unsigned char *code, int length)
18956+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18957 {
18958 local_irq_disable();
18959
18960@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18961 /* Remap the kernel at virtual address zero, as well as offset zero
18962 from the kernel segment. This assumes the kernel segment starts at
18963 virtual address PAGE_OFFSET. */
18964- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18965- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18966+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18967+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18968
18969 /*
18970 * Use `swapper_pg_dir' as our page directory.
18971@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
18972 boot)". This seems like a fairly standard thing that gets set by
18973 REBOOT.COM programs, and the previous reset routine did this
18974 too. */
18975- *((unsigned short *)0x472) = reboot_mode;
18976+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18977
18978 /* For the switch to real mode, copy some code to low memory. It has
18979 to be in the first 64k because it is running in 16-bit mode, and it
18980 has to have the same physical and virtual address, because it turns
18981 off paging. Copy it near the end of the first page, out of the way
18982 of BIOS variables. */
18983- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
18984- real_mode_switch, sizeof (real_mode_switch));
18985- memcpy((void *)(0x1000 - 100), code, length);
18986+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
18987+ memcpy(__va(0x1000 - 100), code, length);
18988
18989 /* Set up the IDT for real mode. */
18990 load_idt(&real_mode_idt);
18991@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
18992 __asm__ __volatile__ ("ljmp $0x0008,%0"
18993 :
18994 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
18995+ do { } while (1);
18996 }
18997 #ifdef CONFIG_APM_MODULE
18998 EXPORT_SYMBOL(machine_real_restart);
18999@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19000 {
19001 }
19002
19003-static void native_machine_emergency_restart(void)
19004+__noreturn static void native_machine_emergency_restart(void)
19005 {
19006 int i;
19007
19008@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19009 #endif
19010 }
19011
19012-static void __machine_emergency_restart(int emergency)
19013+static __noreturn void __machine_emergency_restart(int emergency)
19014 {
19015 reboot_emergency = emergency;
19016 machine_ops.emergency_restart();
19017 }
19018
19019-static void native_machine_restart(char *__unused)
19020+static __noreturn void native_machine_restart(char *__unused)
19021 {
19022 printk("machine restart\n");
19023
19024@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19025 __machine_emergency_restart(0);
19026 }
19027
19028-static void native_machine_halt(void)
19029+static __noreturn void native_machine_halt(void)
19030 {
19031 /* stop other cpus and apics */
19032 machine_shutdown();
19033@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19034 stop_this_cpu(NULL);
19035 }
19036
19037-static void native_machine_power_off(void)
19038+__noreturn static void native_machine_power_off(void)
19039 {
19040 if (pm_power_off) {
19041 if (!reboot_force)
19042@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19043 }
19044 /* a fallback in case there is no PM info available */
19045 tboot_shutdown(TB_SHUTDOWN_HALT);
19046+ do { } while (1);
19047 }
19048
19049 struct machine_ops machine_ops = {
19050diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19051index 7a6f3b3..976a959 100644
19052--- a/arch/x86/kernel/relocate_kernel_64.S
19053+++ b/arch/x86/kernel/relocate_kernel_64.S
19054@@ -11,6 +11,7 @@
19055 #include <asm/kexec.h>
19056 #include <asm/processor-flags.h>
19057 #include <asm/pgtable_types.h>
19058+#include <asm/alternative-asm.h>
19059
19060 /*
19061 * Must be relocatable PIC code callable as a C function
19062@@ -167,6 +168,7 @@ identity_mapped:
19063 xorq %r14, %r14
19064 xorq %r15, %r15
19065
19066+ pax_force_retaddr 0, 1
19067 ret
19068
19069 1:
19070diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19071index 5449a26..0b6c759 100644
19072--- a/arch/x86/kernel/setup.c
19073+++ b/arch/x86/kernel/setup.c
19074@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19075
19076 if (!boot_params.hdr.root_flags)
19077 root_mountflags &= ~MS_RDONLY;
19078- init_mm.start_code = (unsigned long) _text;
19079- init_mm.end_code = (unsigned long) _etext;
19080+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19081+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19082 init_mm.end_data = (unsigned long) _edata;
19083 init_mm.brk = _brk_end;
19084
19085- code_resource.start = virt_to_phys(_text);
19086- code_resource.end = virt_to_phys(_etext)-1;
19087- data_resource.start = virt_to_phys(_etext);
19088+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19089+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19090+ data_resource.start = virt_to_phys(_sdata);
19091 data_resource.end = virt_to_phys(_edata)-1;
19092 bss_resource.start = virt_to_phys(&__bss_start);
19093 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19094diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19095index d559af9..524c6ad 100644
19096--- a/arch/x86/kernel/setup_percpu.c
19097+++ b/arch/x86/kernel/setup_percpu.c
19098@@ -25,19 +25,17 @@
19099 # define DBG(x...)
19100 #endif
19101
19102-DEFINE_PER_CPU(int, cpu_number);
19103+#ifdef CONFIG_SMP
19104+DEFINE_PER_CPU(unsigned int, cpu_number);
19105 EXPORT_PER_CPU_SYMBOL(cpu_number);
19106+#endif
19107
19108-#ifdef CONFIG_X86_64
19109 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19110-#else
19111-#define BOOT_PERCPU_OFFSET 0
19112-#endif
19113
19114 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19115 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19116
19117-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19118+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19119 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19120 };
19121 EXPORT_SYMBOL(__per_cpu_offset);
19122@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19123 {
19124 #ifdef CONFIG_X86_32
19125 struct desc_struct gdt;
19126+ unsigned long base = per_cpu_offset(cpu);
19127
19128- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19129- 0x2 | DESCTYPE_S, 0x8);
19130- gdt.s = 1;
19131+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19132+ 0x83 | DESCTYPE_S, 0xC);
19133 write_gdt_entry(get_cpu_gdt_table(cpu),
19134 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19135 #endif
19136@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19137 /* alrighty, percpu areas up and running */
19138 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19139 for_each_possible_cpu(cpu) {
19140+#ifdef CONFIG_CC_STACKPROTECTOR
19141+#ifdef CONFIG_X86_32
19142+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19143+#endif
19144+#endif
19145 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19146 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19147 per_cpu(cpu_number, cpu) = cpu;
19148@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19149 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19150 #endif
19151 #endif
19152+#ifdef CONFIG_CC_STACKPROTECTOR
19153+#ifdef CONFIG_X86_32
19154+ if (!cpu)
19155+ per_cpu(stack_canary.canary, cpu) = canary;
19156+#endif
19157+#endif
19158 /*
19159 * Up to this point, the boot CPU has been using .data.init
19160 * area. Reload any changed state for the boot CPU.
19161diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19162index 6a44a76..a9287a1 100644
19163--- a/arch/x86/kernel/signal.c
19164+++ b/arch/x86/kernel/signal.c
19165@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19166 * Align the stack pointer according to the i386 ABI,
19167 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19168 */
19169- sp = ((sp + 4) & -16ul) - 4;
19170+ sp = ((sp - 12) & -16ul) - 4;
19171 #else /* !CONFIG_X86_32 */
19172 sp = round_down(sp, 16) - 8;
19173 #endif
19174@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19175 * Return an always-bogus address instead so we will die with SIGSEGV.
19176 */
19177 if (onsigstack && !likely(on_sig_stack(sp)))
19178- return (void __user *)-1L;
19179+ return (__force void __user *)-1L;
19180
19181 /* save i387 state */
19182 if (used_math() && save_i387_xstate(*fpstate) < 0)
19183- return (void __user *)-1L;
19184+ return (__force void __user *)-1L;
19185
19186 return (void __user *)sp;
19187 }
19188@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19189 }
19190
19191 if (current->mm->context.vdso)
19192- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19193+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19194 else
19195- restorer = &frame->retcode;
19196+ restorer = (void __user *)&frame->retcode;
19197 if (ka->sa.sa_flags & SA_RESTORER)
19198 restorer = ka->sa.sa_restorer;
19199
19200@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19201 * reasons and because gdb uses it as a signature to notice
19202 * signal handler stack frames.
19203 */
19204- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19205+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19206
19207 if (err)
19208 return -EFAULT;
19209@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19210 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19211
19212 /* Set up to return from userspace. */
19213- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19214+ if (current->mm->context.vdso)
19215+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19216+ else
19217+ restorer = (void __user *)&frame->retcode;
19218 if (ka->sa.sa_flags & SA_RESTORER)
19219 restorer = ka->sa.sa_restorer;
19220 put_user_ex(restorer, &frame->pretcode);
19221@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19222 * reasons and because gdb uses it as a signature to notice
19223 * signal handler stack frames.
19224 */
19225- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19226+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19227 } put_user_catch(err);
19228
19229 if (err)
19230@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19231 int signr;
19232 sigset_t *oldset;
19233
19234+ pax_track_stack();
19235+
19236 /*
19237 * We want the common case to go fast, which is why we may in certain
19238 * cases get here from kernel mode. Just return without doing anything
19239@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19240 * X86_32: vm86 regs switched out by assembly code before reaching
19241 * here, so testing against kernel CS suffices.
19242 */
19243- if (!user_mode(regs))
19244+ if (!user_mode_novm(regs))
19245 return;
19246
19247 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19248diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19249index 7e8e905..64d5c32 100644
19250--- a/arch/x86/kernel/smpboot.c
19251+++ b/arch/x86/kernel/smpboot.c
19252@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19253 */
19254 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19255
19256-void cpu_hotplug_driver_lock()
19257+void cpu_hotplug_driver_lock(void)
19258 {
19259- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19260+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19261 }
19262
19263-void cpu_hotplug_driver_unlock()
19264+void cpu_hotplug_driver_unlock(void)
19265 {
19266- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19267+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19268 }
19269
19270 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19271@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19272 * target processor state.
19273 */
19274 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19275- (unsigned long)stack_start.sp);
19276+ stack_start);
19277
19278 /*
19279 * Run STARTUP IPI loop.
19280@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19281 set_idle_for_cpu(cpu, c_idle.idle);
19282 do_rest:
19283 per_cpu(current_task, cpu) = c_idle.idle;
19284+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19285 #ifdef CONFIG_X86_32
19286 /* Stack for startup_32 can be just as for start_secondary onwards */
19287 irq_ctx_init(cpu);
19288@@ -750,13 +751,15 @@ do_rest:
19289 #else
19290 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19291 initial_gs = per_cpu_offset(cpu);
19292- per_cpu(kernel_stack, cpu) =
19293- (unsigned long)task_stack_page(c_idle.idle) -
19294- KERNEL_STACK_OFFSET + THREAD_SIZE;
19295+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19296 #endif
19297+
19298+ pax_open_kernel();
19299 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19300+ pax_close_kernel();
19301+
19302 initial_code = (unsigned long)start_secondary;
19303- stack_start.sp = (void *) c_idle.idle->thread.sp;
19304+ stack_start = c_idle.idle->thread.sp;
19305
19306 /* start_ip had better be page-aligned! */
19307 start_ip = setup_trampoline();
19308@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19309
19310 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19311
19312+#ifdef CONFIG_PAX_PER_CPU_PGD
19313+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19314+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19315+ KERNEL_PGD_PTRS);
19316+#endif
19317+
19318 err = do_boot_cpu(apicid, cpu);
19319
19320 if (err) {
19321diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19322index 3149032..14f1053 100644
19323--- a/arch/x86/kernel/step.c
19324+++ b/arch/x86/kernel/step.c
19325@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19326 struct desc_struct *desc;
19327 unsigned long base;
19328
19329- seg &= ~7UL;
19330+ seg >>= 3;
19331
19332 mutex_lock(&child->mm->context.lock);
19333- if (unlikely((seg >> 3) >= child->mm->context.size))
19334+ if (unlikely(seg >= child->mm->context.size))
19335 addr = -1L; /* bogus selector, access would fault */
19336 else {
19337 desc = child->mm->context.ldt + seg;
19338@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19339 addr += base;
19340 }
19341 mutex_unlock(&child->mm->context.lock);
19342- }
19343+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19344+ addr = ktla_ktva(addr);
19345
19346 return addr;
19347 }
19348@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19349 unsigned char opcode[15];
19350 unsigned long addr = convert_ip_to_linear(child, regs);
19351
19352+ if (addr == -EINVAL)
19353+ return 0;
19354+
19355 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19356 for (i = 0; i < copied; i++) {
19357 switch (opcode[i]) {
19358@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19359
19360 #ifdef CONFIG_X86_64
19361 case 0x40 ... 0x4f:
19362- if (regs->cs != __USER_CS)
19363+ if ((regs->cs & 0xffff) != __USER_CS)
19364 /* 32-bit mode: register increment */
19365 return 0;
19366 /* 64-bit mode: REX prefix */
19367diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19368index dee1ff7..a397f7f 100644
19369--- a/arch/x86/kernel/sys_i386_32.c
19370+++ b/arch/x86/kernel/sys_i386_32.c
19371@@ -24,6 +24,21 @@
19372
19373 #include <asm/syscalls.h>
19374
19375+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19376+{
19377+ unsigned long pax_task_size = TASK_SIZE;
19378+
19379+#ifdef CONFIG_PAX_SEGMEXEC
19380+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19381+ pax_task_size = SEGMEXEC_TASK_SIZE;
19382+#endif
19383+
19384+ if (len > pax_task_size || addr > pax_task_size - len)
19385+ return -EINVAL;
19386+
19387+ return 0;
19388+}
19389+
19390 /*
19391 * Perform the select(nd, in, out, ex, tv) and mmap() system
19392 * calls. Linux/i386 didn't use to be able to handle more than
19393@@ -58,6 +73,212 @@ out:
19394 return err;
19395 }
19396
19397+unsigned long
19398+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19399+ unsigned long len, unsigned long pgoff, unsigned long flags)
19400+{
19401+ struct mm_struct *mm = current->mm;
19402+ struct vm_area_struct *vma;
19403+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19404+
19405+#ifdef CONFIG_PAX_SEGMEXEC
19406+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19407+ pax_task_size = SEGMEXEC_TASK_SIZE;
19408+#endif
19409+
19410+ pax_task_size -= PAGE_SIZE;
19411+
19412+ if (len > pax_task_size)
19413+ return -ENOMEM;
19414+
19415+ if (flags & MAP_FIXED)
19416+ return addr;
19417+
19418+#ifdef CONFIG_PAX_RANDMMAP
19419+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19420+#endif
19421+
19422+ if (addr) {
19423+ addr = PAGE_ALIGN(addr);
19424+ if (pax_task_size - len >= addr) {
19425+ vma = find_vma(mm, addr);
19426+ if (check_heap_stack_gap(vma, addr, len))
19427+ return addr;
19428+ }
19429+ }
19430+ if (len > mm->cached_hole_size) {
19431+ start_addr = addr = mm->free_area_cache;
19432+ } else {
19433+ start_addr = addr = mm->mmap_base;
19434+ mm->cached_hole_size = 0;
19435+ }
19436+
19437+#ifdef CONFIG_PAX_PAGEEXEC
19438+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19439+ start_addr = 0x00110000UL;
19440+
19441+#ifdef CONFIG_PAX_RANDMMAP
19442+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19443+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19444+#endif
19445+
19446+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19447+ start_addr = addr = mm->mmap_base;
19448+ else
19449+ addr = start_addr;
19450+ }
19451+#endif
19452+
19453+full_search:
19454+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19455+ /* At this point: (!vma || addr < vma->vm_end). */
19456+ if (pax_task_size - len < addr) {
19457+ /*
19458+ * Start a new search - just in case we missed
19459+ * some holes.
19460+ */
19461+ if (start_addr != mm->mmap_base) {
19462+ start_addr = addr = mm->mmap_base;
19463+ mm->cached_hole_size = 0;
19464+ goto full_search;
19465+ }
19466+ return -ENOMEM;
19467+ }
19468+ if (check_heap_stack_gap(vma, addr, len))
19469+ break;
19470+ if (addr + mm->cached_hole_size < vma->vm_start)
19471+ mm->cached_hole_size = vma->vm_start - addr;
19472+ addr = vma->vm_end;
19473+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19474+ start_addr = addr = mm->mmap_base;
19475+ mm->cached_hole_size = 0;
19476+ goto full_search;
19477+ }
19478+ }
19479+
19480+ /*
19481+ * Remember the place where we stopped the search:
19482+ */
19483+ mm->free_area_cache = addr + len;
19484+ return addr;
19485+}
19486+
19487+unsigned long
19488+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19489+ const unsigned long len, const unsigned long pgoff,
19490+ const unsigned long flags)
19491+{
19492+ struct vm_area_struct *vma;
19493+ struct mm_struct *mm = current->mm;
19494+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19495+
19496+#ifdef CONFIG_PAX_SEGMEXEC
19497+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19498+ pax_task_size = SEGMEXEC_TASK_SIZE;
19499+#endif
19500+
19501+ pax_task_size -= PAGE_SIZE;
19502+
19503+ /* requested length too big for entire address space */
19504+ if (len > pax_task_size)
19505+ return -ENOMEM;
19506+
19507+ if (flags & MAP_FIXED)
19508+ return addr;
19509+
19510+#ifdef CONFIG_PAX_PAGEEXEC
19511+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19512+ goto bottomup;
19513+#endif
19514+
19515+#ifdef CONFIG_PAX_RANDMMAP
19516+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19517+#endif
19518+
19519+ /* requesting a specific address */
19520+ if (addr) {
19521+ addr = PAGE_ALIGN(addr);
19522+ if (pax_task_size - len >= addr) {
19523+ vma = find_vma(mm, addr);
19524+ if (check_heap_stack_gap(vma, addr, len))
19525+ return addr;
19526+ }
19527+ }
19528+
19529+ /* check if free_area_cache is useful for us */
19530+ if (len <= mm->cached_hole_size) {
19531+ mm->cached_hole_size = 0;
19532+ mm->free_area_cache = mm->mmap_base;
19533+ }
19534+
19535+ /* either no address requested or can't fit in requested address hole */
19536+ addr = mm->free_area_cache;
19537+
19538+ /* make sure it can fit in the remaining address space */
19539+ if (addr > len) {
19540+ vma = find_vma(mm, addr-len);
19541+ if (check_heap_stack_gap(vma, addr - len, len))
19542+ /* remember the address as a hint for next time */
19543+ return (mm->free_area_cache = addr-len);
19544+ }
19545+
19546+ if (mm->mmap_base < len)
19547+ goto bottomup;
19548+
19549+ addr = mm->mmap_base-len;
19550+
19551+ do {
19552+ /*
19553+ * Lookup failure means no vma is above this address,
19554+ * else if new region fits below vma->vm_start,
19555+ * return with success:
19556+ */
19557+ vma = find_vma(mm, addr);
19558+ if (check_heap_stack_gap(vma, addr, len))
19559+ /* remember the address as a hint for next time */
19560+ return (mm->free_area_cache = addr);
19561+
19562+ /* remember the largest hole we saw so far */
19563+ if (addr + mm->cached_hole_size < vma->vm_start)
19564+ mm->cached_hole_size = vma->vm_start - addr;
19565+
19566+ /* try just below the current vma->vm_start */
19567+ addr = skip_heap_stack_gap(vma, len);
19568+ } while (!IS_ERR_VALUE(addr));
19569+
19570+bottomup:
19571+ /*
19572+ * A failed mmap() very likely causes application failure,
19573+ * so fall back to the bottom-up function here. This scenario
19574+ * can happen with large stack limits and large mmap()
19575+ * allocations.
19576+ */
19577+
19578+#ifdef CONFIG_PAX_SEGMEXEC
19579+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19580+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19581+ else
19582+#endif
19583+
19584+ mm->mmap_base = TASK_UNMAPPED_BASE;
19585+
19586+#ifdef CONFIG_PAX_RANDMMAP
19587+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19588+ mm->mmap_base += mm->delta_mmap;
19589+#endif
19590+
19591+ mm->free_area_cache = mm->mmap_base;
19592+ mm->cached_hole_size = ~0UL;
19593+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19594+ /*
19595+ * Restore the topdown base:
19596+ */
19597+ mm->mmap_base = base;
19598+ mm->free_area_cache = base;
19599+ mm->cached_hole_size = ~0UL;
19600+
19601+ return addr;
19602+}
19603
19604 struct sel_arg_struct {
19605 unsigned long n;
19606@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19607 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19608 case SEMTIMEDOP:
19609 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19610- (const struct timespec __user *)fifth);
19611+ (__force const struct timespec __user *)fifth);
19612
19613 case SEMGET:
19614 return sys_semget(first, second, third);
19615@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19616 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19617 if (ret)
19618 return ret;
19619- return put_user(raddr, (ulong __user *) third);
19620+ return put_user(raddr, (__force ulong __user *) third);
19621 }
19622 case 1: /* iBCS2 emulator entry point */
19623 if (!segment_eq(get_fs(), get_ds()))
19624@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19625
19626 return error;
19627 }
19628-
19629-
19630-/*
19631- * Do a system call from kernel instead of calling sys_execve so we
19632- * end up with proper pt_regs.
19633- */
19634-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19635-{
19636- long __res;
19637- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19638- : "=a" (__res)
19639- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19640- return __res;
19641-}
19642diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19643index 8aa2057..b604bc1 100644
19644--- a/arch/x86/kernel/sys_x86_64.c
19645+++ b/arch/x86/kernel/sys_x86_64.c
19646@@ -32,8 +32,8 @@ out:
19647 return error;
19648 }
19649
19650-static void find_start_end(unsigned long flags, unsigned long *begin,
19651- unsigned long *end)
19652+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19653+ unsigned long *begin, unsigned long *end)
19654 {
19655 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19656 unsigned long new_begin;
19657@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19658 *begin = new_begin;
19659 }
19660 } else {
19661- *begin = TASK_UNMAPPED_BASE;
19662+ *begin = mm->mmap_base;
19663 *end = TASK_SIZE;
19664 }
19665 }
19666@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19667 if (flags & MAP_FIXED)
19668 return addr;
19669
19670- find_start_end(flags, &begin, &end);
19671+ find_start_end(mm, flags, &begin, &end);
19672
19673 if (len > end)
19674 return -ENOMEM;
19675
19676+#ifdef CONFIG_PAX_RANDMMAP
19677+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19678+#endif
19679+
19680 if (addr) {
19681 addr = PAGE_ALIGN(addr);
19682 vma = find_vma(mm, addr);
19683- if (end - len >= addr &&
19684- (!vma || addr + len <= vma->vm_start))
19685+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19686 return addr;
19687 }
19688 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19689@@ -106,7 +109,7 @@ full_search:
19690 }
19691 return -ENOMEM;
19692 }
19693- if (!vma || addr + len <= vma->vm_start) {
19694+ if (check_heap_stack_gap(vma, addr, len)) {
19695 /*
19696 * Remember the place where we stopped the search:
19697 */
19698@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19699 {
19700 struct vm_area_struct *vma;
19701 struct mm_struct *mm = current->mm;
19702- unsigned long addr = addr0;
19703+ unsigned long base = mm->mmap_base, addr = addr0;
19704
19705 /* requested length too big for entire address space */
19706 if (len > TASK_SIZE)
19707@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19708 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19709 goto bottomup;
19710
19711+#ifdef CONFIG_PAX_RANDMMAP
19712+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19713+#endif
19714+
19715 /* requesting a specific address */
19716 if (addr) {
19717 addr = PAGE_ALIGN(addr);
19718- vma = find_vma(mm, addr);
19719- if (TASK_SIZE - len >= addr &&
19720- (!vma || addr + len <= vma->vm_start))
19721- return addr;
19722+ if (TASK_SIZE - len >= addr) {
19723+ vma = find_vma(mm, addr);
19724+ if (check_heap_stack_gap(vma, addr, len))
19725+ return addr;
19726+ }
19727 }
19728
19729 /* check if free_area_cache is useful for us */
19730@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19731 /* make sure it can fit in the remaining address space */
19732 if (addr > len) {
19733 vma = find_vma(mm, addr-len);
19734- if (!vma || addr <= vma->vm_start)
19735+ if (check_heap_stack_gap(vma, addr - len, len))
19736 /* remember the address as a hint for next time */
19737 return mm->free_area_cache = addr-len;
19738 }
19739@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19740 * return with success:
19741 */
19742 vma = find_vma(mm, addr);
19743- if (!vma || addr+len <= vma->vm_start)
19744+ if (check_heap_stack_gap(vma, addr, len))
19745 /* remember the address as a hint for next time */
19746 return mm->free_area_cache = addr;
19747
19748@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19749 mm->cached_hole_size = vma->vm_start - addr;
19750
19751 /* try just below the current vma->vm_start */
19752- addr = vma->vm_start-len;
19753- } while (len < vma->vm_start);
19754+ addr = skip_heap_stack_gap(vma, len);
19755+ } while (!IS_ERR_VALUE(addr));
19756
19757 bottomup:
19758 /*
19759@@ -198,13 +206,21 @@ bottomup:
19760 * can happen with large stack limits and large mmap()
19761 * allocations.
19762 */
19763+ mm->mmap_base = TASK_UNMAPPED_BASE;
19764+
19765+#ifdef CONFIG_PAX_RANDMMAP
19766+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19767+ mm->mmap_base += mm->delta_mmap;
19768+#endif
19769+
19770+ mm->free_area_cache = mm->mmap_base;
19771 mm->cached_hole_size = ~0UL;
19772- mm->free_area_cache = TASK_UNMAPPED_BASE;
19773 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19774 /*
19775 * Restore the topdown base:
19776 */
19777- mm->free_area_cache = mm->mmap_base;
19778+ mm->mmap_base = base;
19779+ mm->free_area_cache = base;
19780 mm->cached_hole_size = ~0UL;
19781
19782 return addr;
19783diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19784index 76d70a4..4c94a44 100644
19785--- a/arch/x86/kernel/syscall_table_32.S
19786+++ b/arch/x86/kernel/syscall_table_32.S
19787@@ -1,3 +1,4 @@
19788+.section .rodata,"a",@progbits
19789 ENTRY(sys_call_table)
19790 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19791 .long sys_exit
19792diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19793index 46b8277..3349d55 100644
19794--- a/arch/x86/kernel/tboot.c
19795+++ b/arch/x86/kernel/tboot.c
19796@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19797
19798 void tboot_shutdown(u32 shutdown_type)
19799 {
19800- void (*shutdown)(void);
19801+ void (* __noreturn shutdown)(void);
19802
19803 if (!tboot_enabled())
19804 return;
19805@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19806
19807 switch_to_tboot_pt();
19808
19809- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19810+ shutdown = (void *)tboot->shutdown_entry;
19811 shutdown();
19812
19813 /* should not reach here */
19814@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19815 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19816 }
19817
19818-static atomic_t ap_wfs_count;
19819+static atomic_unchecked_t ap_wfs_count;
19820
19821 static int tboot_wait_for_aps(int num_aps)
19822 {
19823@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19824 {
19825 switch (action) {
19826 case CPU_DYING:
19827- atomic_inc(&ap_wfs_count);
19828+ atomic_inc_unchecked(&ap_wfs_count);
19829 if (num_online_cpus() == 1)
19830- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19831+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19832 return NOTIFY_BAD;
19833 break;
19834 }
19835@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19836
19837 tboot_create_trampoline();
19838
19839- atomic_set(&ap_wfs_count, 0);
19840+ atomic_set_unchecked(&ap_wfs_count, 0);
19841 register_hotcpu_notifier(&tboot_cpu_notifier);
19842 return 0;
19843 }
19844diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19845index be25734..87fe232 100644
19846--- a/arch/x86/kernel/time.c
19847+++ b/arch/x86/kernel/time.c
19848@@ -26,17 +26,13 @@
19849 int timer_ack;
19850 #endif
19851
19852-#ifdef CONFIG_X86_64
19853-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19854-#endif
19855-
19856 unsigned long profile_pc(struct pt_regs *regs)
19857 {
19858 unsigned long pc = instruction_pointer(regs);
19859
19860- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19861+ if (!user_mode(regs) && in_lock_functions(pc)) {
19862 #ifdef CONFIG_FRAME_POINTER
19863- return *(unsigned long *)(regs->bp + sizeof(long));
19864+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19865 #else
19866 unsigned long *sp =
19867 (unsigned long *)kernel_stack_pointer(regs);
19868@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19869 * or above a saved flags. Eflags has bits 22-31 zero,
19870 * kernel addresses don't.
19871 */
19872+
19873+#ifdef CONFIG_PAX_KERNEXEC
19874+ return ktla_ktva(sp[0]);
19875+#else
19876 if (sp[0] >> 22)
19877 return sp[0];
19878 if (sp[1] >> 22)
19879 return sp[1];
19880 #endif
19881+
19882+#endif
19883 }
19884 return pc;
19885 }
19886diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19887index 6bb7b85..dd853e1 100644
19888--- a/arch/x86/kernel/tls.c
19889+++ b/arch/x86/kernel/tls.c
19890@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19891 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19892 return -EINVAL;
19893
19894+#ifdef CONFIG_PAX_SEGMEXEC
19895+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19896+ return -EINVAL;
19897+#endif
19898+
19899 set_tls_desc(p, idx, &info, 1);
19900
19901 return 0;
19902diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19903index 8508237..229b664 100644
19904--- a/arch/x86/kernel/trampoline_32.S
19905+++ b/arch/x86/kernel/trampoline_32.S
19906@@ -32,6 +32,12 @@
19907 #include <asm/segment.h>
19908 #include <asm/page_types.h>
19909
19910+#ifdef CONFIG_PAX_KERNEXEC
19911+#define ta(X) (X)
19912+#else
19913+#define ta(X) ((X) - __PAGE_OFFSET)
19914+#endif
19915+
19916 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19917 __CPUINITRODATA
19918 .code16
19919@@ -60,7 +66,7 @@ r_base = .
19920 inc %ax # protected mode (PE) bit
19921 lmsw %ax # into protected mode
19922 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19923- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19924+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19925
19926 # These need to be in the same 64K segment as the above;
19927 # hence we don't use the boot_gdt_descr defined in head.S
19928diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19929index 3af2dff..ba8aa49 100644
19930--- a/arch/x86/kernel/trampoline_64.S
19931+++ b/arch/x86/kernel/trampoline_64.S
19932@@ -91,7 +91,7 @@ startup_32:
19933 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19934 movl %eax, %ds
19935
19936- movl $X86_CR4_PAE, %eax
19937+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19938 movl %eax, %cr4 # Enable PAE mode
19939
19940 # Setup trampoline 4 level pagetables
19941@@ -127,7 +127,7 @@ startup_64:
19942 no_longmode:
19943 hlt
19944 jmp no_longmode
19945-#include "verify_cpu_64.S"
19946+#include "verify_cpu.S"
19947
19948 # Careful these need to be in the same 64K segment as the above;
19949 tidt:
19950@@ -138,7 +138,7 @@ tidt:
19951 # so the kernel can live anywhere
19952 .balign 4
19953 tgdt:
19954- .short tgdt_end - tgdt # gdt limit
19955+ .short tgdt_end - tgdt - 1 # gdt limit
19956 .long tgdt - r_base
19957 .short 0
19958 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19959diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19960index 7e37dce..ec3f8e5 100644
19961--- a/arch/x86/kernel/traps.c
19962+++ b/arch/x86/kernel/traps.c
19963@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19964
19965 /* Do we ignore FPU interrupts ? */
19966 char ignore_fpu_irq;
19967-
19968-/*
19969- * The IDT has to be page-aligned to simplify the Pentium
19970- * F0 0F bug workaround.
19971- */
19972-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19973 #endif
19974
19975 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19976@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19977 static inline void
19978 die_if_kernel(const char *str, struct pt_regs *regs, long err)
19979 {
19980- if (!user_mode_vm(regs))
19981+ if (!user_mode(regs))
19982 die(str, regs, err);
19983 }
19984 #endif
19985
19986 static void __kprobes
19987-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19988+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19989 long error_code, siginfo_t *info)
19990 {
19991 struct task_struct *tsk = current;
19992
19993 #ifdef CONFIG_X86_32
19994- if (regs->flags & X86_VM_MASK) {
19995+ if (v8086_mode(regs)) {
19996 /*
19997 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19998 * On nmi (interrupt 2), do_trap should not be called.
19999@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20000 }
20001 #endif
20002
20003- if (!user_mode(regs))
20004+ if (!user_mode_novm(regs))
20005 goto kernel_trap;
20006
20007 #ifdef CONFIG_X86_32
20008@@ -158,7 +152,7 @@ trap_signal:
20009 printk_ratelimit()) {
20010 printk(KERN_INFO
20011 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20012- tsk->comm, tsk->pid, str,
20013+ tsk->comm, task_pid_nr(tsk), str,
20014 regs->ip, regs->sp, error_code);
20015 print_vma_addr(" in ", regs->ip);
20016 printk("\n");
20017@@ -175,8 +169,20 @@ kernel_trap:
20018 if (!fixup_exception(regs)) {
20019 tsk->thread.error_code = error_code;
20020 tsk->thread.trap_no = trapnr;
20021+
20022+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20023+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20024+ str = "PAX: suspicious stack segment fault";
20025+#endif
20026+
20027 die(str, regs, error_code);
20028 }
20029+
20030+#ifdef CONFIG_PAX_REFCOUNT
20031+ if (trapnr == 4)
20032+ pax_report_refcount_overflow(regs);
20033+#endif
20034+
20035 return;
20036
20037 #ifdef CONFIG_X86_32
20038@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20039 conditional_sti(regs);
20040
20041 #ifdef CONFIG_X86_32
20042- if (regs->flags & X86_VM_MASK)
20043+ if (v8086_mode(regs))
20044 goto gp_in_vm86;
20045 #endif
20046
20047 tsk = current;
20048- if (!user_mode(regs))
20049+ if (!user_mode_novm(regs))
20050 goto gp_in_kernel;
20051
20052+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20053+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20054+ struct mm_struct *mm = tsk->mm;
20055+ unsigned long limit;
20056+
20057+ down_write(&mm->mmap_sem);
20058+ limit = mm->context.user_cs_limit;
20059+ if (limit < TASK_SIZE) {
20060+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20061+ up_write(&mm->mmap_sem);
20062+ return;
20063+ }
20064+ up_write(&mm->mmap_sem);
20065+ }
20066+#endif
20067+
20068 tsk->thread.error_code = error_code;
20069 tsk->thread.trap_no = 13;
20070
20071@@ -305,6 +327,13 @@ gp_in_kernel:
20072 if (notify_die(DIE_GPF, "general protection fault", regs,
20073 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20074 return;
20075+
20076+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20077+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20078+ die("PAX: suspicious general protection fault", regs, error_code);
20079+ else
20080+#endif
20081+
20082 die("general protection fault", regs, error_code);
20083 }
20084
20085@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20086 dotraplinkage notrace __kprobes void
20087 do_nmi(struct pt_regs *regs, long error_code)
20088 {
20089+
20090+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20091+ if (!user_mode(regs)) {
20092+ unsigned long cs = regs->cs & 0xFFFF;
20093+ unsigned long ip = ktva_ktla(regs->ip);
20094+
20095+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20096+ regs->ip = ip;
20097+ }
20098+#endif
20099+
20100 nmi_enter();
20101
20102 inc_irq_stat(__nmi_count);
20103@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20104 }
20105
20106 #ifdef CONFIG_X86_32
20107- if (regs->flags & X86_VM_MASK)
20108+ if (v8086_mode(regs))
20109 goto debug_vm86;
20110 #endif
20111
20112@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20113 * kernel space (but re-enable TF when returning to user mode).
20114 */
20115 if (condition & DR_STEP) {
20116- if (!user_mode(regs))
20117+ if (!user_mode_novm(regs))
20118 goto clear_TF_reenable;
20119 }
20120
20121@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20122 * Handle strange cache flush from user space exception
20123 * in all other cases. This is undocumented behaviour.
20124 */
20125- if (regs->flags & X86_VM_MASK) {
20126+ if (v8086_mode(regs)) {
20127 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20128 return;
20129 }
20130@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20131 void __math_state_restore(void)
20132 {
20133 struct thread_info *thread = current_thread_info();
20134- struct task_struct *tsk = thread->task;
20135+ struct task_struct *tsk = current;
20136
20137 /*
20138 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20139@@ -825,8 +865,7 @@ void __math_state_restore(void)
20140 */
20141 asmlinkage void math_state_restore(void)
20142 {
20143- struct thread_info *thread = current_thread_info();
20144- struct task_struct *tsk = thread->task;
20145+ struct task_struct *tsk = current;
20146
20147 if (!tsk_used_math(tsk)) {
20148 local_irq_enable();
20149diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20150new file mode 100644
20151index 0000000..50c5edd
20152--- /dev/null
20153+++ b/arch/x86/kernel/verify_cpu.S
20154@@ -0,0 +1,140 @@
20155+/*
20156+ *
20157+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20158+ * code has been borrowed from boot/setup.S and was introduced by
20159+ * Andi Kleen.
20160+ *
20161+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20162+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20163+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20164+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20165+ *
20166+ * This source code is licensed under the GNU General Public License,
20167+ * Version 2. See the file COPYING for more details.
20168+ *
20169+ * This is a common code for verification whether CPU supports
20170+ * long mode and SSE or not. It is not called directly instead this
20171+ * file is included at various places and compiled in that context.
20172+ * This file is expected to run in 32bit code. Currently:
20173+ *
20174+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20175+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20176+ * arch/x86/kernel/head_32.S: processor startup
20177+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20178+ *
20179+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20180+ * 0: Success 1: Failure
20181+ *
20182+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20183+ *
20184+ * The caller needs to check for the error code and take the action
20185+ * appropriately. Either display a message or halt.
20186+ */
20187+
20188+#include <asm/cpufeature.h>
20189+#include <asm/msr-index.h>
20190+
20191+verify_cpu:
20192+ pushfl # Save caller passed flags
20193+ pushl $0 # Kill any dangerous flags
20194+ popfl
20195+
20196+ pushfl # standard way to check for cpuid
20197+ popl %eax
20198+ movl %eax,%ebx
20199+ xorl $0x200000,%eax
20200+ pushl %eax
20201+ popfl
20202+ pushfl
20203+ popl %eax
20204+ cmpl %eax,%ebx
20205+ jz verify_cpu_no_longmode # cpu has no cpuid
20206+
20207+ movl $0x0,%eax # See if cpuid 1 is implemented
20208+ cpuid
20209+ cmpl $0x1,%eax
20210+ jb verify_cpu_no_longmode # no cpuid 1
20211+
20212+ xor %di,%di
20213+ cmpl $0x68747541,%ebx # AuthenticAMD
20214+ jnz verify_cpu_noamd
20215+ cmpl $0x69746e65,%edx
20216+ jnz verify_cpu_noamd
20217+ cmpl $0x444d4163,%ecx
20218+ jnz verify_cpu_noamd
20219+ mov $1,%di # cpu is from AMD
20220+ jmp verify_cpu_check
20221+
20222+verify_cpu_noamd:
20223+ cmpl $0x756e6547,%ebx # GenuineIntel?
20224+ jnz verify_cpu_check
20225+ cmpl $0x49656e69,%edx
20226+ jnz verify_cpu_check
20227+ cmpl $0x6c65746e,%ecx
20228+ jnz verify_cpu_check
20229+
20230+ # only call IA32_MISC_ENABLE when:
20231+ # family > 6 || (family == 6 && model >= 0xd)
20232+ movl $0x1, %eax # check CPU family and model
20233+ cpuid
20234+ movl %eax, %ecx
20235+
20236+ andl $0x0ff00f00, %eax # mask family and extended family
20237+ shrl $8, %eax
20238+ cmpl $6, %eax
20239+ ja verify_cpu_clear_xd # family > 6, ok
20240+ jb verify_cpu_check # family < 6, skip
20241+
20242+ andl $0x000f00f0, %ecx # mask model and extended model
20243+ shrl $4, %ecx
20244+ cmpl $0xd, %ecx
20245+ jb verify_cpu_check # family == 6, model < 0xd, skip
20246+
20247+verify_cpu_clear_xd:
20248+ movl $MSR_IA32_MISC_ENABLE, %ecx
20249+ rdmsr
20250+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20251+ jnc verify_cpu_check # only write MSR if bit was changed
20252+ wrmsr
20253+
20254+verify_cpu_check:
20255+ movl $0x1,%eax # Does the cpu have what it takes
20256+ cpuid
20257+ andl $REQUIRED_MASK0,%edx
20258+ xorl $REQUIRED_MASK0,%edx
20259+ jnz verify_cpu_no_longmode
20260+
20261+ movl $0x80000000,%eax # See if extended cpuid is implemented
20262+ cpuid
20263+ cmpl $0x80000001,%eax
20264+ jb verify_cpu_no_longmode # no extended cpuid
20265+
20266+ movl $0x80000001,%eax # Does the cpu have what it takes
20267+ cpuid
20268+ andl $REQUIRED_MASK1,%edx
20269+ xorl $REQUIRED_MASK1,%edx
20270+ jnz verify_cpu_no_longmode
20271+
20272+verify_cpu_sse_test:
20273+ movl $1,%eax
20274+ cpuid
20275+ andl $SSE_MASK,%edx
20276+ cmpl $SSE_MASK,%edx
20277+ je verify_cpu_sse_ok
20278+ test %di,%di
20279+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20280+ movl $MSR_K7_HWCR,%ecx
20281+ rdmsr
20282+ btr $15,%eax # enable SSE
20283+ wrmsr
20284+ xor %di,%di # don't loop
20285+ jmp verify_cpu_sse_test # try again
20286+
20287+verify_cpu_no_longmode:
20288+ popfl # Restore caller passed flags
20289+ movl $1,%eax
20290+ ret
20291+verify_cpu_sse_ok:
20292+ popfl # Restore caller passed flags
20293+ xorl %eax, %eax
20294+ ret
20295diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20296deleted file mode 100644
20297index 45b6f8a..0000000
20298--- a/arch/x86/kernel/verify_cpu_64.S
20299+++ /dev/null
20300@@ -1,105 +0,0 @@
20301-/*
20302- *
20303- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20304- * code has been borrowed from boot/setup.S and was introduced by
20305- * Andi Kleen.
20306- *
20307- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20308- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20309- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20310- *
20311- * This source code is licensed under the GNU General Public License,
20312- * Version 2. See the file COPYING for more details.
20313- *
20314- * This is a common code for verification whether CPU supports
20315- * long mode and SSE or not. It is not called directly instead this
20316- * file is included at various places and compiled in that context.
20317- * Following are the current usage.
20318- *
20319- * This file is included by both 16bit and 32bit code.
20320- *
20321- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20322- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20323- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20324- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20325- *
20326- * verify_cpu, returns the status of cpu check in register %eax.
20327- * 0: Success 1: Failure
20328- *
20329- * The caller needs to check for the error code and take the action
20330- * appropriately. Either display a message or halt.
20331- */
20332-
20333-#include <asm/cpufeature.h>
20334-
20335-verify_cpu:
20336- pushfl # Save caller passed flags
20337- pushl $0 # Kill any dangerous flags
20338- popfl
20339-
20340- pushfl # standard way to check for cpuid
20341- popl %eax
20342- movl %eax,%ebx
20343- xorl $0x200000,%eax
20344- pushl %eax
20345- popfl
20346- pushfl
20347- popl %eax
20348- cmpl %eax,%ebx
20349- jz verify_cpu_no_longmode # cpu has no cpuid
20350-
20351- movl $0x0,%eax # See if cpuid 1 is implemented
20352- cpuid
20353- cmpl $0x1,%eax
20354- jb verify_cpu_no_longmode # no cpuid 1
20355-
20356- xor %di,%di
20357- cmpl $0x68747541,%ebx # AuthenticAMD
20358- jnz verify_cpu_noamd
20359- cmpl $0x69746e65,%edx
20360- jnz verify_cpu_noamd
20361- cmpl $0x444d4163,%ecx
20362- jnz verify_cpu_noamd
20363- mov $1,%di # cpu is from AMD
20364-
20365-verify_cpu_noamd:
20366- movl $0x1,%eax # Does the cpu have what it takes
20367- cpuid
20368- andl $REQUIRED_MASK0,%edx
20369- xorl $REQUIRED_MASK0,%edx
20370- jnz verify_cpu_no_longmode
20371-
20372- movl $0x80000000,%eax # See if extended cpuid is implemented
20373- cpuid
20374- cmpl $0x80000001,%eax
20375- jb verify_cpu_no_longmode # no extended cpuid
20376-
20377- movl $0x80000001,%eax # Does the cpu have what it takes
20378- cpuid
20379- andl $REQUIRED_MASK1,%edx
20380- xorl $REQUIRED_MASK1,%edx
20381- jnz verify_cpu_no_longmode
20382-
20383-verify_cpu_sse_test:
20384- movl $1,%eax
20385- cpuid
20386- andl $SSE_MASK,%edx
20387- cmpl $SSE_MASK,%edx
20388- je verify_cpu_sse_ok
20389- test %di,%di
20390- jz verify_cpu_no_longmode # only try to force SSE on AMD
20391- movl $0xc0010015,%ecx # HWCR
20392- rdmsr
20393- btr $15,%eax # enable SSE
20394- wrmsr
20395- xor %di,%di # don't loop
20396- jmp verify_cpu_sse_test # try again
20397-
20398-verify_cpu_no_longmode:
20399- popfl # Restore caller passed flags
20400- movl $1,%eax
20401- ret
20402-verify_cpu_sse_ok:
20403- popfl # Restore caller passed flags
20404- xorl %eax, %eax
20405- ret
20406diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20407index 9c4e625..c992817 100644
20408--- a/arch/x86/kernel/vm86_32.c
20409+++ b/arch/x86/kernel/vm86_32.c
20410@@ -41,6 +41,7 @@
20411 #include <linux/ptrace.h>
20412 #include <linux/audit.h>
20413 #include <linux/stddef.h>
20414+#include <linux/grsecurity.h>
20415
20416 #include <asm/uaccess.h>
20417 #include <asm/io.h>
20418@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20419 do_exit(SIGSEGV);
20420 }
20421
20422- tss = &per_cpu(init_tss, get_cpu());
20423+ tss = init_tss + get_cpu();
20424 current->thread.sp0 = current->thread.saved_sp0;
20425 current->thread.sysenter_cs = __KERNEL_CS;
20426 load_sp0(tss, &current->thread);
20427@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20428 struct task_struct *tsk;
20429 int tmp, ret = -EPERM;
20430
20431+#ifdef CONFIG_GRKERNSEC_VM86
20432+ if (!capable(CAP_SYS_RAWIO)) {
20433+ gr_handle_vm86();
20434+ goto out;
20435+ }
20436+#endif
20437+
20438 tsk = current;
20439 if (tsk->thread.saved_sp0)
20440 goto out;
20441@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20442 int tmp, ret;
20443 struct vm86plus_struct __user *v86;
20444
20445+#ifdef CONFIG_GRKERNSEC_VM86
20446+ if (!capable(CAP_SYS_RAWIO)) {
20447+ gr_handle_vm86();
20448+ ret = -EPERM;
20449+ goto out;
20450+ }
20451+#endif
20452+
20453 tsk = current;
20454 switch (regs->bx) {
20455 case VM86_REQUEST_IRQ:
20456@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20457 tsk->thread.saved_fs = info->regs32->fs;
20458 tsk->thread.saved_gs = get_user_gs(info->regs32);
20459
20460- tss = &per_cpu(init_tss, get_cpu());
20461+ tss = init_tss + get_cpu();
20462 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20463 if (cpu_has_sep)
20464 tsk->thread.sysenter_cs = 0;
20465@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20466 goto cannot_handle;
20467 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20468 goto cannot_handle;
20469- intr_ptr = (unsigned long __user *) (i << 2);
20470+ intr_ptr = (__force unsigned long __user *) (i << 2);
20471 if (get_user(segoffs, intr_ptr))
20472 goto cannot_handle;
20473 if ((segoffs >> 16) == BIOSSEG)
20474diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20475index d430e4c..831f817 100644
20476--- a/arch/x86/kernel/vmi_32.c
20477+++ b/arch/x86/kernel/vmi_32.c
20478@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20479 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20480
20481 #define call_vrom_func(rom,func) \
20482- (((VROMFUNC *)(rom->func))())
20483+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20484
20485 #define call_vrom_long_func(rom,func,arg) \
20486- (((VROMLONGFUNC *)(rom->func)) (arg))
20487+({\
20488+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20489+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20490+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20491+ __reloc;\
20492+})
20493
20494-static struct vrom_header *vmi_rom;
20495+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20496 static int disable_pge;
20497 static int disable_pse;
20498 static int disable_sep;
20499@@ -76,10 +81,10 @@ static struct {
20500 void (*set_initial_ap_state)(int, int);
20501 void (*halt)(void);
20502 void (*set_lazy_mode)(int mode);
20503-} vmi_ops;
20504+} __no_const vmi_ops __read_only;
20505
20506 /* Cached VMI operations */
20507-struct vmi_timer_ops vmi_timer_ops;
20508+struct vmi_timer_ops vmi_timer_ops __read_only;
20509
20510 /*
20511 * VMI patching routines.
20512@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20513 static inline void patch_offset(void *insnbuf,
20514 unsigned long ip, unsigned long dest)
20515 {
20516- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20517+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20518 }
20519
20520 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20521@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20522 {
20523 u64 reloc;
20524 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20525+
20526 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20527 switch(rel->type) {
20528 case VMI_RELOCATION_CALL_REL:
20529@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20530
20531 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20532 {
20533- const pte_t pte = { .pte = 0 };
20534+ const pte_t pte = __pte(0ULL);
20535 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20536 }
20537
20538 static void vmi_pmd_clear(pmd_t *pmd)
20539 {
20540- const pte_t pte = { .pte = 0 };
20541+ const pte_t pte = __pte(0ULL);
20542 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20543 }
20544 #endif
20545@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20546 ap.ss = __KERNEL_DS;
20547 ap.esp = (unsigned long) start_esp;
20548
20549- ap.ds = __USER_DS;
20550- ap.es = __USER_DS;
20551+ ap.ds = __KERNEL_DS;
20552+ ap.es = __KERNEL_DS;
20553 ap.fs = __KERNEL_PERCPU;
20554- ap.gs = __KERNEL_STACK_CANARY;
20555+ savesegment(gs, ap.gs);
20556
20557 ap.eflags = 0;
20558
20559@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20560 paravirt_leave_lazy_mmu();
20561 }
20562
20563+#ifdef CONFIG_PAX_KERNEXEC
20564+static unsigned long vmi_pax_open_kernel(void)
20565+{
20566+ return 0;
20567+}
20568+
20569+static unsigned long vmi_pax_close_kernel(void)
20570+{
20571+ return 0;
20572+}
20573+#endif
20574+
20575 static inline int __init check_vmi_rom(struct vrom_header *rom)
20576 {
20577 struct pci_header *pci;
20578@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20579 return 0;
20580 if (rom->vrom_signature != VMI_SIGNATURE)
20581 return 0;
20582+ if (rom->rom_length * 512 > sizeof(*rom)) {
20583+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20584+ return 0;
20585+ }
20586 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20587 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20588 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20589@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20590 struct vrom_header *romstart;
20591 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20592 if (check_vmi_rom(romstart)) {
20593- vmi_rom = romstart;
20594+ vmi_rom = *romstart;
20595 return 1;
20596 }
20597 }
20598@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20599
20600 para_fill(pv_irq_ops.safe_halt, Halt);
20601
20602+#ifdef CONFIG_PAX_KERNEXEC
20603+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20604+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20605+#endif
20606+
20607 /*
20608 * Alternative instruction rewriting doesn't happen soon enough
20609 * to convert VMI_IRET to a call instead of a jump; so we have
20610@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20611
20612 void __init vmi_init(void)
20613 {
20614- if (!vmi_rom)
20615+ if (!vmi_rom.rom_signature)
20616 probe_vmi_rom();
20617 else
20618- check_vmi_rom(vmi_rom);
20619+ check_vmi_rom(&vmi_rom);
20620
20621 /* In case probing for or validating the ROM failed, basil */
20622- if (!vmi_rom)
20623+ if (!vmi_rom.rom_signature)
20624 return;
20625
20626- reserve_top_address(-vmi_rom->virtual_top);
20627+ reserve_top_address(-vmi_rom.virtual_top);
20628
20629 #ifdef CONFIG_X86_IO_APIC
20630 /* This is virtual hardware; timer routing is wired correctly */
20631@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20632 {
20633 unsigned long flags;
20634
20635- if (!vmi_rom)
20636+ if (!vmi_rom.rom_signature)
20637 return;
20638
20639 local_irq_save(flags);
20640diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20641index 3c68fe2..12c8280 100644
20642--- a/arch/x86/kernel/vmlinux.lds.S
20643+++ b/arch/x86/kernel/vmlinux.lds.S
20644@@ -26,6 +26,13 @@
20645 #include <asm/page_types.h>
20646 #include <asm/cache.h>
20647 #include <asm/boot.h>
20648+#include <asm/segment.h>
20649+
20650+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20651+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20652+#else
20653+#define __KERNEL_TEXT_OFFSET 0
20654+#endif
20655
20656 #undef i386 /* in case the preprocessor is a 32bit one */
20657
20658@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20659 #ifdef CONFIG_X86_32
20660 OUTPUT_ARCH(i386)
20661 ENTRY(phys_startup_32)
20662-jiffies = jiffies_64;
20663 #else
20664 OUTPUT_ARCH(i386:x86-64)
20665 ENTRY(phys_startup_64)
20666-jiffies_64 = jiffies;
20667 #endif
20668
20669 PHDRS {
20670 text PT_LOAD FLAGS(5); /* R_E */
20671- data PT_LOAD FLAGS(7); /* RWE */
20672+#ifdef CONFIG_X86_32
20673+ module PT_LOAD FLAGS(5); /* R_E */
20674+#endif
20675+#ifdef CONFIG_XEN
20676+ rodata PT_LOAD FLAGS(5); /* R_E */
20677+#else
20678+ rodata PT_LOAD FLAGS(4); /* R__ */
20679+#endif
20680+ data PT_LOAD FLAGS(6); /* RW_ */
20681 #ifdef CONFIG_X86_64
20682 user PT_LOAD FLAGS(5); /* R_E */
20683+#endif
20684+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20685 #ifdef CONFIG_SMP
20686 percpu PT_LOAD FLAGS(6); /* RW_ */
20687 #endif
20688+ text.init PT_LOAD FLAGS(5); /* R_E */
20689+ text.exit PT_LOAD FLAGS(5); /* R_E */
20690 init PT_LOAD FLAGS(7); /* RWE */
20691-#endif
20692 note PT_NOTE FLAGS(0); /* ___ */
20693 }
20694
20695 SECTIONS
20696 {
20697 #ifdef CONFIG_X86_32
20698- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20699- phys_startup_32 = startup_32 - LOAD_OFFSET;
20700+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20701 #else
20702- . = __START_KERNEL;
20703- phys_startup_64 = startup_64 - LOAD_OFFSET;
20704+ . = __START_KERNEL;
20705 #endif
20706
20707 /* Text and read-only data */
20708- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20709- _text = .;
20710+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20711 /* bootstrapping code */
20712+#ifdef CONFIG_X86_32
20713+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20714+#else
20715+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20716+#endif
20717+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20718+ _text = .;
20719 HEAD_TEXT
20720 #ifdef CONFIG_X86_32
20721 . = ALIGN(PAGE_SIZE);
20722@@ -82,28 +102,71 @@ SECTIONS
20723 IRQENTRY_TEXT
20724 *(.fixup)
20725 *(.gnu.warning)
20726- /* End of text section */
20727- _etext = .;
20728 } :text = 0x9090
20729
20730- NOTES :text :note
20731+ . += __KERNEL_TEXT_OFFSET;
20732
20733- EXCEPTION_TABLE(16) :text = 0x9090
20734+#ifdef CONFIG_X86_32
20735+ . = ALIGN(PAGE_SIZE);
20736+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20737+ *(.vmi.rom)
20738+ } :module
20739+
20740+ . = ALIGN(PAGE_SIZE);
20741+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20742+
20743+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20744+ MODULES_EXEC_VADDR = .;
20745+ BYTE(0)
20746+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20747+ . = ALIGN(HPAGE_SIZE);
20748+ MODULES_EXEC_END = . - 1;
20749+#endif
20750+
20751+ } :module
20752+#endif
20753+
20754+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20755+ /* End of text section */
20756+ _etext = . - __KERNEL_TEXT_OFFSET;
20757+ }
20758+
20759+#ifdef CONFIG_X86_32
20760+ . = ALIGN(PAGE_SIZE);
20761+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20762+ *(.idt)
20763+ . = ALIGN(PAGE_SIZE);
20764+ *(.empty_zero_page)
20765+ *(.swapper_pg_fixmap)
20766+ *(.swapper_pg_pmd)
20767+ *(.swapper_pg_dir)
20768+ *(.trampoline_pg_dir)
20769+ } :rodata
20770+#endif
20771+
20772+ . = ALIGN(PAGE_SIZE);
20773+ NOTES :rodata :note
20774+
20775+ EXCEPTION_TABLE(16) :rodata
20776
20777 RO_DATA(PAGE_SIZE)
20778
20779 /* Data */
20780 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20781+
20782+#ifdef CONFIG_PAX_KERNEXEC
20783+ . = ALIGN(HPAGE_SIZE);
20784+#else
20785+ . = ALIGN(PAGE_SIZE);
20786+#endif
20787+
20788 /* Start of data section */
20789 _sdata = .;
20790
20791 /* init_task */
20792 INIT_TASK_DATA(THREAD_SIZE)
20793
20794-#ifdef CONFIG_X86_32
20795- /* 32 bit has nosave before _edata */
20796 NOSAVE_DATA
20797-#endif
20798
20799 PAGE_ALIGNED_DATA(PAGE_SIZE)
20800
20801@@ -112,6 +175,8 @@ SECTIONS
20802 DATA_DATA
20803 CONSTRUCTORS
20804
20805+ jiffies = jiffies_64;
20806+
20807 /* rarely changed data like cpu maps */
20808 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20809
20810@@ -166,12 +231,6 @@ SECTIONS
20811 }
20812 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20813
20814- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20815- .jiffies : AT(VLOAD(.jiffies)) {
20816- *(.jiffies)
20817- }
20818- jiffies = VVIRT(.jiffies);
20819-
20820 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20821 *(.vsyscall_3)
20822 }
20823@@ -187,12 +246,19 @@ SECTIONS
20824 #endif /* CONFIG_X86_64 */
20825
20826 /* Init code and data - will be freed after init */
20827- . = ALIGN(PAGE_SIZE);
20828 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20829+ BYTE(0)
20830+
20831+#ifdef CONFIG_PAX_KERNEXEC
20832+ . = ALIGN(HPAGE_SIZE);
20833+#else
20834+ . = ALIGN(PAGE_SIZE);
20835+#endif
20836+
20837 __init_begin = .; /* paired with __init_end */
20838- }
20839+ } :init.begin
20840
20841-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20842+#ifdef CONFIG_SMP
20843 /*
20844 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20845 * output PHDR, so the next output section - .init.text - should
20846@@ -201,12 +267,27 @@ SECTIONS
20847 PERCPU_VADDR(0, :percpu)
20848 #endif
20849
20850- INIT_TEXT_SECTION(PAGE_SIZE)
20851-#ifdef CONFIG_X86_64
20852- :init
20853-#endif
20854+ . = ALIGN(PAGE_SIZE);
20855+ init_begin = .;
20856+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20857+ VMLINUX_SYMBOL(_sinittext) = .;
20858+ INIT_TEXT
20859+ VMLINUX_SYMBOL(_einittext) = .;
20860+ . = ALIGN(PAGE_SIZE);
20861+ } :text.init
20862
20863- INIT_DATA_SECTION(16)
20864+ /*
20865+ * .exit.text is discard at runtime, not link time, to deal with
20866+ * references from .altinstructions and .eh_frame
20867+ */
20868+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20869+ EXIT_TEXT
20870+ . = ALIGN(16);
20871+ } :text.exit
20872+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20873+
20874+ . = ALIGN(PAGE_SIZE);
20875+ INIT_DATA_SECTION(16) :init
20876
20877 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20878 __x86_cpu_dev_start = .;
20879@@ -232,19 +313,11 @@ SECTIONS
20880 *(.altinstr_replacement)
20881 }
20882
20883- /*
20884- * .exit.text is discard at runtime, not link time, to deal with
20885- * references from .altinstructions and .eh_frame
20886- */
20887- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20888- EXIT_TEXT
20889- }
20890-
20891 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20892 EXIT_DATA
20893 }
20894
20895-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20896+#ifndef CONFIG_SMP
20897 PERCPU(PAGE_SIZE)
20898 #endif
20899
20900@@ -267,12 +340,6 @@ SECTIONS
20901 . = ALIGN(PAGE_SIZE);
20902 }
20903
20904-#ifdef CONFIG_X86_64
20905- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20906- NOSAVE_DATA
20907- }
20908-#endif
20909-
20910 /* BSS */
20911 . = ALIGN(PAGE_SIZE);
20912 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20913@@ -288,6 +355,7 @@ SECTIONS
20914 __brk_base = .;
20915 . += 64 * 1024; /* 64k alignment slop space */
20916 *(.brk_reservation) /* areas brk users have reserved */
20917+ . = ALIGN(HPAGE_SIZE);
20918 __brk_limit = .;
20919 }
20920
20921@@ -316,13 +384,12 @@ SECTIONS
20922 * for the boot processor.
20923 */
20924 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20925-INIT_PER_CPU(gdt_page);
20926 INIT_PER_CPU(irq_stack_union);
20927
20928 /*
20929 * Build-time check on the image size:
20930 */
20931-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20932+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20933 "kernel image bigger than KERNEL_IMAGE_SIZE");
20934
20935 #ifdef CONFIG_SMP
20936diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20937index 62f39d7..3bc46a1 100644
20938--- a/arch/x86/kernel/vsyscall_64.c
20939+++ b/arch/x86/kernel/vsyscall_64.c
20940@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20941
20942 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20943 /* copy vsyscall data */
20944+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20945 vsyscall_gtod_data.clock.vread = clock->vread;
20946 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20947 vsyscall_gtod_data.clock.mask = clock->mask;
20948@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20949 We do this here because otherwise user space would do it on
20950 its own in a likely inferior way (no access to jiffies).
20951 If you don't like it pass NULL. */
20952- if (tcache && tcache->blob[0] == (j = __jiffies)) {
20953+ if (tcache && tcache->blob[0] == (j = jiffies)) {
20954 p = tcache->blob[1];
20955 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20956 /* Load per CPU data from RDTSCP */
20957diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20958index 3909e3b..5433a97 100644
20959--- a/arch/x86/kernel/x8664_ksyms_64.c
20960+++ b/arch/x86/kernel/x8664_ksyms_64.c
20961@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20962
20963 EXPORT_SYMBOL(copy_user_generic);
20964 EXPORT_SYMBOL(__copy_user_nocache);
20965-EXPORT_SYMBOL(copy_from_user);
20966-EXPORT_SYMBOL(copy_to_user);
20967 EXPORT_SYMBOL(__copy_from_user_inatomic);
20968
20969 EXPORT_SYMBOL(copy_page);
20970diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20971index c5ee17e..d63218f 100644
20972--- a/arch/x86/kernel/xsave.c
20973+++ b/arch/x86/kernel/xsave.c
20974@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20975 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20976 return -1;
20977
20978- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20979+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20980 fx_sw_user->extended_size -
20981 FP_XSTATE_MAGIC2_SIZE));
20982 /*
20983@@ -196,7 +196,7 @@ fx_only:
20984 * the other extended state.
20985 */
20986 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20987- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20988+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20989 }
20990
20991 /*
20992@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
20993 if (task_thread_info(tsk)->status & TS_XSAVE)
20994 err = restore_user_xstate(buf);
20995 else
20996- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20997+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
20998 buf);
20999 if (unlikely(err)) {
21000 /*
21001diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21002index 1350e43..a94b011 100644
21003--- a/arch/x86/kvm/emulate.c
21004+++ b/arch/x86/kvm/emulate.c
21005@@ -81,8 +81,8 @@
21006 #define Src2CL (1<<29)
21007 #define Src2ImmByte (2<<29)
21008 #define Src2One (3<<29)
21009-#define Src2Imm16 (4<<29)
21010-#define Src2Mask (7<<29)
21011+#define Src2Imm16 (4U<<29)
21012+#define Src2Mask (7U<<29)
21013
21014 enum {
21015 Group1_80, Group1_81, Group1_82, Group1_83,
21016@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21017
21018 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21019 do { \
21020+ unsigned long _tmp; \
21021 __asm__ __volatile__ ( \
21022 _PRE_EFLAGS("0", "4", "2") \
21023 _op _suffix " %"_x"3,%1; " \
21024@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21025 /* Raw emulation: instruction has two explicit operands. */
21026 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21027 do { \
21028- unsigned long _tmp; \
21029- \
21030 switch ((_dst).bytes) { \
21031 case 2: \
21032 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21033@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21034
21035 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21036 do { \
21037- unsigned long _tmp; \
21038 switch ((_dst).bytes) { \
21039 case 1: \
21040 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21041diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21042index 8dfeaaa..4daa395 100644
21043--- a/arch/x86/kvm/lapic.c
21044+++ b/arch/x86/kvm/lapic.c
21045@@ -52,7 +52,7 @@
21046 #define APIC_BUS_CYCLE_NS 1
21047
21048 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21049-#define apic_debug(fmt, arg...)
21050+#define apic_debug(fmt, arg...) do {} while (0)
21051
21052 #define APIC_LVT_NUM 6
21053 /* 14 is the version for Xeon and Pentium 8.4.8*/
21054diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21055index 3bc2707..dd157e2 100644
21056--- a/arch/x86/kvm/paging_tmpl.h
21057+++ b/arch/x86/kvm/paging_tmpl.h
21058@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21059 int level = PT_PAGE_TABLE_LEVEL;
21060 unsigned long mmu_seq;
21061
21062+ pax_track_stack();
21063+
21064 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21065 kvm_mmu_audit(vcpu, "pre page fault");
21066
21067@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21068 kvm_mmu_free_some_pages(vcpu);
21069 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21070 level, &write_pt, pfn);
21071+ (void)sptep;
21072 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21073 sptep, *sptep, write_pt);
21074
21075diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21076index 7c6e63e..c5d92c1 100644
21077--- a/arch/x86/kvm/svm.c
21078+++ b/arch/x86/kvm/svm.c
21079@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21080 int cpu = raw_smp_processor_id();
21081
21082 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21083+
21084+ pax_open_kernel();
21085 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21086+ pax_close_kernel();
21087+
21088 load_TR_desc();
21089 }
21090
21091@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21092 return true;
21093 }
21094
21095-static struct kvm_x86_ops svm_x86_ops = {
21096+static const struct kvm_x86_ops svm_x86_ops = {
21097 .cpu_has_kvm_support = has_svm,
21098 .disabled_by_bios = is_disabled,
21099 .hardware_setup = svm_hardware_setup,
21100diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21101index e6d925f..e7a4af8 100644
21102--- a/arch/x86/kvm/vmx.c
21103+++ b/arch/x86/kvm/vmx.c
21104@@ -570,7 +570,11 @@ static void reload_tss(void)
21105
21106 kvm_get_gdt(&gdt);
21107 descs = (void *)gdt.base;
21108+
21109+ pax_open_kernel();
21110 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21111+ pax_close_kernel();
21112+
21113 load_TR_desc();
21114 }
21115
21116@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21117 if (!cpu_has_vmx_flexpriority())
21118 flexpriority_enabled = 0;
21119
21120- if (!cpu_has_vmx_tpr_shadow())
21121- kvm_x86_ops->update_cr8_intercept = NULL;
21122+ if (!cpu_has_vmx_tpr_shadow()) {
21123+ pax_open_kernel();
21124+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21125+ pax_close_kernel();
21126+ }
21127
21128 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21129 kvm_disable_largepages();
21130@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21131 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21132
21133 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21134- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21135+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21136 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21137 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21138 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21139@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21140 "jmp .Lkvm_vmx_return \n\t"
21141 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21142 ".Lkvm_vmx_return: "
21143+
21144+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21145+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21146+ ".Lkvm_vmx_return2: "
21147+#endif
21148+
21149 /* Save guest registers, load host registers, keep flags */
21150 "xchg %0, (%%"R"sp) \n\t"
21151 "mov %%"R"ax, %c[rax](%0) \n\t"
21152@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21153 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21154 #endif
21155 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21156+
21157+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21158+ ,[cs]"i"(__KERNEL_CS)
21159+#endif
21160+
21161 : "cc", "memory"
21162- , R"bx", R"di", R"si"
21163+ , R"ax", R"bx", R"di", R"si"
21164 #ifdef CONFIG_X86_64
21165 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21166 #endif
21167@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21168 if (vmx->rmode.irq.pending)
21169 fixup_rmode_irq(vmx);
21170
21171- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21172+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21173+
21174+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21175+ loadsegment(fs, __KERNEL_PERCPU);
21176+#endif
21177+
21178+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21179+ __set_fs(current_thread_info()->addr_limit);
21180+#endif
21181+
21182 vmx->launched = 1;
21183
21184 vmx_complete_interrupts(vmx);
21185@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21186 return false;
21187 }
21188
21189-static struct kvm_x86_ops vmx_x86_ops = {
21190+static const struct kvm_x86_ops vmx_x86_ops = {
21191 .cpu_has_kvm_support = cpu_has_kvm_support,
21192 .disabled_by_bios = vmx_disabled_by_bios,
21193 .hardware_setup = hardware_setup,
21194diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21195index df1cefb..5e882ad 100644
21196--- a/arch/x86/kvm/x86.c
21197+++ b/arch/x86/kvm/x86.c
21198@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21199 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21200 struct kvm_cpuid_entry2 __user *entries);
21201
21202-struct kvm_x86_ops *kvm_x86_ops;
21203+const struct kvm_x86_ops *kvm_x86_ops;
21204 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21205
21206 int ignore_msrs = 0;
21207@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21208 struct kvm_cpuid2 *cpuid,
21209 struct kvm_cpuid_entry2 __user *entries)
21210 {
21211- int r;
21212+ int r, i;
21213
21214 r = -E2BIG;
21215 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21216 goto out;
21217 r = -EFAULT;
21218- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21219- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21220+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21221 goto out;
21222+ for (i = 0; i < cpuid->nent; ++i) {
21223+ struct kvm_cpuid_entry2 cpuid_entry;
21224+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21225+ goto out;
21226+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21227+ }
21228 vcpu->arch.cpuid_nent = cpuid->nent;
21229 kvm_apic_set_version(vcpu);
21230 return 0;
21231@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21232 struct kvm_cpuid2 *cpuid,
21233 struct kvm_cpuid_entry2 __user *entries)
21234 {
21235- int r;
21236+ int r, i;
21237
21238 vcpu_load(vcpu);
21239 r = -E2BIG;
21240 if (cpuid->nent < vcpu->arch.cpuid_nent)
21241 goto out;
21242 r = -EFAULT;
21243- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21244- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21245+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21246 goto out;
21247+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21248+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21249+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21250+ goto out;
21251+ }
21252 return 0;
21253
21254 out:
21255@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21256 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21257 struct kvm_interrupt *irq)
21258 {
21259- if (irq->irq < 0 || irq->irq >= 256)
21260+ if (irq->irq >= 256)
21261 return -EINVAL;
21262 if (irqchip_in_kernel(vcpu->kvm))
21263 return -ENXIO;
21264@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21265 .notifier_call = kvmclock_cpufreq_notifier
21266 };
21267
21268-int kvm_arch_init(void *opaque)
21269+int kvm_arch_init(const void *opaque)
21270 {
21271 int r, cpu;
21272- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21273+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21274
21275 if (kvm_x86_ops) {
21276 printk(KERN_ERR "kvm: already loaded the other module\n");
21277diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21278index 7e59dc1..b88c98f 100644
21279--- a/arch/x86/lguest/boot.c
21280+++ b/arch/x86/lguest/boot.c
21281@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21282 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21283 * Launcher to reboot us.
21284 */
21285-static void lguest_restart(char *reason)
21286+static __noreturn void lguest_restart(char *reason)
21287 {
21288 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21289+ BUG();
21290 }
21291
21292 /*G:050
21293diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21294index 824fa0b..c619e96 100644
21295--- a/arch/x86/lib/atomic64_32.c
21296+++ b/arch/x86/lib/atomic64_32.c
21297@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21298 }
21299 EXPORT_SYMBOL(atomic64_cmpxchg);
21300
21301+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21302+{
21303+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21304+}
21305+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21306+
21307 /**
21308 * atomic64_xchg - xchg atomic64 variable
21309 * @ptr: pointer to type atomic64_t
21310@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21311 EXPORT_SYMBOL(atomic64_xchg);
21312
21313 /**
21314+ * atomic64_xchg_unchecked - xchg atomic64 variable
21315+ * @ptr: pointer to type atomic64_unchecked_t
21316+ * @new_val: value to assign
21317+ *
21318+ * Atomically xchgs the value of @ptr to @new_val and returns
21319+ * the old value.
21320+ */
21321+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21322+{
21323+ /*
21324+ * Try first with a (possibly incorrect) assumption about
21325+ * what we have there. We'll do two loops most likely,
21326+ * but we'll get an ownership MESI transaction straight away
21327+ * instead of a read transaction followed by a
21328+ * flush-for-ownership transaction:
21329+ */
21330+ u64 old_val, real_val = 0;
21331+
21332+ do {
21333+ old_val = real_val;
21334+
21335+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21336+
21337+ } while (real_val != old_val);
21338+
21339+ return old_val;
21340+}
21341+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21342+
21343+/**
21344 * atomic64_set - set atomic64 variable
21345 * @ptr: pointer to type atomic64_t
21346 * @new_val: value to assign
21347@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21348 EXPORT_SYMBOL(atomic64_set);
21349
21350 /**
21351-EXPORT_SYMBOL(atomic64_read);
21352+ * atomic64_unchecked_set - set atomic64 variable
21353+ * @ptr: pointer to type atomic64_unchecked_t
21354+ * @new_val: value to assign
21355+ *
21356+ * Atomically sets the value of @ptr to @new_val.
21357+ */
21358+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21359+{
21360+ atomic64_xchg_unchecked(ptr, new_val);
21361+}
21362+EXPORT_SYMBOL(atomic64_set_unchecked);
21363+
21364+/**
21365 * atomic64_add_return - add and return
21366 * @delta: integer value to add
21367 * @ptr: pointer to type atomic64_t
21368@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21369 }
21370 EXPORT_SYMBOL(atomic64_add_return);
21371
21372+/**
21373+ * atomic64_add_return_unchecked - add and return
21374+ * @delta: integer value to add
21375+ * @ptr: pointer to type atomic64_unchecked_t
21376+ *
21377+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21378+ */
21379+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21380+{
21381+ /*
21382+ * Try first with a (possibly incorrect) assumption about
21383+ * what we have there. We'll do two loops most likely,
21384+ * but we'll get an ownership MESI transaction straight away
21385+ * instead of a read transaction followed by a
21386+ * flush-for-ownership transaction:
21387+ */
21388+ u64 old_val, new_val, real_val = 0;
21389+
21390+ do {
21391+ old_val = real_val;
21392+ new_val = old_val + delta;
21393+
21394+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21395+
21396+ } while (real_val != old_val);
21397+
21398+ return new_val;
21399+}
21400+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21401+
21402 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21403 {
21404 return atomic64_add_return(-delta, ptr);
21405 }
21406 EXPORT_SYMBOL(atomic64_sub_return);
21407
21408+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21409+{
21410+ return atomic64_add_return_unchecked(-delta, ptr);
21411+}
21412+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21413+
21414 u64 atomic64_inc_return(atomic64_t *ptr)
21415 {
21416 return atomic64_add_return(1, ptr);
21417 }
21418 EXPORT_SYMBOL(atomic64_inc_return);
21419
21420+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21421+{
21422+ return atomic64_add_return_unchecked(1, ptr);
21423+}
21424+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21425+
21426 u64 atomic64_dec_return(atomic64_t *ptr)
21427 {
21428 return atomic64_sub_return(1, ptr);
21429 }
21430 EXPORT_SYMBOL(atomic64_dec_return);
21431
21432+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21433+{
21434+ return atomic64_sub_return_unchecked(1, ptr);
21435+}
21436+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21437+
21438 /**
21439 * atomic64_add - add integer to atomic64 variable
21440 * @delta: integer value to add
21441@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21442 EXPORT_SYMBOL(atomic64_add);
21443
21444 /**
21445+ * atomic64_add_unchecked - add integer to atomic64 variable
21446+ * @delta: integer value to add
21447+ * @ptr: pointer to type atomic64_unchecked_t
21448+ *
21449+ * Atomically adds @delta to @ptr.
21450+ */
21451+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21452+{
21453+ atomic64_add_return_unchecked(delta, ptr);
21454+}
21455+EXPORT_SYMBOL(atomic64_add_unchecked);
21456+
21457+/**
21458 * atomic64_sub - subtract the atomic64 variable
21459 * @delta: integer value to subtract
21460 * @ptr: pointer to type atomic64_t
21461@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21462 EXPORT_SYMBOL(atomic64_sub);
21463
21464 /**
21465+ * atomic64_sub_unchecked - subtract the atomic64 variable
21466+ * @delta: integer value to subtract
21467+ * @ptr: pointer to type atomic64_unchecked_t
21468+ *
21469+ * Atomically subtracts @delta from @ptr.
21470+ */
21471+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21472+{
21473+ atomic64_add_unchecked(-delta, ptr);
21474+}
21475+EXPORT_SYMBOL(atomic64_sub_unchecked);
21476+
21477+/**
21478 * atomic64_sub_and_test - subtract value from variable and test result
21479 * @delta: integer value to subtract
21480 * @ptr: pointer to type atomic64_t
21481@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21482 EXPORT_SYMBOL(atomic64_inc);
21483
21484 /**
21485+ * atomic64_inc_unchecked - increment atomic64 variable
21486+ * @ptr: pointer to type atomic64_unchecked_t
21487+ *
21488+ * Atomically increments @ptr by 1.
21489+ */
21490+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21491+{
21492+ atomic64_add_unchecked(1, ptr);
21493+}
21494+EXPORT_SYMBOL(atomic64_inc_unchecked);
21495+
21496+/**
21497 * atomic64_dec - decrement atomic64 variable
21498 * @ptr: pointer to type atomic64_t
21499 *
21500@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21501 EXPORT_SYMBOL(atomic64_dec);
21502
21503 /**
21504+ * atomic64_dec_unchecked - decrement atomic64 variable
21505+ * @ptr: pointer to type atomic64_unchecked_t
21506+ *
21507+ * Atomically decrements @ptr by 1.
21508+ */
21509+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21510+{
21511+ atomic64_sub_unchecked(1, ptr);
21512+}
21513+EXPORT_SYMBOL(atomic64_dec_unchecked);
21514+
21515+/**
21516 * atomic64_dec_and_test - decrement and test
21517 * @ptr: pointer to type atomic64_t
21518 *
21519diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21520index adbccd0..98f96c8 100644
21521--- a/arch/x86/lib/checksum_32.S
21522+++ b/arch/x86/lib/checksum_32.S
21523@@ -28,7 +28,8 @@
21524 #include <linux/linkage.h>
21525 #include <asm/dwarf2.h>
21526 #include <asm/errno.h>
21527-
21528+#include <asm/segment.h>
21529+
21530 /*
21531 * computes a partial checksum, e.g. for TCP/UDP fragments
21532 */
21533@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21534
21535 #define ARGBASE 16
21536 #define FP 12
21537-
21538-ENTRY(csum_partial_copy_generic)
21539+
21540+ENTRY(csum_partial_copy_generic_to_user)
21541 CFI_STARTPROC
21542+
21543+#ifdef CONFIG_PAX_MEMORY_UDEREF
21544+ pushl %gs
21545+ CFI_ADJUST_CFA_OFFSET 4
21546+ popl %es
21547+ CFI_ADJUST_CFA_OFFSET -4
21548+ jmp csum_partial_copy_generic
21549+#endif
21550+
21551+ENTRY(csum_partial_copy_generic_from_user)
21552+
21553+#ifdef CONFIG_PAX_MEMORY_UDEREF
21554+ pushl %gs
21555+ CFI_ADJUST_CFA_OFFSET 4
21556+ popl %ds
21557+ CFI_ADJUST_CFA_OFFSET -4
21558+#endif
21559+
21560+ENTRY(csum_partial_copy_generic)
21561 subl $4,%esp
21562 CFI_ADJUST_CFA_OFFSET 4
21563 pushl %edi
21564@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21565 jmp 4f
21566 SRC(1: movw (%esi), %bx )
21567 addl $2, %esi
21568-DST( movw %bx, (%edi) )
21569+DST( movw %bx, %es:(%edi) )
21570 addl $2, %edi
21571 addw %bx, %ax
21572 adcl $0, %eax
21573@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21574 SRC(1: movl (%esi), %ebx )
21575 SRC( movl 4(%esi), %edx )
21576 adcl %ebx, %eax
21577-DST( movl %ebx, (%edi) )
21578+DST( movl %ebx, %es:(%edi) )
21579 adcl %edx, %eax
21580-DST( movl %edx, 4(%edi) )
21581+DST( movl %edx, %es:4(%edi) )
21582
21583 SRC( movl 8(%esi), %ebx )
21584 SRC( movl 12(%esi), %edx )
21585 adcl %ebx, %eax
21586-DST( movl %ebx, 8(%edi) )
21587+DST( movl %ebx, %es:8(%edi) )
21588 adcl %edx, %eax
21589-DST( movl %edx, 12(%edi) )
21590+DST( movl %edx, %es:12(%edi) )
21591
21592 SRC( movl 16(%esi), %ebx )
21593 SRC( movl 20(%esi), %edx )
21594 adcl %ebx, %eax
21595-DST( movl %ebx, 16(%edi) )
21596+DST( movl %ebx, %es:16(%edi) )
21597 adcl %edx, %eax
21598-DST( movl %edx, 20(%edi) )
21599+DST( movl %edx, %es:20(%edi) )
21600
21601 SRC( movl 24(%esi), %ebx )
21602 SRC( movl 28(%esi), %edx )
21603 adcl %ebx, %eax
21604-DST( movl %ebx, 24(%edi) )
21605+DST( movl %ebx, %es:24(%edi) )
21606 adcl %edx, %eax
21607-DST( movl %edx, 28(%edi) )
21608+DST( movl %edx, %es:28(%edi) )
21609
21610 lea 32(%esi), %esi
21611 lea 32(%edi), %edi
21612@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21613 shrl $2, %edx # This clears CF
21614 SRC(3: movl (%esi), %ebx )
21615 adcl %ebx, %eax
21616-DST( movl %ebx, (%edi) )
21617+DST( movl %ebx, %es:(%edi) )
21618 lea 4(%esi), %esi
21619 lea 4(%edi), %edi
21620 dec %edx
21621@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21622 jb 5f
21623 SRC( movw (%esi), %cx )
21624 leal 2(%esi), %esi
21625-DST( movw %cx, (%edi) )
21626+DST( movw %cx, %es:(%edi) )
21627 leal 2(%edi), %edi
21628 je 6f
21629 shll $16,%ecx
21630 SRC(5: movb (%esi), %cl )
21631-DST( movb %cl, (%edi) )
21632+DST( movb %cl, %es:(%edi) )
21633 6: addl %ecx, %eax
21634 adcl $0, %eax
21635 7:
21636@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21637
21638 6001:
21639 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21640- movl $-EFAULT, (%ebx)
21641+ movl $-EFAULT, %ss:(%ebx)
21642
21643 # zero the complete destination - computing the rest
21644 # is too much work
21645@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21646
21647 6002:
21648 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21649- movl $-EFAULT,(%ebx)
21650+ movl $-EFAULT,%ss:(%ebx)
21651 jmp 5000b
21652
21653 .previous
21654
21655+ pushl %ss
21656+ CFI_ADJUST_CFA_OFFSET 4
21657+ popl %ds
21658+ CFI_ADJUST_CFA_OFFSET -4
21659+ pushl %ss
21660+ CFI_ADJUST_CFA_OFFSET 4
21661+ popl %es
21662+ CFI_ADJUST_CFA_OFFSET -4
21663 popl %ebx
21664 CFI_ADJUST_CFA_OFFSET -4
21665 CFI_RESTORE ebx
21666@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21667 CFI_ADJUST_CFA_OFFSET -4
21668 ret
21669 CFI_ENDPROC
21670-ENDPROC(csum_partial_copy_generic)
21671+ENDPROC(csum_partial_copy_generic_to_user)
21672
21673 #else
21674
21675 /* Version for PentiumII/PPro */
21676
21677 #define ROUND1(x) \
21678+ nop; nop; nop; \
21679 SRC(movl x(%esi), %ebx ) ; \
21680 addl %ebx, %eax ; \
21681- DST(movl %ebx, x(%edi) ) ;
21682+ DST(movl %ebx, %es:x(%edi)) ;
21683
21684 #define ROUND(x) \
21685+ nop; nop; nop; \
21686 SRC(movl x(%esi), %ebx ) ; \
21687 adcl %ebx, %eax ; \
21688- DST(movl %ebx, x(%edi) ) ;
21689+ DST(movl %ebx, %es:x(%edi)) ;
21690
21691 #define ARGBASE 12
21692-
21693-ENTRY(csum_partial_copy_generic)
21694+
21695+ENTRY(csum_partial_copy_generic_to_user)
21696 CFI_STARTPROC
21697+
21698+#ifdef CONFIG_PAX_MEMORY_UDEREF
21699+ pushl %gs
21700+ CFI_ADJUST_CFA_OFFSET 4
21701+ popl %es
21702+ CFI_ADJUST_CFA_OFFSET -4
21703+ jmp csum_partial_copy_generic
21704+#endif
21705+
21706+ENTRY(csum_partial_copy_generic_from_user)
21707+
21708+#ifdef CONFIG_PAX_MEMORY_UDEREF
21709+ pushl %gs
21710+ CFI_ADJUST_CFA_OFFSET 4
21711+ popl %ds
21712+ CFI_ADJUST_CFA_OFFSET -4
21713+#endif
21714+
21715+ENTRY(csum_partial_copy_generic)
21716 pushl %ebx
21717 CFI_ADJUST_CFA_OFFSET 4
21718 CFI_REL_OFFSET ebx, 0
21719@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21720 subl %ebx, %edi
21721 lea -1(%esi),%edx
21722 andl $-32,%edx
21723- lea 3f(%ebx,%ebx), %ebx
21724+ lea 3f(%ebx,%ebx,2), %ebx
21725 testl %esi, %esi
21726 jmp *%ebx
21727 1: addl $64,%esi
21728@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21729 jb 5f
21730 SRC( movw (%esi), %dx )
21731 leal 2(%esi), %esi
21732-DST( movw %dx, (%edi) )
21733+DST( movw %dx, %es:(%edi) )
21734 leal 2(%edi), %edi
21735 je 6f
21736 shll $16,%edx
21737 5:
21738 SRC( movb (%esi), %dl )
21739-DST( movb %dl, (%edi) )
21740+DST( movb %dl, %es:(%edi) )
21741 6: addl %edx, %eax
21742 adcl $0, %eax
21743 7:
21744 .section .fixup, "ax"
21745 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21746- movl $-EFAULT, (%ebx)
21747+ movl $-EFAULT, %ss:(%ebx)
21748 # zero the complete destination (computing the rest is too much work)
21749 movl ARGBASE+8(%esp),%edi # dst
21750 movl ARGBASE+12(%esp),%ecx # len
21751@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21752 rep; stosb
21753 jmp 7b
21754 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21755- movl $-EFAULT, (%ebx)
21756+ movl $-EFAULT, %ss:(%ebx)
21757 jmp 7b
21758 .previous
21759
21760+#ifdef CONFIG_PAX_MEMORY_UDEREF
21761+ pushl %ss
21762+ CFI_ADJUST_CFA_OFFSET 4
21763+ popl %ds
21764+ CFI_ADJUST_CFA_OFFSET -4
21765+ pushl %ss
21766+ CFI_ADJUST_CFA_OFFSET 4
21767+ popl %es
21768+ CFI_ADJUST_CFA_OFFSET -4
21769+#endif
21770+
21771 popl %esi
21772 CFI_ADJUST_CFA_OFFSET -4
21773 CFI_RESTORE esi
21774@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21775 CFI_RESTORE ebx
21776 ret
21777 CFI_ENDPROC
21778-ENDPROC(csum_partial_copy_generic)
21779+ENDPROC(csum_partial_copy_generic_to_user)
21780
21781 #undef ROUND
21782 #undef ROUND1
21783diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21784index ebeafcc..1e3a402 100644
21785--- a/arch/x86/lib/clear_page_64.S
21786+++ b/arch/x86/lib/clear_page_64.S
21787@@ -1,5 +1,6 @@
21788 #include <linux/linkage.h>
21789 #include <asm/dwarf2.h>
21790+#include <asm/alternative-asm.h>
21791
21792 /*
21793 * Zero a page.
21794@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21795 movl $4096/8,%ecx
21796 xorl %eax,%eax
21797 rep stosq
21798+ pax_force_retaddr
21799 ret
21800 CFI_ENDPROC
21801 ENDPROC(clear_page_c)
21802@@ -33,6 +35,7 @@ ENTRY(clear_page)
21803 leaq 64(%rdi),%rdi
21804 jnz .Lloop
21805 nop
21806+ pax_force_retaddr
21807 ret
21808 CFI_ENDPROC
21809 .Lclear_page_end:
21810@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21811
21812 #include <asm/cpufeature.h>
21813
21814- .section .altinstr_replacement,"ax"
21815+ .section .altinstr_replacement,"a"
21816 1: .byte 0xeb /* jmp <disp8> */
21817 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21818 2:
21819diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21820index 727a5d4..333818a 100644
21821--- a/arch/x86/lib/copy_page_64.S
21822+++ b/arch/x86/lib/copy_page_64.S
21823@@ -2,12 +2,14 @@
21824
21825 #include <linux/linkage.h>
21826 #include <asm/dwarf2.h>
21827+#include <asm/alternative-asm.h>
21828
21829 ALIGN
21830 copy_page_c:
21831 CFI_STARTPROC
21832 movl $4096/8,%ecx
21833 rep movsq
21834+ pax_force_retaddr
21835 ret
21836 CFI_ENDPROC
21837 ENDPROC(copy_page_c)
21838@@ -38,7 +40,7 @@ ENTRY(copy_page)
21839 movq 16 (%rsi), %rdx
21840 movq 24 (%rsi), %r8
21841 movq 32 (%rsi), %r9
21842- movq 40 (%rsi), %r10
21843+ movq 40 (%rsi), %r13
21844 movq 48 (%rsi), %r11
21845 movq 56 (%rsi), %r12
21846
21847@@ -49,7 +51,7 @@ ENTRY(copy_page)
21848 movq %rdx, 16 (%rdi)
21849 movq %r8, 24 (%rdi)
21850 movq %r9, 32 (%rdi)
21851- movq %r10, 40 (%rdi)
21852+ movq %r13, 40 (%rdi)
21853 movq %r11, 48 (%rdi)
21854 movq %r12, 56 (%rdi)
21855
21856@@ -68,7 +70,7 @@ ENTRY(copy_page)
21857 movq 16 (%rsi), %rdx
21858 movq 24 (%rsi), %r8
21859 movq 32 (%rsi), %r9
21860- movq 40 (%rsi), %r10
21861+ movq 40 (%rsi), %r13
21862 movq 48 (%rsi), %r11
21863 movq 56 (%rsi), %r12
21864
21865@@ -77,7 +79,7 @@ ENTRY(copy_page)
21866 movq %rdx, 16 (%rdi)
21867 movq %r8, 24 (%rdi)
21868 movq %r9, 32 (%rdi)
21869- movq %r10, 40 (%rdi)
21870+ movq %r13, 40 (%rdi)
21871 movq %r11, 48 (%rdi)
21872 movq %r12, 56 (%rdi)
21873
21874@@ -94,6 +96,7 @@ ENTRY(copy_page)
21875 CFI_RESTORE r13
21876 addq $3*8,%rsp
21877 CFI_ADJUST_CFA_OFFSET -3*8
21878+ pax_force_retaddr
21879 ret
21880 .Lcopy_page_end:
21881 CFI_ENDPROC
21882@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21883
21884 #include <asm/cpufeature.h>
21885
21886- .section .altinstr_replacement,"ax"
21887+ .section .altinstr_replacement,"a"
21888 1: .byte 0xeb /* jmp <disp8> */
21889 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21890 2:
21891diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21892index af8debd..40c75f3 100644
21893--- a/arch/x86/lib/copy_user_64.S
21894+++ b/arch/x86/lib/copy_user_64.S
21895@@ -15,13 +15,15 @@
21896 #include <asm/asm-offsets.h>
21897 #include <asm/thread_info.h>
21898 #include <asm/cpufeature.h>
21899+#include <asm/pgtable.h>
21900+#include <asm/alternative-asm.h>
21901
21902 .macro ALTERNATIVE_JUMP feature,orig,alt
21903 0:
21904 .byte 0xe9 /* 32bit jump */
21905 .long \orig-1f /* by default jump to orig */
21906 1:
21907- .section .altinstr_replacement,"ax"
21908+ .section .altinstr_replacement,"a"
21909 2: .byte 0xe9 /* near jump with 32bit immediate */
21910 .long \alt-1b /* offset */ /* or alternatively to alt */
21911 .previous
21912@@ -64,55 +66,26 @@
21913 #endif
21914 .endm
21915
21916-/* Standard copy_to_user with segment limit checking */
21917-ENTRY(copy_to_user)
21918- CFI_STARTPROC
21919- GET_THREAD_INFO(%rax)
21920- movq %rdi,%rcx
21921- addq %rdx,%rcx
21922- jc bad_to_user
21923- cmpq TI_addr_limit(%rax),%rcx
21924- ja bad_to_user
21925- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21926- CFI_ENDPROC
21927-ENDPROC(copy_to_user)
21928-
21929-/* Standard copy_from_user with segment limit checking */
21930-ENTRY(copy_from_user)
21931- CFI_STARTPROC
21932- GET_THREAD_INFO(%rax)
21933- movq %rsi,%rcx
21934- addq %rdx,%rcx
21935- jc bad_from_user
21936- cmpq TI_addr_limit(%rax),%rcx
21937- ja bad_from_user
21938- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21939- CFI_ENDPROC
21940-ENDPROC(copy_from_user)
21941-
21942 ENTRY(copy_user_generic)
21943 CFI_STARTPROC
21944 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21945 CFI_ENDPROC
21946 ENDPROC(copy_user_generic)
21947
21948-ENTRY(__copy_from_user_inatomic)
21949- CFI_STARTPROC
21950- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21951- CFI_ENDPROC
21952-ENDPROC(__copy_from_user_inatomic)
21953-
21954 .section .fixup,"ax"
21955 /* must zero dest */
21956 ENTRY(bad_from_user)
21957 bad_from_user:
21958 CFI_STARTPROC
21959+ testl %edx,%edx
21960+ js bad_to_user
21961 movl %edx,%ecx
21962 xorl %eax,%eax
21963 rep
21964 stosb
21965 bad_to_user:
21966 movl %edx,%eax
21967+ pax_force_retaddr
21968 ret
21969 CFI_ENDPROC
21970 ENDPROC(bad_from_user)
21971@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21972 jz 17f
21973 1: movq (%rsi),%r8
21974 2: movq 1*8(%rsi),%r9
21975-3: movq 2*8(%rsi),%r10
21976+3: movq 2*8(%rsi),%rax
21977 4: movq 3*8(%rsi),%r11
21978 5: movq %r8,(%rdi)
21979 6: movq %r9,1*8(%rdi)
21980-7: movq %r10,2*8(%rdi)
21981+7: movq %rax,2*8(%rdi)
21982 8: movq %r11,3*8(%rdi)
21983 9: movq 4*8(%rsi),%r8
21984 10: movq 5*8(%rsi),%r9
21985-11: movq 6*8(%rsi),%r10
21986+11: movq 6*8(%rsi),%rax
21987 12: movq 7*8(%rsi),%r11
21988 13: movq %r8,4*8(%rdi)
21989 14: movq %r9,5*8(%rdi)
21990-15: movq %r10,6*8(%rdi)
21991+15: movq %rax,6*8(%rdi)
21992 16: movq %r11,7*8(%rdi)
21993 leaq 64(%rsi),%rsi
21994 leaq 64(%rdi),%rdi
21995@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21996 decl %ecx
21997 jnz 21b
21998 23: xor %eax,%eax
21999+ pax_force_retaddr
22000 ret
22001
22002 .section .fixup,"ax"
22003@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22004 3: rep
22005 movsb
22006 4: xorl %eax,%eax
22007+ pax_force_retaddr
22008 ret
22009
22010 .section .fixup,"ax"
22011diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22012index cb0c112..e3a6895 100644
22013--- a/arch/x86/lib/copy_user_nocache_64.S
22014+++ b/arch/x86/lib/copy_user_nocache_64.S
22015@@ -8,12 +8,14 @@
22016
22017 #include <linux/linkage.h>
22018 #include <asm/dwarf2.h>
22019+#include <asm/alternative-asm.h>
22020
22021 #define FIX_ALIGNMENT 1
22022
22023 #include <asm/current.h>
22024 #include <asm/asm-offsets.h>
22025 #include <asm/thread_info.h>
22026+#include <asm/pgtable.h>
22027
22028 .macro ALIGN_DESTINATION
22029 #ifdef FIX_ALIGNMENT
22030@@ -50,6 +52,15 @@
22031 */
22032 ENTRY(__copy_user_nocache)
22033 CFI_STARTPROC
22034+
22035+#ifdef CONFIG_PAX_MEMORY_UDEREF
22036+ mov $PAX_USER_SHADOW_BASE,%rcx
22037+ cmp %rcx,%rsi
22038+ jae 1f
22039+ add %rcx,%rsi
22040+1:
22041+#endif
22042+
22043 cmpl $8,%edx
22044 jb 20f /* less then 8 bytes, go to byte copy loop */
22045 ALIGN_DESTINATION
22046@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22047 jz 17f
22048 1: movq (%rsi),%r8
22049 2: movq 1*8(%rsi),%r9
22050-3: movq 2*8(%rsi),%r10
22051+3: movq 2*8(%rsi),%rax
22052 4: movq 3*8(%rsi),%r11
22053 5: movnti %r8,(%rdi)
22054 6: movnti %r9,1*8(%rdi)
22055-7: movnti %r10,2*8(%rdi)
22056+7: movnti %rax,2*8(%rdi)
22057 8: movnti %r11,3*8(%rdi)
22058 9: movq 4*8(%rsi),%r8
22059 10: movq 5*8(%rsi),%r9
22060-11: movq 6*8(%rsi),%r10
22061+11: movq 6*8(%rsi),%rax
22062 12: movq 7*8(%rsi),%r11
22063 13: movnti %r8,4*8(%rdi)
22064 14: movnti %r9,5*8(%rdi)
22065-15: movnti %r10,6*8(%rdi)
22066+15: movnti %rax,6*8(%rdi)
22067 16: movnti %r11,7*8(%rdi)
22068 leaq 64(%rsi),%rsi
22069 leaq 64(%rdi),%rdi
22070@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22071 jnz 21b
22072 23: xorl %eax,%eax
22073 sfence
22074+ pax_force_retaddr
22075 ret
22076
22077 .section .fixup,"ax"
22078diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22079index f0dba36..48cb4d6 100644
22080--- a/arch/x86/lib/csum-copy_64.S
22081+++ b/arch/x86/lib/csum-copy_64.S
22082@@ -8,6 +8,7 @@
22083 #include <linux/linkage.h>
22084 #include <asm/dwarf2.h>
22085 #include <asm/errno.h>
22086+#include <asm/alternative-asm.h>
22087
22088 /*
22089 * Checksum copy with exception handling.
22090@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22091 CFI_RESTORE rbp
22092 addq $7*8,%rsp
22093 CFI_ADJUST_CFA_OFFSET -7*8
22094+ pax_force_retaddr 0, 1
22095 ret
22096 CFI_RESTORE_STATE
22097
22098diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22099index 459b58a..9570bc7 100644
22100--- a/arch/x86/lib/csum-wrappers_64.c
22101+++ b/arch/x86/lib/csum-wrappers_64.c
22102@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22103 len -= 2;
22104 }
22105 }
22106- isum = csum_partial_copy_generic((__force const void *)src,
22107+
22108+#ifdef CONFIG_PAX_MEMORY_UDEREF
22109+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22110+ src += PAX_USER_SHADOW_BASE;
22111+#endif
22112+
22113+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22114 dst, len, isum, errp, NULL);
22115 if (unlikely(*errp))
22116 goto out_err;
22117@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22118 }
22119
22120 *errp = 0;
22121- return csum_partial_copy_generic(src, (void __force *)dst,
22122+
22123+#ifdef CONFIG_PAX_MEMORY_UDEREF
22124+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22125+ dst += PAX_USER_SHADOW_BASE;
22126+#endif
22127+
22128+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22129 len, isum, NULL, errp);
22130 }
22131 EXPORT_SYMBOL(csum_partial_copy_to_user);
22132diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22133index 51f1504..ddac4c1 100644
22134--- a/arch/x86/lib/getuser.S
22135+++ b/arch/x86/lib/getuser.S
22136@@ -33,15 +33,38 @@
22137 #include <asm/asm-offsets.h>
22138 #include <asm/thread_info.h>
22139 #include <asm/asm.h>
22140+#include <asm/segment.h>
22141+#include <asm/pgtable.h>
22142+#include <asm/alternative-asm.h>
22143+
22144+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22145+#define __copyuser_seg gs;
22146+#else
22147+#define __copyuser_seg
22148+#endif
22149
22150 .text
22151 ENTRY(__get_user_1)
22152 CFI_STARTPROC
22153+
22154+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22155 GET_THREAD_INFO(%_ASM_DX)
22156 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22157 jae bad_get_user
22158-1: movzb (%_ASM_AX),%edx
22159+
22160+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22161+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22162+ cmp %_ASM_DX,%_ASM_AX
22163+ jae 1234f
22164+ add %_ASM_DX,%_ASM_AX
22165+1234:
22166+#endif
22167+
22168+#endif
22169+
22170+1: __copyuser_seg movzb (%_ASM_AX),%edx
22171 xor %eax,%eax
22172+ pax_force_retaddr
22173 ret
22174 CFI_ENDPROC
22175 ENDPROC(__get_user_1)
22176@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22177 ENTRY(__get_user_2)
22178 CFI_STARTPROC
22179 add $1,%_ASM_AX
22180+
22181+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22182 jc bad_get_user
22183 GET_THREAD_INFO(%_ASM_DX)
22184 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22185 jae bad_get_user
22186-2: movzwl -1(%_ASM_AX),%edx
22187+
22188+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22189+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22190+ cmp %_ASM_DX,%_ASM_AX
22191+ jae 1234f
22192+ add %_ASM_DX,%_ASM_AX
22193+1234:
22194+#endif
22195+
22196+#endif
22197+
22198+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22199 xor %eax,%eax
22200+ pax_force_retaddr
22201 ret
22202 CFI_ENDPROC
22203 ENDPROC(__get_user_2)
22204@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22205 ENTRY(__get_user_4)
22206 CFI_STARTPROC
22207 add $3,%_ASM_AX
22208+
22209+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22210 jc bad_get_user
22211 GET_THREAD_INFO(%_ASM_DX)
22212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22213 jae bad_get_user
22214-3: mov -3(%_ASM_AX),%edx
22215+
22216+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22217+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22218+ cmp %_ASM_DX,%_ASM_AX
22219+ jae 1234f
22220+ add %_ASM_DX,%_ASM_AX
22221+1234:
22222+#endif
22223+
22224+#endif
22225+
22226+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22227 xor %eax,%eax
22228+ pax_force_retaddr
22229 ret
22230 CFI_ENDPROC
22231 ENDPROC(__get_user_4)
22232@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22233 GET_THREAD_INFO(%_ASM_DX)
22234 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22235 jae bad_get_user
22236+
22237+#ifdef CONFIG_PAX_MEMORY_UDEREF
22238+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22239+ cmp %_ASM_DX,%_ASM_AX
22240+ jae 1234f
22241+ add %_ASM_DX,%_ASM_AX
22242+1234:
22243+#endif
22244+
22245 4: movq -7(%_ASM_AX),%_ASM_DX
22246 xor %eax,%eax
22247+ pax_force_retaddr
22248 ret
22249 CFI_ENDPROC
22250 ENDPROC(__get_user_8)
22251@@ -91,6 +152,7 @@ bad_get_user:
22252 CFI_STARTPROC
22253 xor %edx,%edx
22254 mov $(-EFAULT),%_ASM_AX
22255+ pax_force_retaddr
22256 ret
22257 CFI_ENDPROC
22258 END(bad_get_user)
22259diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22260index 05a95e7..326f2fa 100644
22261--- a/arch/x86/lib/iomap_copy_64.S
22262+++ b/arch/x86/lib/iomap_copy_64.S
22263@@ -17,6 +17,7 @@
22264
22265 #include <linux/linkage.h>
22266 #include <asm/dwarf2.h>
22267+#include <asm/alternative-asm.h>
22268
22269 /*
22270 * override generic version in lib/iomap_copy.c
22271@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22272 CFI_STARTPROC
22273 movl %edx,%ecx
22274 rep movsd
22275+ pax_force_retaddr
22276 ret
22277 CFI_ENDPROC
22278 ENDPROC(__iowrite32_copy)
22279diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22280index ad5441e..610e351 100644
22281--- a/arch/x86/lib/memcpy_64.S
22282+++ b/arch/x86/lib/memcpy_64.S
22283@@ -4,6 +4,7 @@
22284
22285 #include <asm/cpufeature.h>
22286 #include <asm/dwarf2.h>
22287+#include <asm/alternative-asm.h>
22288
22289 /*
22290 * memcpy - Copy a memory block.
22291@@ -34,6 +35,7 @@ memcpy_c:
22292 rep movsq
22293 movl %edx, %ecx
22294 rep movsb
22295+ pax_force_retaddr
22296 ret
22297 CFI_ENDPROC
22298 ENDPROC(memcpy_c)
22299@@ -118,6 +120,7 @@ ENTRY(memcpy)
22300 jnz .Lloop_1
22301
22302 .Lend:
22303+ pax_force_retaddr 0, 1
22304 ret
22305 CFI_ENDPROC
22306 ENDPROC(memcpy)
22307@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22308 * It is also a lot simpler. Use this when possible:
22309 */
22310
22311- .section .altinstr_replacement, "ax"
22312+ .section .altinstr_replacement, "a"
22313 1: .byte 0xeb /* jmp <disp8> */
22314 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22315 2:
22316diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22317index 2c59481..7e9ba4e 100644
22318--- a/arch/x86/lib/memset_64.S
22319+++ b/arch/x86/lib/memset_64.S
22320@@ -2,6 +2,7 @@
22321
22322 #include <linux/linkage.h>
22323 #include <asm/dwarf2.h>
22324+#include <asm/alternative-asm.h>
22325
22326 /*
22327 * ISO C memset - set a memory block to a byte value.
22328@@ -28,6 +29,7 @@ memset_c:
22329 movl %r8d,%ecx
22330 rep stosb
22331 movq %r9,%rax
22332+ pax_force_retaddr
22333 ret
22334 CFI_ENDPROC
22335 ENDPROC(memset_c)
22336@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22337 ENTRY(memset)
22338 ENTRY(__memset)
22339 CFI_STARTPROC
22340- movq %rdi,%r10
22341 movq %rdx,%r11
22342
22343 /* expand byte value */
22344 movzbl %sil,%ecx
22345 movabs $0x0101010101010101,%rax
22346 mul %rcx /* with rax, clobbers rdx */
22347+ movq %rdi,%rdx
22348
22349 /* align dst */
22350 movl %edi,%r9d
22351@@ -95,7 +97,8 @@ ENTRY(__memset)
22352 jnz .Lloop_1
22353
22354 .Lende:
22355- movq %r10,%rax
22356+ movq %rdx,%rax
22357+ pax_force_retaddr
22358 ret
22359
22360 CFI_RESTORE_STATE
22361@@ -118,7 +121,7 @@ ENDPROC(__memset)
22362
22363 #include <asm/cpufeature.h>
22364
22365- .section .altinstr_replacement,"ax"
22366+ .section .altinstr_replacement,"a"
22367 1: .byte 0xeb /* jmp <disp8> */
22368 .byte (memset_c - memset) - (2f - 1b) /* offset */
22369 2:
22370diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22371index c9f2d9b..e7fd2c0 100644
22372--- a/arch/x86/lib/mmx_32.c
22373+++ b/arch/x86/lib/mmx_32.c
22374@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22375 {
22376 void *p;
22377 int i;
22378+ unsigned long cr0;
22379
22380 if (unlikely(in_interrupt()))
22381 return __memcpy(to, from, len);
22382@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22383 kernel_fpu_begin();
22384
22385 __asm__ __volatile__ (
22386- "1: prefetch (%0)\n" /* This set is 28 bytes */
22387- " prefetch 64(%0)\n"
22388- " prefetch 128(%0)\n"
22389- " prefetch 192(%0)\n"
22390- " prefetch 256(%0)\n"
22391+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22392+ " prefetch 64(%1)\n"
22393+ " prefetch 128(%1)\n"
22394+ " prefetch 192(%1)\n"
22395+ " prefetch 256(%1)\n"
22396 "2: \n"
22397 ".section .fixup, \"ax\"\n"
22398- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22399+ "3: \n"
22400+
22401+#ifdef CONFIG_PAX_KERNEXEC
22402+ " movl %%cr0, %0\n"
22403+ " movl %0, %%eax\n"
22404+ " andl $0xFFFEFFFF, %%eax\n"
22405+ " movl %%eax, %%cr0\n"
22406+#endif
22407+
22408+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22409+
22410+#ifdef CONFIG_PAX_KERNEXEC
22411+ " movl %0, %%cr0\n"
22412+#endif
22413+
22414 " jmp 2b\n"
22415 ".previous\n"
22416 _ASM_EXTABLE(1b, 3b)
22417- : : "r" (from));
22418+ : "=&r" (cr0) : "r" (from) : "ax");
22419
22420 for ( ; i > 5; i--) {
22421 __asm__ __volatile__ (
22422- "1: prefetch 320(%0)\n"
22423- "2: movq (%0), %%mm0\n"
22424- " movq 8(%0), %%mm1\n"
22425- " movq 16(%0), %%mm2\n"
22426- " movq 24(%0), %%mm3\n"
22427- " movq %%mm0, (%1)\n"
22428- " movq %%mm1, 8(%1)\n"
22429- " movq %%mm2, 16(%1)\n"
22430- " movq %%mm3, 24(%1)\n"
22431- " movq 32(%0), %%mm0\n"
22432- " movq 40(%0), %%mm1\n"
22433- " movq 48(%0), %%mm2\n"
22434- " movq 56(%0), %%mm3\n"
22435- " movq %%mm0, 32(%1)\n"
22436- " movq %%mm1, 40(%1)\n"
22437- " movq %%mm2, 48(%1)\n"
22438- " movq %%mm3, 56(%1)\n"
22439+ "1: prefetch 320(%1)\n"
22440+ "2: movq (%1), %%mm0\n"
22441+ " movq 8(%1), %%mm1\n"
22442+ " movq 16(%1), %%mm2\n"
22443+ " movq 24(%1), %%mm3\n"
22444+ " movq %%mm0, (%2)\n"
22445+ " movq %%mm1, 8(%2)\n"
22446+ " movq %%mm2, 16(%2)\n"
22447+ " movq %%mm3, 24(%2)\n"
22448+ " movq 32(%1), %%mm0\n"
22449+ " movq 40(%1), %%mm1\n"
22450+ " movq 48(%1), %%mm2\n"
22451+ " movq 56(%1), %%mm3\n"
22452+ " movq %%mm0, 32(%2)\n"
22453+ " movq %%mm1, 40(%2)\n"
22454+ " movq %%mm2, 48(%2)\n"
22455+ " movq %%mm3, 56(%2)\n"
22456 ".section .fixup, \"ax\"\n"
22457- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22458+ "3:\n"
22459+
22460+#ifdef CONFIG_PAX_KERNEXEC
22461+ " movl %%cr0, %0\n"
22462+ " movl %0, %%eax\n"
22463+ " andl $0xFFFEFFFF, %%eax\n"
22464+ " movl %%eax, %%cr0\n"
22465+#endif
22466+
22467+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22468+
22469+#ifdef CONFIG_PAX_KERNEXEC
22470+ " movl %0, %%cr0\n"
22471+#endif
22472+
22473 " jmp 2b\n"
22474 ".previous\n"
22475 _ASM_EXTABLE(1b, 3b)
22476- : : "r" (from), "r" (to) : "memory");
22477+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22478
22479 from += 64;
22480 to += 64;
22481@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22482 static void fast_copy_page(void *to, void *from)
22483 {
22484 int i;
22485+ unsigned long cr0;
22486
22487 kernel_fpu_begin();
22488
22489@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22490 * but that is for later. -AV
22491 */
22492 __asm__ __volatile__(
22493- "1: prefetch (%0)\n"
22494- " prefetch 64(%0)\n"
22495- " prefetch 128(%0)\n"
22496- " prefetch 192(%0)\n"
22497- " prefetch 256(%0)\n"
22498+ "1: prefetch (%1)\n"
22499+ " prefetch 64(%1)\n"
22500+ " prefetch 128(%1)\n"
22501+ " prefetch 192(%1)\n"
22502+ " prefetch 256(%1)\n"
22503 "2: \n"
22504 ".section .fixup, \"ax\"\n"
22505- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22506+ "3: \n"
22507+
22508+#ifdef CONFIG_PAX_KERNEXEC
22509+ " movl %%cr0, %0\n"
22510+ " movl %0, %%eax\n"
22511+ " andl $0xFFFEFFFF, %%eax\n"
22512+ " movl %%eax, %%cr0\n"
22513+#endif
22514+
22515+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22516+
22517+#ifdef CONFIG_PAX_KERNEXEC
22518+ " movl %0, %%cr0\n"
22519+#endif
22520+
22521 " jmp 2b\n"
22522 ".previous\n"
22523- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22524+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22525
22526 for (i = 0; i < (4096-320)/64; i++) {
22527 __asm__ __volatile__ (
22528- "1: prefetch 320(%0)\n"
22529- "2: movq (%0), %%mm0\n"
22530- " movntq %%mm0, (%1)\n"
22531- " movq 8(%0), %%mm1\n"
22532- " movntq %%mm1, 8(%1)\n"
22533- " movq 16(%0), %%mm2\n"
22534- " movntq %%mm2, 16(%1)\n"
22535- " movq 24(%0), %%mm3\n"
22536- " movntq %%mm3, 24(%1)\n"
22537- " movq 32(%0), %%mm4\n"
22538- " movntq %%mm4, 32(%1)\n"
22539- " movq 40(%0), %%mm5\n"
22540- " movntq %%mm5, 40(%1)\n"
22541- " movq 48(%0), %%mm6\n"
22542- " movntq %%mm6, 48(%1)\n"
22543- " movq 56(%0), %%mm7\n"
22544- " movntq %%mm7, 56(%1)\n"
22545+ "1: prefetch 320(%1)\n"
22546+ "2: movq (%1), %%mm0\n"
22547+ " movntq %%mm0, (%2)\n"
22548+ " movq 8(%1), %%mm1\n"
22549+ " movntq %%mm1, 8(%2)\n"
22550+ " movq 16(%1), %%mm2\n"
22551+ " movntq %%mm2, 16(%2)\n"
22552+ " movq 24(%1), %%mm3\n"
22553+ " movntq %%mm3, 24(%2)\n"
22554+ " movq 32(%1), %%mm4\n"
22555+ " movntq %%mm4, 32(%2)\n"
22556+ " movq 40(%1), %%mm5\n"
22557+ " movntq %%mm5, 40(%2)\n"
22558+ " movq 48(%1), %%mm6\n"
22559+ " movntq %%mm6, 48(%2)\n"
22560+ " movq 56(%1), %%mm7\n"
22561+ " movntq %%mm7, 56(%2)\n"
22562 ".section .fixup, \"ax\"\n"
22563- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22564+ "3:\n"
22565+
22566+#ifdef CONFIG_PAX_KERNEXEC
22567+ " movl %%cr0, %0\n"
22568+ " movl %0, %%eax\n"
22569+ " andl $0xFFFEFFFF, %%eax\n"
22570+ " movl %%eax, %%cr0\n"
22571+#endif
22572+
22573+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22574+
22575+#ifdef CONFIG_PAX_KERNEXEC
22576+ " movl %0, %%cr0\n"
22577+#endif
22578+
22579 " jmp 2b\n"
22580 ".previous\n"
22581- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22582+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22583
22584 from += 64;
22585 to += 64;
22586@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22587 static void fast_copy_page(void *to, void *from)
22588 {
22589 int i;
22590+ unsigned long cr0;
22591
22592 kernel_fpu_begin();
22593
22594 __asm__ __volatile__ (
22595- "1: prefetch (%0)\n"
22596- " prefetch 64(%0)\n"
22597- " prefetch 128(%0)\n"
22598- " prefetch 192(%0)\n"
22599- " prefetch 256(%0)\n"
22600+ "1: prefetch (%1)\n"
22601+ " prefetch 64(%1)\n"
22602+ " prefetch 128(%1)\n"
22603+ " prefetch 192(%1)\n"
22604+ " prefetch 256(%1)\n"
22605 "2: \n"
22606 ".section .fixup, \"ax\"\n"
22607- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22608+ "3: \n"
22609+
22610+#ifdef CONFIG_PAX_KERNEXEC
22611+ " movl %%cr0, %0\n"
22612+ " movl %0, %%eax\n"
22613+ " andl $0xFFFEFFFF, %%eax\n"
22614+ " movl %%eax, %%cr0\n"
22615+#endif
22616+
22617+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22618+
22619+#ifdef CONFIG_PAX_KERNEXEC
22620+ " movl %0, %%cr0\n"
22621+#endif
22622+
22623 " jmp 2b\n"
22624 ".previous\n"
22625- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22626+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22627
22628 for (i = 0; i < 4096/64; i++) {
22629 __asm__ __volatile__ (
22630- "1: prefetch 320(%0)\n"
22631- "2: movq (%0), %%mm0\n"
22632- " movq 8(%0), %%mm1\n"
22633- " movq 16(%0), %%mm2\n"
22634- " movq 24(%0), %%mm3\n"
22635- " movq %%mm0, (%1)\n"
22636- " movq %%mm1, 8(%1)\n"
22637- " movq %%mm2, 16(%1)\n"
22638- " movq %%mm3, 24(%1)\n"
22639- " movq 32(%0), %%mm0\n"
22640- " movq 40(%0), %%mm1\n"
22641- " movq 48(%0), %%mm2\n"
22642- " movq 56(%0), %%mm3\n"
22643- " movq %%mm0, 32(%1)\n"
22644- " movq %%mm1, 40(%1)\n"
22645- " movq %%mm2, 48(%1)\n"
22646- " movq %%mm3, 56(%1)\n"
22647+ "1: prefetch 320(%1)\n"
22648+ "2: movq (%1), %%mm0\n"
22649+ " movq 8(%1), %%mm1\n"
22650+ " movq 16(%1), %%mm2\n"
22651+ " movq 24(%1), %%mm3\n"
22652+ " movq %%mm0, (%2)\n"
22653+ " movq %%mm1, 8(%2)\n"
22654+ " movq %%mm2, 16(%2)\n"
22655+ " movq %%mm3, 24(%2)\n"
22656+ " movq 32(%1), %%mm0\n"
22657+ " movq 40(%1), %%mm1\n"
22658+ " movq 48(%1), %%mm2\n"
22659+ " movq 56(%1), %%mm3\n"
22660+ " movq %%mm0, 32(%2)\n"
22661+ " movq %%mm1, 40(%2)\n"
22662+ " movq %%mm2, 48(%2)\n"
22663+ " movq %%mm3, 56(%2)\n"
22664 ".section .fixup, \"ax\"\n"
22665- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22666+ "3:\n"
22667+
22668+#ifdef CONFIG_PAX_KERNEXEC
22669+ " movl %%cr0, %0\n"
22670+ " movl %0, %%eax\n"
22671+ " andl $0xFFFEFFFF, %%eax\n"
22672+ " movl %%eax, %%cr0\n"
22673+#endif
22674+
22675+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22676+
22677+#ifdef CONFIG_PAX_KERNEXEC
22678+ " movl %0, %%cr0\n"
22679+#endif
22680+
22681 " jmp 2b\n"
22682 ".previous\n"
22683 _ASM_EXTABLE(1b, 3b)
22684- : : "r" (from), "r" (to) : "memory");
22685+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22686
22687 from += 64;
22688 to += 64;
22689diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22690index 69fa106..adda88b 100644
22691--- a/arch/x86/lib/msr-reg.S
22692+++ b/arch/x86/lib/msr-reg.S
22693@@ -3,6 +3,7 @@
22694 #include <asm/dwarf2.h>
22695 #include <asm/asm.h>
22696 #include <asm/msr.h>
22697+#include <asm/alternative-asm.h>
22698
22699 #ifdef CONFIG_X86_64
22700 /*
22701@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22702 CFI_STARTPROC
22703 pushq_cfi %rbx
22704 pushq_cfi %rbp
22705- movq %rdi, %r10 /* Save pointer */
22706+ movq %rdi, %r9 /* Save pointer */
22707 xorl %r11d, %r11d /* Return value */
22708 movl (%rdi), %eax
22709 movl 4(%rdi), %ecx
22710@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22711 movl 28(%rdi), %edi
22712 CFI_REMEMBER_STATE
22713 1: \op
22714-2: movl %eax, (%r10)
22715+2: movl %eax, (%r9)
22716 movl %r11d, %eax /* Return value */
22717- movl %ecx, 4(%r10)
22718- movl %edx, 8(%r10)
22719- movl %ebx, 12(%r10)
22720- movl %ebp, 20(%r10)
22721- movl %esi, 24(%r10)
22722- movl %edi, 28(%r10)
22723+ movl %ecx, 4(%r9)
22724+ movl %edx, 8(%r9)
22725+ movl %ebx, 12(%r9)
22726+ movl %ebp, 20(%r9)
22727+ movl %esi, 24(%r9)
22728+ movl %edi, 28(%r9)
22729 popq_cfi %rbp
22730 popq_cfi %rbx
22731+ pax_force_retaddr
22732 ret
22733 3:
22734 CFI_RESTORE_STATE
22735diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22736index 36b0d15..d381858 100644
22737--- a/arch/x86/lib/putuser.S
22738+++ b/arch/x86/lib/putuser.S
22739@@ -15,7 +15,9 @@
22740 #include <asm/thread_info.h>
22741 #include <asm/errno.h>
22742 #include <asm/asm.h>
22743-
22744+#include <asm/segment.h>
22745+#include <asm/pgtable.h>
22746+#include <asm/alternative-asm.h>
22747
22748 /*
22749 * __put_user_X
22750@@ -29,52 +31,119 @@
22751 * as they get called from within inline assembly.
22752 */
22753
22754-#define ENTER CFI_STARTPROC ; \
22755- GET_THREAD_INFO(%_ASM_BX)
22756-#define EXIT ret ; \
22757+#define ENTER CFI_STARTPROC
22758+#define EXIT pax_force_retaddr; ret ; \
22759 CFI_ENDPROC
22760
22761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22762+#define _DEST %_ASM_CX,%_ASM_BX
22763+#else
22764+#define _DEST %_ASM_CX
22765+#endif
22766+
22767+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22768+#define __copyuser_seg gs;
22769+#else
22770+#define __copyuser_seg
22771+#endif
22772+
22773 .text
22774 ENTRY(__put_user_1)
22775 ENTER
22776+
22777+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22778+ GET_THREAD_INFO(%_ASM_BX)
22779 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22780 jae bad_put_user
22781-1: movb %al,(%_ASM_CX)
22782+
22783+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22784+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22785+ cmp %_ASM_BX,%_ASM_CX
22786+ jb 1234f
22787+ xor %ebx,%ebx
22788+1234:
22789+#endif
22790+
22791+#endif
22792+
22793+1: __copyuser_seg movb %al,(_DEST)
22794 xor %eax,%eax
22795 EXIT
22796 ENDPROC(__put_user_1)
22797
22798 ENTRY(__put_user_2)
22799 ENTER
22800+
22801+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22802+ GET_THREAD_INFO(%_ASM_BX)
22803 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22804 sub $1,%_ASM_BX
22805 cmp %_ASM_BX,%_ASM_CX
22806 jae bad_put_user
22807-2: movw %ax,(%_ASM_CX)
22808+
22809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22810+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22811+ cmp %_ASM_BX,%_ASM_CX
22812+ jb 1234f
22813+ xor %ebx,%ebx
22814+1234:
22815+#endif
22816+
22817+#endif
22818+
22819+2: __copyuser_seg movw %ax,(_DEST)
22820 xor %eax,%eax
22821 EXIT
22822 ENDPROC(__put_user_2)
22823
22824 ENTRY(__put_user_4)
22825 ENTER
22826+
22827+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22828+ GET_THREAD_INFO(%_ASM_BX)
22829 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22830 sub $3,%_ASM_BX
22831 cmp %_ASM_BX,%_ASM_CX
22832 jae bad_put_user
22833-3: movl %eax,(%_ASM_CX)
22834+
22835+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22836+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22837+ cmp %_ASM_BX,%_ASM_CX
22838+ jb 1234f
22839+ xor %ebx,%ebx
22840+1234:
22841+#endif
22842+
22843+#endif
22844+
22845+3: __copyuser_seg movl %eax,(_DEST)
22846 xor %eax,%eax
22847 EXIT
22848 ENDPROC(__put_user_4)
22849
22850 ENTRY(__put_user_8)
22851 ENTER
22852+
22853+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22854+ GET_THREAD_INFO(%_ASM_BX)
22855 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22856 sub $7,%_ASM_BX
22857 cmp %_ASM_BX,%_ASM_CX
22858 jae bad_put_user
22859-4: mov %_ASM_AX,(%_ASM_CX)
22860+
22861+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22862+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22863+ cmp %_ASM_BX,%_ASM_CX
22864+ jb 1234f
22865+ xor %ebx,%ebx
22866+1234:
22867+#endif
22868+
22869+#endif
22870+
22871+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22872 #ifdef CONFIG_X86_32
22873-5: movl %edx,4(%_ASM_CX)
22874+5: __copyuser_seg movl %edx,4(_DEST)
22875 #endif
22876 xor %eax,%eax
22877 EXIT
22878diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22879index 05ea55f..6345b9a 100644
22880--- a/arch/x86/lib/rwlock_64.S
22881+++ b/arch/x86/lib/rwlock_64.S
22882@@ -2,6 +2,7 @@
22883
22884 #include <linux/linkage.h>
22885 #include <asm/rwlock.h>
22886+#include <asm/asm.h>
22887 #include <asm/alternative-asm.h>
22888 #include <asm/dwarf2.h>
22889
22890@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22891 CFI_STARTPROC
22892 LOCK_PREFIX
22893 addl $RW_LOCK_BIAS,(%rdi)
22894+
22895+#ifdef CONFIG_PAX_REFCOUNT
22896+ jno 1234f
22897+ LOCK_PREFIX
22898+ subl $RW_LOCK_BIAS,(%rdi)
22899+ int $4
22900+1234:
22901+ _ASM_EXTABLE(1234b, 1234b)
22902+#endif
22903+
22904 1: rep
22905 nop
22906 cmpl $RW_LOCK_BIAS,(%rdi)
22907 jne 1b
22908 LOCK_PREFIX
22909 subl $RW_LOCK_BIAS,(%rdi)
22910+
22911+#ifdef CONFIG_PAX_REFCOUNT
22912+ jno 1234f
22913+ LOCK_PREFIX
22914+ addl $RW_LOCK_BIAS,(%rdi)
22915+ int $4
22916+1234:
22917+ _ASM_EXTABLE(1234b, 1234b)
22918+#endif
22919+
22920 jnz __write_lock_failed
22921+ pax_force_retaddr
22922 ret
22923 CFI_ENDPROC
22924 END(__write_lock_failed)
22925@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22926 CFI_STARTPROC
22927 LOCK_PREFIX
22928 incl (%rdi)
22929+
22930+#ifdef CONFIG_PAX_REFCOUNT
22931+ jno 1234f
22932+ LOCK_PREFIX
22933+ decl (%rdi)
22934+ int $4
22935+1234:
22936+ _ASM_EXTABLE(1234b, 1234b)
22937+#endif
22938+
22939 1: rep
22940 nop
22941 cmpl $1,(%rdi)
22942 js 1b
22943 LOCK_PREFIX
22944 decl (%rdi)
22945+
22946+#ifdef CONFIG_PAX_REFCOUNT
22947+ jno 1234f
22948+ LOCK_PREFIX
22949+ incl (%rdi)
22950+ int $4
22951+1234:
22952+ _ASM_EXTABLE(1234b, 1234b)
22953+#endif
22954+
22955 js __read_lock_failed
22956+ pax_force_retaddr
22957 ret
22958 CFI_ENDPROC
22959 END(__read_lock_failed)
22960diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22961index 15acecf..f768b10 100644
22962--- a/arch/x86/lib/rwsem_64.S
22963+++ b/arch/x86/lib/rwsem_64.S
22964@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22965 call rwsem_down_read_failed
22966 popq %rdx
22967 restore_common_regs
22968+ pax_force_retaddr
22969 ret
22970 ENDPROC(call_rwsem_down_read_failed)
22971
22972@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
22973 movq %rax,%rdi
22974 call rwsem_down_write_failed
22975 restore_common_regs
22976+ pax_force_retaddr
22977 ret
22978 ENDPROC(call_rwsem_down_write_failed)
22979
22980@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
22981 movq %rax,%rdi
22982 call rwsem_wake
22983 restore_common_regs
22984-1: ret
22985+1: pax_force_retaddr
22986+ ret
22987 ENDPROC(call_rwsem_wake)
22988
22989 /* Fix up special calling conventions */
22990@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
22991 call rwsem_downgrade_wake
22992 popq %rdx
22993 restore_common_regs
22994+ pax_force_retaddr
22995 ret
22996 ENDPROC(call_rwsem_downgrade_wake)
22997diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22998index bf9a7d5..fb06ab5 100644
22999--- a/arch/x86/lib/thunk_64.S
23000+++ b/arch/x86/lib/thunk_64.S
23001@@ -10,7 +10,8 @@
23002 #include <asm/dwarf2.h>
23003 #include <asm/calling.h>
23004 #include <asm/rwlock.h>
23005-
23006+ #include <asm/alternative-asm.h>
23007+
23008 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23009 .macro thunk name,func
23010 .globl \name
23011@@ -70,6 +71,7 @@
23012 SAVE_ARGS
23013 restore:
23014 RESTORE_ARGS
23015+ pax_force_retaddr
23016 ret
23017 CFI_ENDPROC
23018
23019@@ -77,5 +79,6 @@ restore:
23020 SAVE_ARGS
23021 restore_norax:
23022 RESTORE_ARGS 1
23023+ pax_force_retaddr
23024 ret
23025 CFI_ENDPROC
23026diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23027index 1f118d4..ec4a953 100644
23028--- a/arch/x86/lib/usercopy_32.c
23029+++ b/arch/x86/lib/usercopy_32.c
23030@@ -43,7 +43,7 @@ do { \
23031 __asm__ __volatile__( \
23032 " testl %1,%1\n" \
23033 " jz 2f\n" \
23034- "0: lodsb\n" \
23035+ "0: "__copyuser_seg"lodsb\n" \
23036 " stosb\n" \
23037 " testb %%al,%%al\n" \
23038 " jz 1f\n" \
23039@@ -128,10 +128,12 @@ do { \
23040 int __d0; \
23041 might_fault(); \
23042 __asm__ __volatile__( \
23043+ __COPYUSER_SET_ES \
23044 "0: rep; stosl\n" \
23045 " movl %2,%0\n" \
23046 "1: rep; stosb\n" \
23047 "2:\n" \
23048+ __COPYUSER_RESTORE_ES \
23049 ".section .fixup,\"ax\"\n" \
23050 "3: lea 0(%2,%0,4),%0\n" \
23051 " jmp 2b\n" \
23052@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23053 might_fault();
23054
23055 __asm__ __volatile__(
23056+ __COPYUSER_SET_ES
23057 " testl %0, %0\n"
23058 " jz 3f\n"
23059 " andl %0,%%ecx\n"
23060@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23061 " subl %%ecx,%0\n"
23062 " addl %0,%%eax\n"
23063 "1:\n"
23064+ __COPYUSER_RESTORE_ES
23065 ".section .fixup,\"ax\"\n"
23066 "2: xorl %%eax,%%eax\n"
23067 " jmp 1b\n"
23068@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23069
23070 #ifdef CONFIG_X86_INTEL_USERCOPY
23071 static unsigned long
23072-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23073+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23074 {
23075 int d0, d1;
23076 __asm__ __volatile__(
23077@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23078 " .align 2,0x90\n"
23079 "3: movl 0(%4), %%eax\n"
23080 "4: movl 4(%4), %%edx\n"
23081- "5: movl %%eax, 0(%3)\n"
23082- "6: movl %%edx, 4(%3)\n"
23083+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23084+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23085 "7: movl 8(%4), %%eax\n"
23086 "8: movl 12(%4),%%edx\n"
23087- "9: movl %%eax, 8(%3)\n"
23088- "10: movl %%edx, 12(%3)\n"
23089+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23090+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23091 "11: movl 16(%4), %%eax\n"
23092 "12: movl 20(%4), %%edx\n"
23093- "13: movl %%eax, 16(%3)\n"
23094- "14: movl %%edx, 20(%3)\n"
23095+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23096+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23097 "15: movl 24(%4), %%eax\n"
23098 "16: movl 28(%4), %%edx\n"
23099- "17: movl %%eax, 24(%3)\n"
23100- "18: movl %%edx, 28(%3)\n"
23101+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23102+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23103 "19: movl 32(%4), %%eax\n"
23104 "20: movl 36(%4), %%edx\n"
23105- "21: movl %%eax, 32(%3)\n"
23106- "22: movl %%edx, 36(%3)\n"
23107+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23108+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23109 "23: movl 40(%4), %%eax\n"
23110 "24: movl 44(%4), %%edx\n"
23111- "25: movl %%eax, 40(%3)\n"
23112- "26: movl %%edx, 44(%3)\n"
23113+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23114+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23115 "27: movl 48(%4), %%eax\n"
23116 "28: movl 52(%4), %%edx\n"
23117- "29: movl %%eax, 48(%3)\n"
23118- "30: movl %%edx, 52(%3)\n"
23119+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23120+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23121 "31: movl 56(%4), %%eax\n"
23122 "32: movl 60(%4), %%edx\n"
23123- "33: movl %%eax, 56(%3)\n"
23124- "34: movl %%edx, 60(%3)\n"
23125+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23126+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23127 " addl $-64, %0\n"
23128 " addl $64, %4\n"
23129 " addl $64, %3\n"
23130@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23131 " shrl $2, %0\n"
23132 " andl $3, %%eax\n"
23133 " cld\n"
23134+ __COPYUSER_SET_ES
23135 "99: rep; movsl\n"
23136 "36: movl %%eax, %0\n"
23137 "37: rep; movsb\n"
23138 "100:\n"
23139+ __COPYUSER_RESTORE_ES
23140+ ".section .fixup,\"ax\"\n"
23141+ "101: lea 0(%%eax,%0,4),%0\n"
23142+ " jmp 100b\n"
23143+ ".previous\n"
23144+ ".section __ex_table,\"a\"\n"
23145+ " .align 4\n"
23146+ " .long 1b,100b\n"
23147+ " .long 2b,100b\n"
23148+ " .long 3b,100b\n"
23149+ " .long 4b,100b\n"
23150+ " .long 5b,100b\n"
23151+ " .long 6b,100b\n"
23152+ " .long 7b,100b\n"
23153+ " .long 8b,100b\n"
23154+ " .long 9b,100b\n"
23155+ " .long 10b,100b\n"
23156+ " .long 11b,100b\n"
23157+ " .long 12b,100b\n"
23158+ " .long 13b,100b\n"
23159+ " .long 14b,100b\n"
23160+ " .long 15b,100b\n"
23161+ " .long 16b,100b\n"
23162+ " .long 17b,100b\n"
23163+ " .long 18b,100b\n"
23164+ " .long 19b,100b\n"
23165+ " .long 20b,100b\n"
23166+ " .long 21b,100b\n"
23167+ " .long 22b,100b\n"
23168+ " .long 23b,100b\n"
23169+ " .long 24b,100b\n"
23170+ " .long 25b,100b\n"
23171+ " .long 26b,100b\n"
23172+ " .long 27b,100b\n"
23173+ " .long 28b,100b\n"
23174+ " .long 29b,100b\n"
23175+ " .long 30b,100b\n"
23176+ " .long 31b,100b\n"
23177+ " .long 32b,100b\n"
23178+ " .long 33b,100b\n"
23179+ " .long 34b,100b\n"
23180+ " .long 35b,100b\n"
23181+ " .long 36b,100b\n"
23182+ " .long 37b,100b\n"
23183+ " .long 99b,101b\n"
23184+ ".previous"
23185+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23186+ : "1"(to), "2"(from), "0"(size)
23187+ : "eax", "edx", "memory");
23188+ return size;
23189+}
23190+
23191+static unsigned long
23192+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23193+{
23194+ int d0, d1;
23195+ __asm__ __volatile__(
23196+ " .align 2,0x90\n"
23197+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23198+ " cmpl $67, %0\n"
23199+ " jbe 3f\n"
23200+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23201+ " .align 2,0x90\n"
23202+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23203+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23204+ "5: movl %%eax, 0(%3)\n"
23205+ "6: movl %%edx, 4(%3)\n"
23206+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23207+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23208+ "9: movl %%eax, 8(%3)\n"
23209+ "10: movl %%edx, 12(%3)\n"
23210+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23211+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23212+ "13: movl %%eax, 16(%3)\n"
23213+ "14: movl %%edx, 20(%3)\n"
23214+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23215+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23216+ "17: movl %%eax, 24(%3)\n"
23217+ "18: movl %%edx, 28(%3)\n"
23218+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23219+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23220+ "21: movl %%eax, 32(%3)\n"
23221+ "22: movl %%edx, 36(%3)\n"
23222+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23223+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23224+ "25: movl %%eax, 40(%3)\n"
23225+ "26: movl %%edx, 44(%3)\n"
23226+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23227+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23228+ "29: movl %%eax, 48(%3)\n"
23229+ "30: movl %%edx, 52(%3)\n"
23230+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23231+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23232+ "33: movl %%eax, 56(%3)\n"
23233+ "34: movl %%edx, 60(%3)\n"
23234+ " addl $-64, %0\n"
23235+ " addl $64, %4\n"
23236+ " addl $64, %3\n"
23237+ " cmpl $63, %0\n"
23238+ " ja 1b\n"
23239+ "35: movl %0, %%eax\n"
23240+ " shrl $2, %0\n"
23241+ " andl $3, %%eax\n"
23242+ " cld\n"
23243+ "99: rep; "__copyuser_seg" movsl\n"
23244+ "36: movl %%eax, %0\n"
23245+ "37: rep; "__copyuser_seg" movsb\n"
23246+ "100:\n"
23247 ".section .fixup,\"ax\"\n"
23248 "101: lea 0(%%eax,%0,4),%0\n"
23249 " jmp 100b\n"
23250@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23251 int d0, d1;
23252 __asm__ __volatile__(
23253 " .align 2,0x90\n"
23254- "0: movl 32(%4), %%eax\n"
23255+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23256 " cmpl $67, %0\n"
23257 " jbe 2f\n"
23258- "1: movl 64(%4), %%eax\n"
23259+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23260 " .align 2,0x90\n"
23261- "2: movl 0(%4), %%eax\n"
23262- "21: movl 4(%4), %%edx\n"
23263+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23264+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23265 " movl %%eax, 0(%3)\n"
23266 " movl %%edx, 4(%3)\n"
23267- "3: movl 8(%4), %%eax\n"
23268- "31: movl 12(%4),%%edx\n"
23269+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23270+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23271 " movl %%eax, 8(%3)\n"
23272 " movl %%edx, 12(%3)\n"
23273- "4: movl 16(%4), %%eax\n"
23274- "41: movl 20(%4), %%edx\n"
23275+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23276+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23277 " movl %%eax, 16(%3)\n"
23278 " movl %%edx, 20(%3)\n"
23279- "10: movl 24(%4), %%eax\n"
23280- "51: movl 28(%4), %%edx\n"
23281+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23282+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23283 " movl %%eax, 24(%3)\n"
23284 " movl %%edx, 28(%3)\n"
23285- "11: movl 32(%4), %%eax\n"
23286- "61: movl 36(%4), %%edx\n"
23287+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23288+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23289 " movl %%eax, 32(%3)\n"
23290 " movl %%edx, 36(%3)\n"
23291- "12: movl 40(%4), %%eax\n"
23292- "71: movl 44(%4), %%edx\n"
23293+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23294+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23295 " movl %%eax, 40(%3)\n"
23296 " movl %%edx, 44(%3)\n"
23297- "13: movl 48(%4), %%eax\n"
23298- "81: movl 52(%4), %%edx\n"
23299+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23300+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23301 " movl %%eax, 48(%3)\n"
23302 " movl %%edx, 52(%3)\n"
23303- "14: movl 56(%4), %%eax\n"
23304- "91: movl 60(%4), %%edx\n"
23305+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23306+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23307 " movl %%eax, 56(%3)\n"
23308 " movl %%edx, 60(%3)\n"
23309 " addl $-64, %0\n"
23310@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23311 " shrl $2, %0\n"
23312 " andl $3, %%eax\n"
23313 " cld\n"
23314- "6: rep; movsl\n"
23315+ "6: rep; "__copyuser_seg" movsl\n"
23316 " movl %%eax,%0\n"
23317- "7: rep; movsb\n"
23318+ "7: rep; "__copyuser_seg" movsb\n"
23319 "8:\n"
23320 ".section .fixup,\"ax\"\n"
23321 "9: lea 0(%%eax,%0,4),%0\n"
23322@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23323
23324 __asm__ __volatile__(
23325 " .align 2,0x90\n"
23326- "0: movl 32(%4), %%eax\n"
23327+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23328 " cmpl $67, %0\n"
23329 " jbe 2f\n"
23330- "1: movl 64(%4), %%eax\n"
23331+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23332 " .align 2,0x90\n"
23333- "2: movl 0(%4), %%eax\n"
23334- "21: movl 4(%4), %%edx\n"
23335+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23336+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23337 " movnti %%eax, 0(%3)\n"
23338 " movnti %%edx, 4(%3)\n"
23339- "3: movl 8(%4), %%eax\n"
23340- "31: movl 12(%4),%%edx\n"
23341+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23342+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23343 " movnti %%eax, 8(%3)\n"
23344 " movnti %%edx, 12(%3)\n"
23345- "4: movl 16(%4), %%eax\n"
23346- "41: movl 20(%4), %%edx\n"
23347+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23348+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23349 " movnti %%eax, 16(%3)\n"
23350 " movnti %%edx, 20(%3)\n"
23351- "10: movl 24(%4), %%eax\n"
23352- "51: movl 28(%4), %%edx\n"
23353+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23354+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23355 " movnti %%eax, 24(%3)\n"
23356 " movnti %%edx, 28(%3)\n"
23357- "11: movl 32(%4), %%eax\n"
23358- "61: movl 36(%4), %%edx\n"
23359+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23360+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23361 " movnti %%eax, 32(%3)\n"
23362 " movnti %%edx, 36(%3)\n"
23363- "12: movl 40(%4), %%eax\n"
23364- "71: movl 44(%4), %%edx\n"
23365+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23366+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23367 " movnti %%eax, 40(%3)\n"
23368 " movnti %%edx, 44(%3)\n"
23369- "13: movl 48(%4), %%eax\n"
23370- "81: movl 52(%4), %%edx\n"
23371+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23372+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23373 " movnti %%eax, 48(%3)\n"
23374 " movnti %%edx, 52(%3)\n"
23375- "14: movl 56(%4), %%eax\n"
23376- "91: movl 60(%4), %%edx\n"
23377+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23378+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23379 " movnti %%eax, 56(%3)\n"
23380 " movnti %%edx, 60(%3)\n"
23381 " addl $-64, %0\n"
23382@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23383 " shrl $2, %0\n"
23384 " andl $3, %%eax\n"
23385 " cld\n"
23386- "6: rep; movsl\n"
23387+ "6: rep; "__copyuser_seg" movsl\n"
23388 " movl %%eax,%0\n"
23389- "7: rep; movsb\n"
23390+ "7: rep; "__copyuser_seg" movsb\n"
23391 "8:\n"
23392 ".section .fixup,\"ax\"\n"
23393 "9: lea 0(%%eax,%0,4),%0\n"
23394@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23395
23396 __asm__ __volatile__(
23397 " .align 2,0x90\n"
23398- "0: movl 32(%4), %%eax\n"
23399+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23400 " cmpl $67, %0\n"
23401 " jbe 2f\n"
23402- "1: movl 64(%4), %%eax\n"
23403+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23404 " .align 2,0x90\n"
23405- "2: movl 0(%4), %%eax\n"
23406- "21: movl 4(%4), %%edx\n"
23407+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23408+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23409 " movnti %%eax, 0(%3)\n"
23410 " movnti %%edx, 4(%3)\n"
23411- "3: movl 8(%4), %%eax\n"
23412- "31: movl 12(%4),%%edx\n"
23413+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23414+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23415 " movnti %%eax, 8(%3)\n"
23416 " movnti %%edx, 12(%3)\n"
23417- "4: movl 16(%4), %%eax\n"
23418- "41: movl 20(%4), %%edx\n"
23419+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23420+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23421 " movnti %%eax, 16(%3)\n"
23422 " movnti %%edx, 20(%3)\n"
23423- "10: movl 24(%4), %%eax\n"
23424- "51: movl 28(%4), %%edx\n"
23425+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23426+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23427 " movnti %%eax, 24(%3)\n"
23428 " movnti %%edx, 28(%3)\n"
23429- "11: movl 32(%4), %%eax\n"
23430- "61: movl 36(%4), %%edx\n"
23431+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23432+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23433 " movnti %%eax, 32(%3)\n"
23434 " movnti %%edx, 36(%3)\n"
23435- "12: movl 40(%4), %%eax\n"
23436- "71: movl 44(%4), %%edx\n"
23437+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23438+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23439 " movnti %%eax, 40(%3)\n"
23440 " movnti %%edx, 44(%3)\n"
23441- "13: movl 48(%4), %%eax\n"
23442- "81: movl 52(%4), %%edx\n"
23443+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23444+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23445 " movnti %%eax, 48(%3)\n"
23446 " movnti %%edx, 52(%3)\n"
23447- "14: movl 56(%4), %%eax\n"
23448- "91: movl 60(%4), %%edx\n"
23449+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23450+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23451 " movnti %%eax, 56(%3)\n"
23452 " movnti %%edx, 60(%3)\n"
23453 " addl $-64, %0\n"
23454@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23455 " shrl $2, %0\n"
23456 " andl $3, %%eax\n"
23457 " cld\n"
23458- "6: rep; movsl\n"
23459+ "6: rep; "__copyuser_seg" movsl\n"
23460 " movl %%eax,%0\n"
23461- "7: rep; movsb\n"
23462+ "7: rep; "__copyuser_seg" movsb\n"
23463 "8:\n"
23464 ".section .fixup,\"ax\"\n"
23465 "9: lea 0(%%eax,%0,4),%0\n"
23466@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23467 */
23468 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23469 unsigned long size);
23470-unsigned long __copy_user_intel(void __user *to, const void *from,
23471+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23472+ unsigned long size);
23473+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23474 unsigned long size);
23475 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23476 const void __user *from, unsigned long size);
23477 #endif /* CONFIG_X86_INTEL_USERCOPY */
23478
23479 /* Generic arbitrary sized copy. */
23480-#define __copy_user(to, from, size) \
23481+#define __copy_user(to, from, size, prefix, set, restore) \
23482 do { \
23483 int __d0, __d1, __d2; \
23484 __asm__ __volatile__( \
23485+ set \
23486 " cmp $7,%0\n" \
23487 " jbe 1f\n" \
23488 " movl %1,%0\n" \
23489 " negl %0\n" \
23490 " andl $7,%0\n" \
23491 " subl %0,%3\n" \
23492- "4: rep; movsb\n" \
23493+ "4: rep; "prefix"movsb\n" \
23494 " movl %3,%0\n" \
23495 " shrl $2,%0\n" \
23496 " andl $3,%3\n" \
23497 " .align 2,0x90\n" \
23498- "0: rep; movsl\n" \
23499+ "0: rep; "prefix"movsl\n" \
23500 " movl %3,%0\n" \
23501- "1: rep; movsb\n" \
23502+ "1: rep; "prefix"movsb\n" \
23503 "2:\n" \
23504+ restore \
23505 ".section .fixup,\"ax\"\n" \
23506 "5: addl %3,%0\n" \
23507 " jmp 2b\n" \
23508@@ -682,14 +799,14 @@ do { \
23509 " negl %0\n" \
23510 " andl $7,%0\n" \
23511 " subl %0,%3\n" \
23512- "4: rep; movsb\n" \
23513+ "4: rep; "__copyuser_seg"movsb\n" \
23514 " movl %3,%0\n" \
23515 " shrl $2,%0\n" \
23516 " andl $3,%3\n" \
23517 " .align 2,0x90\n" \
23518- "0: rep; movsl\n" \
23519+ "0: rep; "__copyuser_seg"movsl\n" \
23520 " movl %3,%0\n" \
23521- "1: rep; movsb\n" \
23522+ "1: rep; "__copyuser_seg"movsb\n" \
23523 "2:\n" \
23524 ".section .fixup,\"ax\"\n" \
23525 "5: addl %3,%0\n" \
23526@@ -775,9 +892,9 @@ survive:
23527 }
23528 #endif
23529 if (movsl_is_ok(to, from, n))
23530- __copy_user(to, from, n);
23531+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23532 else
23533- n = __copy_user_intel(to, from, n);
23534+ n = __generic_copy_to_user_intel(to, from, n);
23535 return n;
23536 }
23537 EXPORT_SYMBOL(__copy_to_user_ll);
23538@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23539 unsigned long n)
23540 {
23541 if (movsl_is_ok(to, from, n))
23542- __copy_user(to, from, n);
23543+ __copy_user(to, from, n, __copyuser_seg, "", "");
23544 else
23545- n = __copy_user_intel((void __user *)to,
23546- (const void *)from, n);
23547+ n = __generic_copy_from_user_intel(to, from, n);
23548 return n;
23549 }
23550 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23551@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23552 if (n > 64 && cpu_has_xmm2)
23553 n = __copy_user_intel_nocache(to, from, n);
23554 else
23555- __copy_user(to, from, n);
23556+ __copy_user(to, from, n, __copyuser_seg, "", "");
23557 #else
23558- __copy_user(to, from, n);
23559+ __copy_user(to, from, n, __copyuser_seg, "", "");
23560 #endif
23561 return n;
23562 }
23563 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23564
23565-/**
23566- * copy_to_user: - Copy a block of data into user space.
23567- * @to: Destination address, in user space.
23568- * @from: Source address, in kernel space.
23569- * @n: Number of bytes to copy.
23570- *
23571- * Context: User context only. This function may sleep.
23572- *
23573- * Copy data from kernel space to user space.
23574- *
23575- * Returns number of bytes that could not be copied.
23576- * On success, this will be zero.
23577- */
23578-unsigned long
23579-copy_to_user(void __user *to, const void *from, unsigned long n)
23580+#ifdef CONFIG_PAX_MEMORY_UDEREF
23581+void __set_fs(mm_segment_t x)
23582 {
23583- if (access_ok(VERIFY_WRITE, to, n))
23584- n = __copy_to_user(to, from, n);
23585- return n;
23586+ switch (x.seg) {
23587+ case 0:
23588+ loadsegment(gs, 0);
23589+ break;
23590+ case TASK_SIZE_MAX:
23591+ loadsegment(gs, __USER_DS);
23592+ break;
23593+ case -1UL:
23594+ loadsegment(gs, __KERNEL_DS);
23595+ break;
23596+ default:
23597+ BUG();
23598+ }
23599+ return;
23600 }
23601-EXPORT_SYMBOL(copy_to_user);
23602+EXPORT_SYMBOL(__set_fs);
23603
23604-/**
23605- * copy_from_user: - Copy a block of data from user space.
23606- * @to: Destination address, in kernel space.
23607- * @from: Source address, in user space.
23608- * @n: Number of bytes to copy.
23609- *
23610- * Context: User context only. This function may sleep.
23611- *
23612- * Copy data from user space to kernel space.
23613- *
23614- * Returns number of bytes that could not be copied.
23615- * On success, this will be zero.
23616- *
23617- * If some data could not be copied, this function will pad the copied
23618- * data to the requested size using zero bytes.
23619- */
23620-unsigned long
23621-copy_from_user(void *to, const void __user *from, unsigned long n)
23622+void set_fs(mm_segment_t x)
23623 {
23624- if (access_ok(VERIFY_READ, from, n))
23625- n = __copy_from_user(to, from, n);
23626- else
23627- memset(to, 0, n);
23628- return n;
23629+ current_thread_info()->addr_limit = x;
23630+ __set_fs(x);
23631 }
23632-EXPORT_SYMBOL(copy_from_user);
23633+EXPORT_SYMBOL(set_fs);
23634+#endif
23635diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23636index b7c2849..8633ad8 100644
23637--- a/arch/x86/lib/usercopy_64.c
23638+++ b/arch/x86/lib/usercopy_64.c
23639@@ -42,6 +42,12 @@ long
23640 __strncpy_from_user(char *dst, const char __user *src, long count)
23641 {
23642 long res;
23643+
23644+#ifdef CONFIG_PAX_MEMORY_UDEREF
23645+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23646+ src += PAX_USER_SHADOW_BASE;
23647+#endif
23648+
23649 __do_strncpy_from_user(dst, src, count, res);
23650 return res;
23651 }
23652@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23653 {
23654 long __d0;
23655 might_fault();
23656+
23657+#ifdef CONFIG_PAX_MEMORY_UDEREF
23658+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23659+ addr += PAX_USER_SHADOW_BASE;
23660+#endif
23661+
23662 /* no memory constraint because it doesn't change any memory gcc knows
23663 about */
23664 asm volatile(
23665@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23666 }
23667 EXPORT_SYMBOL(strlen_user);
23668
23669-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23670+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23671 {
23672- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23673- return copy_user_generic((__force void *)to, (__force void *)from, len);
23674- }
23675- return len;
23676+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23677+
23678+#ifdef CONFIG_PAX_MEMORY_UDEREF
23679+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23680+ to += PAX_USER_SHADOW_BASE;
23681+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23682+ from += PAX_USER_SHADOW_BASE;
23683+#endif
23684+
23685+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23686+ }
23687+ return len;
23688 }
23689 EXPORT_SYMBOL(copy_in_user);
23690
23691@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23692 * it is not necessary to optimize tail handling.
23693 */
23694 unsigned long
23695-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23696+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23697 {
23698 char c;
23699 unsigned zero_len;
23700diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23701index 61b41ca..5fef66a 100644
23702--- a/arch/x86/mm/extable.c
23703+++ b/arch/x86/mm/extable.c
23704@@ -1,14 +1,71 @@
23705 #include <linux/module.h>
23706 #include <linux/spinlock.h>
23707+#include <linux/sort.h>
23708 #include <asm/uaccess.h>
23709+#include <asm/pgtable.h>
23710
23711+/*
23712+ * The exception table needs to be sorted so that the binary
23713+ * search that we use to find entries in it works properly.
23714+ * This is used both for the kernel exception table and for
23715+ * the exception tables of modules that get loaded.
23716+ */
23717+static int cmp_ex(const void *a, const void *b)
23718+{
23719+ const struct exception_table_entry *x = a, *y = b;
23720+
23721+ /* avoid overflow */
23722+ if (x->insn > y->insn)
23723+ return 1;
23724+ if (x->insn < y->insn)
23725+ return -1;
23726+ return 0;
23727+}
23728+
23729+static void swap_ex(void *a, void *b, int size)
23730+{
23731+ struct exception_table_entry t, *x = a, *y = b;
23732+
23733+ t = *x;
23734+
23735+ pax_open_kernel();
23736+ *x = *y;
23737+ *y = t;
23738+ pax_close_kernel();
23739+}
23740+
23741+void sort_extable(struct exception_table_entry *start,
23742+ struct exception_table_entry *finish)
23743+{
23744+ sort(start, finish - start, sizeof(struct exception_table_entry),
23745+ cmp_ex, swap_ex);
23746+}
23747+
23748+#ifdef CONFIG_MODULES
23749+/*
23750+ * If the exception table is sorted, any referring to the module init
23751+ * will be at the beginning or the end.
23752+ */
23753+void trim_init_extable(struct module *m)
23754+{
23755+ /*trim the beginning*/
23756+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23757+ m->extable++;
23758+ m->num_exentries--;
23759+ }
23760+ /*trim the end*/
23761+ while (m->num_exentries &&
23762+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23763+ m->num_exentries--;
23764+}
23765+#endif /* CONFIG_MODULES */
23766
23767 int fixup_exception(struct pt_regs *regs)
23768 {
23769 const struct exception_table_entry *fixup;
23770
23771 #ifdef CONFIG_PNPBIOS
23772- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23773+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23774 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23775 extern u32 pnp_bios_is_utter_crap;
23776 pnp_bios_is_utter_crap = 1;
23777diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23778index 8ac0d76..3f191dc 100644
23779--- a/arch/x86/mm/fault.c
23780+++ b/arch/x86/mm/fault.c
23781@@ -11,10 +11,19 @@
23782 #include <linux/kprobes.h> /* __kprobes, ... */
23783 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23784 #include <linux/perf_event.h> /* perf_sw_event */
23785+#include <linux/unistd.h>
23786+#include <linux/compiler.h>
23787
23788 #include <asm/traps.h> /* dotraplinkage, ... */
23789 #include <asm/pgalloc.h> /* pgd_*(), ... */
23790 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23791+#include <asm/vsyscall.h>
23792+#include <asm/tlbflush.h>
23793+
23794+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23795+#include <asm/stacktrace.h>
23796+#include "../kernel/dumpstack.h"
23797+#endif
23798
23799 /*
23800 * Page fault error code bits:
23801@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23802 int ret = 0;
23803
23804 /* kprobe_running() needs smp_processor_id() */
23805- if (kprobes_built_in() && !user_mode_vm(regs)) {
23806+ if (kprobes_built_in() && !user_mode(regs)) {
23807 preempt_disable();
23808 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23809 ret = 1;
23810@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23811 return !instr_lo || (instr_lo>>1) == 1;
23812 case 0x00:
23813 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23814- if (probe_kernel_address(instr, opcode))
23815+ if (user_mode(regs)) {
23816+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23817+ return 0;
23818+ } else if (probe_kernel_address(instr, opcode))
23819 return 0;
23820
23821 *prefetch = (instr_lo == 0xF) &&
23822@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23823 while (instr < max_instr) {
23824 unsigned char opcode;
23825
23826- if (probe_kernel_address(instr, opcode))
23827+ if (user_mode(regs)) {
23828+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23829+ break;
23830+ } else if (probe_kernel_address(instr, opcode))
23831 break;
23832
23833 instr++;
23834@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23835 force_sig_info(si_signo, &info, tsk);
23836 }
23837
23838+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23839+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23840+#endif
23841+
23842+#ifdef CONFIG_PAX_EMUTRAMP
23843+static int pax_handle_fetch_fault(struct pt_regs *regs);
23844+#endif
23845+
23846+#ifdef CONFIG_PAX_PAGEEXEC
23847+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23848+{
23849+ pgd_t *pgd;
23850+ pud_t *pud;
23851+ pmd_t *pmd;
23852+
23853+ pgd = pgd_offset(mm, address);
23854+ if (!pgd_present(*pgd))
23855+ return NULL;
23856+ pud = pud_offset(pgd, address);
23857+ if (!pud_present(*pud))
23858+ return NULL;
23859+ pmd = pmd_offset(pud, address);
23860+ if (!pmd_present(*pmd))
23861+ return NULL;
23862+ return pmd;
23863+}
23864+#endif
23865+
23866 DEFINE_SPINLOCK(pgd_lock);
23867 LIST_HEAD(pgd_list);
23868
23869@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23870 address += PMD_SIZE) {
23871
23872 unsigned long flags;
23873+
23874+#ifdef CONFIG_PAX_PER_CPU_PGD
23875+ unsigned long cpu;
23876+#else
23877 struct page *page;
23878+#endif
23879
23880 spin_lock_irqsave(&pgd_lock, flags);
23881+
23882+#ifdef CONFIG_PAX_PER_CPU_PGD
23883+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23884+ pgd_t *pgd = get_cpu_pgd(cpu);
23885+#else
23886 list_for_each_entry(page, &pgd_list, lru) {
23887- if (!vmalloc_sync_one(page_address(page), address))
23888+ pgd_t *pgd = page_address(page);
23889+#endif
23890+
23891+ if (!vmalloc_sync_one(pgd, address))
23892 break;
23893 }
23894 spin_unlock_irqrestore(&pgd_lock, flags);
23895@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23896 * an interrupt in the middle of a task switch..
23897 */
23898 pgd_paddr = read_cr3();
23899+
23900+#ifdef CONFIG_PAX_PER_CPU_PGD
23901+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23902+#endif
23903+
23904 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23905 if (!pmd_k)
23906 return -1;
23907@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23908
23909 const pgd_t *pgd_ref = pgd_offset_k(address);
23910 unsigned long flags;
23911+
23912+#ifdef CONFIG_PAX_PER_CPU_PGD
23913+ unsigned long cpu;
23914+#else
23915 struct page *page;
23916+#endif
23917
23918 if (pgd_none(*pgd_ref))
23919 continue;
23920
23921 spin_lock_irqsave(&pgd_lock, flags);
23922+
23923+#ifdef CONFIG_PAX_PER_CPU_PGD
23924+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23925+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23926+#else
23927 list_for_each_entry(page, &pgd_list, lru) {
23928 pgd_t *pgd;
23929 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23930+#endif
23931+
23932 if (pgd_none(*pgd))
23933 set_pgd(pgd, *pgd_ref);
23934 else
23935@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23936 * happen within a race in page table update. In the later
23937 * case just flush:
23938 */
23939+
23940+#ifdef CONFIG_PAX_PER_CPU_PGD
23941+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23942+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23943+#else
23944 pgd = pgd_offset(current->active_mm, address);
23945+#endif
23946+
23947 pgd_ref = pgd_offset_k(address);
23948 if (pgd_none(*pgd_ref))
23949 return -1;
23950@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23951 static int is_errata100(struct pt_regs *regs, unsigned long address)
23952 {
23953 #ifdef CONFIG_X86_64
23954- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23955+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23956 return 1;
23957 #endif
23958 return 0;
23959@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23960 }
23961
23962 static const char nx_warning[] = KERN_CRIT
23963-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23964+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23965
23966 static void
23967 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23968@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23969 if (!oops_may_print())
23970 return;
23971
23972- if (error_code & PF_INSTR) {
23973+ if (nx_enabled && (error_code & PF_INSTR)) {
23974 unsigned int level;
23975
23976 pte_t *pte = lookup_address(address, &level);
23977
23978 if (pte && pte_present(*pte) && !pte_exec(*pte))
23979- printk(nx_warning, current_uid());
23980+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23981 }
23982
23983+#ifdef CONFIG_PAX_KERNEXEC
23984+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23985+ if (current->signal->curr_ip)
23986+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23987+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23988+ else
23989+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23990+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23991+ }
23992+#endif
23993+
23994 printk(KERN_ALERT "BUG: unable to handle kernel ");
23995 if (address < PAGE_SIZE)
23996 printk(KERN_CONT "NULL pointer dereference");
23997@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23998 {
23999 struct task_struct *tsk = current;
24000
24001+#ifdef CONFIG_X86_64
24002+ struct mm_struct *mm = tsk->mm;
24003+
24004+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24005+ if (regs->ip == (unsigned long)vgettimeofday) {
24006+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24007+ return;
24008+ } else if (regs->ip == (unsigned long)vtime) {
24009+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24010+ return;
24011+ } else if (regs->ip == (unsigned long)vgetcpu) {
24012+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24013+ return;
24014+ }
24015+ }
24016+#endif
24017+
24018 /* User mode accesses just cause a SIGSEGV */
24019 if (error_code & PF_USER) {
24020 /*
24021@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24022 if (is_errata100(regs, address))
24023 return;
24024
24025+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24026+ if (pax_is_fetch_fault(regs, error_code, address)) {
24027+
24028+#ifdef CONFIG_PAX_EMUTRAMP
24029+ switch (pax_handle_fetch_fault(regs)) {
24030+ case 2:
24031+ return;
24032+ }
24033+#endif
24034+
24035+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24036+ do_group_exit(SIGKILL);
24037+ }
24038+#endif
24039+
24040 if (unlikely(show_unhandled_signals))
24041 show_signal_msg(regs, error_code, address, tsk);
24042
24043@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24044 if (fault & VM_FAULT_HWPOISON) {
24045 printk(KERN_ERR
24046 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24047- tsk->comm, tsk->pid, address);
24048+ tsk->comm, task_pid_nr(tsk), address);
24049 code = BUS_MCEERR_AR;
24050 }
24051 #endif
24052@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24053 return 1;
24054 }
24055
24056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24057+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24058+{
24059+ pte_t *pte;
24060+ pmd_t *pmd;
24061+ spinlock_t *ptl;
24062+ unsigned char pte_mask;
24063+
24064+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24065+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24066+ return 0;
24067+
24068+ /* PaX: it's our fault, let's handle it if we can */
24069+
24070+ /* PaX: take a look at read faults before acquiring any locks */
24071+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24072+ /* instruction fetch attempt from a protected page in user mode */
24073+ up_read(&mm->mmap_sem);
24074+
24075+#ifdef CONFIG_PAX_EMUTRAMP
24076+ switch (pax_handle_fetch_fault(regs)) {
24077+ case 2:
24078+ return 1;
24079+ }
24080+#endif
24081+
24082+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24083+ do_group_exit(SIGKILL);
24084+ }
24085+
24086+ pmd = pax_get_pmd(mm, address);
24087+ if (unlikely(!pmd))
24088+ return 0;
24089+
24090+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24091+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24092+ pte_unmap_unlock(pte, ptl);
24093+ return 0;
24094+ }
24095+
24096+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24097+ /* write attempt to a protected page in user mode */
24098+ pte_unmap_unlock(pte, ptl);
24099+ return 0;
24100+ }
24101+
24102+#ifdef CONFIG_SMP
24103+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24104+#else
24105+ if (likely(address > get_limit(regs->cs)))
24106+#endif
24107+ {
24108+ set_pte(pte, pte_mkread(*pte));
24109+ __flush_tlb_one(address);
24110+ pte_unmap_unlock(pte, ptl);
24111+ up_read(&mm->mmap_sem);
24112+ return 1;
24113+ }
24114+
24115+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24116+
24117+ /*
24118+ * PaX: fill DTLB with user rights and retry
24119+ */
24120+ __asm__ __volatile__ (
24121+ "orb %2,(%1)\n"
24122+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24123+/*
24124+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24125+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24126+ * page fault when examined during a TLB load attempt. this is true not only
24127+ * for PTEs holding a non-present entry but also present entries that will
24128+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24129+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24130+ * for our target pages since their PTEs are simply not in the TLBs at all.
24131+
24132+ * the best thing in omitting it is that we gain around 15-20% speed in the
24133+ * fast path of the page fault handler and can get rid of tracing since we
24134+ * can no longer flush unintended entries.
24135+ */
24136+ "invlpg (%0)\n"
24137+#endif
24138+ __copyuser_seg"testb $0,(%0)\n"
24139+ "xorb %3,(%1)\n"
24140+ :
24141+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24142+ : "memory", "cc");
24143+ pte_unmap_unlock(pte, ptl);
24144+ up_read(&mm->mmap_sem);
24145+ return 1;
24146+}
24147+#endif
24148+
24149 /*
24150 * Handle a spurious fault caused by a stale TLB entry.
24151 *
24152@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24153 static inline int
24154 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24155 {
24156+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24157+ return 1;
24158+
24159 if (write) {
24160 /* write, present and write, not present: */
24161 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24162@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24163 {
24164 struct vm_area_struct *vma;
24165 struct task_struct *tsk;
24166- unsigned long address;
24167 struct mm_struct *mm;
24168 int write;
24169 int fault;
24170
24171- tsk = current;
24172- mm = tsk->mm;
24173-
24174 /* Get the faulting address: */
24175- address = read_cr2();
24176+ unsigned long address = read_cr2();
24177+
24178+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24179+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24180+ if (!search_exception_tables(regs->ip)) {
24181+ bad_area_nosemaphore(regs, error_code, address);
24182+ return;
24183+ }
24184+ if (address < PAX_USER_SHADOW_BASE) {
24185+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24186+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24187+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24188+ } else
24189+ address -= PAX_USER_SHADOW_BASE;
24190+ }
24191+#endif
24192+
24193+ tsk = current;
24194+ mm = tsk->mm;
24195
24196 /*
24197 * Detect and handle instructions that would cause a page fault for
24198@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24199 * User-mode registers count as a user access even for any
24200 * potential system fault or CPU buglet:
24201 */
24202- if (user_mode_vm(regs)) {
24203+ if (user_mode(regs)) {
24204 local_irq_enable();
24205 error_code |= PF_USER;
24206 } else {
24207@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24208 might_sleep();
24209 }
24210
24211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24212+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24213+ return;
24214+#endif
24215+
24216 vma = find_vma(mm, address);
24217 if (unlikely(!vma)) {
24218 bad_area(regs, error_code, address);
24219@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24220 bad_area(regs, error_code, address);
24221 return;
24222 }
24223- if (error_code & PF_USER) {
24224- /*
24225- * Accessing the stack below %sp is always a bug.
24226- * The large cushion allows instructions like enter
24227- * and pusha to work. ("enter $65535, $31" pushes
24228- * 32 pointers and then decrements %sp by 65535.)
24229- */
24230- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24231- bad_area(regs, error_code, address);
24232- return;
24233- }
24234+ /*
24235+ * Accessing the stack below %sp is always a bug.
24236+ * The large cushion allows instructions like enter
24237+ * and pusha to work. ("enter $65535, $31" pushes
24238+ * 32 pointers and then decrements %sp by 65535.)
24239+ */
24240+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24241+ bad_area(regs, error_code, address);
24242+ return;
24243 }
24244+
24245+#ifdef CONFIG_PAX_SEGMEXEC
24246+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24247+ bad_area(regs, error_code, address);
24248+ return;
24249+ }
24250+#endif
24251+
24252 if (unlikely(expand_stack(vma, address))) {
24253 bad_area(regs, error_code, address);
24254 return;
24255@@ -1146,3 +1390,240 @@ good_area:
24256
24257 up_read(&mm->mmap_sem);
24258 }
24259+
24260+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24261+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24262+{
24263+ struct mm_struct *mm = current->mm;
24264+ unsigned long ip = regs->ip;
24265+
24266+ if (v8086_mode(regs))
24267+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24268+
24269+#ifdef CONFIG_PAX_PAGEEXEC
24270+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24271+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24272+ return true;
24273+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24274+ return true;
24275+ return false;
24276+ }
24277+#endif
24278+
24279+#ifdef CONFIG_PAX_SEGMEXEC
24280+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24281+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24282+ return true;
24283+ return false;
24284+ }
24285+#endif
24286+
24287+ return false;
24288+}
24289+#endif
24290+
24291+#ifdef CONFIG_PAX_EMUTRAMP
24292+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24293+{
24294+ int err;
24295+
24296+ do { /* PaX: gcc trampoline emulation #1 */
24297+ unsigned char mov1, mov2;
24298+ unsigned short jmp;
24299+ unsigned int addr1, addr2;
24300+
24301+#ifdef CONFIG_X86_64
24302+ if ((regs->ip + 11) >> 32)
24303+ break;
24304+#endif
24305+
24306+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24307+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24308+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24309+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24310+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24311+
24312+ if (err)
24313+ break;
24314+
24315+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24316+ regs->cx = addr1;
24317+ regs->ax = addr2;
24318+ regs->ip = addr2;
24319+ return 2;
24320+ }
24321+ } while (0);
24322+
24323+ do { /* PaX: gcc trampoline emulation #2 */
24324+ unsigned char mov, jmp;
24325+ unsigned int addr1, addr2;
24326+
24327+#ifdef CONFIG_X86_64
24328+ if ((regs->ip + 9) >> 32)
24329+ break;
24330+#endif
24331+
24332+ err = get_user(mov, (unsigned char __user *)regs->ip);
24333+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24334+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24335+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24336+
24337+ if (err)
24338+ break;
24339+
24340+ if (mov == 0xB9 && jmp == 0xE9) {
24341+ regs->cx = addr1;
24342+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24343+ return 2;
24344+ }
24345+ } while (0);
24346+
24347+ return 1; /* PaX in action */
24348+}
24349+
24350+#ifdef CONFIG_X86_64
24351+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24352+{
24353+ int err;
24354+
24355+ do { /* PaX: gcc trampoline emulation #1 */
24356+ unsigned short mov1, mov2, jmp1;
24357+ unsigned char jmp2;
24358+ unsigned int addr1;
24359+ unsigned long addr2;
24360+
24361+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24362+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24363+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24364+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24365+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24366+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24367+
24368+ if (err)
24369+ break;
24370+
24371+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24372+ regs->r11 = addr1;
24373+ regs->r10 = addr2;
24374+ regs->ip = addr1;
24375+ return 2;
24376+ }
24377+ } while (0);
24378+
24379+ do { /* PaX: gcc trampoline emulation #2 */
24380+ unsigned short mov1, mov2, jmp1;
24381+ unsigned char jmp2;
24382+ unsigned long addr1, addr2;
24383+
24384+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24385+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24386+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24387+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24388+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24389+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24390+
24391+ if (err)
24392+ break;
24393+
24394+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24395+ regs->r11 = addr1;
24396+ regs->r10 = addr2;
24397+ regs->ip = addr1;
24398+ return 2;
24399+ }
24400+ } while (0);
24401+
24402+ return 1; /* PaX in action */
24403+}
24404+#endif
24405+
24406+/*
24407+ * PaX: decide what to do with offenders (regs->ip = fault address)
24408+ *
24409+ * returns 1 when task should be killed
24410+ * 2 when gcc trampoline was detected
24411+ */
24412+static int pax_handle_fetch_fault(struct pt_regs *regs)
24413+{
24414+ if (v8086_mode(regs))
24415+ return 1;
24416+
24417+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24418+ return 1;
24419+
24420+#ifdef CONFIG_X86_32
24421+ return pax_handle_fetch_fault_32(regs);
24422+#else
24423+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24424+ return pax_handle_fetch_fault_32(regs);
24425+ else
24426+ return pax_handle_fetch_fault_64(regs);
24427+#endif
24428+}
24429+#endif
24430+
24431+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24432+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24433+{
24434+ long i;
24435+
24436+ printk(KERN_ERR "PAX: bytes at PC: ");
24437+ for (i = 0; i < 20; i++) {
24438+ unsigned char c;
24439+ if (get_user(c, (unsigned char __force_user *)pc+i))
24440+ printk(KERN_CONT "?? ");
24441+ else
24442+ printk(KERN_CONT "%02x ", c);
24443+ }
24444+ printk("\n");
24445+
24446+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24447+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24448+ unsigned long c;
24449+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24450+#ifdef CONFIG_X86_32
24451+ printk(KERN_CONT "???????? ");
24452+#else
24453+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24454+ printk(KERN_CONT "???????? ???????? ");
24455+ else
24456+ printk(KERN_CONT "???????????????? ");
24457+#endif
24458+ } else {
24459+#ifdef CONFIG_X86_64
24460+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24461+ printk(KERN_CONT "%08x ", (unsigned int)c);
24462+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24463+ } else
24464+#endif
24465+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24466+ }
24467+ }
24468+ printk("\n");
24469+}
24470+#endif
24471+
24472+/**
24473+ * probe_kernel_write(): safely attempt to write to a location
24474+ * @dst: address to write to
24475+ * @src: pointer to the data that shall be written
24476+ * @size: size of the data chunk
24477+ *
24478+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24479+ * happens, handle that and return -EFAULT.
24480+ */
24481+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24482+{
24483+ long ret;
24484+ mm_segment_t old_fs = get_fs();
24485+
24486+ set_fs(KERNEL_DS);
24487+ pagefault_disable();
24488+ pax_open_kernel();
24489+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24490+ pax_close_kernel();
24491+ pagefault_enable();
24492+ set_fs(old_fs);
24493+
24494+ return ret ? -EFAULT : 0;
24495+}
24496diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24497index 71da1bc..7a16bf4 100644
24498--- a/arch/x86/mm/gup.c
24499+++ b/arch/x86/mm/gup.c
24500@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24501 addr = start;
24502 len = (unsigned long) nr_pages << PAGE_SHIFT;
24503 end = start + len;
24504- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24505+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24506 (void __user *)start, len)))
24507 return 0;
24508
24509diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24510index 63a6ba6..79abd7a 100644
24511--- a/arch/x86/mm/highmem_32.c
24512+++ b/arch/x86/mm/highmem_32.c
24513@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24514 idx = type + KM_TYPE_NR*smp_processor_id();
24515 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24516 BUG_ON(!pte_none(*(kmap_pte-idx)));
24517+
24518+ pax_open_kernel();
24519 set_pte(kmap_pte-idx, mk_pte(page, prot));
24520+ pax_close_kernel();
24521
24522 return (void *)vaddr;
24523 }
24524diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24525index f46c340..6ff9a26 100644
24526--- a/arch/x86/mm/hugetlbpage.c
24527+++ b/arch/x86/mm/hugetlbpage.c
24528@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24529 struct hstate *h = hstate_file(file);
24530 struct mm_struct *mm = current->mm;
24531 struct vm_area_struct *vma;
24532- unsigned long start_addr;
24533+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24534+
24535+#ifdef CONFIG_PAX_SEGMEXEC
24536+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24537+ pax_task_size = SEGMEXEC_TASK_SIZE;
24538+#endif
24539+
24540+ pax_task_size -= PAGE_SIZE;
24541
24542 if (len > mm->cached_hole_size) {
24543- start_addr = mm->free_area_cache;
24544+ start_addr = mm->free_area_cache;
24545 } else {
24546- start_addr = TASK_UNMAPPED_BASE;
24547- mm->cached_hole_size = 0;
24548+ start_addr = mm->mmap_base;
24549+ mm->cached_hole_size = 0;
24550 }
24551
24552 full_search:
24553@@ -281,26 +288,27 @@ full_search:
24554
24555 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24556 /* At this point: (!vma || addr < vma->vm_end). */
24557- if (TASK_SIZE - len < addr) {
24558+ if (pax_task_size - len < addr) {
24559 /*
24560 * Start a new search - just in case we missed
24561 * some holes.
24562 */
24563- if (start_addr != TASK_UNMAPPED_BASE) {
24564- start_addr = TASK_UNMAPPED_BASE;
24565+ if (start_addr != mm->mmap_base) {
24566+ start_addr = mm->mmap_base;
24567 mm->cached_hole_size = 0;
24568 goto full_search;
24569 }
24570 return -ENOMEM;
24571 }
24572- if (!vma || addr + len <= vma->vm_start) {
24573- mm->free_area_cache = addr + len;
24574- return addr;
24575- }
24576+ if (check_heap_stack_gap(vma, addr, len))
24577+ break;
24578 if (addr + mm->cached_hole_size < vma->vm_start)
24579 mm->cached_hole_size = vma->vm_start - addr;
24580 addr = ALIGN(vma->vm_end, huge_page_size(h));
24581 }
24582+
24583+ mm->free_area_cache = addr + len;
24584+ return addr;
24585 }
24586
24587 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24588@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24589 {
24590 struct hstate *h = hstate_file(file);
24591 struct mm_struct *mm = current->mm;
24592- struct vm_area_struct *vma, *prev_vma;
24593- unsigned long base = mm->mmap_base, addr = addr0;
24594+ struct vm_area_struct *vma;
24595+ unsigned long base = mm->mmap_base, addr;
24596 unsigned long largest_hole = mm->cached_hole_size;
24597- int first_time = 1;
24598
24599 /* don't allow allocations above current base */
24600 if (mm->free_area_cache > base)
24601@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24602 largest_hole = 0;
24603 mm->free_area_cache = base;
24604 }
24605-try_again:
24606+
24607 /* make sure it can fit in the remaining address space */
24608 if (mm->free_area_cache < len)
24609 goto fail;
24610
24611 /* either no address requested or cant fit in requested address hole */
24612- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24613+ addr = (mm->free_area_cache - len);
24614 do {
24615+ addr &= huge_page_mask(h);
24616+ vma = find_vma(mm, addr);
24617 /*
24618 * Lookup failure means no vma is above this address,
24619 * i.e. return with success:
24620- */
24621- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24622- return addr;
24623-
24624- /*
24625 * new region fits between prev_vma->vm_end and
24626 * vma->vm_start, use it:
24627 */
24628- if (addr + len <= vma->vm_start &&
24629- (!prev_vma || (addr >= prev_vma->vm_end))) {
24630+ if (check_heap_stack_gap(vma, addr, len)) {
24631 /* remember the address as a hint for next time */
24632- mm->cached_hole_size = largest_hole;
24633- return (mm->free_area_cache = addr);
24634- } else {
24635- /* pull free_area_cache down to the first hole */
24636- if (mm->free_area_cache == vma->vm_end) {
24637- mm->free_area_cache = vma->vm_start;
24638- mm->cached_hole_size = largest_hole;
24639- }
24640+ mm->cached_hole_size = largest_hole;
24641+ return (mm->free_area_cache = addr);
24642+ }
24643+ /* pull free_area_cache down to the first hole */
24644+ if (mm->free_area_cache == vma->vm_end) {
24645+ mm->free_area_cache = vma->vm_start;
24646+ mm->cached_hole_size = largest_hole;
24647 }
24648
24649 /* remember the largest hole we saw so far */
24650 if (addr + largest_hole < vma->vm_start)
24651- largest_hole = vma->vm_start - addr;
24652+ largest_hole = vma->vm_start - addr;
24653
24654 /* try just below the current vma->vm_start */
24655- addr = (vma->vm_start - len) & huge_page_mask(h);
24656- } while (len <= vma->vm_start);
24657+ addr = skip_heap_stack_gap(vma, len);
24658+ } while (!IS_ERR_VALUE(addr));
24659
24660 fail:
24661 /*
24662- * if hint left us with no space for the requested
24663- * mapping then try again:
24664- */
24665- if (first_time) {
24666- mm->free_area_cache = base;
24667- largest_hole = 0;
24668- first_time = 0;
24669- goto try_again;
24670- }
24671- /*
24672 * A failed mmap() very likely causes application failure,
24673 * so fall back to the bottom-up function here. This scenario
24674 * can happen with large stack limits and large mmap()
24675 * allocations.
24676 */
24677- mm->free_area_cache = TASK_UNMAPPED_BASE;
24678+
24679+#ifdef CONFIG_PAX_SEGMEXEC
24680+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24681+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24682+ else
24683+#endif
24684+
24685+ mm->mmap_base = TASK_UNMAPPED_BASE;
24686+
24687+#ifdef CONFIG_PAX_RANDMMAP
24688+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24689+ mm->mmap_base += mm->delta_mmap;
24690+#endif
24691+
24692+ mm->free_area_cache = mm->mmap_base;
24693 mm->cached_hole_size = ~0UL;
24694 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24695 len, pgoff, flags);
24696@@ -387,6 +393,7 @@ fail:
24697 /*
24698 * Restore the topdown base:
24699 */
24700+ mm->mmap_base = base;
24701 mm->free_area_cache = base;
24702 mm->cached_hole_size = ~0UL;
24703
24704@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24705 struct hstate *h = hstate_file(file);
24706 struct mm_struct *mm = current->mm;
24707 struct vm_area_struct *vma;
24708+ unsigned long pax_task_size = TASK_SIZE;
24709
24710 if (len & ~huge_page_mask(h))
24711 return -EINVAL;
24712- if (len > TASK_SIZE)
24713+
24714+#ifdef CONFIG_PAX_SEGMEXEC
24715+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24716+ pax_task_size = SEGMEXEC_TASK_SIZE;
24717+#endif
24718+
24719+ pax_task_size -= PAGE_SIZE;
24720+
24721+ if (len > pax_task_size)
24722 return -ENOMEM;
24723
24724 if (flags & MAP_FIXED) {
24725@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24726 if (addr) {
24727 addr = ALIGN(addr, huge_page_size(h));
24728 vma = find_vma(mm, addr);
24729- if (TASK_SIZE - len >= addr &&
24730- (!vma || addr + len <= vma->vm_start))
24731+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24732 return addr;
24733 }
24734 if (mm->get_unmapped_area == arch_get_unmapped_area)
24735diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24736index 73ffd55..ad78676 100644
24737--- a/arch/x86/mm/init.c
24738+++ b/arch/x86/mm/init.c
24739@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24740 * cause a hotspot and fill up ZONE_DMA. The page tables
24741 * need roughly 0.5KB per GB.
24742 */
24743-#ifdef CONFIG_X86_32
24744- start = 0x7000;
24745-#else
24746- start = 0x8000;
24747-#endif
24748+ start = 0x100000;
24749 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24750 tables, PAGE_SIZE);
24751 if (e820_table_start == -1UL)
24752@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24753 #endif
24754
24755 set_nx();
24756- if (nx_enabled)
24757+ if (nx_enabled && cpu_has_nx)
24758 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24759
24760 /* Enable PSE if available */
24761@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24762 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24763 * mmio resources as well as potential bios/acpi data regions.
24764 */
24765+
24766 int devmem_is_allowed(unsigned long pagenr)
24767 {
24768+#ifdef CONFIG_GRKERNSEC_KMEM
24769+ /* allow BDA */
24770+ if (!pagenr)
24771+ return 1;
24772+ /* allow EBDA */
24773+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24774+ return 1;
24775+ /* allow ISA/video mem */
24776+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24777+ return 1;
24778+ /* throw out everything else below 1MB */
24779+ if (pagenr <= 256)
24780+ return 0;
24781+#else
24782 if (pagenr <= 256)
24783 return 1;
24784+#endif
24785+
24786 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24787 return 0;
24788 if (!page_is_ram(pagenr))
24789@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24790
24791 void free_initmem(void)
24792 {
24793+
24794+#ifdef CONFIG_PAX_KERNEXEC
24795+#ifdef CONFIG_X86_32
24796+ /* PaX: limit KERNEL_CS to actual size */
24797+ unsigned long addr, limit;
24798+ struct desc_struct d;
24799+ int cpu;
24800+
24801+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24802+ limit = (limit - 1UL) >> PAGE_SHIFT;
24803+
24804+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24805+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24806+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24807+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24808+ }
24809+
24810+ /* PaX: make KERNEL_CS read-only */
24811+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24812+ if (!paravirt_enabled())
24813+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24814+/*
24815+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24816+ pgd = pgd_offset_k(addr);
24817+ pud = pud_offset(pgd, addr);
24818+ pmd = pmd_offset(pud, addr);
24819+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24820+ }
24821+*/
24822+#ifdef CONFIG_X86_PAE
24823+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24824+/*
24825+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24826+ pgd = pgd_offset_k(addr);
24827+ pud = pud_offset(pgd, addr);
24828+ pmd = pmd_offset(pud, addr);
24829+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24830+ }
24831+*/
24832+#endif
24833+
24834+#ifdef CONFIG_MODULES
24835+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24836+#endif
24837+
24838+#else
24839+ pgd_t *pgd;
24840+ pud_t *pud;
24841+ pmd_t *pmd;
24842+ unsigned long addr, end;
24843+
24844+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24845+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24846+ pgd = pgd_offset_k(addr);
24847+ pud = pud_offset(pgd, addr);
24848+ pmd = pmd_offset(pud, addr);
24849+ if (!pmd_present(*pmd))
24850+ continue;
24851+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24852+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24853+ else
24854+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24855+ }
24856+
24857+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24858+ end = addr + KERNEL_IMAGE_SIZE;
24859+ for (; addr < end; addr += PMD_SIZE) {
24860+ pgd = pgd_offset_k(addr);
24861+ pud = pud_offset(pgd, addr);
24862+ pmd = pmd_offset(pud, addr);
24863+ if (!pmd_present(*pmd))
24864+ continue;
24865+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24866+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24867+ }
24868+#endif
24869+
24870+ flush_tlb_all();
24871+#endif
24872+
24873 free_init_pages("unused kernel memory",
24874 (unsigned long)(&__init_begin),
24875 (unsigned long)(&__init_end));
24876diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24877index 30938c1..bda3d5d 100644
24878--- a/arch/x86/mm/init_32.c
24879+++ b/arch/x86/mm/init_32.c
24880@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24881 }
24882
24883 /*
24884- * Creates a middle page table and puts a pointer to it in the
24885- * given global directory entry. This only returns the gd entry
24886- * in non-PAE compilation mode, since the middle layer is folded.
24887- */
24888-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24889-{
24890- pud_t *pud;
24891- pmd_t *pmd_table;
24892-
24893-#ifdef CONFIG_X86_PAE
24894- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24895- if (after_bootmem)
24896- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24897- else
24898- pmd_table = (pmd_t *)alloc_low_page();
24899- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24900- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24901- pud = pud_offset(pgd, 0);
24902- BUG_ON(pmd_table != pmd_offset(pud, 0));
24903-
24904- return pmd_table;
24905- }
24906-#endif
24907- pud = pud_offset(pgd, 0);
24908- pmd_table = pmd_offset(pud, 0);
24909-
24910- return pmd_table;
24911-}
24912-
24913-/*
24914 * Create a page table and place a pointer to it in a middle page
24915 * directory entry:
24916 */
24917@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24918 page_table = (pte_t *)alloc_low_page();
24919
24920 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24921+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24922+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24923+#else
24924 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24925+#endif
24926 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24927 }
24928
24929 return pte_offset_kernel(pmd, 0);
24930 }
24931
24932+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24933+{
24934+ pud_t *pud;
24935+ pmd_t *pmd_table;
24936+
24937+ pud = pud_offset(pgd, 0);
24938+ pmd_table = pmd_offset(pud, 0);
24939+
24940+ return pmd_table;
24941+}
24942+
24943 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24944 {
24945 int pgd_idx = pgd_index(vaddr);
24946@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24947 int pgd_idx, pmd_idx;
24948 unsigned long vaddr;
24949 pgd_t *pgd;
24950+ pud_t *pud;
24951 pmd_t *pmd;
24952 pte_t *pte = NULL;
24953
24954@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24955 pgd = pgd_base + pgd_idx;
24956
24957 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24958- pmd = one_md_table_init(pgd);
24959- pmd = pmd + pmd_index(vaddr);
24960+ pud = pud_offset(pgd, vaddr);
24961+ pmd = pmd_offset(pud, vaddr);
24962+
24963+#ifdef CONFIG_X86_PAE
24964+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24965+#endif
24966+
24967 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24968 pmd++, pmd_idx++) {
24969 pte = page_table_kmap_check(one_page_table_init(pmd),
24970@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24971 }
24972 }
24973
24974-static inline int is_kernel_text(unsigned long addr)
24975+static inline int is_kernel_text(unsigned long start, unsigned long end)
24976 {
24977- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
24978- return 1;
24979- return 0;
24980+ if ((start > ktla_ktva((unsigned long)_etext) ||
24981+ end <= ktla_ktva((unsigned long)_stext)) &&
24982+ (start > ktla_ktva((unsigned long)_einittext) ||
24983+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24984+
24985+#ifdef CONFIG_ACPI_SLEEP
24986+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24987+#endif
24988+
24989+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24990+ return 0;
24991+ return 1;
24992 }
24993
24994 /*
24995@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
24996 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
24997 unsigned long start_pfn, end_pfn;
24998 pgd_t *pgd_base = swapper_pg_dir;
24999- int pgd_idx, pmd_idx, pte_ofs;
25000+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25001 unsigned long pfn;
25002 pgd_t *pgd;
25003+ pud_t *pud;
25004 pmd_t *pmd;
25005 pte_t *pte;
25006 unsigned pages_2m, pages_4k;
25007@@ -278,8 +279,13 @@ repeat:
25008 pfn = start_pfn;
25009 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25010 pgd = pgd_base + pgd_idx;
25011- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25012- pmd = one_md_table_init(pgd);
25013+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25014+ pud = pud_offset(pgd, 0);
25015+ pmd = pmd_offset(pud, 0);
25016+
25017+#ifdef CONFIG_X86_PAE
25018+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25019+#endif
25020
25021 if (pfn >= end_pfn)
25022 continue;
25023@@ -291,14 +297,13 @@ repeat:
25024 #endif
25025 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25026 pmd++, pmd_idx++) {
25027- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25028+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25029
25030 /*
25031 * Map with big pages if possible, otherwise
25032 * create normal page tables:
25033 */
25034 if (use_pse) {
25035- unsigned int addr2;
25036 pgprot_t prot = PAGE_KERNEL_LARGE;
25037 /*
25038 * first pass will use the same initial
25039@@ -308,11 +313,7 @@ repeat:
25040 __pgprot(PTE_IDENT_ATTR |
25041 _PAGE_PSE);
25042
25043- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25044- PAGE_OFFSET + PAGE_SIZE-1;
25045-
25046- if (is_kernel_text(addr) ||
25047- is_kernel_text(addr2))
25048+ if (is_kernel_text(address, address + PMD_SIZE))
25049 prot = PAGE_KERNEL_LARGE_EXEC;
25050
25051 pages_2m++;
25052@@ -329,7 +330,7 @@ repeat:
25053 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25054 pte += pte_ofs;
25055 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25056- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25057+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25058 pgprot_t prot = PAGE_KERNEL;
25059 /*
25060 * first pass will use the same initial
25061@@ -337,7 +338,7 @@ repeat:
25062 */
25063 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25064
25065- if (is_kernel_text(addr))
25066+ if (is_kernel_text(address, address + PAGE_SIZE))
25067 prot = PAGE_KERNEL_EXEC;
25068
25069 pages_4k++;
25070@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25071
25072 pud = pud_offset(pgd, va);
25073 pmd = pmd_offset(pud, va);
25074- if (!pmd_present(*pmd))
25075+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25076 break;
25077
25078 pte = pte_offset_kernel(pmd, va);
25079@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25080
25081 static void __init pagetable_init(void)
25082 {
25083- pgd_t *pgd_base = swapper_pg_dir;
25084-
25085- permanent_kmaps_init(pgd_base);
25086+ permanent_kmaps_init(swapper_pg_dir);
25087 }
25088
25089 #ifdef CONFIG_ACPI_SLEEP
25090@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25091 * ACPI suspend needs this for resume, because things like the intel-agp
25092 * driver might have split up a kernel 4MB mapping.
25093 */
25094-char swsusp_pg_dir[PAGE_SIZE]
25095+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25096 __attribute__ ((aligned(PAGE_SIZE)));
25097
25098 static inline void save_pg_dir(void)
25099 {
25100- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25101+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25102 }
25103 #else /* !CONFIG_ACPI_SLEEP */
25104 static inline void save_pg_dir(void)
25105@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25106 flush_tlb_all();
25107 }
25108
25109-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25110+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25111 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25112
25113 /* user-defined highmem size */
25114@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25115 * Initialize the boot-time allocator (with low memory only):
25116 */
25117 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25118- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25119+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25120 PAGE_SIZE);
25121 if (bootmap == -1L)
25122 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25123@@ -864,6 +863,12 @@ void __init mem_init(void)
25124
25125 pci_iommu_alloc();
25126
25127+#ifdef CONFIG_PAX_PER_CPU_PGD
25128+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25129+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25130+ KERNEL_PGD_PTRS);
25131+#endif
25132+
25133 #ifdef CONFIG_FLATMEM
25134 BUG_ON(!mem_map);
25135 #endif
25136@@ -881,7 +886,7 @@ void __init mem_init(void)
25137 set_highmem_pages_init();
25138
25139 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25140- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25141+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25142 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25143
25144 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25145@@ -923,10 +928,10 @@ void __init mem_init(void)
25146 ((unsigned long)&__init_end -
25147 (unsigned long)&__init_begin) >> 10,
25148
25149- (unsigned long)&_etext, (unsigned long)&_edata,
25150- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25151+ (unsigned long)&_sdata, (unsigned long)&_edata,
25152+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25153
25154- (unsigned long)&_text, (unsigned long)&_etext,
25155+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25156 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25157
25158 /*
25159@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25160 if (!kernel_set_to_readonly)
25161 return;
25162
25163+ start = ktla_ktva(start);
25164 pr_debug("Set kernel text: %lx - %lx for read write\n",
25165 start, start+size);
25166
25167@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25168 if (!kernel_set_to_readonly)
25169 return;
25170
25171+ start = ktla_ktva(start);
25172 pr_debug("Set kernel text: %lx - %lx for read only\n",
25173 start, start+size);
25174
25175@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25176 unsigned long start = PFN_ALIGN(_text);
25177 unsigned long size = PFN_ALIGN(_etext) - start;
25178
25179+ start = ktla_ktva(start);
25180 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25181 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25182 size >> 10);
25183diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25184index 7d095ad..25d2549 100644
25185--- a/arch/x86/mm/init_64.c
25186+++ b/arch/x86/mm/init_64.c
25187@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25188 pmd = fill_pmd(pud, vaddr);
25189 pte = fill_pte(pmd, vaddr);
25190
25191+ pax_open_kernel();
25192 set_pte(pte, new_pte);
25193+ pax_close_kernel();
25194
25195 /*
25196 * It's enough to flush this one mapping.
25197@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25198 pgd = pgd_offset_k((unsigned long)__va(phys));
25199 if (pgd_none(*pgd)) {
25200 pud = (pud_t *) spp_getpage();
25201- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25202- _PAGE_USER));
25203+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25204 }
25205 pud = pud_offset(pgd, (unsigned long)__va(phys));
25206 if (pud_none(*pud)) {
25207 pmd = (pmd_t *) spp_getpage();
25208- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25209- _PAGE_USER));
25210+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25211 }
25212 pmd = pmd_offset(pud, phys);
25213 BUG_ON(!pmd_none(*pmd));
25214@@ -675,6 +675,12 @@ void __init mem_init(void)
25215
25216 pci_iommu_alloc();
25217
25218+#ifdef CONFIG_PAX_PER_CPU_PGD
25219+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25220+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25221+ KERNEL_PGD_PTRS);
25222+#endif
25223+
25224 /* clear_bss() already clear the empty_zero_page */
25225
25226 reservedpages = 0;
25227@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25228 static struct vm_area_struct gate_vma = {
25229 .vm_start = VSYSCALL_START,
25230 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25231- .vm_page_prot = PAGE_READONLY_EXEC,
25232- .vm_flags = VM_READ | VM_EXEC
25233+ .vm_page_prot = PAGE_READONLY,
25234+ .vm_flags = VM_READ
25235 };
25236
25237 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25238@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25239
25240 const char *arch_vma_name(struct vm_area_struct *vma)
25241 {
25242- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25243+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25244 return "[vdso]";
25245 if (vma == &gate_vma)
25246 return "[vsyscall]";
25247diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25248index 84e236c..69bd3f6 100644
25249--- a/arch/x86/mm/iomap_32.c
25250+++ b/arch/x86/mm/iomap_32.c
25251@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25252 debug_kmap_atomic(type);
25253 idx = type + KM_TYPE_NR * smp_processor_id();
25254 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25255+
25256+ pax_open_kernel();
25257 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25258+ pax_close_kernel();
25259+
25260 arch_flush_lazy_mmu_mode();
25261
25262 return (void *)vaddr;
25263diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25264index 2feb9bd..3646202 100644
25265--- a/arch/x86/mm/ioremap.c
25266+++ b/arch/x86/mm/ioremap.c
25267@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25268 * Second special case: Some BIOSen report the PC BIOS
25269 * area (640->1Mb) as ram even though it is not.
25270 */
25271- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25272- pagenr < (BIOS_END >> PAGE_SHIFT))
25273+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25274+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25275 return 0;
25276
25277 for (i = 0; i < e820.nr_map; i++) {
25278@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25279 /*
25280 * Don't allow anybody to remap normal RAM that we're using..
25281 */
25282- for (pfn = phys_addr >> PAGE_SHIFT;
25283- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25284- pfn++) {
25285-
25286+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25287 int is_ram = page_is_ram(pfn);
25288
25289- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25290+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25291 return NULL;
25292 WARN_ON_ONCE(is_ram);
25293 }
25294@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25295 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25296
25297 static __initdata int after_paging_init;
25298-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25299+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25300
25301 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25302 {
25303@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25304 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25305
25306 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25307- memset(bm_pte, 0, sizeof(bm_pte));
25308- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25309+ pmd_populate_user(&init_mm, pmd, bm_pte);
25310
25311 /*
25312 * The boot-ioremap range spans multiple pmds, for which
25313diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25314index 8cc1833..1abbc5b 100644
25315--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25316+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25317@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25318 * memory (e.g. tracked pages)? For now, we need this to avoid
25319 * invoking kmemcheck for PnP BIOS calls.
25320 */
25321- if (regs->flags & X86_VM_MASK)
25322+ if (v8086_mode(regs))
25323 return false;
25324- if (regs->cs != __KERNEL_CS)
25325+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25326 return false;
25327
25328 pte = kmemcheck_pte_lookup(address);
25329diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25330index c8191de..2975082 100644
25331--- a/arch/x86/mm/mmap.c
25332+++ b/arch/x86/mm/mmap.c
25333@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25334 * Leave an at least ~128 MB hole with possible stack randomization.
25335 */
25336 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25337-#define MAX_GAP (TASK_SIZE/6*5)
25338+#define MAX_GAP (pax_task_size/6*5)
25339
25340 /*
25341 * True on X86_32 or when emulating IA32 on X86_64
25342@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25343 return rnd << PAGE_SHIFT;
25344 }
25345
25346-static unsigned long mmap_base(void)
25347+static unsigned long mmap_base(struct mm_struct *mm)
25348 {
25349 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25350+ unsigned long pax_task_size = TASK_SIZE;
25351+
25352+#ifdef CONFIG_PAX_SEGMEXEC
25353+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25354+ pax_task_size = SEGMEXEC_TASK_SIZE;
25355+#endif
25356
25357 if (gap < MIN_GAP)
25358 gap = MIN_GAP;
25359 else if (gap > MAX_GAP)
25360 gap = MAX_GAP;
25361
25362- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25363+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25364 }
25365
25366 /*
25367 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25368 * does, but not when emulating X86_32
25369 */
25370-static unsigned long mmap_legacy_base(void)
25371+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25372 {
25373- if (mmap_is_ia32())
25374+ if (mmap_is_ia32()) {
25375+
25376+#ifdef CONFIG_PAX_SEGMEXEC
25377+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25378+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25379+ else
25380+#endif
25381+
25382 return TASK_UNMAPPED_BASE;
25383- else
25384+ } else
25385 return TASK_UNMAPPED_BASE + mmap_rnd();
25386 }
25387
25388@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25389 void arch_pick_mmap_layout(struct mm_struct *mm)
25390 {
25391 if (mmap_is_legacy()) {
25392- mm->mmap_base = mmap_legacy_base();
25393+ mm->mmap_base = mmap_legacy_base(mm);
25394+
25395+#ifdef CONFIG_PAX_RANDMMAP
25396+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25397+ mm->mmap_base += mm->delta_mmap;
25398+#endif
25399+
25400 mm->get_unmapped_area = arch_get_unmapped_area;
25401 mm->unmap_area = arch_unmap_area;
25402 } else {
25403- mm->mmap_base = mmap_base();
25404+ mm->mmap_base = mmap_base(mm);
25405+
25406+#ifdef CONFIG_PAX_RANDMMAP
25407+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25408+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25409+#endif
25410+
25411 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25412 mm->unmap_area = arch_unmap_area_topdown;
25413 }
25414diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25415index 132772a..b961f11 100644
25416--- a/arch/x86/mm/mmio-mod.c
25417+++ b/arch/x86/mm/mmio-mod.c
25418@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25419 break;
25420 default:
25421 {
25422- unsigned char *ip = (unsigned char *)instptr;
25423+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25424 my_trace->opcode = MMIO_UNKNOWN_OP;
25425 my_trace->width = 0;
25426 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25427@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25428 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25429 void __iomem *addr)
25430 {
25431- static atomic_t next_id;
25432+ static atomic_unchecked_t next_id;
25433 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25434 /* These are page-unaligned. */
25435 struct mmiotrace_map map = {
25436@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25437 .private = trace
25438 },
25439 .phys = offset,
25440- .id = atomic_inc_return(&next_id)
25441+ .id = atomic_inc_return_unchecked(&next_id)
25442 };
25443 map.map_id = trace->id;
25444
25445diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25446index d253006..e56dd6a 100644
25447--- a/arch/x86/mm/numa_32.c
25448+++ b/arch/x86/mm/numa_32.c
25449@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25450 }
25451 #endif
25452
25453-extern unsigned long find_max_low_pfn(void);
25454 extern unsigned long highend_pfn, highstart_pfn;
25455
25456 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25457diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25458index e1d1069..2251ff3 100644
25459--- a/arch/x86/mm/pageattr-test.c
25460+++ b/arch/x86/mm/pageattr-test.c
25461@@ -36,7 +36,7 @@ enum {
25462
25463 static int pte_testbit(pte_t pte)
25464 {
25465- return pte_flags(pte) & _PAGE_UNUSED1;
25466+ return pte_flags(pte) & _PAGE_CPA_TEST;
25467 }
25468
25469 struct split_state {
25470diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25471index dd38bfb..8c12306 100644
25472--- a/arch/x86/mm/pageattr.c
25473+++ b/arch/x86/mm/pageattr.c
25474@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25475 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25476 */
25477 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25478- pgprot_val(forbidden) |= _PAGE_NX;
25479+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25480
25481 /*
25482 * The kernel text needs to be executable for obvious reasons
25483 * Does not cover __inittext since that is gone later on. On
25484 * 64bit we do not enforce !NX on the low mapping
25485 */
25486- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25487- pgprot_val(forbidden) |= _PAGE_NX;
25488+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25489+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25490
25491+#ifdef CONFIG_DEBUG_RODATA
25492 /*
25493 * The .rodata section needs to be read-only. Using the pfn
25494 * catches all aliases.
25495@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25496 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25497 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25498 pgprot_val(forbidden) |= _PAGE_RW;
25499+#endif
25500+
25501+#ifdef CONFIG_PAX_KERNEXEC
25502+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25503+ pgprot_val(forbidden) |= _PAGE_RW;
25504+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25505+ }
25506+#endif
25507
25508 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25509
25510@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25511 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25512 {
25513 /* change init_mm */
25514+ pax_open_kernel();
25515 set_pte_atomic(kpte, pte);
25516+
25517 #ifdef CONFIG_X86_32
25518 if (!SHARED_KERNEL_PMD) {
25519+
25520+#ifdef CONFIG_PAX_PER_CPU_PGD
25521+ unsigned long cpu;
25522+#else
25523 struct page *page;
25524+#endif
25525
25526+#ifdef CONFIG_PAX_PER_CPU_PGD
25527+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25528+ pgd_t *pgd = get_cpu_pgd(cpu);
25529+#else
25530 list_for_each_entry(page, &pgd_list, lru) {
25531- pgd_t *pgd;
25532+ pgd_t *pgd = (pgd_t *)page_address(page);
25533+#endif
25534+
25535 pud_t *pud;
25536 pmd_t *pmd;
25537
25538- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25539+ pgd += pgd_index(address);
25540 pud = pud_offset(pgd, address);
25541 pmd = pmd_offset(pud, address);
25542 set_pte_atomic((pte_t *)pmd, pte);
25543 }
25544 }
25545 #endif
25546+ pax_close_kernel();
25547 }
25548
25549 static int
25550diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25551index e78cd0e..de0a817 100644
25552--- a/arch/x86/mm/pat.c
25553+++ b/arch/x86/mm/pat.c
25554@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25555
25556 conflict:
25557 printk(KERN_INFO "%s:%d conflicting memory types "
25558- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25559+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25560 new->end, cattr_name(new->type), cattr_name(entry->type));
25561 return -EBUSY;
25562 }
25563@@ -559,7 +559,7 @@ unlock_ret:
25564
25565 if (err) {
25566 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25567- current->comm, current->pid, start, end);
25568+ current->comm, task_pid_nr(current), start, end);
25569 }
25570
25571 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25572@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25573 while (cursor < to) {
25574 if (!devmem_is_allowed(pfn)) {
25575 printk(KERN_INFO
25576- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25577- current->comm, from, to);
25578+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25579+ current->comm, from, to, cursor);
25580 return 0;
25581 }
25582 cursor += PAGE_SIZE;
25583@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25584 printk(KERN_INFO
25585 "%s:%d ioremap_change_attr failed %s "
25586 "for %Lx-%Lx\n",
25587- current->comm, current->pid,
25588+ current->comm, task_pid_nr(current),
25589 cattr_name(flags),
25590 base, (unsigned long long)(base + size));
25591 return -EINVAL;
25592@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25593 free_memtype(paddr, paddr + size);
25594 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25595 " for %Lx-%Lx, got %s\n",
25596- current->comm, current->pid,
25597+ current->comm, task_pid_nr(current),
25598 cattr_name(want_flags),
25599 (unsigned long long)paddr,
25600 (unsigned long long)(paddr + size),
25601diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25602index df3d5c8..c2223e1 100644
25603--- a/arch/x86/mm/pf_in.c
25604+++ b/arch/x86/mm/pf_in.c
25605@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25606 int i;
25607 enum reason_type rv = OTHERS;
25608
25609- p = (unsigned char *)ins_addr;
25610+ p = (unsigned char *)ktla_ktva(ins_addr);
25611 p += skip_prefix(p, &prf);
25612 p += get_opcode(p, &opcode);
25613
25614@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25615 struct prefix_bits prf;
25616 int i;
25617
25618- p = (unsigned char *)ins_addr;
25619+ p = (unsigned char *)ktla_ktva(ins_addr);
25620 p += skip_prefix(p, &prf);
25621 p += get_opcode(p, &opcode);
25622
25623@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25624 struct prefix_bits prf;
25625 int i;
25626
25627- p = (unsigned char *)ins_addr;
25628+ p = (unsigned char *)ktla_ktva(ins_addr);
25629 p += skip_prefix(p, &prf);
25630 p += get_opcode(p, &opcode);
25631
25632@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25633 int i;
25634 unsigned long rv;
25635
25636- p = (unsigned char *)ins_addr;
25637+ p = (unsigned char *)ktla_ktva(ins_addr);
25638 p += skip_prefix(p, &prf);
25639 p += get_opcode(p, &opcode);
25640 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25641@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25642 int i;
25643 unsigned long rv;
25644
25645- p = (unsigned char *)ins_addr;
25646+ p = (unsigned char *)ktla_ktva(ins_addr);
25647 p += skip_prefix(p, &prf);
25648 p += get_opcode(p, &opcode);
25649 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25650diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25651index e0e6fad..6b90017 100644
25652--- a/arch/x86/mm/pgtable.c
25653+++ b/arch/x86/mm/pgtable.c
25654@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25655 list_del(&page->lru);
25656 }
25657
25658-#define UNSHARED_PTRS_PER_PGD \
25659- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25660+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25661+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25662
25663+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25664+{
25665+ while (count--)
25666+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25667+}
25668+#endif
25669+
25670+#ifdef CONFIG_PAX_PER_CPU_PGD
25671+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25672+{
25673+ while (count--)
25674+
25675+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25676+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25677+#else
25678+ *dst++ = *src++;
25679+#endif
25680+
25681+}
25682+#endif
25683+
25684+#ifdef CONFIG_X86_64
25685+#define pxd_t pud_t
25686+#define pyd_t pgd_t
25687+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25688+#define pxd_free(mm, pud) pud_free((mm), (pud))
25689+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25690+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25691+#define PYD_SIZE PGDIR_SIZE
25692+#else
25693+#define pxd_t pmd_t
25694+#define pyd_t pud_t
25695+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25696+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25697+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25698+#define pyd_offset(mm ,address) pud_offset((mm), (address))
25699+#define PYD_SIZE PUD_SIZE
25700+#endif
25701+
25702+#ifdef CONFIG_PAX_PER_CPU_PGD
25703+static inline void pgd_ctor(pgd_t *pgd) {}
25704+static inline void pgd_dtor(pgd_t *pgd) {}
25705+#else
25706 static void pgd_ctor(pgd_t *pgd)
25707 {
25708 /* If the pgd points to a shared pagetable level (either the
25709@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25710 pgd_list_del(pgd);
25711 spin_unlock_irqrestore(&pgd_lock, flags);
25712 }
25713+#endif
25714
25715 /*
25716 * List of all pgd's needed for non-PAE so it can invalidate entries
25717@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25718 * -- wli
25719 */
25720
25721-#ifdef CONFIG_X86_PAE
25722+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25723 /*
25724 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25725 * updating the top-level pagetable entries to guarantee the
25726@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25727 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25728 * and initialize the kernel pmds here.
25729 */
25730-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25731+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25732
25733 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25734 {
25735@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25736 */
25737 flush_tlb_mm(mm);
25738 }
25739+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25740+#define PREALLOCATED_PXDS USER_PGD_PTRS
25741 #else /* !CONFIG_X86_PAE */
25742
25743 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25744-#define PREALLOCATED_PMDS 0
25745+#define PREALLOCATED_PXDS 0
25746
25747 #endif /* CONFIG_X86_PAE */
25748
25749-static void free_pmds(pmd_t *pmds[])
25750+static void free_pxds(pxd_t *pxds[])
25751 {
25752 int i;
25753
25754- for(i = 0; i < PREALLOCATED_PMDS; i++)
25755- if (pmds[i])
25756- free_page((unsigned long)pmds[i]);
25757+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25758+ if (pxds[i])
25759+ free_page((unsigned long)pxds[i]);
25760 }
25761
25762-static int preallocate_pmds(pmd_t *pmds[])
25763+static int preallocate_pxds(pxd_t *pxds[])
25764 {
25765 int i;
25766 bool failed = false;
25767
25768- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25769- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25770- if (pmd == NULL)
25771+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25772+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25773+ if (pxd == NULL)
25774 failed = true;
25775- pmds[i] = pmd;
25776+ pxds[i] = pxd;
25777 }
25778
25779 if (failed) {
25780- free_pmds(pmds);
25781+ free_pxds(pxds);
25782 return -ENOMEM;
25783 }
25784
25785@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25786 * preallocate which never got a corresponding vma will need to be
25787 * freed manually.
25788 */
25789-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25790+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25791 {
25792 int i;
25793
25794- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25795+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25796 pgd_t pgd = pgdp[i];
25797
25798 if (pgd_val(pgd) != 0) {
25799- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25800+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25801
25802- pgdp[i] = native_make_pgd(0);
25803+ set_pgd(pgdp + i, native_make_pgd(0));
25804
25805- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25806- pmd_free(mm, pmd);
25807+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25808+ pxd_free(mm, pxd);
25809 }
25810 }
25811 }
25812
25813-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25814+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25815 {
25816- pud_t *pud;
25817+ pyd_t *pyd;
25818 unsigned long addr;
25819 int i;
25820
25821- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25822+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25823 return;
25824
25825- pud = pud_offset(pgd, 0);
25826+#ifdef CONFIG_X86_64
25827+ pyd = pyd_offset(mm, 0L);
25828+#else
25829+ pyd = pyd_offset(pgd, 0L);
25830+#endif
25831
25832- for (addr = i = 0; i < PREALLOCATED_PMDS;
25833- i++, pud++, addr += PUD_SIZE) {
25834- pmd_t *pmd = pmds[i];
25835+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25836+ i++, pyd++, addr += PYD_SIZE) {
25837+ pxd_t *pxd = pxds[i];
25838
25839 if (i >= KERNEL_PGD_BOUNDARY)
25840- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25841- sizeof(pmd_t) * PTRS_PER_PMD);
25842+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25843+ sizeof(pxd_t) * PTRS_PER_PMD);
25844
25845- pud_populate(mm, pud, pmd);
25846+ pyd_populate(mm, pyd, pxd);
25847 }
25848 }
25849
25850 pgd_t *pgd_alloc(struct mm_struct *mm)
25851 {
25852 pgd_t *pgd;
25853- pmd_t *pmds[PREALLOCATED_PMDS];
25854+ pxd_t *pxds[PREALLOCATED_PXDS];
25855+
25856 unsigned long flags;
25857
25858 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25859@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25860
25861 mm->pgd = pgd;
25862
25863- if (preallocate_pmds(pmds) != 0)
25864+ if (preallocate_pxds(pxds) != 0)
25865 goto out_free_pgd;
25866
25867 if (paravirt_pgd_alloc(mm) != 0)
25868- goto out_free_pmds;
25869+ goto out_free_pxds;
25870
25871 /*
25872 * Make sure that pre-populating the pmds is atomic with
25873@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25874 spin_lock_irqsave(&pgd_lock, flags);
25875
25876 pgd_ctor(pgd);
25877- pgd_prepopulate_pmd(mm, pgd, pmds);
25878+ pgd_prepopulate_pxd(mm, pgd, pxds);
25879
25880 spin_unlock_irqrestore(&pgd_lock, flags);
25881
25882 return pgd;
25883
25884-out_free_pmds:
25885- free_pmds(pmds);
25886+out_free_pxds:
25887+ free_pxds(pxds);
25888 out_free_pgd:
25889 free_page((unsigned long)pgd);
25890 out:
25891@@ -287,7 +338,7 @@ out:
25892
25893 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25894 {
25895- pgd_mop_up_pmds(mm, pgd);
25896+ pgd_mop_up_pxds(mm, pgd);
25897 pgd_dtor(pgd);
25898 paravirt_pgd_free(mm, pgd);
25899 free_page((unsigned long)pgd);
25900diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25901index 46c8834..fcab43d 100644
25902--- a/arch/x86/mm/pgtable_32.c
25903+++ b/arch/x86/mm/pgtable_32.c
25904@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25905 return;
25906 }
25907 pte = pte_offset_kernel(pmd, vaddr);
25908+
25909+ pax_open_kernel();
25910 if (pte_val(pteval))
25911 set_pte_at(&init_mm, vaddr, pte, pteval);
25912 else
25913 pte_clear(&init_mm, vaddr, pte);
25914+ pax_close_kernel();
25915
25916 /*
25917 * It's enough to flush this one mapping.
25918diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25919index 513d8ed..978c161 100644
25920--- a/arch/x86/mm/setup_nx.c
25921+++ b/arch/x86/mm/setup_nx.c
25922@@ -4,11 +4,10 @@
25923
25924 #include <asm/pgtable.h>
25925
25926+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25927 int nx_enabled;
25928
25929-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25930-static int disable_nx __cpuinitdata;
25931-
25932+#ifndef CONFIG_PAX_PAGEEXEC
25933 /*
25934 * noexec = on|off
25935 *
25936@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
25937 if (!str)
25938 return -EINVAL;
25939 if (!strncmp(str, "on", 2)) {
25940- __supported_pte_mask |= _PAGE_NX;
25941- disable_nx = 0;
25942+ nx_enabled = 1;
25943 } else if (!strncmp(str, "off", 3)) {
25944- disable_nx = 1;
25945- __supported_pte_mask &= ~_PAGE_NX;
25946+ nx_enabled = 0;
25947 }
25948 return 0;
25949 }
25950 early_param("noexec", noexec_setup);
25951 #endif
25952+#endif
25953
25954 #ifdef CONFIG_X86_PAE
25955 void __init set_nx(void)
25956 {
25957- unsigned int v[4], l, h;
25958+ if (!nx_enabled && cpu_has_nx) {
25959+ unsigned l, h;
25960
25961- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
25962- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
25963-
25964- if ((v[3] & (1 << 20)) && !disable_nx) {
25965- rdmsr(MSR_EFER, l, h);
25966- l |= EFER_NX;
25967- wrmsr(MSR_EFER, l, h);
25968- nx_enabled = 1;
25969- __supported_pte_mask |= _PAGE_NX;
25970- }
25971+ __supported_pte_mask &= ~_PAGE_NX;
25972+ rdmsr(MSR_EFER, l, h);
25973+ l &= ~EFER_NX;
25974+ wrmsr(MSR_EFER, l, h);
25975 }
25976 }
25977 #else
25978@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
25979 unsigned long efer;
25980
25981 rdmsrl(MSR_EFER, efer);
25982- if (!(efer & EFER_NX) || disable_nx)
25983+ if (!(efer & EFER_NX) || !nx_enabled)
25984 __supported_pte_mask &= ~_PAGE_NX;
25985 }
25986 #endif
25987diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25988index 36fe08e..b123d3a 100644
25989--- a/arch/x86/mm/tlb.c
25990+++ b/arch/x86/mm/tlb.c
25991@@ -61,7 +61,11 @@ void leave_mm(int cpu)
25992 BUG();
25993 cpumask_clear_cpu(cpu,
25994 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25995+
25996+#ifndef CONFIG_PAX_PER_CPU_PGD
25997 load_cr3(swapper_pg_dir);
25998+#endif
25999+
26000 }
26001 EXPORT_SYMBOL_GPL(leave_mm);
26002
26003diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26004index 044897b..a195924 100644
26005--- a/arch/x86/oprofile/backtrace.c
26006+++ b/arch/x86/oprofile/backtrace.c
26007@@ -57,7 +57,7 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
26008 struct frame_head bufhead[2];
26009
26010 /* Also check accessibility of one struct frame_head beyond */
26011- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
26012+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
26013 return NULL;
26014 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
26015 return NULL;
26016@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26017 {
26018 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26019
26020- if (!user_mode_vm(regs)) {
26021+ if (!user_mode(regs)) {
26022 unsigned long stack = kernel_stack_pointer(regs);
26023 if (depth)
26024 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26025diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26026index e6a160a..36deff6 100644
26027--- a/arch/x86/oprofile/op_model_p4.c
26028+++ b/arch/x86/oprofile/op_model_p4.c
26029@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26030 #endif
26031 }
26032
26033-static int inline addr_increment(void)
26034+static inline int addr_increment(void)
26035 {
26036 #ifdef CONFIG_SMP
26037 return smp_num_siblings == 2 ? 2 : 1;
26038diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26039index 1331fcf..03901b2 100644
26040--- a/arch/x86/pci/common.c
26041+++ b/arch/x86/pci/common.c
26042@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26043 int pcibios_last_bus = -1;
26044 unsigned long pirq_table_addr;
26045 struct pci_bus *pci_root_bus;
26046-struct pci_raw_ops *raw_pci_ops;
26047-struct pci_raw_ops *raw_pci_ext_ops;
26048+const struct pci_raw_ops *raw_pci_ops;
26049+const struct pci_raw_ops *raw_pci_ext_ops;
26050
26051 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26052 int reg, int len, u32 *val)
26053diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26054index 347d882..4baf6b6 100644
26055--- a/arch/x86/pci/direct.c
26056+++ b/arch/x86/pci/direct.c
26057@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26058
26059 #undef PCI_CONF1_ADDRESS
26060
26061-struct pci_raw_ops pci_direct_conf1 = {
26062+const struct pci_raw_ops pci_direct_conf1 = {
26063 .read = pci_conf1_read,
26064 .write = pci_conf1_write,
26065 };
26066@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26067
26068 #undef PCI_CONF2_ADDRESS
26069
26070-struct pci_raw_ops pci_direct_conf2 = {
26071+const struct pci_raw_ops pci_direct_conf2 = {
26072 .read = pci_conf2_read,
26073 .write = pci_conf2_write,
26074 };
26075@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26076 * This should be close to trivial, but it isn't, because there are buggy
26077 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26078 */
26079-static int __init pci_sanity_check(struct pci_raw_ops *o)
26080+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26081 {
26082 u32 x = 0;
26083 int year, devfn;
26084diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26085index f10a7e9..0425342 100644
26086--- a/arch/x86/pci/mmconfig_32.c
26087+++ b/arch/x86/pci/mmconfig_32.c
26088@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26089 return 0;
26090 }
26091
26092-static struct pci_raw_ops pci_mmcfg = {
26093+static const struct pci_raw_ops pci_mmcfg = {
26094 .read = pci_mmcfg_read,
26095 .write = pci_mmcfg_write,
26096 };
26097diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26098index 94349f8..41600a7 100644
26099--- a/arch/x86/pci/mmconfig_64.c
26100+++ b/arch/x86/pci/mmconfig_64.c
26101@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26102 return 0;
26103 }
26104
26105-static struct pci_raw_ops pci_mmcfg = {
26106+static const struct pci_raw_ops pci_mmcfg = {
26107 .read = pci_mmcfg_read,
26108 .write = pci_mmcfg_write,
26109 };
26110diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26111index 8eb295e..86bd657 100644
26112--- a/arch/x86/pci/numaq_32.c
26113+++ b/arch/x86/pci/numaq_32.c
26114@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26115
26116 #undef PCI_CONF1_MQ_ADDRESS
26117
26118-static struct pci_raw_ops pci_direct_conf1_mq = {
26119+static const struct pci_raw_ops pci_direct_conf1_mq = {
26120 .read = pci_conf1_mq_read,
26121 .write = pci_conf1_mq_write
26122 };
26123diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26124index b889d82..5a58a0a 100644
26125--- a/arch/x86/pci/olpc.c
26126+++ b/arch/x86/pci/olpc.c
26127@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26128 return 0;
26129 }
26130
26131-static struct pci_raw_ops pci_olpc_conf = {
26132+static const struct pci_raw_ops pci_olpc_conf = {
26133 .read = pci_olpc_read,
26134 .write = pci_olpc_write,
26135 };
26136diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26137index 1c975cc..ffd0536 100644
26138--- a/arch/x86/pci/pcbios.c
26139+++ b/arch/x86/pci/pcbios.c
26140@@ -56,50 +56,93 @@ union bios32 {
26141 static struct {
26142 unsigned long address;
26143 unsigned short segment;
26144-} bios32_indirect = { 0, __KERNEL_CS };
26145+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26146
26147 /*
26148 * Returns the entry point for the given service, NULL on error
26149 */
26150
26151-static unsigned long bios32_service(unsigned long service)
26152+static unsigned long __devinit bios32_service(unsigned long service)
26153 {
26154 unsigned char return_code; /* %al */
26155 unsigned long address; /* %ebx */
26156 unsigned long length; /* %ecx */
26157 unsigned long entry; /* %edx */
26158 unsigned long flags;
26159+ struct desc_struct d, *gdt;
26160
26161 local_irq_save(flags);
26162- __asm__("lcall *(%%edi); cld"
26163+
26164+ gdt = get_cpu_gdt_table(smp_processor_id());
26165+
26166+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26167+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26168+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26169+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26170+
26171+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26172 : "=a" (return_code),
26173 "=b" (address),
26174 "=c" (length),
26175 "=d" (entry)
26176 : "0" (service),
26177 "1" (0),
26178- "D" (&bios32_indirect));
26179+ "D" (&bios32_indirect),
26180+ "r"(__PCIBIOS_DS)
26181+ : "memory");
26182+
26183+ pax_open_kernel();
26184+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26185+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26186+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26187+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26188+ pax_close_kernel();
26189+
26190 local_irq_restore(flags);
26191
26192 switch (return_code) {
26193- case 0:
26194- return address + entry;
26195- case 0x80: /* Not present */
26196- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26197- return 0;
26198- default: /* Shouldn't happen */
26199- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26200- service, return_code);
26201+ case 0: {
26202+ int cpu;
26203+ unsigned char flags;
26204+
26205+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26206+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26207+ printk(KERN_WARNING "bios32_service: not valid\n");
26208 return 0;
26209+ }
26210+ address = address + PAGE_OFFSET;
26211+ length += 16UL; /* some BIOSs underreport this... */
26212+ flags = 4;
26213+ if (length >= 64*1024*1024) {
26214+ length >>= PAGE_SHIFT;
26215+ flags |= 8;
26216+ }
26217+
26218+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
26219+ gdt = get_cpu_gdt_table(cpu);
26220+ pack_descriptor(&d, address, length, 0x9b, flags);
26221+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26222+ pack_descriptor(&d, address, length, 0x93, flags);
26223+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26224+ }
26225+ return entry;
26226+ }
26227+ case 0x80: /* Not present */
26228+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26229+ return 0;
26230+ default: /* Shouldn't happen */
26231+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26232+ service, return_code);
26233+ return 0;
26234 }
26235 }
26236
26237 static struct {
26238 unsigned long address;
26239 unsigned short segment;
26240-} pci_indirect = { 0, __KERNEL_CS };
26241+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26242
26243-static int pci_bios_present;
26244+static int pci_bios_present __read_only;
26245
26246 static int __devinit check_pcibios(void)
26247 {
26248@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26249 unsigned long flags, pcibios_entry;
26250
26251 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26252- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26253+ pci_indirect.address = pcibios_entry;
26254
26255 local_irq_save(flags);
26256- __asm__(
26257- "lcall *(%%edi); cld\n\t"
26258+ __asm__("movw %w6, %%ds\n\t"
26259+ "lcall *%%ss:(%%edi); cld\n\t"
26260+ "push %%ss\n\t"
26261+ "pop %%ds\n\t"
26262 "jc 1f\n\t"
26263 "xor %%ah, %%ah\n"
26264 "1:"
26265@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26266 "=b" (ebx),
26267 "=c" (ecx)
26268 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26269- "D" (&pci_indirect)
26270+ "D" (&pci_indirect),
26271+ "r" (__PCIBIOS_DS)
26272 : "memory");
26273 local_irq_restore(flags);
26274
26275@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26276
26277 switch (len) {
26278 case 1:
26279- __asm__("lcall *(%%esi); cld\n\t"
26280+ __asm__("movw %w6, %%ds\n\t"
26281+ "lcall *%%ss:(%%esi); cld\n\t"
26282+ "push %%ss\n\t"
26283+ "pop %%ds\n\t"
26284 "jc 1f\n\t"
26285 "xor %%ah, %%ah\n"
26286 "1:"
26287@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26288 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26289 "b" (bx),
26290 "D" ((long)reg),
26291- "S" (&pci_indirect));
26292+ "S" (&pci_indirect),
26293+ "r" (__PCIBIOS_DS));
26294 /*
26295 * Zero-extend the result beyond 8 bits, do not trust the
26296 * BIOS having done it:
26297@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26298 *value &= 0xff;
26299 break;
26300 case 2:
26301- __asm__("lcall *(%%esi); cld\n\t"
26302+ __asm__("movw %w6, %%ds\n\t"
26303+ "lcall *%%ss:(%%esi); cld\n\t"
26304+ "push %%ss\n\t"
26305+ "pop %%ds\n\t"
26306 "jc 1f\n\t"
26307 "xor %%ah, %%ah\n"
26308 "1:"
26309@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26310 : "1" (PCIBIOS_READ_CONFIG_WORD),
26311 "b" (bx),
26312 "D" ((long)reg),
26313- "S" (&pci_indirect));
26314+ "S" (&pci_indirect),
26315+ "r" (__PCIBIOS_DS));
26316 /*
26317 * Zero-extend the result beyond 16 bits, do not trust the
26318 * BIOS having done it:
26319@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26320 *value &= 0xffff;
26321 break;
26322 case 4:
26323- __asm__("lcall *(%%esi); cld\n\t"
26324+ __asm__("movw %w6, %%ds\n\t"
26325+ "lcall *%%ss:(%%esi); cld\n\t"
26326+ "push %%ss\n\t"
26327+ "pop %%ds\n\t"
26328 "jc 1f\n\t"
26329 "xor %%ah, %%ah\n"
26330 "1:"
26331@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26332 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26333 "b" (bx),
26334 "D" ((long)reg),
26335- "S" (&pci_indirect));
26336+ "S" (&pci_indirect),
26337+ "r" (__PCIBIOS_DS));
26338 break;
26339 }
26340
26341@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26342
26343 switch (len) {
26344 case 1:
26345- __asm__("lcall *(%%esi); cld\n\t"
26346+ __asm__("movw %w6, %%ds\n\t"
26347+ "lcall *%%ss:(%%esi); cld\n\t"
26348+ "push %%ss\n\t"
26349+ "pop %%ds\n\t"
26350 "jc 1f\n\t"
26351 "xor %%ah, %%ah\n"
26352 "1:"
26353@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26354 "c" (value),
26355 "b" (bx),
26356 "D" ((long)reg),
26357- "S" (&pci_indirect));
26358+ "S" (&pci_indirect),
26359+ "r" (__PCIBIOS_DS));
26360 break;
26361 case 2:
26362- __asm__("lcall *(%%esi); cld\n\t"
26363+ __asm__("movw %w6, %%ds\n\t"
26364+ "lcall *%%ss:(%%esi); cld\n\t"
26365+ "push %%ss\n\t"
26366+ "pop %%ds\n\t"
26367 "jc 1f\n\t"
26368 "xor %%ah, %%ah\n"
26369 "1:"
26370@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26371 "c" (value),
26372 "b" (bx),
26373 "D" ((long)reg),
26374- "S" (&pci_indirect));
26375+ "S" (&pci_indirect),
26376+ "r" (__PCIBIOS_DS));
26377 break;
26378 case 4:
26379- __asm__("lcall *(%%esi); cld\n\t"
26380+ __asm__("movw %w6, %%ds\n\t"
26381+ "lcall *%%ss:(%%esi); cld\n\t"
26382+ "push %%ss\n\t"
26383+ "pop %%ds\n\t"
26384 "jc 1f\n\t"
26385 "xor %%ah, %%ah\n"
26386 "1:"
26387@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26388 "c" (value),
26389 "b" (bx),
26390 "D" ((long)reg),
26391- "S" (&pci_indirect));
26392+ "S" (&pci_indirect),
26393+ "r" (__PCIBIOS_DS));
26394 break;
26395 }
26396
26397@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26398 * Function table for BIOS32 access
26399 */
26400
26401-static struct pci_raw_ops pci_bios_access = {
26402+static const struct pci_raw_ops pci_bios_access = {
26403 .read = pci_bios_read,
26404 .write = pci_bios_write
26405 };
26406@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26407 * Try to find PCI BIOS.
26408 */
26409
26410-static struct pci_raw_ops * __devinit pci_find_bios(void)
26411+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26412 {
26413 union bios32 *check;
26414 unsigned char sum;
26415@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26416
26417 DBG("PCI: Fetching IRQ routing table... ");
26418 __asm__("push %%es\n\t"
26419+ "movw %w8, %%ds\n\t"
26420 "push %%ds\n\t"
26421 "pop %%es\n\t"
26422- "lcall *(%%esi); cld\n\t"
26423+ "lcall *%%ss:(%%esi); cld\n\t"
26424 "pop %%es\n\t"
26425+ "push %%ss\n\t"
26426+ "pop %%ds\n"
26427 "jc 1f\n\t"
26428 "xor %%ah, %%ah\n"
26429 "1:"
26430@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26431 "1" (0),
26432 "D" ((long) &opt),
26433 "S" (&pci_indirect),
26434- "m" (opt)
26435+ "m" (opt),
26436+ "r" (__PCIBIOS_DS)
26437 : "memory");
26438 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26439 if (ret & 0xff00)
26440@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26441 {
26442 int ret;
26443
26444- __asm__("lcall *(%%esi); cld\n\t"
26445+ __asm__("movw %w5, %%ds\n\t"
26446+ "lcall *%%ss:(%%esi); cld\n\t"
26447+ "push %%ss\n\t"
26448+ "pop %%ds\n"
26449 "jc 1f\n\t"
26450 "xor %%ah, %%ah\n"
26451 "1:"
26452@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26453 : "0" (PCIBIOS_SET_PCI_HW_INT),
26454 "b" ((dev->bus->number << 8) | dev->devfn),
26455 "c" ((irq << 8) | (pin + 10)),
26456- "S" (&pci_indirect));
26457+ "S" (&pci_indirect),
26458+ "r" (__PCIBIOS_DS));
26459 return !(ret & 0xff00);
26460 }
26461 EXPORT_SYMBOL(pcibios_set_irq_routing);
26462diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26463index fa0f651..9d8f3d9 100644
26464--- a/arch/x86/power/cpu.c
26465+++ b/arch/x86/power/cpu.c
26466@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26467 static void fix_processor_context(void)
26468 {
26469 int cpu = smp_processor_id();
26470- struct tss_struct *t = &per_cpu(init_tss, cpu);
26471+ struct tss_struct *t = init_tss + cpu;
26472
26473 set_tss_desc(cpu, t); /*
26474 * This just modifies memory; should not be
26475@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26476 */
26477
26478 #ifdef CONFIG_X86_64
26479+ pax_open_kernel();
26480 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26481+ pax_close_kernel();
26482
26483 syscall_init(); /* This sets MSR_*STAR and related */
26484 #endif
26485diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26486index dd78ef6..f9d928d 100644
26487--- a/arch/x86/vdso/Makefile
26488+++ b/arch/x86/vdso/Makefile
26489@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26490 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26491 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26492
26493-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26494+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26495 GCOV_PROFILE := n
26496
26497 #
26498diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26499index ee55754..0013b2e 100644
26500--- a/arch/x86/vdso/vclock_gettime.c
26501+++ b/arch/x86/vdso/vclock_gettime.c
26502@@ -22,24 +22,48 @@
26503 #include <asm/hpet.h>
26504 #include <asm/unistd.h>
26505 #include <asm/io.h>
26506+#include <asm/fixmap.h>
26507 #include "vextern.h"
26508
26509 #define gtod vdso_vsyscall_gtod_data
26510
26511+notrace noinline long __vdso_fallback_time(long *t)
26512+{
26513+ long secs;
26514+ asm volatile("syscall"
26515+ : "=a" (secs)
26516+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26517+ return secs;
26518+}
26519+
26520 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26521 {
26522 long ret;
26523 asm("syscall" : "=a" (ret) :
26524- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26525+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26526 return ret;
26527 }
26528
26529+notrace static inline cycle_t __vdso_vread_hpet(void)
26530+{
26531+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26532+}
26533+
26534+notrace static inline cycle_t __vdso_vread_tsc(void)
26535+{
26536+ cycle_t ret = (cycle_t)vget_cycles();
26537+
26538+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26539+}
26540+
26541 notrace static inline long vgetns(void)
26542 {
26543 long v;
26544- cycles_t (*vread)(void);
26545- vread = gtod->clock.vread;
26546- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26547+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26548+ v = __vdso_vread_tsc();
26549+ else
26550+ v = __vdso_vread_hpet();
26551+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26552 return (v * gtod->clock.mult) >> gtod->clock.shift;
26553 }
26554
26555@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26556
26557 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26558 {
26559- if (likely(gtod->sysctl_enabled))
26560+ if (likely(gtod->sysctl_enabled &&
26561+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26562+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26563 switch (clock) {
26564 case CLOCK_REALTIME:
26565 if (likely(gtod->clock.vread))
26566@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26567 int clock_gettime(clockid_t, struct timespec *)
26568 __attribute__((weak, alias("__vdso_clock_gettime")));
26569
26570+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26571+{
26572+ long ret;
26573+ asm("syscall" : "=a" (ret) :
26574+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26575+ return ret;
26576+}
26577+
26578 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26579 {
26580- long ret;
26581- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26582+ if (likely(gtod->sysctl_enabled &&
26583+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26584+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26585+ {
26586 if (likely(tv != NULL)) {
26587 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26588 offsetof(struct timespec, tv_nsec) ||
26589@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26590 }
26591 return 0;
26592 }
26593- asm("syscall" : "=a" (ret) :
26594- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26595- return ret;
26596+ return __vdso_fallback_gettimeofday(tv, tz);
26597 }
26598 int gettimeofday(struct timeval *, struct timezone *)
26599 __attribute__((weak, alias("__vdso_gettimeofday")));
26600diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26601index 4e5dd3b..00ba15e 100644
26602--- a/arch/x86/vdso/vdso.lds.S
26603+++ b/arch/x86/vdso/vdso.lds.S
26604@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26605 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26606 #include "vextern.h"
26607 #undef VEXTERN
26608+
26609+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26610+VEXTERN(fallback_gettimeofday)
26611+VEXTERN(fallback_time)
26612+VEXTERN(getcpu)
26613+#undef VEXTERN
26614diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26615index 58bc00f..d53fb48 100644
26616--- a/arch/x86/vdso/vdso32-setup.c
26617+++ b/arch/x86/vdso/vdso32-setup.c
26618@@ -25,6 +25,7 @@
26619 #include <asm/tlbflush.h>
26620 #include <asm/vdso.h>
26621 #include <asm/proto.h>
26622+#include <asm/mman.h>
26623
26624 enum {
26625 VDSO_DISABLED = 0,
26626@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26627 void enable_sep_cpu(void)
26628 {
26629 int cpu = get_cpu();
26630- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26631+ struct tss_struct *tss = init_tss + cpu;
26632
26633 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26634 put_cpu();
26635@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26636 gate_vma.vm_start = FIXADDR_USER_START;
26637 gate_vma.vm_end = FIXADDR_USER_END;
26638 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26639- gate_vma.vm_page_prot = __P101;
26640+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26641 /*
26642 * Make sure the vDSO gets into every core dump.
26643 * Dumping its contents makes post-mortem fully interpretable later
26644@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26645 if (compat)
26646 addr = VDSO_HIGH_BASE;
26647 else {
26648- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26649+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26650 if (IS_ERR_VALUE(addr)) {
26651 ret = addr;
26652 goto up_fail;
26653 }
26654 }
26655
26656- current->mm->context.vdso = (void *)addr;
26657+ current->mm->context.vdso = addr;
26658
26659 if (compat_uses_vma || !compat) {
26660 /*
26661@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26662 }
26663
26664 current_thread_info()->sysenter_return =
26665- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26666+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26667
26668 up_fail:
26669 if (ret)
26670- current->mm->context.vdso = NULL;
26671+ current->mm->context.vdso = 0;
26672
26673 up_write(&mm->mmap_sem);
26674
26675@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26676
26677 const char *arch_vma_name(struct vm_area_struct *vma)
26678 {
26679- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26680+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26681 return "[vdso]";
26682+
26683+#ifdef CONFIG_PAX_SEGMEXEC
26684+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26685+ return "[vdso]";
26686+#endif
26687+
26688 return NULL;
26689 }
26690
26691@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26692 struct mm_struct *mm = tsk->mm;
26693
26694 /* Check to see if this task was created in compat vdso mode */
26695- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26696+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26697 return &gate_vma;
26698 return NULL;
26699 }
26700diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26701index 1683ba2..48d07f3 100644
26702--- a/arch/x86/vdso/vextern.h
26703+++ b/arch/x86/vdso/vextern.h
26704@@ -11,6 +11,5 @@
26705 put into vextern.h and be referenced as a pointer with vdso prefix.
26706 The main kernel later fills in the values. */
26707
26708-VEXTERN(jiffies)
26709 VEXTERN(vgetcpu_mode)
26710 VEXTERN(vsyscall_gtod_data)
26711diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26712index 21e1aeb..2c0b3c4 100644
26713--- a/arch/x86/vdso/vma.c
26714+++ b/arch/x86/vdso/vma.c
26715@@ -17,8 +17,6 @@
26716 #include "vextern.h" /* Just for VMAGIC. */
26717 #undef VEXTERN
26718
26719-unsigned int __read_mostly vdso_enabled = 1;
26720-
26721 extern char vdso_start[], vdso_end[];
26722 extern unsigned short vdso_sync_cpuid;
26723
26724@@ -27,10 +25,8 @@ static unsigned vdso_size;
26725
26726 static inline void *var_ref(void *p, char *name)
26727 {
26728- if (*(void **)p != (void *)VMAGIC) {
26729- printk("VDSO: variable %s broken\n", name);
26730- vdso_enabled = 0;
26731- }
26732+ if (*(void **)p != (void *)VMAGIC)
26733+ panic("VDSO: variable %s broken\n", name);
26734 return p;
26735 }
26736
26737@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26738 if (!vbase)
26739 goto oom;
26740
26741- if (memcmp(vbase, "\177ELF", 4)) {
26742- printk("VDSO: I'm broken; not ELF\n");
26743- vdso_enabled = 0;
26744- }
26745+ if (memcmp(vbase, ELFMAG, SELFMAG))
26746+ panic("VDSO: I'm broken; not ELF\n");
26747
26748 #define VEXTERN(x) \
26749 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26750 #include "vextern.h"
26751 #undef VEXTERN
26752+ vunmap(vbase);
26753 return 0;
26754
26755 oom:
26756- printk("Cannot allocate vdso\n");
26757- vdso_enabled = 0;
26758- return -ENOMEM;
26759+ panic("Cannot allocate vdso\n");
26760 }
26761 __initcall(init_vdso_vars);
26762
26763@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26764 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26765 {
26766 struct mm_struct *mm = current->mm;
26767- unsigned long addr;
26768+ unsigned long addr = 0;
26769 int ret;
26770
26771- if (!vdso_enabled)
26772- return 0;
26773-
26774 down_write(&mm->mmap_sem);
26775+
26776+#ifdef CONFIG_PAX_RANDMMAP
26777+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26778+#endif
26779+
26780 addr = vdso_addr(mm->start_stack, vdso_size);
26781 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26782 if (IS_ERR_VALUE(addr)) {
26783@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26784 goto up_fail;
26785 }
26786
26787- current->mm->context.vdso = (void *)addr;
26788+ current->mm->context.vdso = addr;
26789
26790 ret = install_special_mapping(mm, addr, vdso_size,
26791 VM_READ|VM_EXEC|
26792@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26793 VM_ALWAYSDUMP,
26794 vdso_pages);
26795 if (ret) {
26796- current->mm->context.vdso = NULL;
26797+ current->mm->context.vdso = 0;
26798 goto up_fail;
26799 }
26800
26801@@ -132,10 +127,3 @@ up_fail:
26802 up_write(&mm->mmap_sem);
26803 return ret;
26804 }
26805-
26806-static __init int vdso_setup(char *s)
26807-{
26808- vdso_enabled = simple_strtoul(s, NULL, 0);
26809- return 0;
26810-}
26811-__setup("vdso=", vdso_setup);
26812diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26813index 0087b00..eecb34f 100644
26814--- a/arch/x86/xen/enlighten.c
26815+++ b/arch/x86/xen/enlighten.c
26816@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26817
26818 struct shared_info xen_dummy_shared_info;
26819
26820-void *xen_initial_gdt;
26821-
26822 /*
26823 * Point at some empty memory to start with. We map the real shared_info
26824 * page as soon as fixmap is up and running.
26825@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26826
26827 preempt_disable();
26828
26829- start = __get_cpu_var(idt_desc).address;
26830+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26831 end = start + __get_cpu_var(idt_desc).size + 1;
26832
26833 xen_mc_flush();
26834@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26835 #endif
26836 };
26837
26838-static void xen_reboot(int reason)
26839+static __noreturn void xen_reboot(int reason)
26840 {
26841 struct sched_shutdown r = { .reason = reason };
26842
26843@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26844 BUG();
26845 }
26846
26847-static void xen_restart(char *msg)
26848+static __noreturn void xen_restart(char *msg)
26849 {
26850 xen_reboot(SHUTDOWN_reboot);
26851 }
26852
26853-static void xen_emergency_restart(void)
26854+static __noreturn void xen_emergency_restart(void)
26855 {
26856 xen_reboot(SHUTDOWN_reboot);
26857 }
26858
26859-static void xen_machine_halt(void)
26860+static __noreturn void xen_machine_halt(void)
26861 {
26862 xen_reboot(SHUTDOWN_poweroff);
26863 }
26864@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26865 */
26866 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26867
26868-#ifdef CONFIG_X86_64
26869 /* Work out if we support NX */
26870- check_efer();
26871+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26872+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26873+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26874+ unsigned l, h;
26875+
26876+#ifdef CONFIG_X86_PAE
26877+ nx_enabled = 1;
26878+#endif
26879+ __supported_pte_mask |= _PAGE_NX;
26880+ rdmsr(MSR_EFER, l, h);
26881+ l |= EFER_NX;
26882+ wrmsr(MSR_EFER, l, h);
26883+ }
26884 #endif
26885
26886 xen_setup_features();
26887@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26888
26889 machine_ops = xen_machine_ops;
26890
26891- /*
26892- * The only reliable way to retain the initial address of the
26893- * percpu gdt_page is to remember it here, so we can go and
26894- * mark it RW later, when the initial percpu area is freed.
26895- */
26896- xen_initial_gdt = &per_cpu(gdt_page, 0);
26897-
26898 xen_smp_init();
26899
26900 pgd = (pgd_t *)xen_start_info->pt_base;
26901diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26902index 3f90a2c..ee0d992 100644
26903--- a/arch/x86/xen/mmu.c
26904+++ b/arch/x86/xen/mmu.c
26905@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26906 convert_pfn_mfn(init_level4_pgt);
26907 convert_pfn_mfn(level3_ident_pgt);
26908 convert_pfn_mfn(level3_kernel_pgt);
26909+ convert_pfn_mfn(level3_vmalloc_pgt);
26910+ convert_pfn_mfn(level3_vmemmap_pgt);
26911
26912 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26913 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26914@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26915 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26916 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26917 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26918+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
26919+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26920 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26921+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26922 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26923 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26924
26925@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_init(void)
26926 pv_mmu_ops.set_pud = xen_set_pud;
26927 #if PAGETABLE_LEVELS == 4
26928 pv_mmu_ops.set_pgd = xen_set_pgd;
26929+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26930 #endif
26931
26932 /* This will work as long as patching hasn't happened yet
26933@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
26934 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26935 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26936 .set_pgd = xen_set_pgd_hyper,
26937+ .set_pgd_batched = xen_set_pgd_hyper,
26938
26939 .alloc_pud = xen_alloc_pmd_init,
26940 .release_pud = xen_release_pmd_init,
26941diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26942index a96204a..fca9b8e 100644
26943--- a/arch/x86/xen/smp.c
26944+++ b/arch/x86/xen/smp.c
26945@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26946 {
26947 BUG_ON(smp_processor_id() != 0);
26948 native_smp_prepare_boot_cpu();
26949-
26950- /* We've switched to the "real" per-cpu gdt, so make sure the
26951- old memory can be recycled */
26952- make_lowmem_page_readwrite(xen_initial_gdt);
26953-
26954 xen_setup_vcpu_info_placement();
26955 }
26956
26957@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26958 gdt = get_cpu_gdt_table(cpu);
26959
26960 ctxt->flags = VGCF_IN_KERNEL;
26961- ctxt->user_regs.ds = __USER_DS;
26962- ctxt->user_regs.es = __USER_DS;
26963+ ctxt->user_regs.ds = __KERNEL_DS;
26964+ ctxt->user_regs.es = __KERNEL_DS;
26965 ctxt->user_regs.ss = __KERNEL_DS;
26966 #ifdef CONFIG_X86_32
26967 ctxt->user_regs.fs = __KERNEL_PERCPU;
26968- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26969+ savesegment(gs, ctxt->user_regs.gs);
26970 #else
26971 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26972 #endif
26973@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26974 int rc;
26975
26976 per_cpu(current_task, cpu) = idle;
26977+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26978 #ifdef CONFIG_X86_32
26979 irq_ctx_init(cpu);
26980 #else
26981 clear_tsk_thread_flag(idle, TIF_FORK);
26982- per_cpu(kernel_stack, cpu) =
26983- (unsigned long)task_stack_page(idle) -
26984- KERNEL_STACK_OFFSET + THREAD_SIZE;
26985+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26986 #endif
26987 xen_setup_runstate_info(cpu);
26988 xen_setup_timer(cpu);
26989diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26990index 9a95a9c..4f39e774 100644
26991--- a/arch/x86/xen/xen-asm_32.S
26992+++ b/arch/x86/xen/xen-asm_32.S
26993@@ -83,14 +83,14 @@ ENTRY(xen_iret)
26994 ESP_OFFSET=4 # bytes pushed onto stack
26995
26996 /*
26997- * Store vcpu_info pointer for easy access. Do it this way to
26998- * avoid having to reload %fs
26999+ * Store vcpu_info pointer for easy access.
27000 */
27001 #ifdef CONFIG_SMP
27002- GET_THREAD_INFO(%eax)
27003- movl TI_cpu(%eax), %eax
27004- movl __per_cpu_offset(,%eax,4), %eax
27005- mov per_cpu__xen_vcpu(%eax), %eax
27006+ push %fs
27007+ mov $(__KERNEL_PERCPU), %eax
27008+ mov %eax, %fs
27009+ mov PER_CPU_VAR(xen_vcpu), %eax
27010+ pop %fs
27011 #else
27012 movl per_cpu__xen_vcpu, %eax
27013 #endif
27014diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27015index 1a5ff24..a187d40 100644
27016--- a/arch/x86/xen/xen-head.S
27017+++ b/arch/x86/xen/xen-head.S
27018@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27019 #ifdef CONFIG_X86_32
27020 mov %esi,xen_start_info
27021 mov $init_thread_union+THREAD_SIZE,%esp
27022+#ifdef CONFIG_SMP
27023+ movl $cpu_gdt_table,%edi
27024+ movl $__per_cpu_load,%eax
27025+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27026+ rorl $16,%eax
27027+ movb %al,__KERNEL_PERCPU + 4(%edi)
27028+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27029+ movl $__per_cpu_end - 1,%eax
27030+ subl $__per_cpu_start,%eax
27031+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27032+#endif
27033 #else
27034 mov %rsi,xen_start_info
27035 mov $init_thread_union+THREAD_SIZE,%rsp
27036diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27037index f9153a3..51eab3d 100644
27038--- a/arch/x86/xen/xen-ops.h
27039+++ b/arch/x86/xen/xen-ops.h
27040@@ -10,8 +10,6 @@
27041 extern const char xen_hypervisor_callback[];
27042 extern const char xen_failsafe_callback[];
27043
27044-extern void *xen_initial_gdt;
27045-
27046 struct trap_info;
27047 void xen_copy_trap_info(struct trap_info *traps);
27048
27049diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27050index 15c6308..96e83c2 100644
27051--- a/block/blk-integrity.c
27052+++ b/block/blk-integrity.c
27053@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27054 NULL,
27055 };
27056
27057-static struct sysfs_ops integrity_ops = {
27058+static const struct sysfs_ops integrity_ops = {
27059 .show = &integrity_attr_show,
27060 .store = &integrity_attr_store,
27061 };
27062diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27063index ca56420..f2fc409 100644
27064--- a/block/blk-iopoll.c
27065+++ b/block/blk-iopoll.c
27066@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27067 }
27068 EXPORT_SYMBOL(blk_iopoll_complete);
27069
27070-static void blk_iopoll_softirq(struct softirq_action *h)
27071+static void blk_iopoll_softirq(void)
27072 {
27073 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27074 int rearm = 0, budget = blk_iopoll_budget;
27075diff --git a/block/blk-map.c b/block/blk-map.c
27076index 30a7e51..0aeec6a 100644
27077--- a/block/blk-map.c
27078+++ b/block/blk-map.c
27079@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27080 * direct dma. else, set up kernel bounce buffers
27081 */
27082 uaddr = (unsigned long) ubuf;
27083- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27084+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27085 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27086 else
27087 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27088@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27089 for (i = 0; i < iov_count; i++) {
27090 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27091
27092+ if (!iov[i].iov_len)
27093+ return -EINVAL;
27094+
27095 if (uaddr & queue_dma_alignment(q)) {
27096 unaligned = 1;
27097 break;
27098 }
27099- if (!iov[i].iov_len)
27100- return -EINVAL;
27101 }
27102
27103 if (unaligned || (q->dma_pad_mask & len) || map_data)
27104@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27105 if (!len || !kbuf)
27106 return -EINVAL;
27107
27108- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27109+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27110 if (do_copy)
27111 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27112 else
27113diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27114index ee9c216..58d410a 100644
27115--- a/block/blk-softirq.c
27116+++ b/block/blk-softirq.c
27117@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27118 * Softirq action handler - move entries to local list and loop over them
27119 * while passing them to the queue registered handler.
27120 */
27121-static void blk_done_softirq(struct softirq_action *h)
27122+static void blk_done_softirq(void)
27123 {
27124 struct list_head *cpu_list, local_list;
27125
27126diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27127index bb9c5ea..5330d48 100644
27128--- a/block/blk-sysfs.c
27129+++ b/block/blk-sysfs.c
27130@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27131 kmem_cache_free(blk_requestq_cachep, q);
27132 }
27133
27134-static struct sysfs_ops queue_sysfs_ops = {
27135+static const struct sysfs_ops queue_sysfs_ops = {
27136 .show = queue_attr_show,
27137 .store = queue_attr_store,
27138 };
27139diff --git a/block/bsg.c b/block/bsg.c
27140index 7154a7a..08ac2f0 100644
27141--- a/block/bsg.c
27142+++ b/block/bsg.c
27143@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27144 struct sg_io_v4 *hdr, struct bsg_device *bd,
27145 fmode_t has_write_perm)
27146 {
27147+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27148+ unsigned char *cmdptr;
27149+
27150 if (hdr->request_len > BLK_MAX_CDB) {
27151 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27152 if (!rq->cmd)
27153 return -ENOMEM;
27154- }
27155+ cmdptr = rq->cmd;
27156+ } else
27157+ cmdptr = tmpcmd;
27158
27159- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27160+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27161 hdr->request_len))
27162 return -EFAULT;
27163
27164+ if (cmdptr != rq->cmd)
27165+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27166+
27167 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27168 if (blk_verify_command(rq->cmd, has_write_perm))
27169 return -EPERM;
27170@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27171 rq->next_rq = next_rq;
27172 next_rq->cmd_type = rq->cmd_type;
27173
27174- dxferp = (void*)(unsigned long)hdr->din_xferp;
27175+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27176 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27177 hdr->din_xfer_len, GFP_KERNEL);
27178 if (ret)
27179@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27180
27181 if (hdr->dout_xfer_len) {
27182 dxfer_len = hdr->dout_xfer_len;
27183- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27184+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27185 } else if (hdr->din_xfer_len) {
27186 dxfer_len = hdr->din_xfer_len;
27187- dxferp = (void*)(unsigned long)hdr->din_xferp;
27188+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27189 } else
27190 dxfer_len = 0;
27191
27192@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27193 int len = min_t(unsigned int, hdr->max_response_len,
27194 rq->sense_len);
27195
27196- ret = copy_to_user((void*)(unsigned long)hdr->response,
27197+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27198 rq->sense, len);
27199 if (!ret)
27200 hdr->response_len = len;
27201diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27202index 9bd086c..ca1fc22 100644
27203--- a/block/compat_ioctl.c
27204+++ b/block/compat_ioctl.c
27205@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27206 err |= __get_user(f->spec1, &uf->spec1);
27207 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27208 err |= __get_user(name, &uf->name);
27209- f->name = compat_ptr(name);
27210+ f->name = (void __force_kernel *)compat_ptr(name);
27211 if (err) {
27212 err = -EFAULT;
27213 goto out;
27214diff --git a/block/elevator.c b/block/elevator.c
27215index a847046..75a1746 100644
27216--- a/block/elevator.c
27217+++ b/block/elevator.c
27218@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27219 return error;
27220 }
27221
27222-static struct sysfs_ops elv_sysfs_ops = {
27223+static const struct sysfs_ops elv_sysfs_ops = {
27224 .show = elv_attr_show,
27225 .store = elv_attr_store,
27226 };
27227diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27228index 1d5a780..0e2fb8c 100644
27229--- a/block/scsi_ioctl.c
27230+++ b/block/scsi_ioctl.c
27231@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
27232 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27233 struct sg_io_hdr *hdr, fmode_t mode)
27234 {
27235- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27236+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27237+ unsigned char *cmdptr;
27238+
27239+ if (rq->cmd != rq->__cmd)
27240+ cmdptr = rq->cmd;
27241+ else
27242+ cmdptr = tmpcmd;
27243+
27244+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27245 return -EFAULT;
27246+
27247+ if (cmdptr != rq->cmd)
27248+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27249+
27250 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27251 return -EPERM;
27252
27253@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27254 int err;
27255 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27256 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27257+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27258+ unsigned char *cmdptr;
27259
27260 if (!sic)
27261 return -EINVAL;
27262@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27263 */
27264 err = -EFAULT;
27265 rq->cmd_len = cmdlen;
27266- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27267+
27268+ if (rq->cmd != rq->__cmd)
27269+ cmdptr = rq->cmd;
27270+ else
27271+ cmdptr = tmpcmd;
27272+
27273+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27274 goto error;
27275
27276+ if (rq->cmd != cmdptr)
27277+ memcpy(rq->cmd, cmdptr, cmdlen);
27278+
27279 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27280 goto error;
27281
27282diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27283index 3533582..f143117 100644
27284--- a/crypto/cryptd.c
27285+++ b/crypto/cryptd.c
27286@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27287
27288 struct cryptd_blkcipher_request_ctx {
27289 crypto_completion_t complete;
27290-};
27291+} __no_const;
27292
27293 struct cryptd_hash_ctx {
27294 struct crypto_shash *child;
27295diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27296index a90d260..7a9765e 100644
27297--- a/crypto/gf128mul.c
27298+++ b/crypto/gf128mul.c
27299@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27300 for (i = 0; i < 7; ++i)
27301 gf128mul_x_lle(&p[i + 1], &p[i]);
27302
27303- memset(r, 0, sizeof(r));
27304+ memset(r, 0, sizeof(*r));
27305 for (i = 0;;) {
27306 u8 ch = ((u8 *)b)[15 - i];
27307
27308@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27309 for (i = 0; i < 7; ++i)
27310 gf128mul_x_bbe(&p[i + 1], &p[i]);
27311
27312- memset(r, 0, sizeof(r));
27313+ memset(r, 0, sizeof(*r));
27314 for (i = 0;;) {
27315 u8 ch = ((u8 *)b)[i];
27316
27317diff --git a/crypto/serpent.c b/crypto/serpent.c
27318index b651a55..023297d 100644
27319--- a/crypto/serpent.c
27320+++ b/crypto/serpent.c
27321@@ -21,6 +21,7 @@
27322 #include <asm/byteorder.h>
27323 #include <linux/crypto.h>
27324 #include <linux/types.h>
27325+#include <linux/sched.h>
27326
27327 /* Key is padded to the maximum of 256 bits before round key generation.
27328 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27329@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27330 u32 r0,r1,r2,r3,r4;
27331 int i;
27332
27333+ pax_track_stack();
27334+
27335 /* Copy key, add padding */
27336
27337 for (i = 0; i < keylen; ++i)
27338diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27339index 0d2cdb8..d8de48d 100644
27340--- a/drivers/acpi/acpi_pad.c
27341+++ b/drivers/acpi/acpi_pad.c
27342@@ -30,7 +30,7 @@
27343 #include <acpi/acpi_bus.h>
27344 #include <acpi/acpi_drivers.h>
27345
27346-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27347+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27348 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27349 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27350 static DEFINE_MUTEX(isolated_cpus_lock);
27351diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27352index 3f4602b..2e41d36 100644
27353--- a/drivers/acpi/battery.c
27354+++ b/drivers/acpi/battery.c
27355@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27356 }
27357
27358 static struct battery_file {
27359- struct file_operations ops;
27360+ const struct file_operations ops;
27361 mode_t mode;
27362 const char *name;
27363 } acpi_battery_file[] = {
27364diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27365index 7338b6a..82f0257 100644
27366--- a/drivers/acpi/dock.c
27367+++ b/drivers/acpi/dock.c
27368@@ -77,7 +77,7 @@ struct dock_dependent_device {
27369 struct list_head list;
27370 struct list_head hotplug_list;
27371 acpi_handle handle;
27372- struct acpi_dock_ops *ops;
27373+ const struct acpi_dock_ops *ops;
27374 void *context;
27375 };
27376
27377@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27378 * the dock driver after _DCK is executed.
27379 */
27380 int
27381-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27382+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27383 void *context)
27384 {
27385 struct dock_dependent_device *dd;
27386diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27387index 7c1c59e..2993595 100644
27388--- a/drivers/acpi/osl.c
27389+++ b/drivers/acpi/osl.c
27390@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27391 void __iomem *virt_addr;
27392
27393 virt_addr = ioremap(phys_addr, width);
27394+ if (!virt_addr)
27395+ return AE_NO_MEMORY;
27396 if (!value)
27397 value = &dummy;
27398
27399@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27400 void __iomem *virt_addr;
27401
27402 virt_addr = ioremap(phys_addr, width);
27403+ if (!virt_addr)
27404+ return AE_NO_MEMORY;
27405
27406 switch (width) {
27407 case 8:
27408diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27409index c216062..eec10d2 100644
27410--- a/drivers/acpi/power_meter.c
27411+++ b/drivers/acpi/power_meter.c
27412@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27413 return res;
27414
27415 temp /= 1000;
27416- if (temp < 0)
27417- return -EINVAL;
27418
27419 mutex_lock(&resource->lock);
27420 resource->trip[attr->index - 7] = temp;
27421diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27422index d0d25e2..961643d 100644
27423--- a/drivers/acpi/proc.c
27424+++ b/drivers/acpi/proc.c
27425@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27426 size_t count, loff_t * ppos)
27427 {
27428 struct list_head *node, *next;
27429- char strbuf[5];
27430- char str[5] = "";
27431- unsigned int len = count;
27432+ char strbuf[5] = {0};
27433 struct acpi_device *found_dev = NULL;
27434
27435- if (len > 4)
27436- len = 4;
27437- if (len < 0)
27438- return -EFAULT;
27439+ if (count > 4)
27440+ count = 4;
27441
27442- if (copy_from_user(strbuf, buffer, len))
27443+ if (copy_from_user(strbuf, buffer, count))
27444 return -EFAULT;
27445- strbuf[len] = '\0';
27446- sscanf(strbuf, "%s", str);
27447+ strbuf[count] = '\0';
27448
27449 mutex_lock(&acpi_device_lock);
27450 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27451@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27452 if (!dev->wakeup.flags.valid)
27453 continue;
27454
27455- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27456+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27457 dev->wakeup.state.enabled =
27458 dev->wakeup.state.enabled ? 0 : 1;
27459 found_dev = dev;
27460diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27461index 7102474..de8ad22 100644
27462--- a/drivers/acpi/processor_core.c
27463+++ b/drivers/acpi/processor_core.c
27464@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27465 return 0;
27466 }
27467
27468- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27469+ BUG_ON(pr->id >= nr_cpu_ids);
27470
27471 /*
27472 * Buggy BIOS check
27473diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27474index d933980..5761f13 100644
27475--- a/drivers/acpi/sbshc.c
27476+++ b/drivers/acpi/sbshc.c
27477@@ -17,7 +17,7 @@
27478
27479 #define PREFIX "ACPI: "
27480
27481-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27482+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27483 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27484
27485 struct acpi_smb_hc {
27486diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27487index 0458094..6978e7b 100644
27488--- a/drivers/acpi/sleep.c
27489+++ b/drivers/acpi/sleep.c
27490@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27491 }
27492 }
27493
27494-static struct platform_suspend_ops acpi_suspend_ops = {
27495+static const struct platform_suspend_ops acpi_suspend_ops = {
27496 .valid = acpi_suspend_state_valid,
27497 .begin = acpi_suspend_begin,
27498 .prepare_late = acpi_pm_prepare,
27499@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27500 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27501 * been requested.
27502 */
27503-static struct platform_suspend_ops acpi_suspend_ops_old = {
27504+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27505 .valid = acpi_suspend_state_valid,
27506 .begin = acpi_suspend_begin_old,
27507 .prepare_late = acpi_pm_disable_gpes,
27508@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27509 acpi_enable_all_runtime_gpes();
27510 }
27511
27512-static struct platform_hibernation_ops acpi_hibernation_ops = {
27513+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27514 .begin = acpi_hibernation_begin,
27515 .end = acpi_pm_end,
27516 .pre_snapshot = acpi_hibernation_pre_snapshot,
27517@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27518 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27519 * been requested.
27520 */
27521-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27522+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27523 .begin = acpi_hibernation_begin_old,
27524 .end = acpi_pm_end,
27525 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27526diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27527index 05dff63..b662ab7 100644
27528--- a/drivers/acpi/video.c
27529+++ b/drivers/acpi/video.c
27530@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27531 vd->brightness->levels[request_level]);
27532 }
27533
27534-static struct backlight_ops acpi_backlight_ops = {
27535+static const struct backlight_ops acpi_backlight_ops = {
27536 .get_brightness = acpi_video_get_brightness,
27537 .update_status = acpi_video_set_brightness,
27538 };
27539diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27540index 6787aab..23ffb0e 100644
27541--- a/drivers/ata/ahci.c
27542+++ b/drivers/ata/ahci.c
27543@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27544 .sdev_attrs = ahci_sdev_attrs,
27545 };
27546
27547-static struct ata_port_operations ahci_ops = {
27548+static const struct ata_port_operations ahci_ops = {
27549 .inherits = &sata_pmp_port_ops,
27550
27551 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27552@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27553 .port_stop = ahci_port_stop,
27554 };
27555
27556-static struct ata_port_operations ahci_vt8251_ops = {
27557+static const struct ata_port_operations ahci_vt8251_ops = {
27558 .inherits = &ahci_ops,
27559 .hardreset = ahci_vt8251_hardreset,
27560 };
27561
27562-static struct ata_port_operations ahci_p5wdh_ops = {
27563+static const struct ata_port_operations ahci_p5wdh_ops = {
27564 .inherits = &ahci_ops,
27565 .hardreset = ahci_p5wdh_hardreset,
27566 };
27567
27568-static struct ata_port_operations ahci_sb600_ops = {
27569+static const struct ata_port_operations ahci_sb600_ops = {
27570 .inherits = &ahci_ops,
27571 .softreset = ahci_sb600_softreset,
27572 .pmp_softreset = ahci_sb600_softreset,
27573diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27574index 99e7196..4968c77 100644
27575--- a/drivers/ata/ata_generic.c
27576+++ b/drivers/ata/ata_generic.c
27577@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27578 ATA_BMDMA_SHT(DRV_NAME),
27579 };
27580
27581-static struct ata_port_operations generic_port_ops = {
27582+static const struct ata_port_operations generic_port_ops = {
27583 .inherits = &ata_bmdma_port_ops,
27584 .cable_detect = ata_cable_unknown,
27585 .set_mode = generic_set_mode,
27586diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27587index c33591d..000c121 100644
27588--- a/drivers/ata/ata_piix.c
27589+++ b/drivers/ata/ata_piix.c
27590@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27591 ATA_BMDMA_SHT(DRV_NAME),
27592 };
27593
27594-static struct ata_port_operations piix_pata_ops = {
27595+static const struct ata_port_operations piix_pata_ops = {
27596 .inherits = &ata_bmdma32_port_ops,
27597 .cable_detect = ata_cable_40wire,
27598 .set_piomode = piix_set_piomode,
27599@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27600 .prereset = piix_pata_prereset,
27601 };
27602
27603-static struct ata_port_operations piix_vmw_ops = {
27604+static const struct ata_port_operations piix_vmw_ops = {
27605 .inherits = &piix_pata_ops,
27606 .bmdma_status = piix_vmw_bmdma_status,
27607 };
27608
27609-static struct ata_port_operations ich_pata_ops = {
27610+static const struct ata_port_operations ich_pata_ops = {
27611 .inherits = &piix_pata_ops,
27612 .cable_detect = ich_pata_cable_detect,
27613 .set_dmamode = ich_set_dmamode,
27614 };
27615
27616-static struct ata_port_operations piix_sata_ops = {
27617+static const struct ata_port_operations piix_sata_ops = {
27618 .inherits = &ata_bmdma_port_ops,
27619 };
27620
27621-static struct ata_port_operations piix_sidpr_sata_ops = {
27622+static const struct ata_port_operations piix_sidpr_sata_ops = {
27623 .inherits = &piix_sata_ops,
27624 .hardreset = sata_std_hardreset,
27625 .scr_read = piix_sidpr_scr_read,
27626diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27627index b0882cd..c295d65 100644
27628--- a/drivers/ata/libata-acpi.c
27629+++ b/drivers/ata/libata-acpi.c
27630@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27631 ata_acpi_uevent(dev->link->ap, dev, event);
27632 }
27633
27634-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27635+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27636 .handler = ata_acpi_dev_notify_dock,
27637 .uevent = ata_acpi_dev_uevent,
27638 };
27639
27640-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27641+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27642 .handler = ata_acpi_ap_notify_dock,
27643 .uevent = ata_acpi_ap_uevent,
27644 };
27645diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27646index d4f7f99..94f603e 100644
27647--- a/drivers/ata/libata-core.c
27648+++ b/drivers/ata/libata-core.c
27649@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27650 struct ata_port *ap;
27651 unsigned int tag;
27652
27653- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27654+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27655 ap = qc->ap;
27656
27657 qc->flags = 0;
27658@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27659 struct ata_port *ap;
27660 struct ata_link *link;
27661
27662- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27663+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27664 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27665 ap = qc->ap;
27666 link = qc->dev->link;
27667@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27668 * LOCKING:
27669 * None.
27670 */
27671-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27672+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27673 {
27674 static DEFINE_SPINLOCK(lock);
27675 const struct ata_port_operations *cur;
27676@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27677 return;
27678
27679 spin_lock(&lock);
27680+ pax_open_kernel();
27681
27682 for (cur = ops->inherits; cur; cur = cur->inherits) {
27683 void **inherit = (void **)cur;
27684@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27685 if (IS_ERR(*pp))
27686 *pp = NULL;
27687
27688- ops->inherits = NULL;
27689+ *(struct ata_port_operations **)&ops->inherits = NULL;
27690
27691+ pax_close_kernel();
27692 spin_unlock(&lock);
27693 }
27694
27695@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27696 */
27697 /* KILLME - the only user left is ipr */
27698 void ata_host_init(struct ata_host *host, struct device *dev,
27699- unsigned long flags, struct ata_port_operations *ops)
27700+ unsigned long flags, const struct ata_port_operations *ops)
27701 {
27702 spin_lock_init(&host->lock);
27703 host->dev = dev;
27704@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27705 /* truly dummy */
27706 }
27707
27708-struct ata_port_operations ata_dummy_port_ops = {
27709+const struct ata_port_operations ata_dummy_port_ops = {
27710 .qc_prep = ata_noop_qc_prep,
27711 .qc_issue = ata_dummy_qc_issue,
27712 .error_handler = ata_dummy_error_handler,
27713diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27714index e5bdb9b..45a8e72 100644
27715--- a/drivers/ata/libata-eh.c
27716+++ b/drivers/ata/libata-eh.c
27717@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27718 {
27719 struct ata_link *link;
27720
27721+ pax_track_stack();
27722+
27723 ata_for_each_link(link, ap, HOST_FIRST)
27724 ata_eh_link_report(link);
27725 }
27726@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27727 */
27728 void ata_std_error_handler(struct ata_port *ap)
27729 {
27730- struct ata_port_operations *ops = ap->ops;
27731+ const struct ata_port_operations *ops = ap->ops;
27732 ata_reset_fn_t hardreset = ops->hardreset;
27733
27734 /* ignore built-in hardreset if SCR access is not available */
27735diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27736index 51f0ffb..19ce3e3 100644
27737--- a/drivers/ata/libata-pmp.c
27738+++ b/drivers/ata/libata-pmp.c
27739@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27740 */
27741 static int sata_pmp_eh_recover(struct ata_port *ap)
27742 {
27743- struct ata_port_operations *ops = ap->ops;
27744+ const struct ata_port_operations *ops = ap->ops;
27745 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27746 struct ata_link *pmp_link = &ap->link;
27747 struct ata_device *pmp_dev = pmp_link->device;
27748diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27749index d8f35fe..288180a 100644
27750--- a/drivers/ata/pata_acpi.c
27751+++ b/drivers/ata/pata_acpi.c
27752@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27753 ATA_BMDMA_SHT(DRV_NAME),
27754 };
27755
27756-static struct ata_port_operations pacpi_ops = {
27757+static const struct ata_port_operations pacpi_ops = {
27758 .inherits = &ata_bmdma_port_ops,
27759 .qc_issue = pacpi_qc_issue,
27760 .cable_detect = pacpi_cable_detect,
27761diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27762index 9434114..1f2f364 100644
27763--- a/drivers/ata/pata_ali.c
27764+++ b/drivers/ata/pata_ali.c
27765@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27766 * Port operations for PIO only ALi
27767 */
27768
27769-static struct ata_port_operations ali_early_port_ops = {
27770+static const struct ata_port_operations ali_early_port_ops = {
27771 .inherits = &ata_sff_port_ops,
27772 .cable_detect = ata_cable_40wire,
27773 .set_piomode = ali_set_piomode,
27774@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27775 * Port operations for DMA capable ALi without cable
27776 * detect
27777 */
27778-static struct ata_port_operations ali_20_port_ops = {
27779+static const struct ata_port_operations ali_20_port_ops = {
27780 .inherits = &ali_dma_base_ops,
27781 .cable_detect = ata_cable_40wire,
27782 .mode_filter = ali_20_filter,
27783@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27784 /*
27785 * Port operations for DMA capable ALi with cable detect
27786 */
27787-static struct ata_port_operations ali_c2_port_ops = {
27788+static const struct ata_port_operations ali_c2_port_ops = {
27789 .inherits = &ali_dma_base_ops,
27790 .check_atapi_dma = ali_check_atapi_dma,
27791 .cable_detect = ali_c2_cable_detect,
27792@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27793 /*
27794 * Port operations for DMA capable ALi with cable detect
27795 */
27796-static struct ata_port_operations ali_c4_port_ops = {
27797+static const struct ata_port_operations ali_c4_port_ops = {
27798 .inherits = &ali_dma_base_ops,
27799 .check_atapi_dma = ali_check_atapi_dma,
27800 .cable_detect = ali_c2_cable_detect,
27801@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27802 /*
27803 * Port operations for DMA capable ALi with cable detect and LBA48
27804 */
27805-static struct ata_port_operations ali_c5_port_ops = {
27806+static const struct ata_port_operations ali_c5_port_ops = {
27807 .inherits = &ali_dma_base_ops,
27808 .check_atapi_dma = ali_check_atapi_dma,
27809 .dev_config = ali_warn_atapi_dma,
27810diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27811index 567f3f7..c8ee0da 100644
27812--- a/drivers/ata/pata_amd.c
27813+++ b/drivers/ata/pata_amd.c
27814@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27815 .prereset = amd_pre_reset,
27816 };
27817
27818-static struct ata_port_operations amd33_port_ops = {
27819+static const struct ata_port_operations amd33_port_ops = {
27820 .inherits = &amd_base_port_ops,
27821 .cable_detect = ata_cable_40wire,
27822 .set_piomode = amd33_set_piomode,
27823 .set_dmamode = amd33_set_dmamode,
27824 };
27825
27826-static struct ata_port_operations amd66_port_ops = {
27827+static const struct ata_port_operations amd66_port_ops = {
27828 .inherits = &amd_base_port_ops,
27829 .cable_detect = ata_cable_unknown,
27830 .set_piomode = amd66_set_piomode,
27831 .set_dmamode = amd66_set_dmamode,
27832 };
27833
27834-static struct ata_port_operations amd100_port_ops = {
27835+static const struct ata_port_operations amd100_port_ops = {
27836 .inherits = &amd_base_port_ops,
27837 .cable_detect = ata_cable_unknown,
27838 .set_piomode = amd100_set_piomode,
27839 .set_dmamode = amd100_set_dmamode,
27840 };
27841
27842-static struct ata_port_operations amd133_port_ops = {
27843+static const struct ata_port_operations amd133_port_ops = {
27844 .inherits = &amd_base_port_ops,
27845 .cable_detect = amd_cable_detect,
27846 .set_piomode = amd133_set_piomode,
27847@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27848 .host_stop = nv_host_stop,
27849 };
27850
27851-static struct ata_port_operations nv100_port_ops = {
27852+static const struct ata_port_operations nv100_port_ops = {
27853 .inherits = &nv_base_port_ops,
27854 .set_piomode = nv100_set_piomode,
27855 .set_dmamode = nv100_set_dmamode,
27856 };
27857
27858-static struct ata_port_operations nv133_port_ops = {
27859+static const struct ata_port_operations nv133_port_ops = {
27860 .inherits = &nv_base_port_ops,
27861 .set_piomode = nv133_set_piomode,
27862 .set_dmamode = nv133_set_dmamode,
27863diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27864index d332cfd..4b7eaae 100644
27865--- a/drivers/ata/pata_artop.c
27866+++ b/drivers/ata/pata_artop.c
27867@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27868 ATA_BMDMA_SHT(DRV_NAME),
27869 };
27870
27871-static struct ata_port_operations artop6210_ops = {
27872+static const struct ata_port_operations artop6210_ops = {
27873 .inherits = &ata_bmdma_port_ops,
27874 .cable_detect = ata_cable_40wire,
27875 .set_piomode = artop6210_set_piomode,
27876@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27877 .qc_defer = artop6210_qc_defer,
27878 };
27879
27880-static struct ata_port_operations artop6260_ops = {
27881+static const struct ata_port_operations artop6260_ops = {
27882 .inherits = &ata_bmdma_port_ops,
27883 .cable_detect = artop6260_cable_detect,
27884 .set_piomode = artop6260_set_piomode,
27885diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27886index 5c129f9..7bb7ccb 100644
27887--- a/drivers/ata/pata_at32.c
27888+++ b/drivers/ata/pata_at32.c
27889@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27890 ATA_PIO_SHT(DRV_NAME),
27891 };
27892
27893-static struct ata_port_operations at32_port_ops = {
27894+static const struct ata_port_operations at32_port_ops = {
27895 .inherits = &ata_sff_port_ops,
27896 .cable_detect = ata_cable_40wire,
27897 .set_piomode = pata_at32_set_piomode,
27898diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
27899index 41c94b1..829006d 100644
27900--- a/drivers/ata/pata_at91.c
27901+++ b/drivers/ata/pata_at91.c
27902@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
27903 ATA_PIO_SHT(DRV_NAME),
27904 };
27905
27906-static struct ata_port_operations pata_at91_port_ops = {
27907+static const struct ata_port_operations pata_at91_port_ops = {
27908 .inherits = &ata_sff_port_ops,
27909
27910 .sff_data_xfer = pata_at91_data_xfer_noirq,
27911diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
27912index ae4454d..d391eb4 100644
27913--- a/drivers/ata/pata_atiixp.c
27914+++ b/drivers/ata/pata_atiixp.c
27915@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
27916 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27917 };
27918
27919-static struct ata_port_operations atiixp_port_ops = {
27920+static const struct ata_port_operations atiixp_port_ops = {
27921 .inherits = &ata_bmdma_port_ops,
27922
27923 .qc_prep = ata_sff_dumb_qc_prep,
27924diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
27925index 6fe7ded..2a425dc 100644
27926--- a/drivers/ata/pata_atp867x.c
27927+++ b/drivers/ata/pata_atp867x.c
27928@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
27929 ATA_BMDMA_SHT(DRV_NAME),
27930 };
27931
27932-static struct ata_port_operations atp867x_ops = {
27933+static const struct ata_port_operations atp867x_ops = {
27934 .inherits = &ata_bmdma_port_ops,
27935 .cable_detect = atp867x_cable_detect,
27936 .set_piomode = atp867x_set_piomode,
27937diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
27938index c4b47a3..b27a367 100644
27939--- a/drivers/ata/pata_bf54x.c
27940+++ b/drivers/ata/pata_bf54x.c
27941@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
27942 .dma_boundary = ATA_DMA_BOUNDARY,
27943 };
27944
27945-static struct ata_port_operations bfin_pata_ops = {
27946+static const struct ata_port_operations bfin_pata_ops = {
27947 .inherits = &ata_sff_port_ops,
27948
27949 .set_piomode = bfin_set_piomode,
27950diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
27951index 5acf9fa..84248be 100644
27952--- a/drivers/ata/pata_cmd640.c
27953+++ b/drivers/ata/pata_cmd640.c
27954@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
27955 ATA_BMDMA_SHT(DRV_NAME),
27956 };
27957
27958-static struct ata_port_operations cmd640_port_ops = {
27959+static const struct ata_port_operations cmd640_port_ops = {
27960 .inherits = &ata_bmdma_port_ops,
27961 /* In theory xfer_noirq is not needed once we kill the prefetcher */
27962 .sff_data_xfer = ata_sff_data_xfer_noirq,
27963diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
27964index ccd2694..c869c3d 100644
27965--- a/drivers/ata/pata_cmd64x.c
27966+++ b/drivers/ata/pata_cmd64x.c
27967@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
27968 .set_dmamode = cmd64x_set_dmamode,
27969 };
27970
27971-static struct ata_port_operations cmd64x_port_ops = {
27972+static const struct ata_port_operations cmd64x_port_ops = {
27973 .inherits = &cmd64x_base_ops,
27974 .cable_detect = ata_cable_40wire,
27975 };
27976
27977-static struct ata_port_operations cmd646r1_port_ops = {
27978+static const struct ata_port_operations cmd646r1_port_ops = {
27979 .inherits = &cmd64x_base_ops,
27980 .bmdma_stop = cmd646r1_bmdma_stop,
27981 .cable_detect = ata_cable_40wire,
27982 };
27983
27984-static struct ata_port_operations cmd648_port_ops = {
27985+static const struct ata_port_operations cmd648_port_ops = {
27986 .inherits = &cmd64x_base_ops,
27987 .bmdma_stop = cmd648_bmdma_stop,
27988 .cable_detect = cmd648_cable_detect,
27989diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
27990index 0df83cf..d7595b0 100644
27991--- a/drivers/ata/pata_cs5520.c
27992+++ b/drivers/ata/pata_cs5520.c
27993@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
27994 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27995 };
27996
27997-static struct ata_port_operations cs5520_port_ops = {
27998+static const struct ata_port_operations cs5520_port_ops = {
27999 .inherits = &ata_bmdma_port_ops,
28000 .qc_prep = ata_sff_dumb_qc_prep,
28001 .cable_detect = ata_cable_40wire,
28002diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28003index c974b05..6d26b11 100644
28004--- a/drivers/ata/pata_cs5530.c
28005+++ b/drivers/ata/pata_cs5530.c
28006@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28007 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28008 };
28009
28010-static struct ata_port_operations cs5530_port_ops = {
28011+static const struct ata_port_operations cs5530_port_ops = {
28012 .inherits = &ata_bmdma_port_ops,
28013
28014 .qc_prep = ata_sff_dumb_qc_prep,
28015diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28016index 403f561..aacd26b 100644
28017--- a/drivers/ata/pata_cs5535.c
28018+++ b/drivers/ata/pata_cs5535.c
28019@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28020 ATA_BMDMA_SHT(DRV_NAME),
28021 };
28022
28023-static struct ata_port_operations cs5535_port_ops = {
28024+static const struct ata_port_operations cs5535_port_ops = {
28025 .inherits = &ata_bmdma_port_ops,
28026 .cable_detect = cs5535_cable_detect,
28027 .set_piomode = cs5535_set_piomode,
28028diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28029index 6da4cb4..de24a25 100644
28030--- a/drivers/ata/pata_cs5536.c
28031+++ b/drivers/ata/pata_cs5536.c
28032@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28033 ATA_BMDMA_SHT(DRV_NAME),
28034 };
28035
28036-static struct ata_port_operations cs5536_port_ops = {
28037+static const struct ata_port_operations cs5536_port_ops = {
28038 .inherits = &ata_bmdma_port_ops,
28039 .cable_detect = cs5536_cable_detect,
28040 .set_piomode = cs5536_set_piomode,
28041diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28042index 8fb040b..b16a9c9 100644
28043--- a/drivers/ata/pata_cypress.c
28044+++ b/drivers/ata/pata_cypress.c
28045@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28046 ATA_BMDMA_SHT(DRV_NAME),
28047 };
28048
28049-static struct ata_port_operations cy82c693_port_ops = {
28050+static const struct ata_port_operations cy82c693_port_ops = {
28051 .inherits = &ata_bmdma_port_ops,
28052 .cable_detect = ata_cable_40wire,
28053 .set_piomode = cy82c693_set_piomode,
28054diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28055index 2a6412f..555ee11 100644
28056--- a/drivers/ata/pata_efar.c
28057+++ b/drivers/ata/pata_efar.c
28058@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28059 ATA_BMDMA_SHT(DRV_NAME),
28060 };
28061
28062-static struct ata_port_operations efar_ops = {
28063+static const struct ata_port_operations efar_ops = {
28064 .inherits = &ata_bmdma_port_ops,
28065 .cable_detect = efar_cable_detect,
28066 .set_piomode = efar_set_piomode,
28067diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28068index b9d8836..0b92030 100644
28069--- a/drivers/ata/pata_hpt366.c
28070+++ b/drivers/ata/pata_hpt366.c
28071@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28072 * Configuration for HPT366/68
28073 */
28074
28075-static struct ata_port_operations hpt366_port_ops = {
28076+static const struct ata_port_operations hpt366_port_ops = {
28077 .inherits = &ata_bmdma_port_ops,
28078 .cable_detect = hpt36x_cable_detect,
28079 .mode_filter = hpt366_filter,
28080diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28081index 5af7f19..00c4980 100644
28082--- a/drivers/ata/pata_hpt37x.c
28083+++ b/drivers/ata/pata_hpt37x.c
28084@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28085 * Configuration for HPT370
28086 */
28087
28088-static struct ata_port_operations hpt370_port_ops = {
28089+static const struct ata_port_operations hpt370_port_ops = {
28090 .inherits = &ata_bmdma_port_ops,
28091
28092 .bmdma_stop = hpt370_bmdma_stop,
28093@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28094 * Configuration for HPT370A. Close to 370 but less filters
28095 */
28096
28097-static struct ata_port_operations hpt370a_port_ops = {
28098+static const struct ata_port_operations hpt370a_port_ops = {
28099 .inherits = &hpt370_port_ops,
28100 .mode_filter = hpt370a_filter,
28101 };
28102@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28103 * and DMA mode setting functionality.
28104 */
28105
28106-static struct ata_port_operations hpt372_port_ops = {
28107+static const struct ata_port_operations hpt372_port_ops = {
28108 .inherits = &ata_bmdma_port_ops,
28109
28110 .bmdma_stop = hpt37x_bmdma_stop,
28111@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28112 * but we have a different cable detection procedure for function 1.
28113 */
28114
28115-static struct ata_port_operations hpt374_fn1_port_ops = {
28116+static const struct ata_port_operations hpt374_fn1_port_ops = {
28117 .inherits = &hpt372_port_ops,
28118 .prereset = hpt374_fn1_pre_reset,
28119 };
28120diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28121index 100f227..2e39382 100644
28122--- a/drivers/ata/pata_hpt3x2n.c
28123+++ b/drivers/ata/pata_hpt3x2n.c
28124@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28125 * Configuration for HPT3x2n.
28126 */
28127
28128-static struct ata_port_operations hpt3x2n_port_ops = {
28129+static const struct ata_port_operations hpt3x2n_port_ops = {
28130 .inherits = &ata_bmdma_port_ops,
28131
28132 .bmdma_stop = hpt3x2n_bmdma_stop,
28133diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28134index 7e31025..6fca8f4 100644
28135--- a/drivers/ata/pata_hpt3x3.c
28136+++ b/drivers/ata/pata_hpt3x3.c
28137@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28138 ATA_BMDMA_SHT(DRV_NAME),
28139 };
28140
28141-static struct ata_port_operations hpt3x3_port_ops = {
28142+static const struct ata_port_operations hpt3x3_port_ops = {
28143 .inherits = &ata_bmdma_port_ops,
28144 .cable_detect = ata_cable_40wire,
28145 .set_piomode = hpt3x3_set_piomode,
28146diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28147index b663b7f..9a26c2a 100644
28148--- a/drivers/ata/pata_icside.c
28149+++ b/drivers/ata/pata_icside.c
28150@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28151 }
28152 }
28153
28154-static struct ata_port_operations pata_icside_port_ops = {
28155+static const struct ata_port_operations pata_icside_port_ops = {
28156 .inherits = &ata_sff_port_ops,
28157 /* no need to build any PRD tables for DMA */
28158 .qc_prep = ata_noop_qc_prep,
28159diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28160index 4bceb88..457dfb6 100644
28161--- a/drivers/ata/pata_isapnp.c
28162+++ b/drivers/ata/pata_isapnp.c
28163@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28164 ATA_PIO_SHT(DRV_NAME),
28165 };
28166
28167-static struct ata_port_operations isapnp_port_ops = {
28168+static const struct ata_port_operations isapnp_port_ops = {
28169 .inherits = &ata_sff_port_ops,
28170 .cable_detect = ata_cable_40wire,
28171 };
28172
28173-static struct ata_port_operations isapnp_noalt_port_ops = {
28174+static const struct ata_port_operations isapnp_noalt_port_ops = {
28175 .inherits = &ata_sff_port_ops,
28176 .cable_detect = ata_cable_40wire,
28177 /* No altstatus so we don't want to use the lost interrupt poll */
28178diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28179index f156da8..24976e2 100644
28180--- a/drivers/ata/pata_it8213.c
28181+++ b/drivers/ata/pata_it8213.c
28182@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28183 };
28184
28185
28186-static struct ata_port_operations it8213_ops = {
28187+static const struct ata_port_operations it8213_ops = {
28188 .inherits = &ata_bmdma_port_ops,
28189 .cable_detect = it8213_cable_detect,
28190 .set_piomode = it8213_set_piomode,
28191diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28192index 188bc2f..ca9e785 100644
28193--- a/drivers/ata/pata_it821x.c
28194+++ b/drivers/ata/pata_it821x.c
28195@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28196 ATA_BMDMA_SHT(DRV_NAME),
28197 };
28198
28199-static struct ata_port_operations it821x_smart_port_ops = {
28200+static const struct ata_port_operations it821x_smart_port_ops = {
28201 .inherits = &ata_bmdma_port_ops,
28202
28203 .check_atapi_dma= it821x_check_atapi_dma,
28204@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28205 .port_start = it821x_port_start,
28206 };
28207
28208-static struct ata_port_operations it821x_passthru_port_ops = {
28209+static const struct ata_port_operations it821x_passthru_port_ops = {
28210 .inherits = &ata_bmdma_port_ops,
28211
28212 .check_atapi_dma= it821x_check_atapi_dma,
28213@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28214 .port_start = it821x_port_start,
28215 };
28216
28217-static struct ata_port_operations it821x_rdc_port_ops = {
28218+static const struct ata_port_operations it821x_rdc_port_ops = {
28219 .inherits = &ata_bmdma_port_ops,
28220
28221 .check_atapi_dma= it821x_check_atapi_dma,
28222diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28223index ba54b08..4b952b7 100644
28224--- a/drivers/ata/pata_ixp4xx_cf.c
28225+++ b/drivers/ata/pata_ixp4xx_cf.c
28226@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28227 ATA_PIO_SHT(DRV_NAME),
28228 };
28229
28230-static struct ata_port_operations ixp4xx_port_ops = {
28231+static const struct ata_port_operations ixp4xx_port_ops = {
28232 .inherits = &ata_sff_port_ops,
28233 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28234 .cable_detect = ata_cable_40wire,
28235diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28236index 3a1474a..434b0ff 100644
28237--- a/drivers/ata/pata_jmicron.c
28238+++ b/drivers/ata/pata_jmicron.c
28239@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28240 ATA_BMDMA_SHT(DRV_NAME),
28241 };
28242
28243-static struct ata_port_operations jmicron_ops = {
28244+static const struct ata_port_operations jmicron_ops = {
28245 .inherits = &ata_bmdma_port_ops,
28246 .prereset = jmicron_pre_reset,
28247 };
28248diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28249index 6932e56..220e71d 100644
28250--- a/drivers/ata/pata_legacy.c
28251+++ b/drivers/ata/pata_legacy.c
28252@@ -106,7 +106,7 @@ struct legacy_probe {
28253
28254 struct legacy_controller {
28255 const char *name;
28256- struct ata_port_operations *ops;
28257+ const struct ata_port_operations *ops;
28258 unsigned int pio_mask;
28259 unsigned int flags;
28260 unsigned int pflags;
28261@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28262 * pio_mask as well.
28263 */
28264
28265-static struct ata_port_operations simple_port_ops = {
28266+static const struct ata_port_operations simple_port_ops = {
28267 .inherits = &legacy_base_port_ops,
28268 .sff_data_xfer = ata_sff_data_xfer_noirq,
28269 };
28270
28271-static struct ata_port_operations legacy_port_ops = {
28272+static const struct ata_port_operations legacy_port_ops = {
28273 .inherits = &legacy_base_port_ops,
28274 .sff_data_xfer = ata_sff_data_xfer_noirq,
28275 .set_mode = legacy_set_mode,
28276@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28277 return buflen;
28278 }
28279
28280-static struct ata_port_operations pdc20230_port_ops = {
28281+static const struct ata_port_operations pdc20230_port_ops = {
28282 .inherits = &legacy_base_port_ops,
28283 .set_piomode = pdc20230_set_piomode,
28284 .sff_data_xfer = pdc_data_xfer_vlb,
28285@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28286 ioread8(ap->ioaddr.status_addr);
28287 }
28288
28289-static struct ata_port_operations ht6560a_port_ops = {
28290+static const struct ata_port_operations ht6560a_port_ops = {
28291 .inherits = &legacy_base_port_ops,
28292 .set_piomode = ht6560a_set_piomode,
28293 };
28294@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28295 ioread8(ap->ioaddr.status_addr);
28296 }
28297
28298-static struct ata_port_operations ht6560b_port_ops = {
28299+static const struct ata_port_operations ht6560b_port_ops = {
28300 .inherits = &legacy_base_port_ops,
28301 .set_piomode = ht6560b_set_piomode,
28302 };
28303@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28304 }
28305
28306
28307-static struct ata_port_operations opti82c611a_port_ops = {
28308+static const struct ata_port_operations opti82c611a_port_ops = {
28309 .inherits = &legacy_base_port_ops,
28310 .set_piomode = opti82c611a_set_piomode,
28311 };
28312@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28313 return ata_sff_qc_issue(qc);
28314 }
28315
28316-static struct ata_port_operations opti82c46x_port_ops = {
28317+static const struct ata_port_operations opti82c46x_port_ops = {
28318 .inherits = &legacy_base_port_ops,
28319 .set_piomode = opti82c46x_set_piomode,
28320 .qc_issue = opti82c46x_qc_issue,
28321@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28322 return 0;
28323 }
28324
28325-static struct ata_port_operations qdi6500_port_ops = {
28326+static const struct ata_port_operations qdi6500_port_ops = {
28327 .inherits = &legacy_base_port_ops,
28328 .set_piomode = qdi6500_set_piomode,
28329 .qc_issue = qdi_qc_issue,
28330 .sff_data_xfer = vlb32_data_xfer,
28331 };
28332
28333-static struct ata_port_operations qdi6580_port_ops = {
28334+static const struct ata_port_operations qdi6580_port_ops = {
28335 .inherits = &legacy_base_port_ops,
28336 .set_piomode = qdi6580_set_piomode,
28337 .sff_data_xfer = vlb32_data_xfer,
28338 };
28339
28340-static struct ata_port_operations qdi6580dp_port_ops = {
28341+static const struct ata_port_operations qdi6580dp_port_ops = {
28342 .inherits = &legacy_base_port_ops,
28343 .set_piomode = qdi6580dp_set_piomode,
28344 .sff_data_xfer = vlb32_data_xfer,
28345@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28346 return 0;
28347 }
28348
28349-static struct ata_port_operations winbond_port_ops = {
28350+static const struct ata_port_operations winbond_port_ops = {
28351 .inherits = &legacy_base_port_ops,
28352 .set_piomode = winbond_set_piomode,
28353 .sff_data_xfer = vlb32_data_xfer,
28354@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28355 int pio_modes = controller->pio_mask;
28356 unsigned long io = probe->port;
28357 u32 mask = (1 << probe->slot);
28358- struct ata_port_operations *ops = controller->ops;
28359+ const struct ata_port_operations *ops = controller->ops;
28360 struct legacy_data *ld = &legacy_data[probe->slot];
28361 struct ata_host *host = NULL;
28362 struct ata_port *ap;
28363diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28364index 2096fb7..4d090fc 100644
28365--- a/drivers/ata/pata_marvell.c
28366+++ b/drivers/ata/pata_marvell.c
28367@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28368 ATA_BMDMA_SHT(DRV_NAME),
28369 };
28370
28371-static struct ata_port_operations marvell_ops = {
28372+static const struct ata_port_operations marvell_ops = {
28373 .inherits = &ata_bmdma_port_ops,
28374 .cable_detect = marvell_cable_detect,
28375 .prereset = marvell_pre_reset,
28376diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28377index 99d41be..7d56aa8 100644
28378--- a/drivers/ata/pata_mpc52xx.c
28379+++ b/drivers/ata/pata_mpc52xx.c
28380@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28381 ATA_PIO_SHT(DRV_NAME),
28382 };
28383
28384-static struct ata_port_operations mpc52xx_ata_port_ops = {
28385+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28386 .inherits = &ata_bmdma_port_ops,
28387 .sff_dev_select = mpc52xx_ata_dev_select,
28388 .set_piomode = mpc52xx_ata_set_piomode,
28389diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28390index b21f002..0a27e7f 100644
28391--- a/drivers/ata/pata_mpiix.c
28392+++ b/drivers/ata/pata_mpiix.c
28393@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28394 ATA_PIO_SHT(DRV_NAME),
28395 };
28396
28397-static struct ata_port_operations mpiix_port_ops = {
28398+static const struct ata_port_operations mpiix_port_ops = {
28399 .inherits = &ata_sff_port_ops,
28400 .qc_issue = mpiix_qc_issue,
28401 .cable_detect = ata_cable_40wire,
28402diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28403index f0d52f7..89c3be3 100644
28404--- a/drivers/ata/pata_netcell.c
28405+++ b/drivers/ata/pata_netcell.c
28406@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28407 ATA_BMDMA_SHT(DRV_NAME),
28408 };
28409
28410-static struct ata_port_operations netcell_ops = {
28411+static const struct ata_port_operations netcell_ops = {
28412 .inherits = &ata_bmdma_port_ops,
28413 .cable_detect = ata_cable_80wire,
28414 .read_id = netcell_read_id,
28415diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28416index dd53a66..a3f4317 100644
28417--- a/drivers/ata/pata_ninja32.c
28418+++ b/drivers/ata/pata_ninja32.c
28419@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28420 ATA_BMDMA_SHT(DRV_NAME),
28421 };
28422
28423-static struct ata_port_operations ninja32_port_ops = {
28424+static const struct ata_port_operations ninja32_port_ops = {
28425 .inherits = &ata_bmdma_port_ops,
28426 .sff_dev_select = ninja32_dev_select,
28427 .cable_detect = ata_cable_40wire,
28428diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28429index ca53fac..9aa93ef 100644
28430--- a/drivers/ata/pata_ns87410.c
28431+++ b/drivers/ata/pata_ns87410.c
28432@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28433 ATA_PIO_SHT(DRV_NAME),
28434 };
28435
28436-static struct ata_port_operations ns87410_port_ops = {
28437+static const struct ata_port_operations ns87410_port_ops = {
28438 .inherits = &ata_sff_port_ops,
28439 .qc_issue = ns87410_qc_issue,
28440 .cable_detect = ata_cable_40wire,
28441diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28442index 773b159..55f454e 100644
28443--- a/drivers/ata/pata_ns87415.c
28444+++ b/drivers/ata/pata_ns87415.c
28445@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28446 }
28447 #endif /* 87560 SuperIO Support */
28448
28449-static struct ata_port_operations ns87415_pata_ops = {
28450+static const struct ata_port_operations ns87415_pata_ops = {
28451 .inherits = &ata_bmdma_port_ops,
28452
28453 .check_atapi_dma = ns87415_check_atapi_dma,
28454@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28455 };
28456
28457 #if defined(CONFIG_SUPERIO)
28458-static struct ata_port_operations ns87560_pata_ops = {
28459+static const struct ata_port_operations ns87560_pata_ops = {
28460 .inherits = &ns87415_pata_ops,
28461 .sff_tf_read = ns87560_tf_read,
28462 .sff_check_status = ns87560_check_status,
28463diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28464index d6f6956..639295b 100644
28465--- a/drivers/ata/pata_octeon_cf.c
28466+++ b/drivers/ata/pata_octeon_cf.c
28467@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28468 return 0;
28469 }
28470
28471+/* cannot be const */
28472 static struct ata_port_operations octeon_cf_ops = {
28473 .inherits = &ata_sff_port_ops,
28474 .check_atapi_dma = octeon_cf_check_atapi_dma,
28475diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28476index 84ac503..adee1cd 100644
28477--- a/drivers/ata/pata_oldpiix.c
28478+++ b/drivers/ata/pata_oldpiix.c
28479@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28480 ATA_BMDMA_SHT(DRV_NAME),
28481 };
28482
28483-static struct ata_port_operations oldpiix_pata_ops = {
28484+static const struct ata_port_operations oldpiix_pata_ops = {
28485 .inherits = &ata_bmdma_port_ops,
28486 .qc_issue = oldpiix_qc_issue,
28487 .cable_detect = ata_cable_40wire,
28488diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28489index 99eddda..3a4c0aa 100644
28490--- a/drivers/ata/pata_opti.c
28491+++ b/drivers/ata/pata_opti.c
28492@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28493 ATA_PIO_SHT(DRV_NAME),
28494 };
28495
28496-static struct ata_port_operations opti_port_ops = {
28497+static const struct ata_port_operations opti_port_ops = {
28498 .inherits = &ata_sff_port_ops,
28499 .cable_detect = ata_cable_40wire,
28500 .set_piomode = opti_set_piomode,
28501diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28502index 86885a4..8e9968d 100644
28503--- a/drivers/ata/pata_optidma.c
28504+++ b/drivers/ata/pata_optidma.c
28505@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28506 ATA_BMDMA_SHT(DRV_NAME),
28507 };
28508
28509-static struct ata_port_operations optidma_port_ops = {
28510+static const struct ata_port_operations optidma_port_ops = {
28511 .inherits = &ata_bmdma_port_ops,
28512 .cable_detect = ata_cable_40wire,
28513 .set_piomode = optidma_set_pio_mode,
28514@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28515 .prereset = optidma_pre_reset,
28516 };
28517
28518-static struct ata_port_operations optiplus_port_ops = {
28519+static const struct ata_port_operations optiplus_port_ops = {
28520 .inherits = &optidma_port_ops,
28521 .set_piomode = optiplus_set_pio_mode,
28522 .set_dmamode = optiplus_set_dma_mode,
28523diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28524index 11fb4cc..1a14022 100644
28525--- a/drivers/ata/pata_palmld.c
28526+++ b/drivers/ata/pata_palmld.c
28527@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28528 ATA_PIO_SHT(DRV_NAME),
28529 };
28530
28531-static struct ata_port_operations palmld_port_ops = {
28532+static const struct ata_port_operations palmld_port_ops = {
28533 .inherits = &ata_sff_port_ops,
28534 .sff_data_xfer = ata_sff_data_xfer_noirq,
28535 .cable_detect = ata_cable_40wire,
28536diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28537index dc99e26..7f4b1e4 100644
28538--- a/drivers/ata/pata_pcmcia.c
28539+++ b/drivers/ata/pata_pcmcia.c
28540@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28541 ATA_PIO_SHT(DRV_NAME),
28542 };
28543
28544-static struct ata_port_operations pcmcia_port_ops = {
28545+static const struct ata_port_operations pcmcia_port_ops = {
28546 .inherits = &ata_sff_port_ops,
28547 .sff_data_xfer = ata_sff_data_xfer_noirq,
28548 .cable_detect = ata_cable_40wire,
28549 .set_mode = pcmcia_set_mode,
28550 };
28551
28552-static struct ata_port_operations pcmcia_8bit_port_ops = {
28553+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28554 .inherits = &ata_sff_port_ops,
28555 .sff_data_xfer = ata_data_xfer_8bit,
28556 .cable_detect = ata_cable_40wire,
28557@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28558 unsigned long io_base, ctl_base;
28559 void __iomem *io_addr, *ctl_addr;
28560 int n_ports = 1;
28561- struct ata_port_operations *ops = &pcmcia_port_ops;
28562+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28563
28564 info = kzalloc(sizeof(*info), GFP_KERNEL);
28565 if (info == NULL)
28566diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28567index ca5cad0..3a1f125 100644
28568--- a/drivers/ata/pata_pdc2027x.c
28569+++ b/drivers/ata/pata_pdc2027x.c
28570@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28571 ATA_BMDMA_SHT(DRV_NAME),
28572 };
28573
28574-static struct ata_port_operations pdc2027x_pata100_ops = {
28575+static const struct ata_port_operations pdc2027x_pata100_ops = {
28576 .inherits = &ata_bmdma_port_ops,
28577 .check_atapi_dma = pdc2027x_check_atapi_dma,
28578 .cable_detect = pdc2027x_cable_detect,
28579 .prereset = pdc2027x_prereset,
28580 };
28581
28582-static struct ata_port_operations pdc2027x_pata133_ops = {
28583+static const struct ata_port_operations pdc2027x_pata133_ops = {
28584 .inherits = &pdc2027x_pata100_ops,
28585 .mode_filter = pdc2027x_mode_filter,
28586 .set_piomode = pdc2027x_set_piomode,
28587diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28588index 2911120..4bf62aa 100644
28589--- a/drivers/ata/pata_pdc202xx_old.c
28590+++ b/drivers/ata/pata_pdc202xx_old.c
28591@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28592 ATA_BMDMA_SHT(DRV_NAME),
28593 };
28594
28595-static struct ata_port_operations pdc2024x_port_ops = {
28596+static const struct ata_port_operations pdc2024x_port_ops = {
28597 .inherits = &ata_bmdma_port_ops,
28598
28599 .cable_detect = ata_cable_40wire,
28600@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28601 .sff_exec_command = pdc202xx_exec_command,
28602 };
28603
28604-static struct ata_port_operations pdc2026x_port_ops = {
28605+static const struct ata_port_operations pdc2026x_port_ops = {
28606 .inherits = &pdc2024x_port_ops,
28607
28608 .check_atapi_dma = pdc2026x_check_atapi_dma,
28609diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28610index 3f6ebc6..a18c358 100644
28611--- a/drivers/ata/pata_platform.c
28612+++ b/drivers/ata/pata_platform.c
28613@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28614 ATA_PIO_SHT(DRV_NAME),
28615 };
28616
28617-static struct ata_port_operations pata_platform_port_ops = {
28618+static const struct ata_port_operations pata_platform_port_ops = {
28619 .inherits = &ata_sff_port_ops,
28620 .sff_data_xfer = ata_sff_data_xfer_noirq,
28621 .cable_detect = ata_cable_unknown,
28622diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28623index 45879dc..165a9f9 100644
28624--- a/drivers/ata/pata_qdi.c
28625+++ b/drivers/ata/pata_qdi.c
28626@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28627 ATA_PIO_SHT(DRV_NAME),
28628 };
28629
28630-static struct ata_port_operations qdi6500_port_ops = {
28631+static const struct ata_port_operations qdi6500_port_ops = {
28632 .inherits = &ata_sff_port_ops,
28633 .qc_issue = qdi_qc_issue,
28634 .sff_data_xfer = qdi_data_xfer,
28635@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28636 .set_piomode = qdi6500_set_piomode,
28637 };
28638
28639-static struct ata_port_operations qdi6580_port_ops = {
28640+static const struct ata_port_operations qdi6580_port_ops = {
28641 .inherits = &qdi6500_port_ops,
28642 .set_piomode = qdi6580_set_piomode,
28643 };
28644diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28645index 4401b33..716c5cc 100644
28646--- a/drivers/ata/pata_radisys.c
28647+++ b/drivers/ata/pata_radisys.c
28648@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28649 ATA_BMDMA_SHT(DRV_NAME),
28650 };
28651
28652-static struct ata_port_operations radisys_pata_ops = {
28653+static const struct ata_port_operations radisys_pata_ops = {
28654 .inherits = &ata_bmdma_port_ops,
28655 .qc_issue = radisys_qc_issue,
28656 .cable_detect = ata_cable_unknown,
28657diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28658index 45f1e10..fab6bca 100644
28659--- a/drivers/ata/pata_rb532_cf.c
28660+++ b/drivers/ata/pata_rb532_cf.c
28661@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28662 return IRQ_HANDLED;
28663 }
28664
28665-static struct ata_port_operations rb532_pata_port_ops = {
28666+static const struct ata_port_operations rb532_pata_port_ops = {
28667 .inherits = &ata_sff_port_ops,
28668 .sff_data_xfer = ata_sff_data_xfer32,
28669 };
28670diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28671index c843a1e..b5853c3 100644
28672--- a/drivers/ata/pata_rdc.c
28673+++ b/drivers/ata/pata_rdc.c
28674@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28675 pci_write_config_byte(dev, 0x48, udma_enable);
28676 }
28677
28678-static struct ata_port_operations rdc_pata_ops = {
28679+static const struct ata_port_operations rdc_pata_ops = {
28680 .inherits = &ata_bmdma32_port_ops,
28681 .cable_detect = rdc_pata_cable_detect,
28682 .set_piomode = rdc_set_piomode,
28683diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28684index a5e4dfe..080c8c9 100644
28685--- a/drivers/ata/pata_rz1000.c
28686+++ b/drivers/ata/pata_rz1000.c
28687@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28688 ATA_PIO_SHT(DRV_NAME),
28689 };
28690
28691-static struct ata_port_operations rz1000_port_ops = {
28692+static const struct ata_port_operations rz1000_port_ops = {
28693 .inherits = &ata_sff_port_ops,
28694 .cable_detect = ata_cable_40wire,
28695 .set_mode = rz1000_set_mode,
28696diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28697index 3bbed83..e309daf 100644
28698--- a/drivers/ata/pata_sc1200.c
28699+++ b/drivers/ata/pata_sc1200.c
28700@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28701 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28702 };
28703
28704-static struct ata_port_operations sc1200_port_ops = {
28705+static const struct ata_port_operations sc1200_port_ops = {
28706 .inherits = &ata_bmdma_port_ops,
28707 .qc_prep = ata_sff_dumb_qc_prep,
28708 .qc_issue = sc1200_qc_issue,
28709diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28710index 4257d6b..4c1d9d5 100644
28711--- a/drivers/ata/pata_scc.c
28712+++ b/drivers/ata/pata_scc.c
28713@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28714 ATA_BMDMA_SHT(DRV_NAME),
28715 };
28716
28717-static struct ata_port_operations scc_pata_ops = {
28718+static const struct ata_port_operations scc_pata_ops = {
28719 .inherits = &ata_bmdma_port_ops,
28720
28721 .set_piomode = scc_set_piomode,
28722diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28723index 99cceb4..e2e0a87 100644
28724--- a/drivers/ata/pata_sch.c
28725+++ b/drivers/ata/pata_sch.c
28726@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28727 ATA_BMDMA_SHT(DRV_NAME),
28728 };
28729
28730-static struct ata_port_operations sch_pata_ops = {
28731+static const struct ata_port_operations sch_pata_ops = {
28732 .inherits = &ata_bmdma_port_ops,
28733 .cable_detect = ata_cable_unknown,
28734 .set_piomode = sch_set_piomode,
28735diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28736index beaed12..39969f1 100644
28737--- a/drivers/ata/pata_serverworks.c
28738+++ b/drivers/ata/pata_serverworks.c
28739@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28740 ATA_BMDMA_SHT(DRV_NAME),
28741 };
28742
28743-static struct ata_port_operations serverworks_osb4_port_ops = {
28744+static const struct ata_port_operations serverworks_osb4_port_ops = {
28745 .inherits = &ata_bmdma_port_ops,
28746 .cable_detect = serverworks_cable_detect,
28747 .mode_filter = serverworks_osb4_filter,
28748@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28749 .set_dmamode = serverworks_set_dmamode,
28750 };
28751
28752-static struct ata_port_operations serverworks_csb_port_ops = {
28753+static const struct ata_port_operations serverworks_csb_port_ops = {
28754 .inherits = &serverworks_osb4_port_ops,
28755 .mode_filter = serverworks_csb_filter,
28756 };
28757diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28758index a2ace48..0463b44 100644
28759--- a/drivers/ata/pata_sil680.c
28760+++ b/drivers/ata/pata_sil680.c
28761@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28762 ATA_BMDMA_SHT(DRV_NAME),
28763 };
28764
28765-static struct ata_port_operations sil680_port_ops = {
28766+static const struct ata_port_operations sil680_port_ops = {
28767 .inherits = &ata_bmdma32_port_ops,
28768 .cable_detect = sil680_cable_detect,
28769 .set_piomode = sil680_set_piomode,
28770diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28771index 488e77b..b3724d5 100644
28772--- a/drivers/ata/pata_sis.c
28773+++ b/drivers/ata/pata_sis.c
28774@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28775 ATA_BMDMA_SHT(DRV_NAME),
28776 };
28777
28778-static struct ata_port_operations sis_133_for_sata_ops = {
28779+static const struct ata_port_operations sis_133_for_sata_ops = {
28780 .inherits = &ata_bmdma_port_ops,
28781 .set_piomode = sis_133_set_piomode,
28782 .set_dmamode = sis_133_set_dmamode,
28783 .cable_detect = sis_133_cable_detect,
28784 };
28785
28786-static struct ata_port_operations sis_base_ops = {
28787+static const struct ata_port_operations sis_base_ops = {
28788 .inherits = &ata_bmdma_port_ops,
28789 .prereset = sis_pre_reset,
28790 };
28791
28792-static struct ata_port_operations sis_133_ops = {
28793+static const struct ata_port_operations sis_133_ops = {
28794 .inherits = &sis_base_ops,
28795 .set_piomode = sis_133_set_piomode,
28796 .set_dmamode = sis_133_set_dmamode,
28797 .cable_detect = sis_133_cable_detect,
28798 };
28799
28800-static struct ata_port_operations sis_133_early_ops = {
28801+static const struct ata_port_operations sis_133_early_ops = {
28802 .inherits = &sis_base_ops,
28803 .set_piomode = sis_100_set_piomode,
28804 .set_dmamode = sis_133_early_set_dmamode,
28805 .cable_detect = sis_66_cable_detect,
28806 };
28807
28808-static struct ata_port_operations sis_100_ops = {
28809+static const struct ata_port_operations sis_100_ops = {
28810 .inherits = &sis_base_ops,
28811 .set_piomode = sis_100_set_piomode,
28812 .set_dmamode = sis_100_set_dmamode,
28813 .cable_detect = sis_66_cable_detect,
28814 };
28815
28816-static struct ata_port_operations sis_66_ops = {
28817+static const struct ata_port_operations sis_66_ops = {
28818 .inherits = &sis_base_ops,
28819 .set_piomode = sis_old_set_piomode,
28820 .set_dmamode = sis_66_set_dmamode,
28821 .cable_detect = sis_66_cable_detect,
28822 };
28823
28824-static struct ata_port_operations sis_old_ops = {
28825+static const struct ata_port_operations sis_old_ops = {
28826 .inherits = &sis_base_ops,
28827 .set_piomode = sis_old_set_piomode,
28828 .set_dmamode = sis_old_set_dmamode,
28829diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28830index 29f733c..43e9ca0 100644
28831--- a/drivers/ata/pata_sl82c105.c
28832+++ b/drivers/ata/pata_sl82c105.c
28833@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28834 ATA_BMDMA_SHT(DRV_NAME),
28835 };
28836
28837-static struct ata_port_operations sl82c105_port_ops = {
28838+static const struct ata_port_operations sl82c105_port_ops = {
28839 .inherits = &ata_bmdma_port_ops,
28840 .qc_defer = sl82c105_qc_defer,
28841 .bmdma_start = sl82c105_bmdma_start,
28842diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28843index f1f13ff..df39e99 100644
28844--- a/drivers/ata/pata_triflex.c
28845+++ b/drivers/ata/pata_triflex.c
28846@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28847 ATA_BMDMA_SHT(DRV_NAME),
28848 };
28849
28850-static struct ata_port_operations triflex_port_ops = {
28851+static const struct ata_port_operations triflex_port_ops = {
28852 .inherits = &ata_bmdma_port_ops,
28853 .bmdma_start = triflex_bmdma_start,
28854 .bmdma_stop = triflex_bmdma_stop,
28855diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28856index 1d73b8d..98a4b29 100644
28857--- a/drivers/ata/pata_via.c
28858+++ b/drivers/ata/pata_via.c
28859@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28860 ATA_BMDMA_SHT(DRV_NAME),
28861 };
28862
28863-static struct ata_port_operations via_port_ops = {
28864+static const struct ata_port_operations via_port_ops = {
28865 .inherits = &ata_bmdma_port_ops,
28866 .cable_detect = via_cable_detect,
28867 .set_piomode = via_set_piomode,
28868@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28869 .port_start = via_port_start,
28870 };
28871
28872-static struct ata_port_operations via_port_ops_noirq = {
28873+static const struct ata_port_operations via_port_ops_noirq = {
28874 .inherits = &via_port_ops,
28875 .sff_data_xfer = ata_sff_data_xfer_noirq,
28876 };
28877diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28878index 6d8619b..ad511c4 100644
28879--- a/drivers/ata/pata_winbond.c
28880+++ b/drivers/ata/pata_winbond.c
28881@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28882 ATA_PIO_SHT(DRV_NAME),
28883 };
28884
28885-static struct ata_port_operations winbond_port_ops = {
28886+static const struct ata_port_operations winbond_port_ops = {
28887 .inherits = &ata_sff_port_ops,
28888 .sff_data_xfer = winbond_data_xfer,
28889 .cable_detect = ata_cable_40wire,
28890diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28891index 6c65b07..f996ec7 100644
28892--- a/drivers/ata/pdc_adma.c
28893+++ b/drivers/ata/pdc_adma.c
28894@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28895 .dma_boundary = ADMA_DMA_BOUNDARY,
28896 };
28897
28898-static struct ata_port_operations adma_ata_ops = {
28899+static const struct ata_port_operations adma_ata_ops = {
28900 .inherits = &ata_sff_port_ops,
28901
28902 .lost_interrupt = ATA_OP_NULL,
28903diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
28904index 172b57e..c49bc1e 100644
28905--- a/drivers/ata/sata_fsl.c
28906+++ b/drivers/ata/sata_fsl.c
28907@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
28908 .dma_boundary = ATA_DMA_BOUNDARY,
28909 };
28910
28911-static struct ata_port_operations sata_fsl_ops = {
28912+static const struct ata_port_operations sata_fsl_ops = {
28913 .inherits = &sata_pmp_port_ops,
28914
28915 .qc_defer = ata_std_qc_defer,
28916diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
28917index 4406902..60603ef 100644
28918--- a/drivers/ata/sata_inic162x.c
28919+++ b/drivers/ata/sata_inic162x.c
28920@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
28921 return 0;
28922 }
28923
28924-static struct ata_port_operations inic_port_ops = {
28925+static const struct ata_port_operations inic_port_ops = {
28926 .inherits = &sata_port_ops,
28927
28928 .check_atapi_dma = inic_check_atapi_dma,
28929diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
28930index cf41126..8107be6 100644
28931--- a/drivers/ata/sata_mv.c
28932+++ b/drivers/ata/sata_mv.c
28933@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
28934 .dma_boundary = MV_DMA_BOUNDARY,
28935 };
28936
28937-static struct ata_port_operations mv5_ops = {
28938+static const struct ata_port_operations mv5_ops = {
28939 .inherits = &ata_sff_port_ops,
28940
28941 .lost_interrupt = ATA_OP_NULL,
28942@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
28943 .port_stop = mv_port_stop,
28944 };
28945
28946-static struct ata_port_operations mv6_ops = {
28947+static const struct ata_port_operations mv6_ops = {
28948 .inherits = &mv5_ops,
28949 .dev_config = mv6_dev_config,
28950 .scr_read = mv_scr_read,
28951@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
28952 .bmdma_status = mv_bmdma_status,
28953 };
28954
28955-static struct ata_port_operations mv_iie_ops = {
28956+static const struct ata_port_operations mv_iie_ops = {
28957 .inherits = &mv6_ops,
28958 .dev_config = ATA_OP_NULL,
28959 .qc_prep = mv_qc_prep_iie,
28960diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
28961index ae2297c..d5c9c33 100644
28962--- a/drivers/ata/sata_nv.c
28963+++ b/drivers/ata/sata_nv.c
28964@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
28965 * cases. Define nv_hardreset() which only kicks in for post-boot
28966 * probing and use it for all variants.
28967 */
28968-static struct ata_port_operations nv_generic_ops = {
28969+static const struct ata_port_operations nv_generic_ops = {
28970 .inherits = &ata_bmdma_port_ops,
28971 .lost_interrupt = ATA_OP_NULL,
28972 .scr_read = nv_scr_read,
28973@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
28974 .hardreset = nv_hardreset,
28975 };
28976
28977-static struct ata_port_operations nv_nf2_ops = {
28978+static const struct ata_port_operations nv_nf2_ops = {
28979 .inherits = &nv_generic_ops,
28980 .freeze = nv_nf2_freeze,
28981 .thaw = nv_nf2_thaw,
28982 };
28983
28984-static struct ata_port_operations nv_ck804_ops = {
28985+static const struct ata_port_operations nv_ck804_ops = {
28986 .inherits = &nv_generic_ops,
28987 .freeze = nv_ck804_freeze,
28988 .thaw = nv_ck804_thaw,
28989 .host_stop = nv_ck804_host_stop,
28990 };
28991
28992-static struct ata_port_operations nv_adma_ops = {
28993+static const struct ata_port_operations nv_adma_ops = {
28994 .inherits = &nv_ck804_ops,
28995
28996 .check_atapi_dma = nv_adma_check_atapi_dma,
28997@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
28998 .host_stop = nv_adma_host_stop,
28999 };
29000
29001-static struct ata_port_operations nv_swncq_ops = {
29002+static const struct ata_port_operations nv_swncq_ops = {
29003 .inherits = &nv_generic_ops,
29004
29005 .qc_defer = ata_std_qc_defer,
29006diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29007index 07d8d00..6cc70bb 100644
29008--- a/drivers/ata/sata_promise.c
29009+++ b/drivers/ata/sata_promise.c
29010@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29011 .error_handler = pdc_error_handler,
29012 };
29013
29014-static struct ata_port_operations pdc_sata_ops = {
29015+static const struct ata_port_operations pdc_sata_ops = {
29016 .inherits = &pdc_common_ops,
29017 .cable_detect = pdc_sata_cable_detect,
29018 .freeze = pdc_sata_freeze,
29019@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29020
29021 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29022 and ->freeze/thaw that ignore the hotplug controls. */
29023-static struct ata_port_operations pdc_old_sata_ops = {
29024+static const struct ata_port_operations pdc_old_sata_ops = {
29025 .inherits = &pdc_sata_ops,
29026 .freeze = pdc_freeze,
29027 .thaw = pdc_thaw,
29028 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29029 };
29030
29031-static struct ata_port_operations pdc_pata_ops = {
29032+static const struct ata_port_operations pdc_pata_ops = {
29033 .inherits = &pdc_common_ops,
29034 .cable_detect = pdc_pata_cable_detect,
29035 .freeze = pdc_freeze,
29036diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29037index 326c0cf..36ecebe 100644
29038--- a/drivers/ata/sata_qstor.c
29039+++ b/drivers/ata/sata_qstor.c
29040@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29041 .dma_boundary = QS_DMA_BOUNDARY,
29042 };
29043
29044-static struct ata_port_operations qs_ata_ops = {
29045+static const struct ata_port_operations qs_ata_ops = {
29046 .inherits = &ata_sff_port_ops,
29047
29048 .check_atapi_dma = qs_check_atapi_dma,
29049diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29050index 3cb69d5..0871d3c 100644
29051--- a/drivers/ata/sata_sil.c
29052+++ b/drivers/ata/sata_sil.c
29053@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29054 .sg_tablesize = ATA_MAX_PRD
29055 };
29056
29057-static struct ata_port_operations sil_ops = {
29058+static const struct ata_port_operations sil_ops = {
29059 .inherits = &ata_bmdma32_port_ops,
29060 .dev_config = sil_dev_config,
29061 .set_mode = sil_set_mode,
29062diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29063index e6946fc..eddb794 100644
29064--- a/drivers/ata/sata_sil24.c
29065+++ b/drivers/ata/sata_sil24.c
29066@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29067 .dma_boundary = ATA_DMA_BOUNDARY,
29068 };
29069
29070-static struct ata_port_operations sil24_ops = {
29071+static const struct ata_port_operations sil24_ops = {
29072 .inherits = &sata_pmp_port_ops,
29073
29074 .qc_defer = sil24_qc_defer,
29075diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29076index f8a91bf..9cb06b6 100644
29077--- a/drivers/ata/sata_sis.c
29078+++ b/drivers/ata/sata_sis.c
29079@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29080 ATA_BMDMA_SHT(DRV_NAME),
29081 };
29082
29083-static struct ata_port_operations sis_ops = {
29084+static const struct ata_port_operations sis_ops = {
29085 .inherits = &ata_bmdma_port_ops,
29086 .scr_read = sis_scr_read,
29087 .scr_write = sis_scr_write,
29088diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29089index 7257f2d..d04c6f5 100644
29090--- a/drivers/ata/sata_svw.c
29091+++ b/drivers/ata/sata_svw.c
29092@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29093 };
29094
29095
29096-static struct ata_port_operations k2_sata_ops = {
29097+static const struct ata_port_operations k2_sata_ops = {
29098 .inherits = &ata_bmdma_port_ops,
29099 .sff_tf_load = k2_sata_tf_load,
29100 .sff_tf_read = k2_sata_tf_read,
29101diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29102index bbcf970..cd0df0d 100644
29103--- a/drivers/ata/sata_sx4.c
29104+++ b/drivers/ata/sata_sx4.c
29105@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29106 };
29107
29108 /* TODO: inherit from base port_ops after converting to new EH */
29109-static struct ata_port_operations pdc_20621_ops = {
29110+static const struct ata_port_operations pdc_20621_ops = {
29111 .inherits = &ata_sff_port_ops,
29112
29113 .check_atapi_dma = pdc_check_atapi_dma,
29114diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29115index e5bff47..089d859 100644
29116--- a/drivers/ata/sata_uli.c
29117+++ b/drivers/ata/sata_uli.c
29118@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29119 ATA_BMDMA_SHT(DRV_NAME),
29120 };
29121
29122-static struct ata_port_operations uli_ops = {
29123+static const struct ata_port_operations uli_ops = {
29124 .inherits = &ata_bmdma_port_ops,
29125 .scr_read = uli_scr_read,
29126 .scr_write = uli_scr_write,
29127diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29128index f5dcca7..77b94eb 100644
29129--- a/drivers/ata/sata_via.c
29130+++ b/drivers/ata/sata_via.c
29131@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29132 ATA_BMDMA_SHT(DRV_NAME),
29133 };
29134
29135-static struct ata_port_operations svia_base_ops = {
29136+static const struct ata_port_operations svia_base_ops = {
29137 .inherits = &ata_bmdma_port_ops,
29138 .sff_tf_load = svia_tf_load,
29139 };
29140
29141-static struct ata_port_operations vt6420_sata_ops = {
29142+static const struct ata_port_operations vt6420_sata_ops = {
29143 .inherits = &svia_base_ops,
29144 .freeze = svia_noop_freeze,
29145 .prereset = vt6420_prereset,
29146 .bmdma_start = vt6420_bmdma_start,
29147 };
29148
29149-static struct ata_port_operations vt6421_pata_ops = {
29150+static const struct ata_port_operations vt6421_pata_ops = {
29151 .inherits = &svia_base_ops,
29152 .cable_detect = vt6421_pata_cable_detect,
29153 .set_piomode = vt6421_set_pio_mode,
29154 .set_dmamode = vt6421_set_dma_mode,
29155 };
29156
29157-static struct ata_port_operations vt6421_sata_ops = {
29158+static const struct ata_port_operations vt6421_sata_ops = {
29159 .inherits = &svia_base_ops,
29160 .scr_read = svia_scr_read,
29161 .scr_write = svia_scr_write,
29162 };
29163
29164-static struct ata_port_operations vt8251_ops = {
29165+static const struct ata_port_operations vt8251_ops = {
29166 .inherits = &svia_base_ops,
29167 .hardreset = sata_std_hardreset,
29168 .scr_read = vt8251_scr_read,
29169diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29170index 8b2a278..51e65d3 100644
29171--- a/drivers/ata/sata_vsc.c
29172+++ b/drivers/ata/sata_vsc.c
29173@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29174 };
29175
29176
29177-static struct ata_port_operations vsc_sata_ops = {
29178+static const struct ata_port_operations vsc_sata_ops = {
29179 .inherits = &ata_bmdma_port_ops,
29180 /* The IRQ handling is not quite standard SFF behaviour so we
29181 cannot use the default lost interrupt handler */
29182diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29183index 5effec6..7e4019a 100644
29184--- a/drivers/atm/adummy.c
29185+++ b/drivers/atm/adummy.c
29186@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29187 vcc->pop(vcc, skb);
29188 else
29189 dev_kfree_skb_any(skb);
29190- atomic_inc(&vcc->stats->tx);
29191+ atomic_inc_unchecked(&vcc->stats->tx);
29192
29193 return 0;
29194 }
29195diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29196index 66e1813..26a27c6 100644
29197--- a/drivers/atm/ambassador.c
29198+++ b/drivers/atm/ambassador.c
29199@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29200 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29201
29202 // VC layer stats
29203- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29204+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29205
29206 // free the descriptor
29207 kfree (tx_descr);
29208@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29209 dump_skb ("<<<", vc, skb);
29210
29211 // VC layer stats
29212- atomic_inc(&atm_vcc->stats->rx);
29213+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29214 __net_timestamp(skb);
29215 // end of our responsability
29216 atm_vcc->push (atm_vcc, skb);
29217@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29218 } else {
29219 PRINTK (KERN_INFO, "dropped over-size frame");
29220 // should we count this?
29221- atomic_inc(&atm_vcc->stats->rx_drop);
29222+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29223 }
29224
29225 } else {
29226@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29227 }
29228
29229 if (check_area (skb->data, skb->len)) {
29230- atomic_inc(&atm_vcc->stats->tx_err);
29231+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29232 return -ENOMEM; // ?
29233 }
29234
29235diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29236index 02ad83d..6daffeb 100644
29237--- a/drivers/atm/atmtcp.c
29238+++ b/drivers/atm/atmtcp.c
29239@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29240 if (vcc->pop) vcc->pop(vcc,skb);
29241 else dev_kfree_skb(skb);
29242 if (dev_data) return 0;
29243- atomic_inc(&vcc->stats->tx_err);
29244+ atomic_inc_unchecked(&vcc->stats->tx_err);
29245 return -ENOLINK;
29246 }
29247 size = skb->len+sizeof(struct atmtcp_hdr);
29248@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29249 if (!new_skb) {
29250 if (vcc->pop) vcc->pop(vcc,skb);
29251 else dev_kfree_skb(skb);
29252- atomic_inc(&vcc->stats->tx_err);
29253+ atomic_inc_unchecked(&vcc->stats->tx_err);
29254 return -ENOBUFS;
29255 }
29256 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29257@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29258 if (vcc->pop) vcc->pop(vcc,skb);
29259 else dev_kfree_skb(skb);
29260 out_vcc->push(out_vcc,new_skb);
29261- atomic_inc(&vcc->stats->tx);
29262- atomic_inc(&out_vcc->stats->rx);
29263+ atomic_inc_unchecked(&vcc->stats->tx);
29264+ atomic_inc_unchecked(&out_vcc->stats->rx);
29265 return 0;
29266 }
29267
29268@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29269 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29270 read_unlock(&vcc_sklist_lock);
29271 if (!out_vcc) {
29272- atomic_inc(&vcc->stats->tx_err);
29273+ atomic_inc_unchecked(&vcc->stats->tx_err);
29274 goto done;
29275 }
29276 skb_pull(skb,sizeof(struct atmtcp_hdr));
29277@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29278 __net_timestamp(new_skb);
29279 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29280 out_vcc->push(out_vcc,new_skb);
29281- atomic_inc(&vcc->stats->tx);
29282- atomic_inc(&out_vcc->stats->rx);
29283+ atomic_inc_unchecked(&vcc->stats->tx);
29284+ atomic_inc_unchecked(&out_vcc->stats->rx);
29285 done:
29286 if (vcc->pop) vcc->pop(vcc,skb);
29287 else dev_kfree_skb(skb);
29288diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29289index 0c30261..3da356e 100644
29290--- a/drivers/atm/eni.c
29291+++ b/drivers/atm/eni.c
29292@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29293 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29294 vcc->dev->number);
29295 length = 0;
29296- atomic_inc(&vcc->stats->rx_err);
29297+ atomic_inc_unchecked(&vcc->stats->rx_err);
29298 }
29299 else {
29300 length = ATM_CELL_SIZE-1; /* no HEC */
29301@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29302 size);
29303 }
29304 eff = length = 0;
29305- atomic_inc(&vcc->stats->rx_err);
29306+ atomic_inc_unchecked(&vcc->stats->rx_err);
29307 }
29308 else {
29309 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29310@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29311 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29312 vcc->dev->number,vcc->vci,length,size << 2,descr);
29313 length = eff = 0;
29314- atomic_inc(&vcc->stats->rx_err);
29315+ atomic_inc_unchecked(&vcc->stats->rx_err);
29316 }
29317 }
29318 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29319@@ -770,7 +770,7 @@ rx_dequeued++;
29320 vcc->push(vcc,skb);
29321 pushed++;
29322 }
29323- atomic_inc(&vcc->stats->rx);
29324+ atomic_inc_unchecked(&vcc->stats->rx);
29325 }
29326 wake_up(&eni_dev->rx_wait);
29327 }
29328@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29329 PCI_DMA_TODEVICE);
29330 if (vcc->pop) vcc->pop(vcc,skb);
29331 else dev_kfree_skb_irq(skb);
29332- atomic_inc(&vcc->stats->tx);
29333+ atomic_inc_unchecked(&vcc->stats->tx);
29334 wake_up(&eni_dev->tx_wait);
29335 dma_complete++;
29336 }
29337@@ -1570,7 +1570,7 @@ tx_complete++;
29338 /*--------------------------------- entries ---------------------------------*/
29339
29340
29341-static const char *media_name[] __devinitdata = {
29342+static const char *media_name[] __devinitconst = {
29343 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29344 "UTP", "05?", "06?", "07?", /* 4- 7 */
29345 "TAXI","09?", "10?", "11?", /* 8-11 */
29346diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29347index cd5049a..a51209f 100644
29348--- a/drivers/atm/firestream.c
29349+++ b/drivers/atm/firestream.c
29350@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29351 }
29352 }
29353
29354- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29355+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29356
29357 fs_dprintk (FS_DEBUG_TXMEM, "i");
29358 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29359@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29360 #endif
29361 skb_put (skb, qe->p1 & 0xffff);
29362 ATM_SKB(skb)->vcc = atm_vcc;
29363- atomic_inc(&atm_vcc->stats->rx);
29364+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29365 __net_timestamp(skb);
29366 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29367 atm_vcc->push (atm_vcc, skb);
29368@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29369 kfree (pe);
29370 }
29371 if (atm_vcc)
29372- atomic_inc(&atm_vcc->stats->rx_drop);
29373+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29374 break;
29375 case 0x1f: /* Reassembly abort: no buffers. */
29376 /* Silently increment error counter. */
29377 if (atm_vcc)
29378- atomic_inc(&atm_vcc->stats->rx_drop);
29379+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29380 break;
29381 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29382 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29383diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29384index f766cc4..a34002e 100644
29385--- a/drivers/atm/fore200e.c
29386+++ b/drivers/atm/fore200e.c
29387@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29388 #endif
29389 /* check error condition */
29390 if (*entry->status & STATUS_ERROR)
29391- atomic_inc(&vcc->stats->tx_err);
29392+ atomic_inc_unchecked(&vcc->stats->tx_err);
29393 else
29394- atomic_inc(&vcc->stats->tx);
29395+ atomic_inc_unchecked(&vcc->stats->tx);
29396 }
29397 }
29398
29399@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29400 if (skb == NULL) {
29401 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29402
29403- atomic_inc(&vcc->stats->rx_drop);
29404+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29405 return -ENOMEM;
29406 }
29407
29408@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29409
29410 dev_kfree_skb_any(skb);
29411
29412- atomic_inc(&vcc->stats->rx_drop);
29413+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29414 return -ENOMEM;
29415 }
29416
29417 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29418
29419 vcc->push(vcc, skb);
29420- atomic_inc(&vcc->stats->rx);
29421+ atomic_inc_unchecked(&vcc->stats->rx);
29422
29423 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29424
29425@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29426 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29427 fore200e->atm_dev->number,
29428 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29429- atomic_inc(&vcc->stats->rx_err);
29430+ atomic_inc_unchecked(&vcc->stats->rx_err);
29431 }
29432 }
29433
29434@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29435 goto retry_here;
29436 }
29437
29438- atomic_inc(&vcc->stats->tx_err);
29439+ atomic_inc_unchecked(&vcc->stats->tx_err);
29440
29441 fore200e->tx_sat++;
29442 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29443diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29444index 7066703..2b130de 100644
29445--- a/drivers/atm/he.c
29446+++ b/drivers/atm/he.c
29447@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29448
29449 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29450 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29451- atomic_inc(&vcc->stats->rx_drop);
29452+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29453 goto return_host_buffers;
29454 }
29455
29456@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29457 RBRQ_LEN_ERR(he_dev->rbrq_head)
29458 ? "LEN_ERR" : "",
29459 vcc->vpi, vcc->vci);
29460- atomic_inc(&vcc->stats->rx_err);
29461+ atomic_inc_unchecked(&vcc->stats->rx_err);
29462 goto return_host_buffers;
29463 }
29464
29465@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29466 vcc->push(vcc, skb);
29467 spin_lock(&he_dev->global_lock);
29468
29469- atomic_inc(&vcc->stats->rx);
29470+ atomic_inc_unchecked(&vcc->stats->rx);
29471
29472 return_host_buffers:
29473 ++pdus_assembled;
29474@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29475 tpd->vcc->pop(tpd->vcc, tpd->skb);
29476 else
29477 dev_kfree_skb_any(tpd->skb);
29478- atomic_inc(&tpd->vcc->stats->tx_err);
29479+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29480 }
29481 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29482 return;
29483@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29484 vcc->pop(vcc, skb);
29485 else
29486 dev_kfree_skb_any(skb);
29487- atomic_inc(&vcc->stats->tx_err);
29488+ atomic_inc_unchecked(&vcc->stats->tx_err);
29489 return -EINVAL;
29490 }
29491
29492@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29493 vcc->pop(vcc, skb);
29494 else
29495 dev_kfree_skb_any(skb);
29496- atomic_inc(&vcc->stats->tx_err);
29497+ atomic_inc_unchecked(&vcc->stats->tx_err);
29498 return -EINVAL;
29499 }
29500 #endif
29501@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29502 vcc->pop(vcc, skb);
29503 else
29504 dev_kfree_skb_any(skb);
29505- atomic_inc(&vcc->stats->tx_err);
29506+ atomic_inc_unchecked(&vcc->stats->tx_err);
29507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29508 return -ENOMEM;
29509 }
29510@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29511 vcc->pop(vcc, skb);
29512 else
29513 dev_kfree_skb_any(skb);
29514- atomic_inc(&vcc->stats->tx_err);
29515+ atomic_inc_unchecked(&vcc->stats->tx_err);
29516 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29517 return -ENOMEM;
29518 }
29519@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29520 __enqueue_tpd(he_dev, tpd, cid);
29521 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29522
29523- atomic_inc(&vcc->stats->tx);
29524+ atomic_inc_unchecked(&vcc->stats->tx);
29525
29526 return 0;
29527 }
29528diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29529index 4e49021..01b1512 100644
29530--- a/drivers/atm/horizon.c
29531+++ b/drivers/atm/horizon.c
29532@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29533 {
29534 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29535 // VC layer stats
29536- atomic_inc(&vcc->stats->rx);
29537+ atomic_inc_unchecked(&vcc->stats->rx);
29538 __net_timestamp(skb);
29539 // end of our responsability
29540 vcc->push (vcc, skb);
29541@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29542 dev->tx_iovec = NULL;
29543
29544 // VC layer stats
29545- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29546+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29547
29548 // free the skb
29549 hrz_kfree_skb (skb);
29550diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29551index e33ae00..9deb4ab 100644
29552--- a/drivers/atm/idt77252.c
29553+++ b/drivers/atm/idt77252.c
29554@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29555 else
29556 dev_kfree_skb(skb);
29557
29558- atomic_inc(&vcc->stats->tx);
29559+ atomic_inc_unchecked(&vcc->stats->tx);
29560 }
29561
29562 atomic_dec(&scq->used);
29563@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29564 if ((sb = dev_alloc_skb(64)) == NULL) {
29565 printk("%s: Can't allocate buffers for aal0.\n",
29566 card->name);
29567- atomic_add(i, &vcc->stats->rx_drop);
29568+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29569 break;
29570 }
29571 if (!atm_charge(vcc, sb->truesize)) {
29572 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29573 card->name);
29574- atomic_add(i - 1, &vcc->stats->rx_drop);
29575+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29576 dev_kfree_skb(sb);
29577 break;
29578 }
29579@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29580 ATM_SKB(sb)->vcc = vcc;
29581 __net_timestamp(sb);
29582 vcc->push(vcc, sb);
29583- atomic_inc(&vcc->stats->rx);
29584+ atomic_inc_unchecked(&vcc->stats->rx);
29585
29586 cell += ATM_CELL_PAYLOAD;
29587 }
29588@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29589 "(CDC: %08x)\n",
29590 card->name, len, rpp->len, readl(SAR_REG_CDC));
29591 recycle_rx_pool_skb(card, rpp);
29592- atomic_inc(&vcc->stats->rx_err);
29593+ atomic_inc_unchecked(&vcc->stats->rx_err);
29594 return;
29595 }
29596 if (stat & SAR_RSQE_CRC) {
29597 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29598 recycle_rx_pool_skb(card, rpp);
29599- atomic_inc(&vcc->stats->rx_err);
29600+ atomic_inc_unchecked(&vcc->stats->rx_err);
29601 return;
29602 }
29603 if (skb_queue_len(&rpp->queue) > 1) {
29604@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29605 RXPRINTK("%s: Can't alloc RX skb.\n",
29606 card->name);
29607 recycle_rx_pool_skb(card, rpp);
29608- atomic_inc(&vcc->stats->rx_err);
29609+ atomic_inc_unchecked(&vcc->stats->rx_err);
29610 return;
29611 }
29612 if (!atm_charge(vcc, skb->truesize)) {
29613@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29614 __net_timestamp(skb);
29615
29616 vcc->push(vcc, skb);
29617- atomic_inc(&vcc->stats->rx);
29618+ atomic_inc_unchecked(&vcc->stats->rx);
29619
29620 return;
29621 }
29622@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29623 __net_timestamp(skb);
29624
29625 vcc->push(vcc, skb);
29626- atomic_inc(&vcc->stats->rx);
29627+ atomic_inc_unchecked(&vcc->stats->rx);
29628
29629 if (skb->truesize > SAR_FB_SIZE_3)
29630 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29631@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29632 if (vcc->qos.aal != ATM_AAL0) {
29633 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29634 card->name, vpi, vci);
29635- atomic_inc(&vcc->stats->rx_drop);
29636+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29637 goto drop;
29638 }
29639
29640 if ((sb = dev_alloc_skb(64)) == NULL) {
29641 printk("%s: Can't allocate buffers for AAL0.\n",
29642 card->name);
29643- atomic_inc(&vcc->stats->rx_err);
29644+ atomic_inc_unchecked(&vcc->stats->rx_err);
29645 goto drop;
29646 }
29647
29648@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29649 ATM_SKB(sb)->vcc = vcc;
29650 __net_timestamp(sb);
29651 vcc->push(vcc, sb);
29652- atomic_inc(&vcc->stats->rx);
29653+ atomic_inc_unchecked(&vcc->stats->rx);
29654
29655 drop:
29656 skb_pull(queue, 64);
29657@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29658
29659 if (vc == NULL) {
29660 printk("%s: NULL connection in send().\n", card->name);
29661- atomic_inc(&vcc->stats->tx_err);
29662+ atomic_inc_unchecked(&vcc->stats->tx_err);
29663 dev_kfree_skb(skb);
29664 return -EINVAL;
29665 }
29666 if (!test_bit(VCF_TX, &vc->flags)) {
29667 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29668- atomic_inc(&vcc->stats->tx_err);
29669+ atomic_inc_unchecked(&vcc->stats->tx_err);
29670 dev_kfree_skb(skb);
29671 return -EINVAL;
29672 }
29673@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29674 break;
29675 default:
29676 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29677- atomic_inc(&vcc->stats->tx_err);
29678+ atomic_inc_unchecked(&vcc->stats->tx_err);
29679 dev_kfree_skb(skb);
29680 return -EINVAL;
29681 }
29682
29683 if (skb_shinfo(skb)->nr_frags != 0) {
29684 printk("%s: No scatter-gather yet.\n", card->name);
29685- atomic_inc(&vcc->stats->tx_err);
29686+ atomic_inc_unchecked(&vcc->stats->tx_err);
29687 dev_kfree_skb(skb);
29688 return -EINVAL;
29689 }
29690@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29691
29692 err = queue_skb(card, vc, skb, oam);
29693 if (err) {
29694- atomic_inc(&vcc->stats->tx_err);
29695+ atomic_inc_unchecked(&vcc->stats->tx_err);
29696 dev_kfree_skb(skb);
29697 return err;
29698 }
29699@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29700 skb = dev_alloc_skb(64);
29701 if (!skb) {
29702 printk("%s: Out of memory in send_oam().\n", card->name);
29703- atomic_inc(&vcc->stats->tx_err);
29704+ atomic_inc_unchecked(&vcc->stats->tx_err);
29705 return -ENOMEM;
29706 }
29707 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29708diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29709index b2c1b37..faa672b 100644
29710--- a/drivers/atm/iphase.c
29711+++ b/drivers/atm/iphase.c
29712@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29713 status = (u_short) (buf_desc_ptr->desc_mode);
29714 if (status & (RX_CER | RX_PTE | RX_OFL))
29715 {
29716- atomic_inc(&vcc->stats->rx_err);
29717+ atomic_inc_unchecked(&vcc->stats->rx_err);
29718 IF_ERR(printk("IA: bad packet, dropping it");)
29719 if (status & RX_CER) {
29720 IF_ERR(printk(" cause: packet CRC error\n");)
29721@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29722 len = dma_addr - buf_addr;
29723 if (len > iadev->rx_buf_sz) {
29724 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29725- atomic_inc(&vcc->stats->rx_err);
29726+ atomic_inc_unchecked(&vcc->stats->rx_err);
29727 goto out_free_desc;
29728 }
29729
29730@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29731 ia_vcc = INPH_IA_VCC(vcc);
29732 if (ia_vcc == NULL)
29733 {
29734- atomic_inc(&vcc->stats->rx_err);
29735+ atomic_inc_unchecked(&vcc->stats->rx_err);
29736 dev_kfree_skb_any(skb);
29737 atm_return(vcc, atm_guess_pdu2truesize(len));
29738 goto INCR_DLE;
29739@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29740 if ((length > iadev->rx_buf_sz) || (length >
29741 (skb->len - sizeof(struct cpcs_trailer))))
29742 {
29743- atomic_inc(&vcc->stats->rx_err);
29744+ atomic_inc_unchecked(&vcc->stats->rx_err);
29745 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29746 length, skb->len);)
29747 dev_kfree_skb_any(skb);
29748@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29749
29750 IF_RX(printk("rx_dle_intr: skb push");)
29751 vcc->push(vcc,skb);
29752- atomic_inc(&vcc->stats->rx);
29753+ atomic_inc_unchecked(&vcc->stats->rx);
29754 iadev->rx_pkt_cnt++;
29755 }
29756 INCR_DLE:
29757@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29758 {
29759 struct k_sonet_stats *stats;
29760 stats = &PRIV(_ia_dev[board])->sonet_stats;
29761- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29762- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29763- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29764- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29765- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29766- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29767- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29768- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29769- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29770+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29771+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29772+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29773+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29774+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29775+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29776+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29777+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29778+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29779 }
29780 ia_cmds.status = 0;
29781 break;
29782@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29783 if ((desc == 0) || (desc > iadev->num_tx_desc))
29784 {
29785 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29786- atomic_inc(&vcc->stats->tx);
29787+ atomic_inc_unchecked(&vcc->stats->tx);
29788 if (vcc->pop)
29789 vcc->pop(vcc, skb);
29790 else
29791@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29792 ATM_DESC(skb) = vcc->vci;
29793 skb_queue_tail(&iadev->tx_dma_q, skb);
29794
29795- atomic_inc(&vcc->stats->tx);
29796+ atomic_inc_unchecked(&vcc->stats->tx);
29797 iadev->tx_pkt_cnt++;
29798 /* Increment transaction counter */
29799 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29800
29801 #if 0
29802 /* add flow control logic */
29803- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29804+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29805 if (iavcc->vc_desc_cnt > 10) {
29806 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29807 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29808diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29809index cf97c34..8d30655 100644
29810--- a/drivers/atm/lanai.c
29811+++ b/drivers/atm/lanai.c
29812@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29813 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29814 lanai_endtx(lanai, lvcc);
29815 lanai_free_skb(lvcc->tx.atmvcc, skb);
29816- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29817+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29818 }
29819
29820 /* Try to fill the buffer - don't call unless there is backlog */
29821@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29822 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29823 __net_timestamp(skb);
29824 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29825- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29826+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29827 out:
29828 lvcc->rx.buf.ptr = end;
29829 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29830@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29831 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29832 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29833 lanai->stats.service_rxnotaal5++;
29834- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29835+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29836 return 0;
29837 }
29838 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29839@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29840 int bytes;
29841 read_unlock(&vcc_sklist_lock);
29842 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29843- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29844+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29845 lvcc->stats.x.aal5.service_trash++;
29846 bytes = (SERVICE_GET_END(s) * 16) -
29847 (((unsigned long) lvcc->rx.buf.ptr) -
29848@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29849 }
29850 if (s & SERVICE_STREAM) {
29851 read_unlock(&vcc_sklist_lock);
29852- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29853+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29854 lvcc->stats.x.aal5.service_stream++;
29855 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29856 "PDU on VCI %d!\n", lanai->number, vci);
29857@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29858 return 0;
29859 }
29860 DPRINTK("got rx crc error on vci %d\n", vci);
29861- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29862+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29863 lvcc->stats.x.aal5.service_rxcrc++;
29864 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29865 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29866diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29867index 3da804b..d3b0eed 100644
29868--- a/drivers/atm/nicstar.c
29869+++ b/drivers/atm/nicstar.c
29870@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29871 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29872 {
29873 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29874- atomic_inc(&vcc->stats->tx_err);
29875+ atomic_inc_unchecked(&vcc->stats->tx_err);
29876 dev_kfree_skb_any(skb);
29877 return -EINVAL;
29878 }
29879@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29880 if (!vc->tx)
29881 {
29882 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29883- atomic_inc(&vcc->stats->tx_err);
29884+ atomic_inc_unchecked(&vcc->stats->tx_err);
29885 dev_kfree_skb_any(skb);
29886 return -EINVAL;
29887 }
29888@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29889 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29890 {
29891 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29892- atomic_inc(&vcc->stats->tx_err);
29893+ atomic_inc_unchecked(&vcc->stats->tx_err);
29894 dev_kfree_skb_any(skb);
29895 return -EINVAL;
29896 }
29897@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29898 if (skb_shinfo(skb)->nr_frags != 0)
29899 {
29900 printk("nicstar%d: No scatter-gather yet.\n", card->index);
29901- atomic_inc(&vcc->stats->tx_err);
29902+ atomic_inc_unchecked(&vcc->stats->tx_err);
29903 dev_kfree_skb_any(skb);
29904 return -EINVAL;
29905 }
29906@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29907
29908 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
29909 {
29910- atomic_inc(&vcc->stats->tx_err);
29911+ atomic_inc_unchecked(&vcc->stats->tx_err);
29912 dev_kfree_skb_any(skb);
29913 return -EIO;
29914 }
29915- atomic_inc(&vcc->stats->tx);
29916+ atomic_inc_unchecked(&vcc->stats->tx);
29917
29918 return 0;
29919 }
29920@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29921 {
29922 printk("nicstar%d: Can't allocate buffers for aal0.\n",
29923 card->index);
29924- atomic_add(i,&vcc->stats->rx_drop);
29925+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
29926 break;
29927 }
29928 if (!atm_charge(vcc, sb->truesize))
29929 {
29930 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
29931 card->index);
29932- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29933+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29934 dev_kfree_skb_any(sb);
29935 break;
29936 }
29937@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29938 ATM_SKB(sb)->vcc = vcc;
29939 __net_timestamp(sb);
29940 vcc->push(vcc, sb);
29941- atomic_inc(&vcc->stats->rx);
29942+ atomic_inc_unchecked(&vcc->stats->rx);
29943 cell += ATM_CELL_PAYLOAD;
29944 }
29945
29946@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29947 if (iovb == NULL)
29948 {
29949 printk("nicstar%d: Out of iovec buffers.\n", card->index);
29950- atomic_inc(&vcc->stats->rx_drop);
29951+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29952 recycle_rx_buf(card, skb);
29953 return;
29954 }
29955@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29956 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
29957 {
29958 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
29959- atomic_inc(&vcc->stats->rx_err);
29960+ atomic_inc_unchecked(&vcc->stats->rx_err);
29961 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
29962 NS_SKB(iovb)->iovcnt = 0;
29963 iovb->len = 0;
29964@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29965 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
29966 card->index);
29967 which_list(card, skb);
29968- atomic_inc(&vcc->stats->rx_err);
29969+ atomic_inc_unchecked(&vcc->stats->rx_err);
29970 recycle_rx_buf(card, skb);
29971 vc->rx_iov = NULL;
29972 recycle_iov_buf(card, iovb);
29973@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29974 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
29975 card->index);
29976 which_list(card, skb);
29977- atomic_inc(&vcc->stats->rx_err);
29978+ atomic_inc_unchecked(&vcc->stats->rx_err);
29979 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29980 NS_SKB(iovb)->iovcnt);
29981 vc->rx_iov = NULL;
29982@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29983 printk(" - PDU size mismatch.\n");
29984 else
29985 printk(".\n");
29986- atomic_inc(&vcc->stats->rx_err);
29987+ atomic_inc_unchecked(&vcc->stats->rx_err);
29988 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29989 NS_SKB(iovb)->iovcnt);
29990 vc->rx_iov = NULL;
29991@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29992 if (!atm_charge(vcc, skb->truesize))
29993 {
29994 push_rxbufs(card, skb);
29995- atomic_inc(&vcc->stats->rx_drop);
29996+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29997 }
29998 else
29999 {
30000@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30001 ATM_SKB(skb)->vcc = vcc;
30002 __net_timestamp(skb);
30003 vcc->push(vcc, skb);
30004- atomic_inc(&vcc->stats->rx);
30005+ atomic_inc_unchecked(&vcc->stats->rx);
30006 }
30007 }
30008 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30009@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30010 if (!atm_charge(vcc, sb->truesize))
30011 {
30012 push_rxbufs(card, sb);
30013- atomic_inc(&vcc->stats->rx_drop);
30014+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30015 }
30016 else
30017 {
30018@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30019 ATM_SKB(sb)->vcc = vcc;
30020 __net_timestamp(sb);
30021 vcc->push(vcc, sb);
30022- atomic_inc(&vcc->stats->rx);
30023+ atomic_inc_unchecked(&vcc->stats->rx);
30024 }
30025
30026 push_rxbufs(card, skb);
30027@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30028 if (!atm_charge(vcc, skb->truesize))
30029 {
30030 push_rxbufs(card, skb);
30031- atomic_inc(&vcc->stats->rx_drop);
30032+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30033 }
30034 else
30035 {
30036@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30037 ATM_SKB(skb)->vcc = vcc;
30038 __net_timestamp(skb);
30039 vcc->push(vcc, skb);
30040- atomic_inc(&vcc->stats->rx);
30041+ atomic_inc_unchecked(&vcc->stats->rx);
30042 }
30043
30044 push_rxbufs(card, sb);
30045@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30046 if (hb == NULL)
30047 {
30048 printk("nicstar%d: Out of huge buffers.\n", card->index);
30049- atomic_inc(&vcc->stats->rx_drop);
30050+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30051 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30052 NS_SKB(iovb)->iovcnt);
30053 vc->rx_iov = NULL;
30054@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30055 }
30056 else
30057 dev_kfree_skb_any(hb);
30058- atomic_inc(&vcc->stats->rx_drop);
30059+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30060 }
30061 else
30062 {
30063@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30064 #endif /* NS_USE_DESTRUCTORS */
30065 __net_timestamp(hb);
30066 vcc->push(vcc, hb);
30067- atomic_inc(&vcc->stats->rx);
30068+ atomic_inc_unchecked(&vcc->stats->rx);
30069 }
30070 }
30071
30072diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30073index 84c93ff..e6ed269 100644
30074--- a/drivers/atm/solos-pci.c
30075+++ b/drivers/atm/solos-pci.c
30076@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30077 }
30078 atm_charge(vcc, skb->truesize);
30079 vcc->push(vcc, skb);
30080- atomic_inc(&vcc->stats->rx);
30081+ atomic_inc_unchecked(&vcc->stats->rx);
30082 break;
30083
30084 case PKT_STATUS:
30085@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30086 char msg[500];
30087 char item[10];
30088
30089+ pax_track_stack();
30090+
30091 len = buf->len;
30092 for (i = 0; i < len; i++){
30093 if(i % 8 == 0)
30094@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30095 vcc = SKB_CB(oldskb)->vcc;
30096
30097 if (vcc) {
30098- atomic_inc(&vcc->stats->tx);
30099+ atomic_inc_unchecked(&vcc->stats->tx);
30100 solos_pop(vcc, oldskb);
30101 } else
30102 dev_kfree_skb_irq(oldskb);
30103diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30104index 6dd3f59..ee377f3 100644
30105--- a/drivers/atm/suni.c
30106+++ b/drivers/atm/suni.c
30107@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30108
30109
30110 #define ADD_LIMITED(s,v) \
30111- atomic_add((v),&stats->s); \
30112- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30113+ atomic_add_unchecked((v),&stats->s); \
30114+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30115
30116
30117 static void suni_hz(unsigned long from_timer)
30118diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30119index fc8cb07..4a80e53 100644
30120--- a/drivers/atm/uPD98402.c
30121+++ b/drivers/atm/uPD98402.c
30122@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30123 struct sonet_stats tmp;
30124 int error = 0;
30125
30126- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30127+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30128 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30129 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30130 if (zero && !error) {
30131@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30132
30133
30134 #define ADD_LIMITED(s,v) \
30135- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30136- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30137- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30138+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30139+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30140+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30141
30142
30143 static void stat_event(struct atm_dev *dev)
30144@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30145 if (reason & uPD98402_INT_PFM) stat_event(dev);
30146 if (reason & uPD98402_INT_PCO) {
30147 (void) GET(PCOCR); /* clear interrupt cause */
30148- atomic_add(GET(HECCT),
30149+ atomic_add_unchecked(GET(HECCT),
30150 &PRIV(dev)->sonet_stats.uncorr_hcs);
30151 }
30152 if ((reason & uPD98402_INT_RFO) &&
30153@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30154 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30155 uPD98402_INT_LOS),PIMR); /* enable them */
30156 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30157- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30158- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30159- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30160+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30161+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30162+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30163 return 0;
30164 }
30165
30166diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30167index 2e9635b..32927b4 100644
30168--- a/drivers/atm/zatm.c
30169+++ b/drivers/atm/zatm.c
30170@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30171 }
30172 if (!size) {
30173 dev_kfree_skb_irq(skb);
30174- if (vcc) atomic_inc(&vcc->stats->rx_err);
30175+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30176 continue;
30177 }
30178 if (!atm_charge(vcc,skb->truesize)) {
30179@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30180 skb->len = size;
30181 ATM_SKB(skb)->vcc = vcc;
30182 vcc->push(vcc,skb);
30183- atomic_inc(&vcc->stats->rx);
30184+ atomic_inc_unchecked(&vcc->stats->rx);
30185 }
30186 zout(pos & 0xffff,MTA(mbx));
30187 #if 0 /* probably a stupid idea */
30188@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30189 skb_queue_head(&zatm_vcc->backlog,skb);
30190 break;
30191 }
30192- atomic_inc(&vcc->stats->tx);
30193+ atomic_inc_unchecked(&vcc->stats->tx);
30194 wake_up(&zatm_vcc->tx_wait);
30195 }
30196
30197diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30198index 63c143e..fece183 100644
30199--- a/drivers/base/bus.c
30200+++ b/drivers/base/bus.c
30201@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30202 return ret;
30203 }
30204
30205-static struct sysfs_ops driver_sysfs_ops = {
30206+static const struct sysfs_ops driver_sysfs_ops = {
30207 .show = drv_attr_show,
30208 .store = drv_attr_store,
30209 };
30210@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30211 return ret;
30212 }
30213
30214-static struct sysfs_ops bus_sysfs_ops = {
30215+static const struct sysfs_ops bus_sysfs_ops = {
30216 .show = bus_attr_show,
30217 .store = bus_attr_store,
30218 };
30219@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30220 return 0;
30221 }
30222
30223-static struct kset_uevent_ops bus_uevent_ops = {
30224+static const struct kset_uevent_ops bus_uevent_ops = {
30225 .filter = bus_uevent_filter,
30226 };
30227
30228diff --git a/drivers/base/class.c b/drivers/base/class.c
30229index 6e2c3b0..cb61871 100644
30230--- a/drivers/base/class.c
30231+++ b/drivers/base/class.c
30232@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30233 kfree(cp);
30234 }
30235
30236-static struct sysfs_ops class_sysfs_ops = {
30237+static const struct sysfs_ops class_sysfs_ops = {
30238 .show = class_attr_show,
30239 .store = class_attr_store,
30240 };
30241diff --git a/drivers/base/core.c b/drivers/base/core.c
30242index f33d768..a9358d0 100644
30243--- a/drivers/base/core.c
30244+++ b/drivers/base/core.c
30245@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30246 return ret;
30247 }
30248
30249-static struct sysfs_ops dev_sysfs_ops = {
30250+static const struct sysfs_ops dev_sysfs_ops = {
30251 .show = dev_attr_show,
30252 .store = dev_attr_store,
30253 };
30254@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30255 return retval;
30256 }
30257
30258-static struct kset_uevent_ops device_uevent_ops = {
30259+static const struct kset_uevent_ops device_uevent_ops = {
30260 .filter = dev_uevent_filter,
30261 .name = dev_uevent_name,
30262 .uevent = dev_uevent,
30263diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30264index 989429c..2272b00 100644
30265--- a/drivers/base/memory.c
30266+++ b/drivers/base/memory.c
30267@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30268 return retval;
30269 }
30270
30271-static struct kset_uevent_ops memory_uevent_ops = {
30272+static const struct kset_uevent_ops memory_uevent_ops = {
30273 .name = memory_uevent_name,
30274 .uevent = memory_uevent,
30275 };
30276diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30277index 3f202f7..61c4a6f 100644
30278--- a/drivers/base/sys.c
30279+++ b/drivers/base/sys.c
30280@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30281 return -EIO;
30282 }
30283
30284-static struct sysfs_ops sysfs_ops = {
30285+static const struct sysfs_ops sysfs_ops = {
30286 .show = sysdev_show,
30287 .store = sysdev_store,
30288 };
30289@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30290 return -EIO;
30291 }
30292
30293-static struct sysfs_ops sysfs_class_ops = {
30294+static const struct sysfs_ops sysfs_class_ops = {
30295 .show = sysdev_class_show,
30296 .store = sysdev_class_store,
30297 };
30298diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30299index eb4fa19..1954777 100644
30300--- a/drivers/block/DAC960.c
30301+++ b/drivers/block/DAC960.c
30302@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30303 unsigned long flags;
30304 int Channel, TargetID;
30305
30306+ pax_track_stack();
30307+
30308 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30309 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30310 sizeof(DAC960_SCSI_Inquiry_T) +
30311diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30312index ca9c548..ca6899c 100644
30313--- a/drivers/block/cciss.c
30314+++ b/drivers/block/cciss.c
30315@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30316 int err;
30317 u32 cp;
30318
30319+ memset(&arg64, 0, sizeof(arg64));
30320+
30321 err = 0;
30322 err |=
30323 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30324@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30325 /* Wait (up to 20 seconds) for a command to complete */
30326
30327 for (i = 20 * HZ; i > 0; i--) {
30328- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30329+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30330 if (done == FIFO_EMPTY)
30331 schedule_timeout_uninterruptible(1);
30332 else
30333@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30334 resend_cmd1:
30335
30336 /* Disable interrupt on the board. */
30337- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30338+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30339
30340 /* Make sure there is room in the command FIFO */
30341 /* Actually it should be completely empty at this time */
30342@@ -2884,13 +2886,13 @@ resend_cmd1:
30343 /* tape side of the driver. */
30344 for (i = 200000; i > 0; i--) {
30345 /* if fifo isn't full go */
30346- if (!(h->access.fifo_full(h)))
30347+ if (!(h->access->fifo_full(h)))
30348 break;
30349 udelay(10);
30350 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30351 " waiting!\n", h->ctlr);
30352 }
30353- h->access.submit_command(h, c); /* Send the cmd */
30354+ h->access->submit_command(h, c); /* Send the cmd */
30355 do {
30356 complete = pollcomplete(h->ctlr);
30357
30358@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30359 while (!hlist_empty(&h->reqQ)) {
30360 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30361 /* can't do anything if fifo is full */
30362- if ((h->access.fifo_full(h))) {
30363+ if ((h->access->fifo_full(h))) {
30364 printk(KERN_WARNING "cciss: fifo full\n");
30365 break;
30366 }
30367@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30368 h->Qdepth--;
30369
30370 /* Tell the controller execute command */
30371- h->access.submit_command(h, c);
30372+ h->access->submit_command(h, c);
30373
30374 /* Put job onto the completed Q */
30375 addQ(&h->cmpQ, c);
30376@@ -3393,17 +3395,17 @@ startio:
30377
30378 static inline unsigned long get_next_completion(ctlr_info_t *h)
30379 {
30380- return h->access.command_completed(h);
30381+ return h->access->command_completed(h);
30382 }
30383
30384 static inline int interrupt_pending(ctlr_info_t *h)
30385 {
30386- return h->access.intr_pending(h);
30387+ return h->access->intr_pending(h);
30388 }
30389
30390 static inline long interrupt_not_for_us(ctlr_info_t *h)
30391 {
30392- return (((h->access.intr_pending(h) == 0) ||
30393+ return (((h->access->intr_pending(h) == 0) ||
30394 (h->interrupts_enabled == 0)));
30395 }
30396
30397@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30398 */
30399 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30400 c->product_name = products[prod_index].product_name;
30401- c->access = *(products[prod_index].access);
30402+ c->access = products[prod_index].access;
30403 c->nr_cmds = c->max_commands - 4;
30404 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30405 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30406@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30407 }
30408
30409 /* make sure the board interrupts are off */
30410- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30411+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30412 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30413 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30414 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30415@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30416 cciss_scsi_setup(i);
30417
30418 /* Turn the interrupts on so we can service requests */
30419- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30420+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30421
30422 /* Get the firmware version */
30423 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30424diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30425index 04d6bf8..36e712d 100644
30426--- a/drivers/block/cciss.h
30427+++ b/drivers/block/cciss.h
30428@@ -90,7 +90,7 @@ struct ctlr_info
30429 // information about each logical volume
30430 drive_info_struct *drv[CISS_MAX_LUN];
30431
30432- struct access_method access;
30433+ struct access_method *access;
30434
30435 /* queue and queue Info */
30436 struct hlist_head reqQ;
30437diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30438index 6422651..bb1bdef 100644
30439--- a/drivers/block/cpqarray.c
30440+++ b/drivers/block/cpqarray.c
30441@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30442 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30443 goto Enomem4;
30444 }
30445- hba[i]->access.set_intr_mask(hba[i], 0);
30446+ hba[i]->access->set_intr_mask(hba[i], 0);
30447 if (request_irq(hba[i]->intr, do_ida_intr,
30448 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30449 {
30450@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30451 add_timer(&hba[i]->timer);
30452
30453 /* Enable IRQ now that spinlock and rate limit timer are set up */
30454- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30455+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30456
30457 for(j=0; j<NWD; j++) {
30458 struct gendisk *disk = ida_gendisk[i][j];
30459@@ -695,7 +695,7 @@ DBGINFO(
30460 for(i=0; i<NR_PRODUCTS; i++) {
30461 if (board_id == products[i].board_id) {
30462 c->product_name = products[i].product_name;
30463- c->access = *(products[i].access);
30464+ c->access = products[i].access;
30465 break;
30466 }
30467 }
30468@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30469 hba[ctlr]->intr = intr;
30470 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30471 hba[ctlr]->product_name = products[j].product_name;
30472- hba[ctlr]->access = *(products[j].access);
30473+ hba[ctlr]->access = products[j].access;
30474 hba[ctlr]->ctlr = ctlr;
30475 hba[ctlr]->board_id = board_id;
30476 hba[ctlr]->pci_dev = NULL; /* not PCI */
30477@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30478 struct scatterlist tmp_sg[SG_MAX];
30479 int i, dir, seg;
30480
30481+ pax_track_stack();
30482+
30483 if (blk_queue_plugged(q))
30484 goto startio;
30485
30486@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30487
30488 while((c = h->reqQ) != NULL) {
30489 /* Can't do anything if we're busy */
30490- if (h->access.fifo_full(h) == 0)
30491+ if (h->access->fifo_full(h) == 0)
30492 return;
30493
30494 /* Get the first entry from the request Q */
30495@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30496 h->Qdepth--;
30497
30498 /* Tell the controller to do our bidding */
30499- h->access.submit_command(h, c);
30500+ h->access->submit_command(h, c);
30501
30502 /* Get onto the completion Q */
30503 addQ(&h->cmpQ, c);
30504@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30505 unsigned long flags;
30506 __u32 a,a1;
30507
30508- istat = h->access.intr_pending(h);
30509+ istat = h->access->intr_pending(h);
30510 /* Is this interrupt for us? */
30511 if (istat == 0)
30512 return IRQ_NONE;
30513@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30514 */
30515 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30516 if (istat & FIFO_NOT_EMPTY) {
30517- while((a = h->access.command_completed(h))) {
30518+ while((a = h->access->command_completed(h))) {
30519 a1 = a; a &= ~3;
30520 if ((c = h->cmpQ) == NULL)
30521 {
30522@@ -1434,11 +1436,11 @@ static int sendcmd(
30523 /*
30524 * Disable interrupt
30525 */
30526- info_p->access.set_intr_mask(info_p, 0);
30527+ info_p->access->set_intr_mask(info_p, 0);
30528 /* Make sure there is room in the command FIFO */
30529 /* Actually it should be completely empty at this time. */
30530 for (i = 200000; i > 0; i--) {
30531- temp = info_p->access.fifo_full(info_p);
30532+ temp = info_p->access->fifo_full(info_p);
30533 if (temp != 0) {
30534 break;
30535 }
30536@@ -1451,7 +1453,7 @@ DBG(
30537 /*
30538 * Send the cmd
30539 */
30540- info_p->access.submit_command(info_p, c);
30541+ info_p->access->submit_command(info_p, c);
30542 complete = pollcomplete(ctlr);
30543
30544 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30545@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30546 * we check the new geometry. Then turn interrupts back on when
30547 * we're done.
30548 */
30549- host->access.set_intr_mask(host, 0);
30550+ host->access->set_intr_mask(host, 0);
30551 getgeometry(ctlr);
30552- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30553+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30554
30555 for(i=0; i<NWD; i++) {
30556 struct gendisk *disk = ida_gendisk[ctlr][i];
30557@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30558 /* Wait (up to 2 seconds) for a command to complete */
30559
30560 for (i = 200000; i > 0; i--) {
30561- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30562+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30563 if (done == 0) {
30564 udelay(10); /* a short fixed delay */
30565 } else
30566diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30567index be73e9d..7fbf140 100644
30568--- a/drivers/block/cpqarray.h
30569+++ b/drivers/block/cpqarray.h
30570@@ -99,7 +99,7 @@ struct ctlr_info {
30571 drv_info_t drv[NWD];
30572 struct proc_dir_entry *proc;
30573
30574- struct access_method access;
30575+ struct access_method *access;
30576
30577 cmdlist_t *reqQ;
30578 cmdlist_t *cmpQ;
30579diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30580index 8ec2d70..2804b30 100644
30581--- a/drivers/block/loop.c
30582+++ b/drivers/block/loop.c
30583@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30584 mm_segment_t old_fs = get_fs();
30585
30586 set_fs(get_ds());
30587- bw = file->f_op->write(file, buf, len, &pos);
30588+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30589 set_fs(old_fs);
30590 if (likely(bw == len))
30591 return 0;
30592diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30593index 26ada47..083c480 100644
30594--- a/drivers/block/nbd.c
30595+++ b/drivers/block/nbd.c
30596@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30597 struct kvec iov;
30598 sigset_t blocked, oldset;
30599
30600+ pax_track_stack();
30601+
30602 if (unlikely(!sock)) {
30603 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30604 lo->disk->disk_name, (send ? "send" : "recv"));
30605@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30606 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30607 unsigned int cmd, unsigned long arg)
30608 {
30609+ pax_track_stack();
30610+
30611 switch (cmd) {
30612 case NBD_DISCONNECT: {
30613 struct request sreq;
30614diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30615index a5d585d..d087be3 100644
30616--- a/drivers/block/pktcdvd.c
30617+++ b/drivers/block/pktcdvd.c
30618@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30619 return len;
30620 }
30621
30622-static struct sysfs_ops kobj_pkt_ops = {
30623+static const struct sysfs_ops kobj_pkt_ops = {
30624 .show = kobj_pkt_show,
30625 .store = kobj_pkt_store
30626 };
30627diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30628index 6aad99e..89cd142 100644
30629--- a/drivers/char/Kconfig
30630+++ b/drivers/char/Kconfig
30631@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30632
30633 config DEVKMEM
30634 bool "/dev/kmem virtual device support"
30635- default y
30636+ default n
30637+ depends on !GRKERNSEC_KMEM
30638 help
30639 Say Y here if you want to support the /dev/kmem device. The
30640 /dev/kmem device is rarely used, but can be used for certain
30641@@ -1114,6 +1115,7 @@ config DEVPORT
30642 bool
30643 depends on !M68K
30644 depends on ISA || PCI
30645+ depends on !GRKERNSEC_KMEM
30646 default y
30647
30648 source "drivers/s390/char/Kconfig"
30649diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30650index a96f319..a778a5b 100644
30651--- a/drivers/char/agp/frontend.c
30652+++ b/drivers/char/agp/frontend.c
30653@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30654 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30655 return -EFAULT;
30656
30657- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30658+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30659 return -EFAULT;
30660
30661 client = agp_find_client_by_pid(reserve.pid);
30662diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30663index d8cff90..9628e70 100644
30664--- a/drivers/char/briq_panel.c
30665+++ b/drivers/char/briq_panel.c
30666@@ -10,6 +10,7 @@
30667 #include <linux/types.h>
30668 #include <linux/errno.h>
30669 #include <linux/tty.h>
30670+#include <linux/mutex.h>
30671 #include <linux/timer.h>
30672 #include <linux/kernel.h>
30673 #include <linux/wait.h>
30674@@ -36,6 +37,7 @@ static int vfd_is_open;
30675 static unsigned char vfd[40];
30676 static int vfd_cursor;
30677 static unsigned char ledpb, led;
30678+static DEFINE_MUTEX(vfd_mutex);
30679
30680 static void update_vfd(void)
30681 {
30682@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30683 if (!vfd_is_open)
30684 return -EBUSY;
30685
30686+ mutex_lock(&vfd_mutex);
30687 for (;;) {
30688 char c;
30689 if (!indx)
30690 break;
30691- if (get_user(c, buf))
30692+ if (get_user(c, buf)) {
30693+ mutex_unlock(&vfd_mutex);
30694 return -EFAULT;
30695+ }
30696 if (esc) {
30697 set_led(c);
30698 esc = 0;
30699@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30700 buf++;
30701 }
30702 update_vfd();
30703+ mutex_unlock(&vfd_mutex);
30704
30705 return len;
30706 }
30707diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30708index 31e7c91..161afc0 100644
30709--- a/drivers/char/genrtc.c
30710+++ b/drivers/char/genrtc.c
30711@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30712 switch (cmd) {
30713
30714 case RTC_PLL_GET:
30715+ memset(&pll, 0, sizeof(pll));
30716 if (get_rtc_pll(&pll))
30717 return -EINVAL;
30718 else
30719diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30720index 006466d..a2bb21c 100644
30721--- a/drivers/char/hpet.c
30722+++ b/drivers/char/hpet.c
30723@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30724 return 0;
30725 }
30726
30727-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30728+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30729
30730 static int
30731 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30732@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30733 }
30734
30735 static int
30736-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30737+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30738 {
30739 struct hpet_timer __iomem *timer;
30740 struct hpet __iomem *hpet;
30741@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30742 {
30743 struct hpet_info info;
30744
30745+ memset(&info, 0, sizeof(info));
30746+
30747 if (devp->hd_ireqfreq)
30748 info.hi_ireqfreq =
30749 hpet_time_div(hpetp, devp->hd_ireqfreq);
30750- else
30751- info.hi_ireqfreq = 0;
30752 info.hi_flags =
30753 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30754 info.hi_hpet = hpetp->hp_which;
30755diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30756index 0afc8b8..6913fc3 100644
30757--- a/drivers/char/hvc_beat.c
30758+++ b/drivers/char/hvc_beat.c
30759@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30760 return cnt;
30761 }
30762
30763-static struct hv_ops hvc_beat_get_put_ops = {
30764+static const struct hv_ops hvc_beat_get_put_ops = {
30765 .get_chars = hvc_beat_get_chars,
30766 .put_chars = hvc_beat_put_chars,
30767 };
30768diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30769index 98097f2..407dddc 100644
30770--- a/drivers/char/hvc_console.c
30771+++ b/drivers/char/hvc_console.c
30772@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30773 * console interfaces but can still be used as a tty device. This has to be
30774 * static because kmalloc will not work during early console init.
30775 */
30776-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30777+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30778 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30779 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30780
30781@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30782 * vty adapters do NOT get an hvc_instantiate() callback since they
30783 * appear after early console init.
30784 */
30785-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30786+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30787 {
30788 struct hvc_struct *hp;
30789
30790@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30791 };
30792
30793 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30794- struct hv_ops *ops, int outbuf_size)
30795+ const struct hv_ops *ops, int outbuf_size)
30796 {
30797 struct hvc_struct *hp;
30798 int i;
30799diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30800index 10950ca..ed176c3 100644
30801--- a/drivers/char/hvc_console.h
30802+++ b/drivers/char/hvc_console.h
30803@@ -55,7 +55,7 @@ struct hvc_struct {
30804 int outbuf_size;
30805 int n_outbuf;
30806 uint32_t vtermno;
30807- struct hv_ops *ops;
30808+ const struct hv_ops *ops;
30809 int irq_requested;
30810 int data;
30811 struct winsize ws;
30812@@ -76,11 +76,11 @@ struct hv_ops {
30813 };
30814
30815 /* Register a vterm and a slot index for use as a console (console_init) */
30816-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30817+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30818
30819 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30820 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30821- struct hv_ops *ops, int outbuf_size);
30822+ const struct hv_ops *ops, int outbuf_size);
30823 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30824 extern int hvc_remove(struct hvc_struct *hp);
30825
30826diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30827index 936d05b..fd02426 100644
30828--- a/drivers/char/hvc_iseries.c
30829+++ b/drivers/char/hvc_iseries.c
30830@@ -197,7 +197,7 @@ done:
30831 return sent;
30832 }
30833
30834-static struct hv_ops hvc_get_put_ops = {
30835+static const struct hv_ops hvc_get_put_ops = {
30836 .get_chars = get_chars,
30837 .put_chars = put_chars,
30838 .notifier_add = notifier_add_irq,
30839diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30840index b0e168f..69cda2a 100644
30841--- a/drivers/char/hvc_iucv.c
30842+++ b/drivers/char/hvc_iucv.c
30843@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30844
30845
30846 /* HVC operations */
30847-static struct hv_ops hvc_iucv_ops = {
30848+static const struct hv_ops hvc_iucv_ops = {
30849 .get_chars = hvc_iucv_get_chars,
30850 .put_chars = hvc_iucv_put_chars,
30851 .notifier_add = hvc_iucv_notifier_add,
30852diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30853index 88590d0..61c4a61 100644
30854--- a/drivers/char/hvc_rtas.c
30855+++ b/drivers/char/hvc_rtas.c
30856@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30857 return i;
30858 }
30859
30860-static struct hv_ops hvc_rtas_get_put_ops = {
30861+static const struct hv_ops hvc_rtas_get_put_ops = {
30862 .get_chars = hvc_rtas_read_console,
30863 .put_chars = hvc_rtas_write_console,
30864 };
30865diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30866index bd63ba8..b0957e6 100644
30867--- a/drivers/char/hvc_udbg.c
30868+++ b/drivers/char/hvc_udbg.c
30869@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30870 return i;
30871 }
30872
30873-static struct hv_ops hvc_udbg_ops = {
30874+static const struct hv_ops hvc_udbg_ops = {
30875 .get_chars = hvc_udbg_get,
30876 .put_chars = hvc_udbg_put,
30877 };
30878diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30879index 10be343..27370e9 100644
30880--- a/drivers/char/hvc_vio.c
30881+++ b/drivers/char/hvc_vio.c
30882@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30883 return got;
30884 }
30885
30886-static struct hv_ops hvc_get_put_ops = {
30887+static const struct hv_ops hvc_get_put_ops = {
30888 .get_chars = filtered_get_chars,
30889 .put_chars = hvc_put_chars,
30890 .notifier_add = notifier_add_irq,
30891diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30892index a6ee32b..94f8c26 100644
30893--- a/drivers/char/hvc_xen.c
30894+++ b/drivers/char/hvc_xen.c
30895@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30896 return recv;
30897 }
30898
30899-static struct hv_ops hvc_ops = {
30900+static const struct hv_ops hvc_ops = {
30901 .get_chars = read_console,
30902 .put_chars = write_console,
30903 .notifier_add = notifier_add_irq,
30904diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
30905index 266b858..f3ee0bb 100644
30906--- a/drivers/char/hvcs.c
30907+++ b/drivers/char/hvcs.c
30908@@ -82,6 +82,7 @@
30909 #include <asm/hvcserver.h>
30910 #include <asm/uaccess.h>
30911 #include <asm/vio.h>
30912+#include <asm/local.h>
30913
30914 /*
30915 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
30916@@ -269,7 +270,7 @@ struct hvcs_struct {
30917 unsigned int index;
30918
30919 struct tty_struct *tty;
30920- int open_count;
30921+ local_t open_count;
30922
30923 /*
30924 * Used to tell the driver kernel_thread what operations need to take
30925@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
30926
30927 spin_lock_irqsave(&hvcsd->lock, flags);
30928
30929- if (hvcsd->open_count > 0) {
30930+ if (local_read(&hvcsd->open_count) > 0) {
30931 spin_unlock_irqrestore(&hvcsd->lock, flags);
30932 printk(KERN_INFO "HVCS: vterm state unchanged. "
30933 "The hvcs device node is still in use.\n");
30934@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
30935 if ((retval = hvcs_partner_connect(hvcsd)))
30936 goto error_release;
30937
30938- hvcsd->open_count = 1;
30939+ local_set(&hvcsd->open_count, 1);
30940 hvcsd->tty = tty;
30941 tty->driver_data = hvcsd;
30942
30943@@ -1169,7 +1170,7 @@ fast_open:
30944
30945 spin_lock_irqsave(&hvcsd->lock, flags);
30946 kref_get(&hvcsd->kref);
30947- hvcsd->open_count++;
30948+ local_inc(&hvcsd->open_count);
30949 hvcsd->todo_mask |= HVCS_SCHED_READ;
30950 spin_unlock_irqrestore(&hvcsd->lock, flags);
30951
30952@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30953 hvcsd = tty->driver_data;
30954
30955 spin_lock_irqsave(&hvcsd->lock, flags);
30956- if (--hvcsd->open_count == 0) {
30957+ if (local_dec_and_test(&hvcsd->open_count)) {
30958
30959 vio_disable_interrupts(hvcsd->vdev);
30960
30961@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30962 free_irq(irq, hvcsd);
30963 kref_put(&hvcsd->kref, destroy_hvcs_struct);
30964 return;
30965- } else if (hvcsd->open_count < 0) {
30966+ } else if (local_read(&hvcsd->open_count) < 0) {
30967 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
30968 " is missmanaged.\n",
30969- hvcsd->vdev->unit_address, hvcsd->open_count);
30970+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
30971 }
30972
30973 spin_unlock_irqrestore(&hvcsd->lock, flags);
30974@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30975
30976 spin_lock_irqsave(&hvcsd->lock, flags);
30977 /* Preserve this so that we know how many kref refs to put */
30978- temp_open_count = hvcsd->open_count;
30979+ temp_open_count = local_read(&hvcsd->open_count);
30980
30981 /*
30982 * Don't kref put inside the spinlock because the destruction
30983@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30984 hvcsd->tty->driver_data = NULL;
30985 hvcsd->tty = NULL;
30986
30987- hvcsd->open_count = 0;
30988+ local_set(&hvcsd->open_count, 0);
30989
30990 /* This will drop any buffered data on the floor which is OK in a hangup
30991 * scenario. */
30992@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
30993 * the middle of a write operation? This is a crummy place to do this
30994 * but we want to keep it all in the spinlock.
30995 */
30996- if (hvcsd->open_count <= 0) {
30997+ if (local_read(&hvcsd->open_count) <= 0) {
30998 spin_unlock_irqrestore(&hvcsd->lock, flags);
30999 return -ENODEV;
31000 }
31001@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31002 {
31003 struct hvcs_struct *hvcsd = tty->driver_data;
31004
31005- if (!hvcsd || hvcsd->open_count <= 0)
31006+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31007 return 0;
31008
31009 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31010diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31011index ec5e3f8..02455ba 100644
31012--- a/drivers/char/ipmi/ipmi_msghandler.c
31013+++ b/drivers/char/ipmi/ipmi_msghandler.c
31014@@ -414,7 +414,7 @@ struct ipmi_smi {
31015 struct proc_dir_entry *proc_dir;
31016 char proc_dir_name[10];
31017
31018- atomic_t stats[IPMI_NUM_STATS];
31019+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31020
31021 /*
31022 * run_to_completion duplicate of smb_info, smi_info
31023@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31024
31025
31026 #define ipmi_inc_stat(intf, stat) \
31027- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31028+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31029 #define ipmi_get_stat(intf, stat) \
31030- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31031+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31032
31033 static int is_lan_addr(struct ipmi_addr *addr)
31034 {
31035@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31036 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31037 init_waitqueue_head(&intf->waitq);
31038 for (i = 0; i < IPMI_NUM_STATS; i++)
31039- atomic_set(&intf->stats[i], 0);
31040+ atomic_set_unchecked(&intf->stats[i], 0);
31041
31042 intf->proc_dir = NULL;
31043
31044@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31045 struct ipmi_smi_msg smi_msg;
31046 struct ipmi_recv_msg recv_msg;
31047
31048+ pax_track_stack();
31049+
31050 si = (struct ipmi_system_interface_addr *) &addr;
31051 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31052 si->channel = IPMI_BMC_CHANNEL;
31053diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31054index abae8c9..8021979 100644
31055--- a/drivers/char/ipmi/ipmi_si_intf.c
31056+++ b/drivers/char/ipmi/ipmi_si_intf.c
31057@@ -277,7 +277,7 @@ struct smi_info {
31058 unsigned char slave_addr;
31059
31060 /* Counters and things for the proc filesystem. */
31061- atomic_t stats[SI_NUM_STATS];
31062+ atomic_unchecked_t stats[SI_NUM_STATS];
31063
31064 struct task_struct *thread;
31065
31066@@ -285,9 +285,9 @@ struct smi_info {
31067 };
31068
31069 #define smi_inc_stat(smi, stat) \
31070- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31071+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31072 #define smi_get_stat(smi, stat) \
31073- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31074+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31075
31076 #define SI_MAX_PARMS 4
31077
31078@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31079 atomic_set(&new_smi->req_events, 0);
31080 new_smi->run_to_completion = 0;
31081 for (i = 0; i < SI_NUM_STATS; i++)
31082- atomic_set(&new_smi->stats[i], 0);
31083+ atomic_set_unchecked(&new_smi->stats[i], 0);
31084
31085 new_smi->interrupt_disabled = 0;
31086 atomic_set(&new_smi->stop_operation, 0);
31087diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31088index 402838f..55e2200 100644
31089--- a/drivers/char/istallion.c
31090+++ b/drivers/char/istallion.c
31091@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31092 * re-used for each stats call.
31093 */
31094 static comstats_t stli_comstats;
31095-static combrd_t stli_brdstats;
31096 static struct asystats stli_cdkstats;
31097
31098 /*****************************************************************************/
31099@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31100 {
31101 struct stlibrd *brdp;
31102 unsigned int i;
31103+ combrd_t stli_brdstats;
31104
31105 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31106 return -EFAULT;
31107@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31108 struct stliport stli_dummyport;
31109 struct stliport *portp;
31110
31111+ pax_track_stack();
31112+
31113 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31114 return -EFAULT;
31115 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31116@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31117 struct stlibrd stli_dummybrd;
31118 struct stlibrd *brdp;
31119
31120+ pax_track_stack();
31121+
31122 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31123 return -EFAULT;
31124 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31125diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31126index 950837c..e55a288 100644
31127--- a/drivers/char/keyboard.c
31128+++ b/drivers/char/keyboard.c
31129@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31130 kbd->kbdmode == VC_MEDIUMRAW) &&
31131 value != KVAL(K_SAK))
31132 return; /* SAK is allowed even in raw mode */
31133+
31134+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31135+ {
31136+ void *func = fn_handler[value];
31137+ if (func == fn_show_state || func == fn_show_ptregs ||
31138+ func == fn_show_mem)
31139+ return;
31140+ }
31141+#endif
31142+
31143 fn_handler[value](vc);
31144 }
31145
31146@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31147 .evbit = { BIT_MASK(EV_SND) },
31148 },
31149
31150- { }, /* Terminating entry */
31151+ { 0 }, /* Terminating entry */
31152 };
31153
31154 MODULE_DEVICE_TABLE(input, kbd_ids);
31155diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31156index 87c67b4..230527a 100644
31157--- a/drivers/char/mbcs.c
31158+++ b/drivers/char/mbcs.c
31159@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31160 return 0;
31161 }
31162
31163-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31164+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31165 {
31166 .part_num = MBCS_PART_NUM,
31167 .mfg_num = MBCS_MFG_NUM,
31168diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31169index 1270f64..8495f49 100644
31170--- a/drivers/char/mem.c
31171+++ b/drivers/char/mem.c
31172@@ -18,6 +18,7 @@
31173 #include <linux/raw.h>
31174 #include <linux/tty.h>
31175 #include <linux/capability.h>
31176+#include <linux/security.h>
31177 #include <linux/ptrace.h>
31178 #include <linux/device.h>
31179 #include <linux/highmem.h>
31180@@ -35,6 +36,10 @@
31181 # include <linux/efi.h>
31182 #endif
31183
31184+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31185+extern struct file_operations grsec_fops;
31186+#endif
31187+
31188 static inline unsigned long size_inside_page(unsigned long start,
31189 unsigned long size)
31190 {
31191@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31192
31193 while (cursor < to) {
31194 if (!devmem_is_allowed(pfn)) {
31195+#ifdef CONFIG_GRKERNSEC_KMEM
31196+ gr_handle_mem_readwrite(from, to);
31197+#else
31198 printk(KERN_INFO
31199 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31200 current->comm, from, to);
31201+#endif
31202 return 0;
31203 }
31204 cursor += PAGE_SIZE;
31205@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31206 }
31207 return 1;
31208 }
31209+#elif defined(CONFIG_GRKERNSEC_KMEM)
31210+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31211+{
31212+ return 0;
31213+}
31214 #else
31215 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31216 {
31217@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31218 #endif
31219
31220 while (count > 0) {
31221+ char *temp;
31222+
31223 /*
31224 * Handle first page in case it's not aligned
31225 */
31226@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31227 if (!ptr)
31228 return -EFAULT;
31229
31230- if (copy_to_user(buf, ptr, sz)) {
31231+#ifdef CONFIG_PAX_USERCOPY
31232+ temp = kmalloc(sz, GFP_KERNEL);
31233+ if (!temp) {
31234+ unxlate_dev_mem_ptr(p, ptr);
31235+ return -ENOMEM;
31236+ }
31237+ memcpy(temp, ptr, sz);
31238+#else
31239+ temp = ptr;
31240+#endif
31241+
31242+ if (copy_to_user(buf, temp, sz)) {
31243+
31244+#ifdef CONFIG_PAX_USERCOPY
31245+ kfree(temp);
31246+#endif
31247+
31248 unxlate_dev_mem_ptr(p, ptr);
31249 return -EFAULT;
31250 }
31251
31252+#ifdef CONFIG_PAX_USERCOPY
31253+ kfree(temp);
31254+#endif
31255+
31256 unxlate_dev_mem_ptr(p, ptr);
31257
31258 buf += sz;
31259@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31260 size_t count, loff_t *ppos)
31261 {
31262 unsigned long p = *ppos;
31263- ssize_t low_count, read, sz;
31264+ ssize_t low_count, read, sz, err = 0;
31265 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31266- int err = 0;
31267
31268 read = 0;
31269 if (p < (unsigned long) high_memory) {
31270@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31271 }
31272 #endif
31273 while (low_count > 0) {
31274+ char *temp;
31275+
31276 sz = size_inside_page(p, low_count);
31277
31278 /*
31279@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31280 */
31281 kbuf = xlate_dev_kmem_ptr((char *)p);
31282
31283- if (copy_to_user(buf, kbuf, sz))
31284+#ifdef CONFIG_PAX_USERCOPY
31285+ temp = kmalloc(sz, GFP_KERNEL);
31286+ if (!temp)
31287+ return -ENOMEM;
31288+ memcpy(temp, kbuf, sz);
31289+#else
31290+ temp = kbuf;
31291+#endif
31292+
31293+ err = copy_to_user(buf, temp, sz);
31294+
31295+#ifdef CONFIG_PAX_USERCOPY
31296+ kfree(temp);
31297+#endif
31298+
31299+ if (err)
31300 return -EFAULT;
31301 buf += sz;
31302 p += sz;
31303@@ -889,6 +941,9 @@ static const struct memdev {
31304 #ifdef CONFIG_CRASH_DUMP
31305 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31306 #endif
31307+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31308+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31309+#endif
31310 };
31311
31312 static int memory_open(struct inode *inode, struct file *filp)
31313diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31314index 674b3ab..a8d1970 100644
31315--- a/drivers/char/pcmcia/ipwireless/tty.c
31316+++ b/drivers/char/pcmcia/ipwireless/tty.c
31317@@ -29,6 +29,7 @@
31318 #include <linux/tty_driver.h>
31319 #include <linux/tty_flip.h>
31320 #include <linux/uaccess.h>
31321+#include <asm/local.h>
31322
31323 #include "tty.h"
31324 #include "network.h"
31325@@ -51,7 +52,7 @@ struct ipw_tty {
31326 int tty_type;
31327 struct ipw_network *network;
31328 struct tty_struct *linux_tty;
31329- int open_count;
31330+ local_t open_count;
31331 unsigned int control_lines;
31332 struct mutex ipw_tty_mutex;
31333 int tx_bytes_queued;
31334@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31335 mutex_unlock(&tty->ipw_tty_mutex);
31336 return -ENODEV;
31337 }
31338- if (tty->open_count == 0)
31339+ if (local_read(&tty->open_count) == 0)
31340 tty->tx_bytes_queued = 0;
31341
31342- tty->open_count++;
31343+ local_inc(&tty->open_count);
31344
31345 tty->linux_tty = linux_tty;
31346 linux_tty->driver_data = tty;
31347@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31348
31349 static void do_ipw_close(struct ipw_tty *tty)
31350 {
31351- tty->open_count--;
31352-
31353- if (tty->open_count == 0) {
31354+ if (local_dec_return(&tty->open_count) == 0) {
31355 struct tty_struct *linux_tty = tty->linux_tty;
31356
31357 if (linux_tty != NULL) {
31358@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31359 return;
31360
31361 mutex_lock(&tty->ipw_tty_mutex);
31362- if (tty->open_count == 0) {
31363+ if (local_read(&tty->open_count) == 0) {
31364 mutex_unlock(&tty->ipw_tty_mutex);
31365 return;
31366 }
31367@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31368 return;
31369 }
31370
31371- if (!tty->open_count) {
31372+ if (!local_read(&tty->open_count)) {
31373 mutex_unlock(&tty->ipw_tty_mutex);
31374 return;
31375 }
31376@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31377 return -ENODEV;
31378
31379 mutex_lock(&tty->ipw_tty_mutex);
31380- if (!tty->open_count) {
31381+ if (!local_read(&tty->open_count)) {
31382 mutex_unlock(&tty->ipw_tty_mutex);
31383 return -EINVAL;
31384 }
31385@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31386 if (!tty)
31387 return -ENODEV;
31388
31389- if (!tty->open_count)
31390+ if (!local_read(&tty->open_count))
31391 return -EINVAL;
31392
31393 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31394@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31395 if (!tty)
31396 return 0;
31397
31398- if (!tty->open_count)
31399+ if (!local_read(&tty->open_count))
31400 return 0;
31401
31402 return tty->tx_bytes_queued;
31403@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31404 if (!tty)
31405 return -ENODEV;
31406
31407- if (!tty->open_count)
31408+ if (!local_read(&tty->open_count))
31409 return -EINVAL;
31410
31411 return get_control_lines(tty);
31412@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31413 if (!tty)
31414 return -ENODEV;
31415
31416- if (!tty->open_count)
31417+ if (!local_read(&tty->open_count))
31418 return -EINVAL;
31419
31420 return set_control_lines(tty, set, clear);
31421@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31422 if (!tty)
31423 return -ENODEV;
31424
31425- if (!tty->open_count)
31426+ if (!local_read(&tty->open_count))
31427 return -EINVAL;
31428
31429 /* FIXME: Exactly how is the tty object locked here .. */
31430@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31431 against a parallel ioctl etc */
31432 mutex_lock(&ttyj->ipw_tty_mutex);
31433 }
31434- while (ttyj->open_count)
31435+ while (local_read(&ttyj->open_count))
31436 do_ipw_close(ttyj);
31437 ipwireless_disassociate_network_ttys(network,
31438 ttyj->channel_idx);
31439diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31440index 62f282e..e45c45c 100644
31441--- a/drivers/char/pty.c
31442+++ b/drivers/char/pty.c
31443@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31444 register_sysctl_table(pty_root_table);
31445
31446 /* Now create the /dev/ptmx special device */
31447+ pax_open_kernel();
31448 tty_default_fops(&ptmx_fops);
31449- ptmx_fops.open = ptmx_open;
31450+ *(void **)&ptmx_fops.open = ptmx_open;
31451+ pax_close_kernel();
31452
31453 cdev_init(&ptmx_cdev, &ptmx_fops);
31454 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31455diff --git a/drivers/char/random.c b/drivers/char/random.c
31456index 3a19e2d..6ed09d3 100644
31457--- a/drivers/char/random.c
31458+++ b/drivers/char/random.c
31459@@ -254,8 +254,13 @@
31460 /*
31461 * Configuration information
31462 */
31463+#ifdef CONFIG_GRKERNSEC_RANDNET
31464+#define INPUT_POOL_WORDS 512
31465+#define OUTPUT_POOL_WORDS 128
31466+#else
31467 #define INPUT_POOL_WORDS 128
31468 #define OUTPUT_POOL_WORDS 32
31469+#endif
31470 #define SEC_XFER_SIZE 512
31471
31472 /*
31473@@ -292,10 +297,17 @@ static struct poolinfo {
31474 int poolwords;
31475 int tap1, tap2, tap3, tap4, tap5;
31476 } poolinfo_table[] = {
31477+#ifdef CONFIG_GRKERNSEC_RANDNET
31478+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31479+ { 512, 411, 308, 208, 104, 1 },
31480+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31481+ { 128, 103, 76, 51, 25, 1 },
31482+#else
31483 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31484 { 128, 103, 76, 51, 25, 1 },
31485 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31486 { 32, 26, 20, 14, 7, 1 },
31487+#endif
31488 #if 0
31489 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31490 { 2048, 1638, 1231, 819, 411, 1 },
31491@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31492 #include <linux/sysctl.h>
31493
31494 static int min_read_thresh = 8, min_write_thresh;
31495-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31496+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31497 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31498 static char sysctl_bootid[16];
31499
31500diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31501index 0e29a23..0efc2c2 100644
31502--- a/drivers/char/rocket.c
31503+++ b/drivers/char/rocket.c
31504@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31505 struct rocket_ports tmp;
31506 int board;
31507
31508+ pax_track_stack();
31509+
31510 if (!retports)
31511 return -EFAULT;
31512 memset(&tmp, 0, sizeof (tmp));
31513diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31514index 8c262aa..4d3b058 100644
31515--- a/drivers/char/sonypi.c
31516+++ b/drivers/char/sonypi.c
31517@@ -55,6 +55,7 @@
31518 #include <asm/uaccess.h>
31519 #include <asm/io.h>
31520 #include <asm/system.h>
31521+#include <asm/local.h>
31522
31523 #include <linux/sonypi.h>
31524
31525@@ -491,7 +492,7 @@ static struct sonypi_device {
31526 spinlock_t fifo_lock;
31527 wait_queue_head_t fifo_proc_list;
31528 struct fasync_struct *fifo_async;
31529- int open_count;
31530+ local_t open_count;
31531 int model;
31532 struct input_dev *input_jog_dev;
31533 struct input_dev *input_key_dev;
31534@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31535 static int sonypi_misc_release(struct inode *inode, struct file *file)
31536 {
31537 mutex_lock(&sonypi_device.lock);
31538- sonypi_device.open_count--;
31539+ local_dec(&sonypi_device.open_count);
31540 mutex_unlock(&sonypi_device.lock);
31541 return 0;
31542 }
31543@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31544 lock_kernel();
31545 mutex_lock(&sonypi_device.lock);
31546 /* Flush input queue on first open */
31547- if (!sonypi_device.open_count)
31548+ if (!local_read(&sonypi_device.open_count))
31549 kfifo_reset(sonypi_device.fifo);
31550- sonypi_device.open_count++;
31551+ local_inc(&sonypi_device.open_count);
31552 mutex_unlock(&sonypi_device.lock);
31553 unlock_kernel();
31554 return 0;
31555diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31556index db6dcfa..13834cb 100644
31557--- a/drivers/char/stallion.c
31558+++ b/drivers/char/stallion.c
31559@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31560 struct stlport stl_dummyport;
31561 struct stlport *portp;
31562
31563+ pax_track_stack();
31564+
31565 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31566 return -EFAULT;
31567 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31568diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31569index a0789f6..cea3902 100644
31570--- a/drivers/char/tpm/tpm.c
31571+++ b/drivers/char/tpm/tpm.c
31572@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31573 chip->vendor.req_complete_val)
31574 goto out_recv;
31575
31576- if ((status == chip->vendor.req_canceled)) {
31577+ if (status == chip->vendor.req_canceled) {
31578 dev_err(chip->dev, "Operation Canceled\n");
31579 rc = -ECANCELED;
31580 goto out;
31581@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31582
31583 struct tpm_chip *chip = dev_get_drvdata(dev);
31584
31585+ pax_track_stack();
31586+
31587 tpm_cmd.header.in = tpm_readpubek_header;
31588 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31589 "attempting to read the PUBEK");
31590diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31591index bf2170f..ce8cab9 100644
31592--- a/drivers/char/tpm/tpm_bios.c
31593+++ b/drivers/char/tpm/tpm_bios.c
31594@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31595 event = addr;
31596
31597 if ((event->event_type == 0 && event->event_size == 0) ||
31598- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31599+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31600 return NULL;
31601
31602 return addr;
31603@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31604 return NULL;
31605
31606 if ((event->event_type == 0 && event->event_size == 0) ||
31607- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31608+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31609 return NULL;
31610
31611 (*pos)++;
31612@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31613 int i;
31614
31615 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31616- seq_putc(m, data[i]);
31617+ if (!seq_putc(m, data[i]))
31618+ return -EFAULT;
31619
31620 return 0;
31621 }
31622@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31623 log->bios_event_log_end = log->bios_event_log + len;
31624
31625 virt = acpi_os_map_memory(start, len);
31626+ if (!virt) {
31627+ kfree(log->bios_event_log);
31628+ log->bios_event_log = NULL;
31629+ return -EFAULT;
31630+ }
31631
31632- memcpy(log->bios_event_log, virt, len);
31633+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31634
31635 acpi_os_unmap_memory(virt, len);
31636 return 0;
31637diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31638index 123cedf..137edef 100644
31639--- a/drivers/char/tty_io.c
31640+++ b/drivers/char/tty_io.c
31641@@ -1774,6 +1774,7 @@ got_driver:
31642
31643 if (IS_ERR(tty)) {
31644 mutex_unlock(&tty_mutex);
31645+ tty_driver_kref_put(driver);
31646 return PTR_ERR(tty);
31647 }
31648 }
31649@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31650 return retval;
31651 }
31652
31653+EXPORT_SYMBOL(tty_ioctl);
31654+
31655 #ifdef CONFIG_COMPAT
31656-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31657+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31658 unsigned long arg)
31659 {
31660 struct inode *inode = file->f_dentry->d_inode;
31661@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31662
31663 return retval;
31664 }
31665+
31666+EXPORT_SYMBOL(tty_compat_ioctl);
31667 #endif
31668
31669 /*
31670@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31671
31672 void tty_default_fops(struct file_operations *fops)
31673 {
31674- *fops = tty_fops;
31675+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31676 }
31677
31678 /*
31679diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31680index d814a3d..b55b9c9 100644
31681--- a/drivers/char/tty_ldisc.c
31682+++ b/drivers/char/tty_ldisc.c
31683@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31684 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31685 struct tty_ldisc_ops *ldo = ld->ops;
31686
31687- ldo->refcount--;
31688+ atomic_dec(&ldo->refcount);
31689 module_put(ldo->owner);
31690 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31691
31692@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31693 spin_lock_irqsave(&tty_ldisc_lock, flags);
31694 tty_ldiscs[disc] = new_ldisc;
31695 new_ldisc->num = disc;
31696- new_ldisc->refcount = 0;
31697+ atomic_set(&new_ldisc->refcount, 0);
31698 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31699
31700 return ret;
31701@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31702 return -EINVAL;
31703
31704 spin_lock_irqsave(&tty_ldisc_lock, flags);
31705- if (tty_ldiscs[disc]->refcount)
31706+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31707 ret = -EBUSY;
31708 else
31709 tty_ldiscs[disc] = NULL;
31710@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31711 if (ldops) {
31712 ret = ERR_PTR(-EAGAIN);
31713 if (try_module_get(ldops->owner)) {
31714- ldops->refcount++;
31715+ atomic_inc(&ldops->refcount);
31716 ret = ldops;
31717 }
31718 }
31719@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31720 unsigned long flags;
31721
31722 spin_lock_irqsave(&tty_ldisc_lock, flags);
31723- ldops->refcount--;
31724+ atomic_dec(&ldops->refcount);
31725 module_put(ldops->owner);
31726 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31727 }
31728diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31729index a035ae3..c27fe2c 100644
31730--- a/drivers/char/virtio_console.c
31731+++ b/drivers/char/virtio_console.c
31732@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31733 * virtqueue, so we let the drivers do some boutique early-output thing. */
31734 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31735 {
31736- virtio_cons.put_chars = put_chars;
31737+ pax_open_kernel();
31738+ *(void **)&virtio_cons.put_chars = put_chars;
31739+ pax_close_kernel();
31740 return hvc_instantiate(0, 0, &virtio_cons);
31741 }
31742
31743@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31744 out_vq = vqs[1];
31745
31746 /* Start using the new console output. */
31747- virtio_cons.get_chars = get_chars;
31748- virtio_cons.put_chars = put_chars;
31749- virtio_cons.notifier_add = notifier_add_vio;
31750- virtio_cons.notifier_del = notifier_del_vio;
31751- virtio_cons.notifier_hangup = notifier_del_vio;
31752+ pax_open_kernel();
31753+ *(void **)&virtio_cons.get_chars = get_chars;
31754+ *(void **)&virtio_cons.put_chars = put_chars;
31755+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31756+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31757+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31758+ pax_close_kernel();
31759
31760 /* The first argument of hvc_alloc() is the virtual console number, so
31761 * we use zero. The second argument is the parameter for the
31762diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31763index 0c80c68..53d59c1 100644
31764--- a/drivers/char/vt.c
31765+++ b/drivers/char/vt.c
31766@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31767
31768 static void notify_write(struct vc_data *vc, unsigned int unicode)
31769 {
31770- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31771+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31772 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31773 }
31774
31775diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31776index 6351a26..999af95 100644
31777--- a/drivers/char/vt_ioctl.c
31778+++ b/drivers/char/vt_ioctl.c
31779@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31780 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31781 return -EFAULT;
31782
31783- if (!capable(CAP_SYS_TTY_CONFIG))
31784- perm = 0;
31785-
31786 switch (cmd) {
31787 case KDGKBENT:
31788 key_map = key_maps[s];
31789@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31790 val = (i ? K_HOLE : K_NOSUCHMAP);
31791 return put_user(val, &user_kbe->kb_value);
31792 case KDSKBENT:
31793+ if (!capable(CAP_SYS_TTY_CONFIG))
31794+ perm = 0;
31795+
31796 if (!perm)
31797 return -EPERM;
31798+
31799 if (!i && v == K_NOSUCHMAP) {
31800 /* deallocate map */
31801 key_map = key_maps[s];
31802@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31803 int i, j, k;
31804 int ret;
31805
31806- if (!capable(CAP_SYS_TTY_CONFIG))
31807- perm = 0;
31808-
31809 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31810 if (!kbs) {
31811 ret = -ENOMEM;
31812@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31813 kfree(kbs);
31814 return ((p && *p) ? -EOVERFLOW : 0);
31815 case KDSKBSENT:
31816+ if (!capable(CAP_SYS_TTY_CONFIG))
31817+ perm = 0;
31818+
31819 if (!perm) {
31820 ret = -EPERM;
31821 goto reterr;
31822diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31823index c7ae026..1769c1d 100644
31824--- a/drivers/cpufreq/cpufreq.c
31825+++ b/drivers/cpufreq/cpufreq.c
31826@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31827 complete(&policy->kobj_unregister);
31828 }
31829
31830-static struct sysfs_ops sysfs_ops = {
31831+static const struct sysfs_ops sysfs_ops = {
31832 .show = show,
31833 .store = store,
31834 };
31835diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31836index 97b0038..2056670 100644
31837--- a/drivers/cpuidle/sysfs.c
31838+++ b/drivers/cpuidle/sysfs.c
31839@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31840 return ret;
31841 }
31842
31843-static struct sysfs_ops cpuidle_sysfs_ops = {
31844+static const struct sysfs_ops cpuidle_sysfs_ops = {
31845 .show = cpuidle_show,
31846 .store = cpuidle_store,
31847 };
31848@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31849 return ret;
31850 }
31851
31852-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31853+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31854 .show = cpuidle_state_show,
31855 };
31856
31857@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31858 .release = cpuidle_state_sysfs_release,
31859 };
31860
31861-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31862+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31863 {
31864 kobject_put(&device->kobjs[i]->kobj);
31865 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31866diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31867index 5f753fc..0377ae9 100644
31868--- a/drivers/crypto/hifn_795x.c
31869+++ b/drivers/crypto/hifn_795x.c
31870@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31871 0xCA, 0x34, 0x2B, 0x2E};
31872 struct scatterlist sg;
31873
31874+ pax_track_stack();
31875+
31876 memset(src, 0, sizeof(src));
31877 memset(ctx.key, 0, sizeof(ctx.key));
31878
31879diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31880index 71e6482..de8d96c 100644
31881--- a/drivers/crypto/padlock-aes.c
31882+++ b/drivers/crypto/padlock-aes.c
31883@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31884 struct crypto_aes_ctx gen_aes;
31885 int cpu;
31886
31887+ pax_track_stack();
31888+
31889 if (key_len % 8) {
31890 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
31891 return -EINVAL;
31892diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
31893index dcc4ab7..cc834bb 100644
31894--- a/drivers/dma/ioat/dma.c
31895+++ b/drivers/dma/ioat/dma.c
31896@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
31897 return entry->show(&chan->common, page);
31898 }
31899
31900-struct sysfs_ops ioat_sysfs_ops = {
31901+const struct sysfs_ops ioat_sysfs_ops = {
31902 .show = ioat_attr_show,
31903 };
31904
31905diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
31906index bbc3e78..f2db62c 100644
31907--- a/drivers/dma/ioat/dma.h
31908+++ b/drivers/dma/ioat/dma.h
31909@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
31910 unsigned long *phys_complete);
31911 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
31912 void ioat_kobject_del(struct ioatdma_device *device);
31913-extern struct sysfs_ops ioat_sysfs_ops;
31914+extern const struct sysfs_ops ioat_sysfs_ops;
31915 extern struct ioat_sysfs_entry ioat_version_attr;
31916 extern struct ioat_sysfs_entry ioat_cap_attr;
31917 #endif /* IOATDMA_H */
31918diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
31919index 9908c9e..3ceb0e5 100644
31920--- a/drivers/dma/ioat/dma_v3.c
31921+++ b/drivers/dma/ioat/dma_v3.c
31922@@ -71,10 +71,10 @@
31923 /* provide a lookup table for setting the source address in the base or
31924 * extended descriptor of an xor or pq descriptor
31925 */
31926-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
31927-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
31928-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
31929-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
31930+static const u8 xor_idx_to_desc = 0xd0;
31931+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
31932+static const u8 pq_idx_to_desc = 0xf8;
31933+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
31934
31935 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
31936 {
31937diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
31938index 85c464a..afd1e73 100644
31939--- a/drivers/edac/amd64_edac.c
31940+++ b/drivers/edac/amd64_edac.c
31941@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
31942 * PCI core identifies what devices are on a system during boot, and then
31943 * inquiry this table to see if this driver is for a given device found.
31944 */
31945-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
31946+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
31947 {
31948 .vendor = PCI_VENDOR_ID_AMD,
31949 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
31950diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
31951index 2b95f1a..4f52793 100644
31952--- a/drivers/edac/amd76x_edac.c
31953+++ b/drivers/edac/amd76x_edac.c
31954@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
31955 edac_mc_free(mci);
31956 }
31957
31958-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
31959+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
31960 {
31961 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31962 AMD762},
31963diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
31964index d205d49..74c9672 100644
31965--- a/drivers/edac/e752x_edac.c
31966+++ b/drivers/edac/e752x_edac.c
31967@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
31968 edac_mc_free(mci);
31969 }
31970
31971-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
31972+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
31973 {
31974 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31975 E7520},
31976diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
31977index c7d11cc..c59c1ca 100644
31978--- a/drivers/edac/e7xxx_edac.c
31979+++ b/drivers/edac/e7xxx_edac.c
31980@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
31981 edac_mc_free(mci);
31982 }
31983
31984-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
31985+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
31986 {
31987 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31988 E7205},
31989diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
31990index 5376457..5fdedbc 100644
31991--- a/drivers/edac/edac_device_sysfs.c
31992+++ b/drivers/edac/edac_device_sysfs.c
31993@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
31994 }
31995
31996 /* edac_dev file operations for an 'ctl_info' */
31997-static struct sysfs_ops device_ctl_info_ops = {
31998+static const struct sysfs_ops device_ctl_info_ops = {
31999 .show = edac_dev_ctl_info_show,
32000 .store = edac_dev_ctl_info_store
32001 };
32002@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32003 }
32004
32005 /* edac_dev file operations for an 'instance' */
32006-static struct sysfs_ops device_instance_ops = {
32007+static const struct sysfs_ops device_instance_ops = {
32008 .show = edac_dev_instance_show,
32009 .store = edac_dev_instance_store
32010 };
32011@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32012 }
32013
32014 /* edac_dev file operations for a 'block' */
32015-static struct sysfs_ops device_block_ops = {
32016+static const struct sysfs_ops device_block_ops = {
32017 .show = edac_dev_block_show,
32018 .store = edac_dev_block_store
32019 };
32020diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32021index e1d4ce0..88840e9 100644
32022--- a/drivers/edac/edac_mc_sysfs.c
32023+++ b/drivers/edac/edac_mc_sysfs.c
32024@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32025 return -EIO;
32026 }
32027
32028-static struct sysfs_ops csrowfs_ops = {
32029+static const struct sysfs_ops csrowfs_ops = {
32030 .show = csrowdev_show,
32031 .store = csrowdev_store
32032 };
32033@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32034 }
32035
32036 /* Intermediate show/store table */
32037-static struct sysfs_ops mci_ops = {
32038+static const struct sysfs_ops mci_ops = {
32039 .show = mcidev_show,
32040 .store = mcidev_store
32041 };
32042diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32043index 422728c..d8d9c88 100644
32044--- a/drivers/edac/edac_pci_sysfs.c
32045+++ b/drivers/edac/edac_pci_sysfs.c
32046@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32047 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32048 static int edac_pci_poll_msec = 1000; /* one second workq period */
32049
32050-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32051-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32052+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32053+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32054
32055 static struct kobject *edac_pci_top_main_kobj;
32056 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32057@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32058 }
32059
32060 /* fs_ops table */
32061-static struct sysfs_ops pci_instance_ops = {
32062+static const struct sysfs_ops pci_instance_ops = {
32063 .show = edac_pci_instance_show,
32064 .store = edac_pci_instance_store
32065 };
32066@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32067 return -EIO;
32068 }
32069
32070-static struct sysfs_ops edac_pci_sysfs_ops = {
32071+static const struct sysfs_ops edac_pci_sysfs_ops = {
32072 .show = edac_pci_dev_show,
32073 .store = edac_pci_dev_store
32074 };
32075@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32076 edac_printk(KERN_CRIT, EDAC_PCI,
32077 "Signaled System Error on %s\n",
32078 pci_name(dev));
32079- atomic_inc(&pci_nonparity_count);
32080+ atomic_inc_unchecked(&pci_nonparity_count);
32081 }
32082
32083 if (status & (PCI_STATUS_PARITY)) {
32084@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32085 "Master Data Parity Error on %s\n",
32086 pci_name(dev));
32087
32088- atomic_inc(&pci_parity_count);
32089+ atomic_inc_unchecked(&pci_parity_count);
32090 }
32091
32092 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32093@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32094 "Detected Parity Error on %s\n",
32095 pci_name(dev));
32096
32097- atomic_inc(&pci_parity_count);
32098+ atomic_inc_unchecked(&pci_parity_count);
32099 }
32100 }
32101
32102@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32103 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32104 "Signaled System Error on %s\n",
32105 pci_name(dev));
32106- atomic_inc(&pci_nonparity_count);
32107+ atomic_inc_unchecked(&pci_nonparity_count);
32108 }
32109
32110 if (status & (PCI_STATUS_PARITY)) {
32111@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32112 "Master Data Parity Error on "
32113 "%s\n", pci_name(dev));
32114
32115- atomic_inc(&pci_parity_count);
32116+ atomic_inc_unchecked(&pci_parity_count);
32117 }
32118
32119 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32120@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32121 "Detected Parity Error on %s\n",
32122 pci_name(dev));
32123
32124- atomic_inc(&pci_parity_count);
32125+ atomic_inc_unchecked(&pci_parity_count);
32126 }
32127 }
32128 }
32129@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32130 if (!check_pci_errors)
32131 return;
32132
32133- before_count = atomic_read(&pci_parity_count);
32134+ before_count = atomic_read_unchecked(&pci_parity_count);
32135
32136 /* scan all PCI devices looking for a Parity Error on devices and
32137 * bridges.
32138@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32139 /* Only if operator has selected panic on PCI Error */
32140 if (edac_pci_get_panic_on_pe()) {
32141 /* If the count is different 'after' from 'before' */
32142- if (before_count != atomic_read(&pci_parity_count))
32143+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32144 panic("EDAC: PCI Parity Error");
32145 }
32146 }
32147diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32148index 6c9a0f2..9c1cf7e 100644
32149--- a/drivers/edac/i3000_edac.c
32150+++ b/drivers/edac/i3000_edac.c
32151@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32152 edac_mc_free(mci);
32153 }
32154
32155-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32156+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32157 {
32158 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32159 I3000},
32160diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32161index fde4db9..fe108f9 100644
32162--- a/drivers/edac/i3200_edac.c
32163+++ b/drivers/edac/i3200_edac.c
32164@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32165 edac_mc_free(mci);
32166 }
32167
32168-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32169+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32170 {
32171 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32172 I3200},
32173diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32174index adc10a2..57d4ccf 100644
32175--- a/drivers/edac/i5000_edac.c
32176+++ b/drivers/edac/i5000_edac.c
32177@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32178 *
32179 * The "E500P" device is the first device supported.
32180 */
32181-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32182+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32183 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32184 .driver_data = I5000P},
32185
32186diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32187index 22db05a..b2b5503 100644
32188--- a/drivers/edac/i5100_edac.c
32189+++ b/drivers/edac/i5100_edac.c
32190@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32191 edac_mc_free(mci);
32192 }
32193
32194-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32195+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32196 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32197 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32198 { 0, }
32199diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32200index f99d106..f050710 100644
32201--- a/drivers/edac/i5400_edac.c
32202+++ b/drivers/edac/i5400_edac.c
32203@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32204 *
32205 * The "E500P" device is the first device supported.
32206 */
32207-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32208+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32209 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32210 {0,} /* 0 terminated list. */
32211 };
32212diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32213index 577760a..9ce16ce 100644
32214--- a/drivers/edac/i82443bxgx_edac.c
32215+++ b/drivers/edac/i82443bxgx_edac.c
32216@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32217
32218 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32219
32220-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32221+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32222 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32223 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32224 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32225diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32226index c0088ba..64a7b98 100644
32227--- a/drivers/edac/i82860_edac.c
32228+++ b/drivers/edac/i82860_edac.c
32229@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32230 edac_mc_free(mci);
32231 }
32232
32233-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32234+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32235 {
32236 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32237 I82860},
32238diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32239index b2d83b9..a34357b 100644
32240--- a/drivers/edac/i82875p_edac.c
32241+++ b/drivers/edac/i82875p_edac.c
32242@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32243 edac_mc_free(mci);
32244 }
32245
32246-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32247+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32248 {
32249 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32250 I82875P},
32251diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32252index 2eed3ea..87bbbd1 100644
32253--- a/drivers/edac/i82975x_edac.c
32254+++ b/drivers/edac/i82975x_edac.c
32255@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32256 edac_mc_free(mci);
32257 }
32258
32259-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32260+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32261 {
32262 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32263 I82975X
32264diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32265index 9900675..78ac2b6 100644
32266--- a/drivers/edac/r82600_edac.c
32267+++ b/drivers/edac/r82600_edac.c
32268@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32269 edac_mc_free(mci);
32270 }
32271
32272-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32273+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32274 {
32275 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32276 },
32277diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32278index d4ec605..4cfec4e 100644
32279--- a/drivers/edac/x38_edac.c
32280+++ b/drivers/edac/x38_edac.c
32281@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32282 edac_mc_free(mci);
32283 }
32284
32285-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32286+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32287 {
32288 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32289 X38},
32290diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32291index 3fc2ceb..daf098f 100644
32292--- a/drivers/firewire/core-card.c
32293+++ b/drivers/firewire/core-card.c
32294@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32295
32296 void fw_core_remove_card(struct fw_card *card)
32297 {
32298- struct fw_card_driver dummy_driver = dummy_driver_template;
32299+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32300
32301 card->driver->update_phy_reg(card, 4,
32302 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32303diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32304index 4560d8f..36db24a 100644
32305--- a/drivers/firewire/core-cdev.c
32306+++ b/drivers/firewire/core-cdev.c
32307@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32308 int ret;
32309
32310 if ((request->channels == 0 && request->bandwidth == 0) ||
32311- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32312- request->bandwidth < 0)
32313+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32314 return -EINVAL;
32315
32316 r = kmalloc(sizeof(*r), GFP_KERNEL);
32317diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32318index da628c7..cf54a2c 100644
32319--- a/drivers/firewire/core-transaction.c
32320+++ b/drivers/firewire/core-transaction.c
32321@@ -36,6 +36,7 @@
32322 #include <linux/string.h>
32323 #include <linux/timer.h>
32324 #include <linux/types.h>
32325+#include <linux/sched.h>
32326
32327 #include <asm/byteorder.h>
32328
32329@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32330 struct transaction_callback_data d;
32331 struct fw_transaction t;
32332
32333+ pax_track_stack();
32334+
32335 init_completion(&d.done);
32336 d.payload = payload;
32337 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32338diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32339index 7ff6e75..a2965d9 100644
32340--- a/drivers/firewire/core.h
32341+++ b/drivers/firewire/core.h
32342@@ -86,6 +86,7 @@ struct fw_card_driver {
32343
32344 int (*stop_iso)(struct fw_iso_context *ctx);
32345 };
32346+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32347
32348 void fw_card_initialize(struct fw_card *card,
32349 const struct fw_card_driver *driver, struct device *device);
32350diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32351index 3a2ccb0..82fd7c4 100644
32352--- a/drivers/firmware/dmi_scan.c
32353+++ b/drivers/firmware/dmi_scan.c
32354@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32355 }
32356 }
32357 else {
32358- /*
32359- * no iounmap() for that ioremap(); it would be a no-op, but
32360- * it's so early in setup that sucker gets confused into doing
32361- * what it shouldn't if we actually call it.
32362- */
32363 p = dmi_ioremap(0xF0000, 0x10000);
32364 if (p == NULL)
32365 goto error;
32366@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32367 if (buf == NULL)
32368 return -1;
32369
32370- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32371+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32372
32373 iounmap(buf);
32374 return 0;
32375diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32376index 9e4f59d..110e24e 100644
32377--- a/drivers/firmware/edd.c
32378+++ b/drivers/firmware/edd.c
32379@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32380 return ret;
32381 }
32382
32383-static struct sysfs_ops edd_attr_ops = {
32384+static const struct sysfs_ops edd_attr_ops = {
32385 .show = edd_attr_show,
32386 };
32387
32388diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32389index f4f709d..082f06e 100644
32390--- a/drivers/firmware/efivars.c
32391+++ b/drivers/firmware/efivars.c
32392@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32393 return ret;
32394 }
32395
32396-static struct sysfs_ops efivar_attr_ops = {
32397+static const struct sysfs_ops efivar_attr_ops = {
32398 .show = efivar_attr_show,
32399 .store = efivar_attr_store,
32400 };
32401diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32402index 051d1eb..0a5d4e7 100644
32403--- a/drivers/firmware/iscsi_ibft.c
32404+++ b/drivers/firmware/iscsi_ibft.c
32405@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32406 return ret;
32407 }
32408
32409-static struct sysfs_ops ibft_attr_ops = {
32410+static const struct sysfs_ops ibft_attr_ops = {
32411 .show = ibft_show_attribute,
32412 };
32413
32414diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32415index 56f9234..8c58c7b 100644
32416--- a/drivers/firmware/memmap.c
32417+++ b/drivers/firmware/memmap.c
32418@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32419 NULL
32420 };
32421
32422-static struct sysfs_ops memmap_attr_ops = {
32423+static const struct sysfs_ops memmap_attr_ops = {
32424 .show = memmap_attr_show,
32425 };
32426
32427diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32428index b16c9a8..2af7d3f 100644
32429--- a/drivers/gpio/vr41xx_giu.c
32430+++ b/drivers/gpio/vr41xx_giu.c
32431@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32432 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32433 maskl, pendl, maskh, pendh);
32434
32435- atomic_inc(&irq_err_count);
32436+ atomic_inc_unchecked(&irq_err_count);
32437
32438 return -EINVAL;
32439 }
32440diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32441index bea6efc..3dc0f42 100644
32442--- a/drivers/gpu/drm/drm_crtc.c
32443+++ b/drivers/gpu/drm/drm_crtc.c
32444@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32445 */
32446 if ((out_resp->count_modes >= mode_count) && mode_count) {
32447 copied = 0;
32448- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32449+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32450 list_for_each_entry(mode, &connector->modes, head) {
32451 drm_crtc_convert_to_umode(&u_mode, mode);
32452 if (copy_to_user(mode_ptr + copied,
32453@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32454
32455 if ((out_resp->count_props >= props_count) && props_count) {
32456 copied = 0;
32457- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32458- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32459+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32460+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32461 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32462 if (connector->property_ids[i] != 0) {
32463 if (put_user(connector->property_ids[i],
32464@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32465
32466 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32467 copied = 0;
32468- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32469+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32470 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32471 if (connector->encoder_ids[i] != 0) {
32472 if (put_user(connector->encoder_ids[i],
32473@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32474 }
32475
32476 for (i = 0; i < crtc_req->count_connectors; i++) {
32477- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32478+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32479 if (get_user(out_id, &set_connectors_ptr[i])) {
32480 ret = -EFAULT;
32481 goto out;
32482@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32483 out_resp->flags = property->flags;
32484
32485 if ((out_resp->count_values >= value_count) && value_count) {
32486- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32487+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32488 for (i = 0; i < value_count; i++) {
32489 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32490 ret = -EFAULT;
32491@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32492 if (property->flags & DRM_MODE_PROP_ENUM) {
32493 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32494 copied = 0;
32495- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32496+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32497 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32498
32499 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32500@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32501 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32502 copied = 0;
32503 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32504- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32505+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32506
32507 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32508 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32509@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32510 blob = obj_to_blob(obj);
32511
32512 if (out_resp->length == blob->length) {
32513- blob_ptr = (void *)(unsigned long)out_resp->data;
32514+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32515 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32516 ret = -EFAULT;
32517 goto done;
32518diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32519index 1b8745d..92fdbf6 100644
32520--- a/drivers/gpu/drm/drm_crtc_helper.c
32521+++ b/drivers/gpu/drm/drm_crtc_helper.c
32522@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32523 struct drm_crtc *tmp;
32524 int crtc_mask = 1;
32525
32526- WARN(!crtc, "checking null crtc?");
32527+ BUG_ON(!crtc);
32528
32529 dev = crtc->dev;
32530
32531@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32532
32533 adjusted_mode = drm_mode_duplicate(dev, mode);
32534
32535+ pax_track_stack();
32536+
32537 crtc->enabled = drm_helper_crtc_in_use(crtc);
32538
32539 if (!crtc->enabled)
32540diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32541index 0e27d98..dec8768 100644
32542--- a/drivers/gpu/drm/drm_drv.c
32543+++ b/drivers/gpu/drm/drm_drv.c
32544@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32545 char *kdata = NULL;
32546
32547 atomic_inc(&dev->ioctl_count);
32548- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32549+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32550 ++file_priv->ioctl_count;
32551
32552 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32553diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32554index ba14553..182d0bb 100644
32555--- a/drivers/gpu/drm/drm_fops.c
32556+++ b/drivers/gpu/drm/drm_fops.c
32557@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32558 }
32559
32560 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32561- atomic_set(&dev->counts[i], 0);
32562+ atomic_set_unchecked(&dev->counts[i], 0);
32563
32564 dev->sigdata.lock = NULL;
32565
32566@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32567
32568 retcode = drm_open_helper(inode, filp, dev);
32569 if (!retcode) {
32570- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32571+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32572 spin_lock(&dev->count_lock);
32573- if (!dev->open_count++) {
32574+ if (local_inc_return(&dev->open_count) == 1) {
32575 spin_unlock(&dev->count_lock);
32576 retcode = drm_setup(dev);
32577 goto out;
32578@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32579
32580 lock_kernel();
32581
32582- DRM_DEBUG("open_count = %d\n", dev->open_count);
32583+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32584
32585 if (dev->driver->preclose)
32586 dev->driver->preclose(dev, file_priv);
32587@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32588 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32589 task_pid_nr(current),
32590 (long)old_encode_dev(file_priv->minor->device),
32591- dev->open_count);
32592+ local_read(&dev->open_count));
32593
32594 /* if the master has gone away we can't do anything with the lock */
32595 if (file_priv->minor->master)
32596@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32597 * End inline drm_release
32598 */
32599
32600- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32601+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32602 spin_lock(&dev->count_lock);
32603- if (!--dev->open_count) {
32604+ if (local_dec_and_test(&dev->open_count)) {
32605 if (atomic_read(&dev->ioctl_count)) {
32606 DRM_ERROR("Device busy: %d\n",
32607 atomic_read(&dev->ioctl_count));
32608diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32609index 8bf3770..7942280 100644
32610--- a/drivers/gpu/drm/drm_gem.c
32611+++ b/drivers/gpu/drm/drm_gem.c
32612@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32613 spin_lock_init(&dev->object_name_lock);
32614 idr_init(&dev->object_name_idr);
32615 atomic_set(&dev->object_count, 0);
32616- atomic_set(&dev->object_memory, 0);
32617+ atomic_set_unchecked(&dev->object_memory, 0);
32618 atomic_set(&dev->pin_count, 0);
32619- atomic_set(&dev->pin_memory, 0);
32620+ atomic_set_unchecked(&dev->pin_memory, 0);
32621 atomic_set(&dev->gtt_count, 0);
32622- atomic_set(&dev->gtt_memory, 0);
32623+ atomic_set_unchecked(&dev->gtt_memory, 0);
32624
32625 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32626 if (!mm) {
32627@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32628 goto fput;
32629 }
32630 atomic_inc(&dev->object_count);
32631- atomic_add(obj->size, &dev->object_memory);
32632+ atomic_add_unchecked(obj->size, &dev->object_memory);
32633 return obj;
32634 fput:
32635 fput(obj->filp);
32636@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32637
32638 fput(obj->filp);
32639 atomic_dec(&dev->object_count);
32640- atomic_sub(obj->size, &dev->object_memory);
32641+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32642 kfree(obj);
32643 }
32644 EXPORT_SYMBOL(drm_gem_object_free);
32645diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32646index f0f6c6b..34af322 100644
32647--- a/drivers/gpu/drm/drm_info.c
32648+++ b/drivers/gpu/drm/drm_info.c
32649@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32650 struct drm_local_map *map;
32651 struct drm_map_list *r_list;
32652
32653- /* Hardcoded from _DRM_FRAME_BUFFER,
32654- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32655- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32656- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32657+ static const char * const types[] = {
32658+ [_DRM_FRAME_BUFFER] = "FB",
32659+ [_DRM_REGISTERS] = "REG",
32660+ [_DRM_SHM] = "SHM",
32661+ [_DRM_AGP] = "AGP",
32662+ [_DRM_SCATTER_GATHER] = "SG",
32663+ [_DRM_CONSISTENT] = "PCI",
32664+ [_DRM_GEM] = "GEM" };
32665 const char *type;
32666 int i;
32667
32668@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32669 map = r_list->map;
32670 if (!map)
32671 continue;
32672- if (map->type < 0 || map->type > 5)
32673+ if (map->type >= ARRAY_SIZE(types))
32674 type = "??";
32675 else
32676 type = types[map->type];
32677@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32678 struct drm_device *dev = node->minor->dev;
32679
32680 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32681- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32682+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32683 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32684- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32685- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32686+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32687+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32688 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32689 return 0;
32690 }
32691@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32692 mutex_lock(&dev->struct_mutex);
32693 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32694 atomic_read(&dev->vma_count),
32695+#ifdef CONFIG_GRKERNSEC_HIDESYM
32696+ NULL, 0);
32697+#else
32698 high_memory, (u64)virt_to_phys(high_memory));
32699+#endif
32700
32701 list_for_each_entry(pt, &dev->vmalist, head) {
32702 vma = pt->vma;
32703@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32704 continue;
32705 seq_printf(m,
32706 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32707- pt->pid, vma->vm_start, vma->vm_end,
32708+ pt->pid,
32709+#ifdef CONFIG_GRKERNSEC_HIDESYM
32710+ 0, 0,
32711+#else
32712+ vma->vm_start, vma->vm_end,
32713+#endif
32714 vma->vm_flags & VM_READ ? 'r' : '-',
32715 vma->vm_flags & VM_WRITE ? 'w' : '-',
32716 vma->vm_flags & VM_EXEC ? 'x' : '-',
32717 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32718 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32719 vma->vm_flags & VM_IO ? 'i' : '-',
32720+#ifdef CONFIG_GRKERNSEC_HIDESYM
32721+ 0);
32722+#else
32723 vma->vm_pgoff);
32724+#endif
32725
32726 #if defined(__i386__)
32727 pgprot = pgprot_val(vma->vm_page_prot);
32728diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32729index 282d9fd..71e5f11 100644
32730--- a/drivers/gpu/drm/drm_ioc32.c
32731+++ b/drivers/gpu/drm/drm_ioc32.c
32732@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32733 request = compat_alloc_user_space(nbytes);
32734 if (!access_ok(VERIFY_WRITE, request, nbytes))
32735 return -EFAULT;
32736- list = (struct drm_buf_desc *) (request + 1);
32737+ list = (struct drm_buf_desc __user *) (request + 1);
32738
32739 if (__put_user(count, &request->count)
32740 || __put_user(list, &request->list))
32741@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32742 request = compat_alloc_user_space(nbytes);
32743 if (!access_ok(VERIFY_WRITE, request, nbytes))
32744 return -EFAULT;
32745- list = (struct drm_buf_pub *) (request + 1);
32746+ list = (struct drm_buf_pub __user *) (request + 1);
32747
32748 if (__put_user(count, &request->count)
32749 || __put_user(list, &request->list))
32750diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32751index 9b9ff46..4ea724c 100644
32752--- a/drivers/gpu/drm/drm_ioctl.c
32753+++ b/drivers/gpu/drm/drm_ioctl.c
32754@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32755 stats->data[i].value =
32756 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32757 else
32758- stats->data[i].value = atomic_read(&dev->counts[i]);
32759+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32760 stats->data[i].type = dev->types[i];
32761 }
32762
32763diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32764index e2f70a5..c703e86 100644
32765--- a/drivers/gpu/drm/drm_lock.c
32766+++ b/drivers/gpu/drm/drm_lock.c
32767@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32768 if (drm_lock_take(&master->lock, lock->context)) {
32769 master->lock.file_priv = file_priv;
32770 master->lock.lock_time = jiffies;
32771- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32772+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32773 break; /* Got lock */
32774 }
32775
32776@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32777 return -EINVAL;
32778 }
32779
32780- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32781+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32782
32783 /* kernel_context_switch isn't used by any of the x86 drm
32784 * modules but is required by the Sparc driver.
32785diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32786index 7d1d88c..b9131b2 100644
32787--- a/drivers/gpu/drm/i810/i810_dma.c
32788+++ b/drivers/gpu/drm/i810/i810_dma.c
32789@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32790 dma->buflist[vertex->idx],
32791 vertex->discard, vertex->used);
32792
32793- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32794- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32795+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32796+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32797 sarea_priv->last_enqueue = dev_priv->counter - 1;
32798 sarea_priv->last_dispatch = (int)hw_status[5];
32799
32800@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32801 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32802 mc->last_render);
32803
32804- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32805- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32806+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32807+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32808 sarea_priv->last_enqueue = dev_priv->counter - 1;
32809 sarea_priv->last_dispatch = (int)hw_status[5];
32810
32811diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32812index 21e2691..7321edd 100644
32813--- a/drivers/gpu/drm/i810/i810_drv.h
32814+++ b/drivers/gpu/drm/i810/i810_drv.h
32815@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32816 int page_flipping;
32817
32818 wait_queue_head_t irq_queue;
32819- atomic_t irq_received;
32820- atomic_t irq_emitted;
32821+ atomic_unchecked_t irq_received;
32822+ atomic_unchecked_t irq_emitted;
32823
32824 int front_offset;
32825 } drm_i810_private_t;
32826diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32827index da82afe..48a45de 100644
32828--- a/drivers/gpu/drm/i830/i830_drv.h
32829+++ b/drivers/gpu/drm/i830/i830_drv.h
32830@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32831 int page_flipping;
32832
32833 wait_queue_head_t irq_queue;
32834- atomic_t irq_received;
32835- atomic_t irq_emitted;
32836+ atomic_unchecked_t irq_received;
32837+ atomic_unchecked_t irq_emitted;
32838
32839 int use_mi_batchbuffer_start;
32840
32841diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32842index 91ec2bb..6f21fab 100644
32843--- a/drivers/gpu/drm/i830/i830_irq.c
32844+++ b/drivers/gpu/drm/i830/i830_irq.c
32845@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32846
32847 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32848
32849- atomic_inc(&dev_priv->irq_received);
32850+ atomic_inc_unchecked(&dev_priv->irq_received);
32851 wake_up_interruptible(&dev_priv->irq_queue);
32852
32853 return IRQ_HANDLED;
32854@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32855
32856 DRM_DEBUG("%s\n", __func__);
32857
32858- atomic_inc(&dev_priv->irq_emitted);
32859+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32860
32861 BEGIN_LP_RING(2);
32862 OUT_RING(0);
32863 OUT_RING(GFX_OP_USER_INTERRUPT);
32864 ADVANCE_LP_RING();
32865
32866- return atomic_read(&dev_priv->irq_emitted);
32867+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32868 }
32869
32870 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32871@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32872
32873 DRM_DEBUG("%s\n", __func__);
32874
32875- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32876+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32877 return 0;
32878
32879 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32880@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32881
32882 for (;;) {
32883 __set_current_state(TASK_INTERRUPTIBLE);
32884- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32885+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32886 break;
32887 if ((signed)(end - jiffies) <= 0) {
32888 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32889@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
32890 I830_WRITE16(I830REG_HWSTAM, 0xffff);
32891 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
32892 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
32893- atomic_set(&dev_priv->irq_received, 0);
32894- atomic_set(&dev_priv->irq_emitted, 0);
32895+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32896+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
32897 init_waitqueue_head(&dev_priv->irq_queue);
32898 }
32899
32900diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
32901index 288fc50..c6092055 100644
32902--- a/drivers/gpu/drm/i915/dvo.h
32903+++ b/drivers/gpu/drm/i915/dvo.h
32904@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
32905 *
32906 * \return singly-linked list of modes or NULL if no modes found.
32907 */
32908- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
32909+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
32910
32911 /**
32912 * Clean up driver-specific bits of the output
32913 */
32914- void (*destroy) (struct intel_dvo_device *dvo);
32915+ void (* const destroy) (struct intel_dvo_device *dvo);
32916
32917 /**
32918 * Debugging hook to dump device registers to log file
32919 */
32920- void (*dump_regs)(struct intel_dvo_device *dvo);
32921+ void (* const dump_regs)(struct intel_dvo_device *dvo);
32922 };
32923
32924-extern struct intel_dvo_dev_ops sil164_ops;
32925-extern struct intel_dvo_dev_ops ch7xxx_ops;
32926-extern struct intel_dvo_dev_ops ivch_ops;
32927-extern struct intel_dvo_dev_ops tfp410_ops;
32928-extern struct intel_dvo_dev_ops ch7017_ops;
32929+extern const struct intel_dvo_dev_ops sil164_ops;
32930+extern const struct intel_dvo_dev_ops ch7xxx_ops;
32931+extern const struct intel_dvo_dev_ops ivch_ops;
32932+extern const struct intel_dvo_dev_ops tfp410_ops;
32933+extern const struct intel_dvo_dev_ops ch7017_ops;
32934
32935 #endif /* _INTEL_DVO_H */
32936diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
32937index 621815b..499d82e 100644
32938--- a/drivers/gpu/drm/i915/dvo_ch7017.c
32939+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
32940@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
32941 }
32942 }
32943
32944-struct intel_dvo_dev_ops ch7017_ops = {
32945+const struct intel_dvo_dev_ops ch7017_ops = {
32946 .init = ch7017_init,
32947 .detect = ch7017_detect,
32948 .mode_valid = ch7017_mode_valid,
32949diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32950index a9b8962..ac769ba 100644
32951--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
32952+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32953@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
32954 }
32955 }
32956
32957-struct intel_dvo_dev_ops ch7xxx_ops = {
32958+const struct intel_dvo_dev_ops ch7xxx_ops = {
32959 .init = ch7xxx_init,
32960 .detect = ch7xxx_detect,
32961 .mode_valid = ch7xxx_mode_valid,
32962diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
32963index aa176f9..ed2930c 100644
32964--- a/drivers/gpu/drm/i915/dvo_ivch.c
32965+++ b/drivers/gpu/drm/i915/dvo_ivch.c
32966@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
32967 }
32968 }
32969
32970-struct intel_dvo_dev_ops ivch_ops= {
32971+const struct intel_dvo_dev_ops ivch_ops= {
32972 .init = ivch_init,
32973 .dpms = ivch_dpms,
32974 .save = ivch_save,
32975diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
32976index e1c1f73..7dbebcf 100644
32977--- a/drivers/gpu/drm/i915/dvo_sil164.c
32978+++ b/drivers/gpu/drm/i915/dvo_sil164.c
32979@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
32980 }
32981 }
32982
32983-struct intel_dvo_dev_ops sil164_ops = {
32984+const struct intel_dvo_dev_ops sil164_ops = {
32985 .init = sil164_init,
32986 .detect = sil164_detect,
32987 .mode_valid = sil164_mode_valid,
32988diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
32989index 16dce84..7e1b6f8 100644
32990--- a/drivers/gpu/drm/i915/dvo_tfp410.c
32991+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
32992@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
32993 }
32994 }
32995
32996-struct intel_dvo_dev_ops tfp410_ops = {
32997+const struct intel_dvo_dev_ops tfp410_ops = {
32998 .init = tfp410_init,
32999 .detect = tfp410_detect,
33000 .mode_valid = tfp410_mode_valid,
33001diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33002index 7e859d6..7d1cf2b 100644
33003--- a/drivers/gpu/drm/i915/i915_debugfs.c
33004+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33005@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33006 I915_READ(GTIMR));
33007 }
33008 seq_printf(m, "Interrupts received: %d\n",
33009- atomic_read(&dev_priv->irq_received));
33010+ atomic_read_unchecked(&dev_priv->irq_received));
33011 if (dev_priv->hw_status_page != NULL) {
33012 seq_printf(m, "Current sequence: %d\n",
33013 i915_get_gem_seqno(dev));
33014diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33015index 5449239..7e4f68d 100644
33016--- a/drivers/gpu/drm/i915/i915_drv.c
33017+++ b/drivers/gpu/drm/i915/i915_drv.c
33018@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33019 return i915_resume(dev);
33020 }
33021
33022-static struct vm_operations_struct i915_gem_vm_ops = {
33023+static const struct vm_operations_struct i915_gem_vm_ops = {
33024 .fault = i915_gem_fault,
33025 .open = drm_gem_vm_open,
33026 .close = drm_gem_vm_close,
33027diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33028index 97163f7..c24c7c7 100644
33029--- a/drivers/gpu/drm/i915/i915_drv.h
33030+++ b/drivers/gpu/drm/i915/i915_drv.h
33031@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33032 /* display clock increase/decrease */
33033 /* pll clock increase/decrease */
33034 /* clock gating init */
33035-};
33036+} __no_const;
33037
33038 typedef struct drm_i915_private {
33039 struct drm_device *dev;
33040@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33041 int page_flipping;
33042
33043 wait_queue_head_t irq_queue;
33044- atomic_t irq_received;
33045+ atomic_unchecked_t irq_received;
33046 /** Protects user_irq_refcount and irq_mask_reg */
33047 spinlock_t user_irq_lock;
33048 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33049diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33050index 27a3074..eb3f959 100644
33051--- a/drivers/gpu/drm/i915/i915_gem.c
33052+++ b/drivers/gpu/drm/i915/i915_gem.c
33053@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33054
33055 args->aper_size = dev->gtt_total;
33056 args->aper_available_size = (args->aper_size -
33057- atomic_read(&dev->pin_memory));
33058+ atomic_read_unchecked(&dev->pin_memory));
33059
33060 return 0;
33061 }
33062@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33063
33064 if (obj_priv->gtt_space) {
33065 atomic_dec(&dev->gtt_count);
33066- atomic_sub(obj->size, &dev->gtt_memory);
33067+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33068
33069 drm_mm_put_block(obj_priv->gtt_space);
33070 obj_priv->gtt_space = NULL;
33071@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33072 goto search_free;
33073 }
33074 atomic_inc(&dev->gtt_count);
33075- atomic_add(obj->size, &dev->gtt_memory);
33076+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33077
33078 /* Assert that the object is not currently in any GPU domain. As it
33079 * wasn't in the GTT, there shouldn't be any way it could have been in
33080@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33081 "%d/%d gtt bytes\n",
33082 atomic_read(&dev->object_count),
33083 atomic_read(&dev->pin_count),
33084- atomic_read(&dev->object_memory),
33085- atomic_read(&dev->pin_memory),
33086- atomic_read(&dev->gtt_memory),
33087+ atomic_read_unchecked(&dev->object_memory),
33088+ atomic_read_unchecked(&dev->pin_memory),
33089+ atomic_read_unchecked(&dev->gtt_memory),
33090 dev->gtt_total);
33091 }
33092 goto err;
33093@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33094 */
33095 if (obj_priv->pin_count == 1) {
33096 atomic_inc(&dev->pin_count);
33097- atomic_add(obj->size, &dev->pin_memory);
33098+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33099 if (!obj_priv->active &&
33100 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33101 !list_empty(&obj_priv->list))
33102@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33103 list_move_tail(&obj_priv->list,
33104 &dev_priv->mm.inactive_list);
33105 atomic_dec(&dev->pin_count);
33106- atomic_sub(obj->size, &dev->pin_memory);
33107+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33108 }
33109 i915_verify_inactive(dev, __FILE__, __LINE__);
33110 }
33111diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33112index 63f28ad..f5469da 100644
33113--- a/drivers/gpu/drm/i915/i915_irq.c
33114+++ b/drivers/gpu/drm/i915/i915_irq.c
33115@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33116 int irq_received;
33117 int ret = IRQ_NONE;
33118
33119- atomic_inc(&dev_priv->irq_received);
33120+ atomic_inc_unchecked(&dev_priv->irq_received);
33121
33122 if (IS_IGDNG(dev))
33123 return igdng_irq_handler(dev);
33124@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33125 {
33126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33127
33128- atomic_set(&dev_priv->irq_received, 0);
33129+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33130
33131 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33132 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33133diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33134index 5d9c6a7..d1b0e29 100644
33135--- a/drivers/gpu/drm/i915/intel_sdvo.c
33136+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33137@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33138 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33139
33140 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33141- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33142+ pax_open_kernel();
33143+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33144+ pax_close_kernel();
33145
33146 /* Read the regs to test if we can talk to the device */
33147 for (i = 0; i < 0x40; i++) {
33148diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33149index be6c6b9..8615d9c 100644
33150--- a/drivers/gpu/drm/mga/mga_drv.h
33151+++ b/drivers/gpu/drm/mga/mga_drv.h
33152@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33153 u32 clear_cmd;
33154 u32 maccess;
33155
33156- atomic_t vbl_received; /**< Number of vblanks received. */
33157+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33158 wait_queue_head_t fence_queue;
33159- atomic_t last_fence_retired;
33160+ atomic_unchecked_t last_fence_retired;
33161 u32 next_fence_to_post;
33162
33163 unsigned int fb_cpp;
33164diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33165index daa6041..a28a5da 100644
33166--- a/drivers/gpu/drm/mga/mga_irq.c
33167+++ b/drivers/gpu/drm/mga/mga_irq.c
33168@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33169 if (crtc != 0)
33170 return 0;
33171
33172- return atomic_read(&dev_priv->vbl_received);
33173+ return atomic_read_unchecked(&dev_priv->vbl_received);
33174 }
33175
33176
33177@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33178 /* VBLANK interrupt */
33179 if (status & MGA_VLINEPEN) {
33180 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33181- atomic_inc(&dev_priv->vbl_received);
33182+ atomic_inc_unchecked(&dev_priv->vbl_received);
33183 drm_handle_vblank(dev, 0);
33184 handled = 1;
33185 }
33186@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33187 MGA_WRITE(MGA_PRIMEND, prim_end);
33188 }
33189
33190- atomic_inc(&dev_priv->last_fence_retired);
33191+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33192 DRM_WAKEUP(&dev_priv->fence_queue);
33193 handled = 1;
33194 }
33195@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33196 * using fences.
33197 */
33198 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33199- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33200+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33201 - *sequence) <= (1 << 23)));
33202
33203 *sequence = cur_fence;
33204diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33205index 4c39a40..b22a9ea 100644
33206--- a/drivers/gpu/drm/r128/r128_cce.c
33207+++ b/drivers/gpu/drm/r128/r128_cce.c
33208@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33209
33210 /* GH: Simple idle check.
33211 */
33212- atomic_set(&dev_priv->idle_count, 0);
33213+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33214
33215 /* We don't support anything other than bus-mastering ring mode,
33216 * but the ring can be in either AGP or PCI space for the ring
33217diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33218index 3c60829..4faf484 100644
33219--- a/drivers/gpu/drm/r128/r128_drv.h
33220+++ b/drivers/gpu/drm/r128/r128_drv.h
33221@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33222 int is_pci;
33223 unsigned long cce_buffers_offset;
33224
33225- atomic_t idle_count;
33226+ atomic_unchecked_t idle_count;
33227
33228 int page_flipping;
33229 int current_page;
33230 u32 crtc_offset;
33231 u32 crtc_offset_cntl;
33232
33233- atomic_t vbl_received;
33234+ atomic_unchecked_t vbl_received;
33235
33236 u32 color_fmt;
33237 unsigned int front_offset;
33238diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33239index 69810fb..97bf17a 100644
33240--- a/drivers/gpu/drm/r128/r128_irq.c
33241+++ b/drivers/gpu/drm/r128/r128_irq.c
33242@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33243 if (crtc != 0)
33244 return 0;
33245
33246- return atomic_read(&dev_priv->vbl_received);
33247+ return atomic_read_unchecked(&dev_priv->vbl_received);
33248 }
33249
33250 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33251@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33252 /* VBLANK interrupt */
33253 if (status & R128_CRTC_VBLANK_INT) {
33254 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33255- atomic_inc(&dev_priv->vbl_received);
33256+ atomic_inc_unchecked(&dev_priv->vbl_received);
33257 drm_handle_vblank(dev, 0);
33258 return IRQ_HANDLED;
33259 }
33260diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33261index af2665c..51922d2 100644
33262--- a/drivers/gpu/drm/r128/r128_state.c
33263+++ b/drivers/gpu/drm/r128/r128_state.c
33264@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33265
33266 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33267 {
33268- if (atomic_read(&dev_priv->idle_count) == 0) {
33269+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33270 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33271 } else {
33272- atomic_set(&dev_priv->idle_count, 0);
33273+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33274 }
33275 }
33276
33277diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33278index dd72b91..8644b3c 100644
33279--- a/drivers/gpu/drm/radeon/atom.c
33280+++ b/drivers/gpu/drm/radeon/atom.c
33281@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33282 char name[512];
33283 int i;
33284
33285+ pax_track_stack();
33286+
33287 ctx->card = card;
33288 ctx->bios = bios;
33289
33290diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33291index 0d79577..efaa7a5 100644
33292--- a/drivers/gpu/drm/radeon/mkregtable.c
33293+++ b/drivers/gpu/drm/radeon/mkregtable.c
33294@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33295 regex_t mask_rex;
33296 regmatch_t match[4];
33297 char buf[1024];
33298- size_t end;
33299+ long end;
33300 int len;
33301 int done = 0;
33302 int r;
33303 unsigned o;
33304 struct offset *offset;
33305 char last_reg_s[10];
33306- int last_reg;
33307+ unsigned long last_reg;
33308
33309 if (regcomp
33310 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33311diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33312index 6735213..38c2c67 100644
33313--- a/drivers/gpu/drm/radeon/radeon.h
33314+++ b/drivers/gpu/drm/radeon/radeon.h
33315@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33316 */
33317 struct radeon_fence_driver {
33318 uint32_t scratch_reg;
33319- atomic_t seq;
33320+ atomic_unchecked_t seq;
33321 uint32_t last_seq;
33322 unsigned long count_timeout;
33323 wait_queue_head_t queue;
33324@@ -640,7 +640,7 @@ struct radeon_asic {
33325 uint32_t offset, uint32_t obj_size);
33326 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33327 void (*bandwidth_update)(struct radeon_device *rdev);
33328-};
33329+} __no_const;
33330
33331 /*
33332 * Asic structures
33333diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33334index 4e928b9..d8b6008 100644
33335--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33336+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33337@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33338 bool linkb;
33339 struct radeon_i2c_bus_rec ddc_bus;
33340
33341+ pax_track_stack();
33342+
33343 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33344
33345 if (data_offset == 0)
33346@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33347 }
33348 }
33349
33350-struct bios_connector {
33351+static struct bios_connector {
33352 bool valid;
33353 uint16_t line_mux;
33354 uint16_t devices;
33355 int connector_type;
33356 struct radeon_i2c_bus_rec ddc_bus;
33357-};
33358+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33359
33360 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33361 drm_device
33362@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33363 uint8_t dac;
33364 union atom_supported_devices *supported_devices;
33365 int i, j;
33366- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33367
33368 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33369
33370diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33371index 083a181..ccccae0 100644
33372--- a/drivers/gpu/drm/radeon/radeon_display.c
33373+++ b/drivers/gpu/drm/radeon/radeon_display.c
33374@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33375
33376 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33377 error = freq - current_freq;
33378- error = error < 0 ? 0xffffffff : error;
33379+ error = (int32_t)error < 0 ? 0xffffffff : error;
33380 } else
33381 error = abs(current_freq - freq);
33382 vco_diff = abs(vco - best_vco);
33383diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33384index 76e4070..193fa7f 100644
33385--- a/drivers/gpu/drm/radeon/radeon_drv.h
33386+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33387@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33388
33389 /* SW interrupt */
33390 wait_queue_head_t swi_queue;
33391- atomic_t swi_emitted;
33392+ atomic_unchecked_t swi_emitted;
33393 int vblank_crtc;
33394 uint32_t irq_enable_reg;
33395 uint32_t r500_disp_irq_reg;
33396diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33397index 3beb26d..6ce9c4a 100644
33398--- a/drivers/gpu/drm/radeon/radeon_fence.c
33399+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33400@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33401 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33402 return 0;
33403 }
33404- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33405+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33406 if (!rdev->cp.ready) {
33407 /* FIXME: cp is not running assume everythings is done right
33408 * away
33409@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33410 return r;
33411 }
33412 WREG32(rdev->fence_drv.scratch_reg, 0);
33413- atomic_set(&rdev->fence_drv.seq, 0);
33414+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33415 INIT_LIST_HEAD(&rdev->fence_drv.created);
33416 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33417 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33418diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33419index a1bf11d..4a123c0 100644
33420--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33421+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33422@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33423 request = compat_alloc_user_space(sizeof(*request));
33424 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33425 || __put_user(req32.param, &request->param)
33426- || __put_user((void __user *)(unsigned long)req32.value,
33427+ || __put_user((unsigned long)req32.value,
33428 &request->value))
33429 return -EFAULT;
33430
33431diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33432index b79ecc4..8dab92d 100644
33433--- a/drivers/gpu/drm/radeon/radeon_irq.c
33434+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33435@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33436 unsigned int ret;
33437 RING_LOCALS;
33438
33439- atomic_inc(&dev_priv->swi_emitted);
33440- ret = atomic_read(&dev_priv->swi_emitted);
33441+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33442+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33443
33444 BEGIN_RING(4);
33445 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33446@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33447 drm_radeon_private_t *dev_priv =
33448 (drm_radeon_private_t *) dev->dev_private;
33449
33450- atomic_set(&dev_priv->swi_emitted, 0);
33451+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33452 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33453
33454 dev->max_vblank_count = 0x001fffff;
33455diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33456index 4747910..48ca4b3 100644
33457--- a/drivers/gpu/drm/radeon/radeon_state.c
33458+++ b/drivers/gpu/drm/radeon/radeon_state.c
33459@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33460 {
33461 drm_radeon_private_t *dev_priv = dev->dev_private;
33462 drm_radeon_getparam_t *param = data;
33463- int value;
33464+ int value = 0;
33465
33466 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33467
33468diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33469index 1381e06..0e53b17 100644
33470--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33471+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33472@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33473 DRM_INFO("radeon: ttm finalized\n");
33474 }
33475
33476-static struct vm_operations_struct radeon_ttm_vm_ops;
33477-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33478-
33479-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33480-{
33481- struct ttm_buffer_object *bo;
33482- int r;
33483-
33484- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33485- if (bo == NULL) {
33486- return VM_FAULT_NOPAGE;
33487- }
33488- r = ttm_vm_ops->fault(vma, vmf);
33489- return r;
33490-}
33491-
33492 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33493 {
33494 struct drm_file *file_priv;
33495 struct radeon_device *rdev;
33496- int r;
33497
33498 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33499 return drm_mmap(filp, vma);
33500@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33501
33502 file_priv = (struct drm_file *)filp->private_data;
33503 rdev = file_priv->minor->dev->dev_private;
33504- if (rdev == NULL) {
33505+ if (!rdev)
33506 return -EINVAL;
33507- }
33508- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33509- if (unlikely(r != 0)) {
33510- return r;
33511- }
33512- if (unlikely(ttm_vm_ops == NULL)) {
33513- ttm_vm_ops = vma->vm_ops;
33514- radeon_ttm_vm_ops = *ttm_vm_ops;
33515- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33516- }
33517- vma->vm_ops = &radeon_ttm_vm_ops;
33518- return 0;
33519+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33520 }
33521
33522
33523diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33524index b12ff76..0bd0c6e 100644
33525--- a/drivers/gpu/drm/radeon/rs690.c
33526+++ b/drivers/gpu/drm/radeon/rs690.c
33527@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33528 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33529 rdev->pm.sideport_bandwidth.full)
33530 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33531- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33532+ read_delay_latency.full = rfixed_const(800 * 1000);
33533 read_delay_latency.full = rfixed_div(read_delay_latency,
33534 rdev->pm.igp_sideport_mclk);
33535+ a.full = rfixed_const(370);
33536+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33537 } else {
33538 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33539 rdev->pm.k8_bandwidth.full)
33540diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33541index 0ed436e..e6e7ce3 100644
33542--- a/drivers/gpu/drm/ttm/ttm_bo.c
33543+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33544@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33545 NULL
33546 };
33547
33548-static struct sysfs_ops ttm_bo_global_ops = {
33549+static const struct sysfs_ops ttm_bo_global_ops = {
33550 .show = &ttm_bo_global_show
33551 };
33552
33553diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33554index 1c040d0..f9e4af8 100644
33555--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33556+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33557@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33558 {
33559 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33560 vma->vm_private_data;
33561- struct ttm_bo_device *bdev = bo->bdev;
33562+ struct ttm_bo_device *bdev;
33563 unsigned long bus_base;
33564 unsigned long bus_offset;
33565 unsigned long bus_size;
33566@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33567 unsigned long address = (unsigned long)vmf->virtual_address;
33568 int retval = VM_FAULT_NOPAGE;
33569
33570+ if (!bo)
33571+ return VM_FAULT_NOPAGE;
33572+ bdev = bo->bdev;
33573+
33574 /*
33575 * Work around locking order reversal in fault / nopfn
33576 * between mmap_sem and bo_reserve: Perform a trylock operation
33577diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33578index b170071..28ae90e 100644
33579--- a/drivers/gpu/drm/ttm/ttm_global.c
33580+++ b/drivers/gpu/drm/ttm/ttm_global.c
33581@@ -36,7 +36,7 @@
33582 struct ttm_global_item {
33583 struct mutex mutex;
33584 void *object;
33585- int refcount;
33586+ atomic_t refcount;
33587 };
33588
33589 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33590@@ -49,7 +49,7 @@ void ttm_global_init(void)
33591 struct ttm_global_item *item = &glob[i];
33592 mutex_init(&item->mutex);
33593 item->object = NULL;
33594- item->refcount = 0;
33595+ atomic_set(&item->refcount, 0);
33596 }
33597 }
33598
33599@@ -59,7 +59,7 @@ void ttm_global_release(void)
33600 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33601 struct ttm_global_item *item = &glob[i];
33602 BUG_ON(item->object != NULL);
33603- BUG_ON(item->refcount != 0);
33604+ BUG_ON(atomic_read(&item->refcount) != 0);
33605 }
33606 }
33607
33608@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33609 void *object;
33610
33611 mutex_lock(&item->mutex);
33612- if (item->refcount == 0) {
33613+ if (atomic_read(&item->refcount) == 0) {
33614 item->object = kzalloc(ref->size, GFP_KERNEL);
33615 if (unlikely(item->object == NULL)) {
33616 ret = -ENOMEM;
33617@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33618 goto out_err;
33619
33620 }
33621- ++item->refcount;
33622+ atomic_inc(&item->refcount);
33623 ref->object = item->object;
33624 object = item->object;
33625 mutex_unlock(&item->mutex);
33626@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33627 struct ttm_global_item *item = &glob[ref->global_type];
33628
33629 mutex_lock(&item->mutex);
33630- BUG_ON(item->refcount == 0);
33631+ BUG_ON(atomic_read(&item->refcount) == 0);
33632 BUG_ON(ref->object != item->object);
33633- if (--item->refcount == 0) {
33634+ if (atomic_dec_and_test(&item->refcount)) {
33635 ref->release(ref);
33636 item->object = NULL;
33637 }
33638diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33639index 072c281..d8ef483 100644
33640--- a/drivers/gpu/drm/ttm/ttm_memory.c
33641+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33642@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33643 NULL
33644 };
33645
33646-static struct sysfs_ops ttm_mem_zone_ops = {
33647+static const struct sysfs_ops ttm_mem_zone_ops = {
33648 .show = &ttm_mem_zone_show,
33649 .store = &ttm_mem_zone_store
33650 };
33651diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33652index cafcb84..b8e66cc 100644
33653--- a/drivers/gpu/drm/via/via_drv.h
33654+++ b/drivers/gpu/drm/via/via_drv.h
33655@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33656 typedef uint32_t maskarray_t[5];
33657
33658 typedef struct drm_via_irq {
33659- atomic_t irq_received;
33660+ atomic_unchecked_t irq_received;
33661 uint32_t pending_mask;
33662 uint32_t enable_mask;
33663 wait_queue_head_t irq_queue;
33664@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33665 struct timeval last_vblank;
33666 int last_vblank_valid;
33667 unsigned usec_per_vblank;
33668- atomic_t vbl_received;
33669+ atomic_unchecked_t vbl_received;
33670 drm_via_state_t hc_state;
33671 char pci_buf[VIA_PCI_BUF_SIZE];
33672 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33673diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33674index 5935b88..127a8a6 100644
33675--- a/drivers/gpu/drm/via/via_irq.c
33676+++ b/drivers/gpu/drm/via/via_irq.c
33677@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33678 if (crtc != 0)
33679 return 0;
33680
33681- return atomic_read(&dev_priv->vbl_received);
33682+ return atomic_read_unchecked(&dev_priv->vbl_received);
33683 }
33684
33685 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33686@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33687
33688 status = VIA_READ(VIA_REG_INTERRUPT);
33689 if (status & VIA_IRQ_VBLANK_PENDING) {
33690- atomic_inc(&dev_priv->vbl_received);
33691- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33692+ atomic_inc_unchecked(&dev_priv->vbl_received);
33693+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33694 do_gettimeofday(&cur_vblank);
33695 if (dev_priv->last_vblank_valid) {
33696 dev_priv->usec_per_vblank =
33697@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33698 dev_priv->last_vblank = cur_vblank;
33699 dev_priv->last_vblank_valid = 1;
33700 }
33701- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33702+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33703 DRM_DEBUG("US per vblank is: %u\n",
33704 dev_priv->usec_per_vblank);
33705 }
33706@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33707
33708 for (i = 0; i < dev_priv->num_irqs; ++i) {
33709 if (status & cur_irq->pending_mask) {
33710- atomic_inc(&cur_irq->irq_received);
33711+ atomic_inc_unchecked(&cur_irq->irq_received);
33712 DRM_WAKEUP(&cur_irq->irq_queue);
33713 handled = 1;
33714 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33715@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33716 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33717 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33718 masks[irq][4]));
33719- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33720+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33721 } else {
33722 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33723 (((cur_irq_sequence =
33724- atomic_read(&cur_irq->irq_received)) -
33725+ atomic_read_unchecked(&cur_irq->irq_received)) -
33726 *sequence) <= (1 << 23)));
33727 }
33728 *sequence = cur_irq_sequence;
33729@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33730 }
33731
33732 for (i = 0; i < dev_priv->num_irqs; ++i) {
33733- atomic_set(&cur_irq->irq_received, 0);
33734+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33735 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33736 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33737 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33738@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33739 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33740 case VIA_IRQ_RELATIVE:
33741 irqwait->request.sequence +=
33742- atomic_read(&cur_irq->irq_received);
33743+ atomic_read_unchecked(&cur_irq->irq_received);
33744 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33745 case VIA_IRQ_ABSOLUTE:
33746 break;
33747diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33748index aa8688d..6a0140c 100644
33749--- a/drivers/gpu/vga/vgaarb.c
33750+++ b/drivers/gpu/vga/vgaarb.c
33751@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33752 uc = &priv->cards[i];
33753 }
33754
33755- if (!uc)
33756- return -EINVAL;
33757+ if (!uc) {
33758+ ret_val = -EINVAL;
33759+ goto done;
33760+ }
33761
33762- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33763- return -EINVAL;
33764+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33765+ ret_val = -EINVAL;
33766+ goto done;
33767+ }
33768
33769- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33770- return -EINVAL;
33771+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33772+ ret_val = -EINVAL;
33773+ goto done;
33774+ }
33775
33776 vga_put(pdev, io_state);
33777
33778diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33779index f3f1415..e561d90 100644
33780--- a/drivers/hid/hid-core.c
33781+++ b/drivers/hid/hid-core.c
33782@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33783
33784 int hid_add_device(struct hid_device *hdev)
33785 {
33786- static atomic_t id = ATOMIC_INIT(0);
33787+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33788 int ret;
33789
33790 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33791@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33792 /* XXX hack, any other cleaner solution after the driver core
33793 * is converted to allow more than 20 bytes as the device name? */
33794 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33795- hdev->vendor, hdev->product, atomic_inc_return(&id));
33796+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33797
33798 ret = device_add(&hdev->dev);
33799 if (!ret)
33800diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33801index 8b6ee24..70f657d 100644
33802--- a/drivers/hid/usbhid/hiddev.c
33803+++ b/drivers/hid/usbhid/hiddev.c
33804@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33805 return put_user(HID_VERSION, (int __user *)arg);
33806
33807 case HIDIOCAPPLICATION:
33808- if (arg < 0 || arg >= hid->maxapplication)
33809+ if (arg >= hid->maxapplication)
33810 return -EINVAL;
33811
33812 for (i = 0; i < hid->maxcollection; i++)
33813diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33814index 5d5ed69..f40533e 100644
33815--- a/drivers/hwmon/lis3lv02d.c
33816+++ b/drivers/hwmon/lis3lv02d.c
33817@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33818 * the lid is closed. This leads to interrupts as soon as a little move
33819 * is done.
33820 */
33821- atomic_inc(&lis3_dev.count);
33822+ atomic_inc_unchecked(&lis3_dev.count);
33823
33824 wake_up_interruptible(&lis3_dev.misc_wait);
33825 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33826@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33827 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33828 return -EBUSY; /* already open */
33829
33830- atomic_set(&lis3_dev.count, 0);
33831+ atomic_set_unchecked(&lis3_dev.count, 0);
33832
33833 /*
33834 * The sensor can generate interrupts for free-fall and direction
33835@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33836 add_wait_queue(&lis3_dev.misc_wait, &wait);
33837 while (true) {
33838 set_current_state(TASK_INTERRUPTIBLE);
33839- data = atomic_xchg(&lis3_dev.count, 0);
33840+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33841 if (data)
33842 break;
33843
33844@@ -244,7 +244,7 @@ out:
33845 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33846 {
33847 poll_wait(file, &lis3_dev.misc_wait, wait);
33848- if (atomic_read(&lis3_dev.count))
33849+ if (atomic_read_unchecked(&lis3_dev.count))
33850 return POLLIN | POLLRDNORM;
33851 return 0;
33852 }
33853diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33854index 7cdd76f..fe0efdf 100644
33855--- a/drivers/hwmon/lis3lv02d.h
33856+++ b/drivers/hwmon/lis3lv02d.h
33857@@ -201,7 +201,7 @@ struct lis3lv02d {
33858
33859 struct input_polled_dev *idev; /* input device */
33860 struct platform_device *pdev; /* platform device */
33861- atomic_t count; /* interrupt count after last read */
33862+ atomic_unchecked_t count; /* interrupt count after last read */
33863 int xcalib; /* calibrated null value for x */
33864 int ycalib; /* calibrated null value for y */
33865 int zcalib; /* calibrated null value for z */
33866diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33867index 2040507..706ec1e 100644
33868--- a/drivers/hwmon/sht15.c
33869+++ b/drivers/hwmon/sht15.c
33870@@ -112,7 +112,7 @@ struct sht15_data {
33871 int supply_uV;
33872 int supply_uV_valid;
33873 struct work_struct update_supply_work;
33874- atomic_t interrupt_handled;
33875+ atomic_unchecked_t interrupt_handled;
33876 };
33877
33878 /**
33879@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33880 return ret;
33881
33882 gpio_direction_input(data->pdata->gpio_data);
33883- atomic_set(&data->interrupt_handled, 0);
33884+ atomic_set_unchecked(&data->interrupt_handled, 0);
33885
33886 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33887 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33888 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33889 /* Only relevant if the interrupt hasn't occured. */
33890- if (!atomic_read(&data->interrupt_handled))
33891+ if (!atomic_read_unchecked(&data->interrupt_handled))
33892 schedule_work(&data->read_work);
33893 }
33894 ret = wait_event_timeout(data->wait_queue,
33895@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33896 struct sht15_data *data = d;
33897 /* First disable the interrupt */
33898 disable_irq_nosync(irq);
33899- atomic_inc(&data->interrupt_handled);
33900+ atomic_inc_unchecked(&data->interrupt_handled);
33901 /* Then schedule a reading work struct */
33902 if (data->flag != SHT15_READING_NOTHING)
33903 schedule_work(&data->read_work);
33904@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33905 here as could have gone low in meantime so verify
33906 it hasn't!
33907 */
33908- atomic_set(&data->interrupt_handled, 0);
33909+ atomic_set_unchecked(&data->interrupt_handled, 0);
33910 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33911 /* If still not occured or another handler has been scheduled */
33912 if (gpio_get_value(data->pdata->gpio_data)
33913- || atomic_read(&data->interrupt_handled))
33914+ || atomic_read_unchecked(&data->interrupt_handled))
33915 return;
33916 }
33917 /* Read the data back from the device */
33918diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
33919index 97851c5..cb40626 100644
33920--- a/drivers/hwmon/w83791d.c
33921+++ b/drivers/hwmon/w83791d.c
33922@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
33923 struct i2c_board_info *info);
33924 static int w83791d_remove(struct i2c_client *client);
33925
33926-static int w83791d_read(struct i2c_client *client, u8 register);
33927-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
33928+static int w83791d_read(struct i2c_client *client, u8 reg);
33929+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
33930 static struct w83791d_data *w83791d_update_device(struct device *dev);
33931
33932 #ifdef DEBUG
33933diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33934index 378fcb5..5e91fa8 100644
33935--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33936+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33937@@ -43,7 +43,7 @@
33938 extern struct i2c_adapter amd756_smbus;
33939
33940 static struct i2c_adapter *s4882_adapter;
33941-static struct i2c_algorithm *s4882_algo;
33942+static i2c_algorithm_no_const *s4882_algo;
33943
33944 /* Wrapper access functions for multiplexed SMBus */
33945 static DEFINE_MUTEX(amd756_lock);
33946diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33947index 29015eb..af2d8e9 100644
33948--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33949+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33950@@ -41,7 +41,7 @@
33951 extern struct i2c_adapter *nforce2_smbus;
33952
33953 static struct i2c_adapter *s4985_adapter;
33954-static struct i2c_algorithm *s4985_algo;
33955+static i2c_algorithm_no_const *s4985_algo;
33956
33957 /* Wrapper access functions for multiplexed SMBus */
33958 static DEFINE_MUTEX(nforce2_lock);
33959diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
33960index 878f8ec..12376fc 100644
33961--- a/drivers/ide/aec62xx.c
33962+++ b/drivers/ide/aec62xx.c
33963@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
33964 .cable_detect = atp86x_cable_detect,
33965 };
33966
33967-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
33968+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
33969 { /* 0: AEC6210 */
33970 .name = DRV_NAME,
33971 .init_chipset = init_chipset_aec62xx,
33972diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
33973index e59b6de..4b4fc65 100644
33974--- a/drivers/ide/alim15x3.c
33975+++ b/drivers/ide/alim15x3.c
33976@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
33977 .dma_sff_read_status = ide_dma_sff_read_status,
33978 };
33979
33980-static const struct ide_port_info ali15x3_chipset __devinitdata = {
33981+static const struct ide_port_info ali15x3_chipset __devinitconst = {
33982 .name = DRV_NAME,
33983 .init_chipset = init_chipset_ali15x3,
33984 .init_hwif = init_hwif_ali15x3,
33985diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
33986index 628cd2e..087a414 100644
33987--- a/drivers/ide/amd74xx.c
33988+++ b/drivers/ide/amd74xx.c
33989@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
33990 .udma_mask = udma, \
33991 }
33992
33993-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
33994+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
33995 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
33996 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
33997 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
33998diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
33999index 837322b..837fd71 100644
34000--- a/drivers/ide/atiixp.c
34001+++ b/drivers/ide/atiixp.c
34002@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34003 .cable_detect = atiixp_cable_detect,
34004 };
34005
34006-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34007+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34008 { /* 0: IXP200/300/400/700 */
34009 .name = DRV_NAME,
34010 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34011diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34012index ca0c46f..d55318a 100644
34013--- a/drivers/ide/cmd64x.c
34014+++ b/drivers/ide/cmd64x.c
34015@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34016 .dma_sff_read_status = ide_dma_sff_read_status,
34017 };
34018
34019-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34020+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34021 { /* 0: CMD643 */
34022 .name = DRV_NAME,
34023 .init_chipset = init_chipset_cmd64x,
34024diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34025index 09f98ed..cebc5bc 100644
34026--- a/drivers/ide/cs5520.c
34027+++ b/drivers/ide/cs5520.c
34028@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34029 .set_dma_mode = cs5520_set_dma_mode,
34030 };
34031
34032-static const struct ide_port_info cyrix_chipset __devinitdata = {
34033+static const struct ide_port_info cyrix_chipset __devinitconst = {
34034 .name = DRV_NAME,
34035 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34036 .port_ops = &cs5520_port_ops,
34037diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34038index 40bf05e..7d58ca0 100644
34039--- a/drivers/ide/cs5530.c
34040+++ b/drivers/ide/cs5530.c
34041@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34042 .udma_filter = cs5530_udma_filter,
34043 };
34044
34045-static const struct ide_port_info cs5530_chipset __devinitdata = {
34046+static const struct ide_port_info cs5530_chipset __devinitconst = {
34047 .name = DRV_NAME,
34048 .init_chipset = init_chipset_cs5530,
34049 .init_hwif = init_hwif_cs5530,
34050diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34051index 983d957..53e6172 100644
34052--- a/drivers/ide/cs5535.c
34053+++ b/drivers/ide/cs5535.c
34054@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34055 .cable_detect = cs5535_cable_detect,
34056 };
34057
34058-static const struct ide_port_info cs5535_chipset __devinitdata = {
34059+static const struct ide_port_info cs5535_chipset __devinitconst = {
34060 .name = DRV_NAME,
34061 .port_ops = &cs5535_port_ops,
34062 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34063diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34064index 74fc540..8e933d8 100644
34065--- a/drivers/ide/cy82c693.c
34066+++ b/drivers/ide/cy82c693.c
34067@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34068 .set_dma_mode = cy82c693_set_dma_mode,
34069 };
34070
34071-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34072+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34073 .name = DRV_NAME,
34074 .init_iops = init_iops_cy82c693,
34075 .port_ops = &cy82c693_port_ops,
34076diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34077index 7ce68ef..e78197d 100644
34078--- a/drivers/ide/hpt366.c
34079+++ b/drivers/ide/hpt366.c
34080@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34081 }
34082 };
34083
34084-static const struct hpt_info hpt36x __devinitdata = {
34085+static const struct hpt_info hpt36x __devinitconst = {
34086 .chip_name = "HPT36x",
34087 .chip_type = HPT36x,
34088 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34089@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34090 .timings = &hpt36x_timings
34091 };
34092
34093-static const struct hpt_info hpt370 __devinitdata = {
34094+static const struct hpt_info hpt370 __devinitconst = {
34095 .chip_name = "HPT370",
34096 .chip_type = HPT370,
34097 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34098@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34099 .timings = &hpt37x_timings
34100 };
34101
34102-static const struct hpt_info hpt370a __devinitdata = {
34103+static const struct hpt_info hpt370a __devinitconst = {
34104 .chip_name = "HPT370A",
34105 .chip_type = HPT370A,
34106 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34107@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34108 .timings = &hpt37x_timings
34109 };
34110
34111-static const struct hpt_info hpt374 __devinitdata = {
34112+static const struct hpt_info hpt374 __devinitconst = {
34113 .chip_name = "HPT374",
34114 .chip_type = HPT374,
34115 .udma_mask = ATA_UDMA5,
34116@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34117 .timings = &hpt37x_timings
34118 };
34119
34120-static const struct hpt_info hpt372 __devinitdata = {
34121+static const struct hpt_info hpt372 __devinitconst = {
34122 .chip_name = "HPT372",
34123 .chip_type = HPT372,
34124 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34125@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34126 .timings = &hpt37x_timings
34127 };
34128
34129-static const struct hpt_info hpt372a __devinitdata = {
34130+static const struct hpt_info hpt372a __devinitconst = {
34131 .chip_name = "HPT372A",
34132 .chip_type = HPT372A,
34133 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34134@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34135 .timings = &hpt37x_timings
34136 };
34137
34138-static const struct hpt_info hpt302 __devinitdata = {
34139+static const struct hpt_info hpt302 __devinitconst = {
34140 .chip_name = "HPT302",
34141 .chip_type = HPT302,
34142 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34143@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34144 .timings = &hpt37x_timings
34145 };
34146
34147-static const struct hpt_info hpt371 __devinitdata = {
34148+static const struct hpt_info hpt371 __devinitconst = {
34149 .chip_name = "HPT371",
34150 .chip_type = HPT371,
34151 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34152@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34153 .timings = &hpt37x_timings
34154 };
34155
34156-static const struct hpt_info hpt372n __devinitdata = {
34157+static const struct hpt_info hpt372n __devinitconst = {
34158 .chip_name = "HPT372N",
34159 .chip_type = HPT372N,
34160 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34161@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34162 .timings = &hpt37x_timings
34163 };
34164
34165-static const struct hpt_info hpt302n __devinitdata = {
34166+static const struct hpt_info hpt302n __devinitconst = {
34167 .chip_name = "HPT302N",
34168 .chip_type = HPT302N,
34169 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34170@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34171 .timings = &hpt37x_timings
34172 };
34173
34174-static const struct hpt_info hpt371n __devinitdata = {
34175+static const struct hpt_info hpt371n __devinitconst = {
34176 .chip_name = "HPT371N",
34177 .chip_type = HPT371N,
34178 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34179@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34180 .dma_sff_read_status = ide_dma_sff_read_status,
34181 };
34182
34183-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34184+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34185 { /* 0: HPT36x */
34186 .name = DRV_NAME,
34187 .init_chipset = init_chipset_hpt366,
34188diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34189index 2de76cc..74186a1 100644
34190--- a/drivers/ide/ide-cd.c
34191+++ b/drivers/ide/ide-cd.c
34192@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34193 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34194 if ((unsigned long)buf & alignment
34195 || blk_rq_bytes(rq) & q->dma_pad_mask
34196- || object_is_on_stack(buf))
34197+ || object_starts_on_stack(buf))
34198 drive->dma = 0;
34199 }
34200 }
34201diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34202index fefbdfc..62ff465 100644
34203--- a/drivers/ide/ide-floppy.c
34204+++ b/drivers/ide/ide-floppy.c
34205@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34206 u8 pc_buf[256], header_len, desc_cnt;
34207 int i, rc = 1, blocks, length;
34208
34209+ pax_track_stack();
34210+
34211 ide_debug_log(IDE_DBG_FUNC, "enter");
34212
34213 drive->bios_cyl = 0;
34214diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34215index 39d4e01..11538ce 100644
34216--- a/drivers/ide/ide-pci-generic.c
34217+++ b/drivers/ide/ide-pci-generic.c
34218@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34219 .udma_mask = ATA_UDMA6, \
34220 }
34221
34222-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34223+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34224 /* 0: Unknown */
34225 DECLARE_GENERIC_PCI_DEV(0),
34226
34227diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34228index 0d266a5..aaca790 100644
34229--- a/drivers/ide/it8172.c
34230+++ b/drivers/ide/it8172.c
34231@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34232 .set_dma_mode = it8172_set_dma_mode,
34233 };
34234
34235-static const struct ide_port_info it8172_port_info __devinitdata = {
34236+static const struct ide_port_info it8172_port_info __devinitconst = {
34237 .name = DRV_NAME,
34238 .port_ops = &it8172_port_ops,
34239 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34240diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34241index 4797616..4be488a 100644
34242--- a/drivers/ide/it8213.c
34243+++ b/drivers/ide/it8213.c
34244@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34245 .cable_detect = it8213_cable_detect,
34246 };
34247
34248-static const struct ide_port_info it8213_chipset __devinitdata = {
34249+static const struct ide_port_info it8213_chipset __devinitconst = {
34250 .name = DRV_NAME,
34251 .enablebits = { {0x41, 0x80, 0x80} },
34252 .port_ops = &it8213_port_ops,
34253diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34254index 51aa745..146ee60 100644
34255--- a/drivers/ide/it821x.c
34256+++ b/drivers/ide/it821x.c
34257@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34258 .cable_detect = it821x_cable_detect,
34259 };
34260
34261-static const struct ide_port_info it821x_chipset __devinitdata = {
34262+static const struct ide_port_info it821x_chipset __devinitconst = {
34263 .name = DRV_NAME,
34264 .init_chipset = init_chipset_it821x,
34265 .init_hwif = init_hwif_it821x,
34266diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34267index bf2be64..9270098 100644
34268--- a/drivers/ide/jmicron.c
34269+++ b/drivers/ide/jmicron.c
34270@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34271 .cable_detect = jmicron_cable_detect,
34272 };
34273
34274-static const struct ide_port_info jmicron_chipset __devinitdata = {
34275+static const struct ide_port_info jmicron_chipset __devinitconst = {
34276 .name = DRV_NAME,
34277 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34278 .port_ops = &jmicron_port_ops,
34279diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34280index 95327a2..73f78d8 100644
34281--- a/drivers/ide/ns87415.c
34282+++ b/drivers/ide/ns87415.c
34283@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34284 .dma_sff_read_status = superio_dma_sff_read_status,
34285 };
34286
34287-static const struct ide_port_info ns87415_chipset __devinitdata = {
34288+static const struct ide_port_info ns87415_chipset __devinitconst = {
34289 .name = DRV_NAME,
34290 .init_hwif = init_hwif_ns87415,
34291 .tp_ops = &ns87415_tp_ops,
34292diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34293index f1d70d6..e1de05b 100644
34294--- a/drivers/ide/opti621.c
34295+++ b/drivers/ide/opti621.c
34296@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34297 .set_pio_mode = opti621_set_pio_mode,
34298 };
34299
34300-static const struct ide_port_info opti621_chipset __devinitdata = {
34301+static const struct ide_port_info opti621_chipset __devinitconst = {
34302 .name = DRV_NAME,
34303 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34304 .port_ops = &opti621_port_ops,
34305diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34306index 65ba823..7311f4d 100644
34307--- a/drivers/ide/pdc202xx_new.c
34308+++ b/drivers/ide/pdc202xx_new.c
34309@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34310 .udma_mask = udma, \
34311 }
34312
34313-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34314+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34315 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34316 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34317 };
34318diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34319index cb812f3..af816ef 100644
34320--- a/drivers/ide/pdc202xx_old.c
34321+++ b/drivers/ide/pdc202xx_old.c
34322@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34323 .max_sectors = sectors, \
34324 }
34325
34326-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34327+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34328 { /* 0: PDC20246 */
34329 .name = DRV_NAME,
34330 .init_chipset = init_chipset_pdc202xx,
34331diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34332index bf14f39..15c4b98 100644
34333--- a/drivers/ide/piix.c
34334+++ b/drivers/ide/piix.c
34335@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34336 .udma_mask = udma, \
34337 }
34338
34339-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34340+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34341 /* 0: MPIIX */
34342 { /*
34343 * MPIIX actually has only a single IDE channel mapped to
34344diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34345index a6414a8..c04173e 100644
34346--- a/drivers/ide/rz1000.c
34347+++ b/drivers/ide/rz1000.c
34348@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34349 }
34350 }
34351
34352-static const struct ide_port_info rz1000_chipset __devinitdata = {
34353+static const struct ide_port_info rz1000_chipset __devinitconst = {
34354 .name = DRV_NAME,
34355 .host_flags = IDE_HFLAG_NO_DMA,
34356 };
34357diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34358index d467478..9203942 100644
34359--- a/drivers/ide/sc1200.c
34360+++ b/drivers/ide/sc1200.c
34361@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34362 .dma_sff_read_status = ide_dma_sff_read_status,
34363 };
34364
34365-static const struct ide_port_info sc1200_chipset __devinitdata = {
34366+static const struct ide_port_info sc1200_chipset __devinitconst = {
34367 .name = DRV_NAME,
34368 .port_ops = &sc1200_port_ops,
34369 .dma_ops = &sc1200_dma_ops,
34370diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34371index 1104bb3..59c5194 100644
34372--- a/drivers/ide/scc_pata.c
34373+++ b/drivers/ide/scc_pata.c
34374@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34375 .dma_sff_read_status = scc_dma_sff_read_status,
34376 };
34377
34378-static const struct ide_port_info scc_chipset __devinitdata = {
34379+static const struct ide_port_info scc_chipset __devinitconst = {
34380 .name = "sccIDE",
34381 .init_iops = init_iops_scc,
34382 .init_dma = scc_init_dma,
34383diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34384index b6554ef..6cc2cc3 100644
34385--- a/drivers/ide/serverworks.c
34386+++ b/drivers/ide/serverworks.c
34387@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34388 .cable_detect = svwks_cable_detect,
34389 };
34390
34391-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34392+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34393 { /* 0: OSB4 */
34394 .name = DRV_NAME,
34395 .init_chipset = init_chipset_svwks,
34396diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34397index ab3db61..afed580 100644
34398--- a/drivers/ide/setup-pci.c
34399+++ b/drivers/ide/setup-pci.c
34400@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34401 int ret, i, n_ports = dev2 ? 4 : 2;
34402 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34403
34404+ pax_track_stack();
34405+
34406 for (i = 0; i < n_ports / 2; i++) {
34407 ret = ide_setup_pci_controller(pdev[i], d, !i);
34408 if (ret < 0)
34409diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34410index d95df52..0b03a39 100644
34411--- a/drivers/ide/siimage.c
34412+++ b/drivers/ide/siimage.c
34413@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34414 .udma_mask = ATA_UDMA6, \
34415 }
34416
34417-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34418+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34419 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34420 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34421 };
34422diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34423index 3b88eba..ca8699d 100644
34424--- a/drivers/ide/sis5513.c
34425+++ b/drivers/ide/sis5513.c
34426@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34427 .cable_detect = sis_cable_detect,
34428 };
34429
34430-static const struct ide_port_info sis5513_chipset __devinitdata = {
34431+static const struct ide_port_info sis5513_chipset __devinitconst = {
34432 .name = DRV_NAME,
34433 .init_chipset = init_chipset_sis5513,
34434 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34435diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34436index d698da4..fca42a4 100644
34437--- a/drivers/ide/sl82c105.c
34438+++ b/drivers/ide/sl82c105.c
34439@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34440 .dma_sff_read_status = ide_dma_sff_read_status,
34441 };
34442
34443-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34444+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34445 .name = DRV_NAME,
34446 .init_chipset = init_chipset_sl82c105,
34447 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34448diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34449index 1ccfb40..83d5779 100644
34450--- a/drivers/ide/slc90e66.c
34451+++ b/drivers/ide/slc90e66.c
34452@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34453 .cable_detect = slc90e66_cable_detect,
34454 };
34455
34456-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34457+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34458 .name = DRV_NAME,
34459 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34460 .port_ops = &slc90e66_port_ops,
34461diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34462index 05a93d6..5f9e325 100644
34463--- a/drivers/ide/tc86c001.c
34464+++ b/drivers/ide/tc86c001.c
34465@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34466 .dma_sff_read_status = ide_dma_sff_read_status,
34467 };
34468
34469-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34470+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34471 .name = DRV_NAME,
34472 .init_hwif = init_hwif_tc86c001,
34473 .port_ops = &tc86c001_port_ops,
34474diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34475index 8773c3b..7907d6c 100644
34476--- a/drivers/ide/triflex.c
34477+++ b/drivers/ide/triflex.c
34478@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34479 .set_dma_mode = triflex_set_mode,
34480 };
34481
34482-static const struct ide_port_info triflex_device __devinitdata = {
34483+static const struct ide_port_info triflex_device __devinitconst = {
34484 .name = DRV_NAME,
34485 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34486 .port_ops = &triflex_port_ops,
34487diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34488index 4b42ca0..e494a98 100644
34489--- a/drivers/ide/trm290.c
34490+++ b/drivers/ide/trm290.c
34491@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34492 .dma_check = trm290_dma_check,
34493 };
34494
34495-static const struct ide_port_info trm290_chipset __devinitdata = {
34496+static const struct ide_port_info trm290_chipset __devinitconst = {
34497 .name = DRV_NAME,
34498 .init_hwif = init_hwif_trm290,
34499 .tp_ops = &trm290_tp_ops,
34500diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34501index 028de26..520d5d5 100644
34502--- a/drivers/ide/via82cxxx.c
34503+++ b/drivers/ide/via82cxxx.c
34504@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34505 .cable_detect = via82cxxx_cable_detect,
34506 };
34507
34508-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34509+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34510 .name = DRV_NAME,
34511 .init_chipset = init_chipset_via82cxxx,
34512 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34513diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34514index 2cd00b5..14de699 100644
34515--- a/drivers/ieee1394/dv1394.c
34516+++ b/drivers/ieee1394/dv1394.c
34517@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34518 based upon DIF section and sequence
34519 */
34520
34521-static void inline
34522+static inline void
34523 frame_put_packet (struct frame *f, struct packet *p)
34524 {
34525 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34526diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34527index e947d8f..6a966b9 100644
34528--- a/drivers/ieee1394/hosts.c
34529+++ b/drivers/ieee1394/hosts.c
34530@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34531 }
34532
34533 static struct hpsb_host_driver dummy_driver = {
34534+ .name = "dummy",
34535 .transmit_packet = dummy_transmit_packet,
34536 .devctl = dummy_devctl,
34537 .isoctl = dummy_isoctl
34538diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34539index ddaab6e..8d37435 100644
34540--- a/drivers/ieee1394/init_ohci1394_dma.c
34541+++ b/drivers/ieee1394/init_ohci1394_dma.c
34542@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34543 for (func = 0; func < 8; func++) {
34544 u32 class = read_pci_config(num,slot,func,
34545 PCI_CLASS_REVISION);
34546- if ((class == 0xffffffff))
34547+ if (class == 0xffffffff)
34548 continue; /* No device at this func */
34549
34550 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34551diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34552index 65c1429..5d8c11f 100644
34553--- a/drivers/ieee1394/ohci1394.c
34554+++ b/drivers/ieee1394/ohci1394.c
34555@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34556 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34557
34558 /* Module Parameters */
34559-static int phys_dma = 1;
34560+static int phys_dma;
34561 module_param(phys_dma, int, 0444);
34562-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34563+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34564
34565 static void dma_trm_tasklet(unsigned long data);
34566 static void dma_trm_reset(struct dma_trm_ctx *d);
34567diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34568index f199896..78c9fc8 100644
34569--- a/drivers/ieee1394/sbp2.c
34570+++ b/drivers/ieee1394/sbp2.c
34571@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34572 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34573 MODULE_LICENSE("GPL");
34574
34575-static int sbp2_module_init(void)
34576+static int __init sbp2_module_init(void)
34577 {
34578 int ret;
34579
34580diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34581index a5dea6b..0cefe8f 100644
34582--- a/drivers/infiniband/core/cm.c
34583+++ b/drivers/infiniband/core/cm.c
34584@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34585
34586 struct cm_counter_group {
34587 struct kobject obj;
34588- atomic_long_t counter[CM_ATTR_COUNT];
34589+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34590 };
34591
34592 struct cm_counter_attribute {
34593@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34594 struct ib_mad_send_buf *msg = NULL;
34595 int ret;
34596
34597- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34598+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34599 counter[CM_REQ_COUNTER]);
34600
34601 /* Quick state check to discard duplicate REQs. */
34602@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34603 if (!cm_id_priv)
34604 return;
34605
34606- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34607+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34608 counter[CM_REP_COUNTER]);
34609 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34610 if (ret)
34611@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34612 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34613 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34614 spin_unlock_irq(&cm_id_priv->lock);
34615- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34616+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34617 counter[CM_RTU_COUNTER]);
34618 goto out;
34619 }
34620@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34621 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34622 dreq_msg->local_comm_id);
34623 if (!cm_id_priv) {
34624- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34625+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34626 counter[CM_DREQ_COUNTER]);
34627 cm_issue_drep(work->port, work->mad_recv_wc);
34628 return -EINVAL;
34629@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34630 case IB_CM_MRA_REP_RCVD:
34631 break;
34632 case IB_CM_TIMEWAIT:
34633- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34634+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34635 counter[CM_DREQ_COUNTER]);
34636 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34637 goto unlock;
34638@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34639 cm_free_msg(msg);
34640 goto deref;
34641 case IB_CM_DREQ_RCVD:
34642- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34643+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34644 counter[CM_DREQ_COUNTER]);
34645 goto unlock;
34646 default:
34647@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34648 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34649 cm_id_priv->msg, timeout)) {
34650 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34651- atomic_long_inc(&work->port->
34652+ atomic_long_inc_unchecked(&work->port->
34653 counter_group[CM_RECV_DUPLICATES].
34654 counter[CM_MRA_COUNTER]);
34655 goto out;
34656@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34657 break;
34658 case IB_CM_MRA_REQ_RCVD:
34659 case IB_CM_MRA_REP_RCVD:
34660- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34661+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34662 counter[CM_MRA_COUNTER]);
34663 /* fall through */
34664 default:
34665@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34666 case IB_CM_LAP_IDLE:
34667 break;
34668 case IB_CM_MRA_LAP_SENT:
34669- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34670+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34671 counter[CM_LAP_COUNTER]);
34672 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34673 goto unlock;
34674@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34675 cm_free_msg(msg);
34676 goto deref;
34677 case IB_CM_LAP_RCVD:
34678- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34679+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34680 counter[CM_LAP_COUNTER]);
34681 goto unlock;
34682 default:
34683@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34684 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34685 if (cur_cm_id_priv) {
34686 spin_unlock_irq(&cm.lock);
34687- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34688+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34689 counter[CM_SIDR_REQ_COUNTER]);
34690 goto out; /* Duplicate message. */
34691 }
34692@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34693 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34694 msg->retries = 1;
34695
34696- atomic_long_add(1 + msg->retries,
34697+ atomic_long_add_unchecked(1 + msg->retries,
34698 &port->counter_group[CM_XMIT].counter[attr_index]);
34699 if (msg->retries)
34700- atomic_long_add(msg->retries,
34701+ atomic_long_add_unchecked(msg->retries,
34702 &port->counter_group[CM_XMIT_RETRIES].
34703 counter[attr_index]);
34704
34705@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34706 }
34707
34708 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34709- atomic_long_inc(&port->counter_group[CM_RECV].
34710+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34711 counter[attr_id - CM_ATTR_ID_OFFSET]);
34712
34713 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34714@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34715 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34716
34717 return sprintf(buf, "%ld\n",
34718- atomic_long_read(&group->counter[cm_attr->index]));
34719+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34720 }
34721
34722-static struct sysfs_ops cm_counter_ops = {
34723+static const struct sysfs_ops cm_counter_ops = {
34724 .show = cm_show_counter
34725 };
34726
34727diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34728index 4507043..14ad522 100644
34729--- a/drivers/infiniband/core/fmr_pool.c
34730+++ b/drivers/infiniband/core/fmr_pool.c
34731@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34732
34733 struct task_struct *thread;
34734
34735- atomic_t req_ser;
34736- atomic_t flush_ser;
34737+ atomic_unchecked_t req_ser;
34738+ atomic_unchecked_t flush_ser;
34739
34740 wait_queue_head_t force_wait;
34741 };
34742@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34743 struct ib_fmr_pool *pool = pool_ptr;
34744
34745 do {
34746- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34747+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34748 ib_fmr_batch_release(pool);
34749
34750- atomic_inc(&pool->flush_ser);
34751+ atomic_inc_unchecked(&pool->flush_ser);
34752 wake_up_interruptible(&pool->force_wait);
34753
34754 if (pool->flush_function)
34755@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34756 }
34757
34758 set_current_state(TASK_INTERRUPTIBLE);
34759- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34760+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34761 !kthread_should_stop())
34762 schedule();
34763 __set_current_state(TASK_RUNNING);
34764@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34765 pool->dirty_watermark = params->dirty_watermark;
34766 pool->dirty_len = 0;
34767 spin_lock_init(&pool->pool_lock);
34768- atomic_set(&pool->req_ser, 0);
34769- atomic_set(&pool->flush_ser, 0);
34770+ atomic_set_unchecked(&pool->req_ser, 0);
34771+ atomic_set_unchecked(&pool->flush_ser, 0);
34772 init_waitqueue_head(&pool->force_wait);
34773
34774 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34775@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34776 }
34777 spin_unlock_irq(&pool->pool_lock);
34778
34779- serial = atomic_inc_return(&pool->req_ser);
34780+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34781 wake_up_process(pool->thread);
34782
34783 if (wait_event_interruptible(pool->force_wait,
34784- atomic_read(&pool->flush_ser) - serial >= 0))
34785+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34786 return -EINTR;
34787
34788 return 0;
34789@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34790 } else {
34791 list_add_tail(&fmr->list, &pool->dirty_list);
34792 if (++pool->dirty_len >= pool->dirty_watermark) {
34793- atomic_inc(&pool->req_ser);
34794+ atomic_inc_unchecked(&pool->req_ser);
34795 wake_up_process(pool->thread);
34796 }
34797 }
34798diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34799index 158a214..1558bb7 100644
34800--- a/drivers/infiniband/core/sysfs.c
34801+++ b/drivers/infiniband/core/sysfs.c
34802@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34803 return port_attr->show(p, port_attr, buf);
34804 }
34805
34806-static struct sysfs_ops port_sysfs_ops = {
34807+static const struct sysfs_ops port_sysfs_ops = {
34808 .show = port_attr_show
34809 };
34810
34811diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34812index 5440da0..1194ecb 100644
34813--- a/drivers/infiniband/core/uverbs_marshall.c
34814+++ b/drivers/infiniband/core/uverbs_marshall.c
34815@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34816 dst->grh.sgid_index = src->grh.sgid_index;
34817 dst->grh.hop_limit = src->grh.hop_limit;
34818 dst->grh.traffic_class = src->grh.traffic_class;
34819+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34820 dst->dlid = src->dlid;
34821 dst->sl = src->sl;
34822 dst->src_path_bits = src->src_path_bits;
34823 dst->static_rate = src->static_rate;
34824 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34825 dst->port_num = src->port_num;
34826+ dst->reserved = 0;
34827 }
34828 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34829
34830 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34831 struct ib_qp_attr *src)
34832 {
34833+ dst->qp_state = src->qp_state;
34834 dst->cur_qp_state = src->cur_qp_state;
34835 dst->path_mtu = src->path_mtu;
34836 dst->path_mig_state = src->path_mig_state;
34837@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34838 dst->rnr_retry = src->rnr_retry;
34839 dst->alt_port_num = src->alt_port_num;
34840 dst->alt_timeout = src->alt_timeout;
34841+ memset(dst->reserved, 0, sizeof(dst->reserved));
34842 }
34843 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34844
34845diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34846index 100da85..62e6b88 100644
34847--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34848+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34849@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34850 struct infinipath_counters counters;
34851 struct ipath_devdata *dd;
34852
34853+ pax_track_stack();
34854+
34855 dd = file->f_path.dentry->d_inode->i_private;
34856 dd->ipath_f_read_counters(dd, &counters);
34857
34858diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34859index cbde0cf..afaf55c 100644
34860--- a/drivers/infiniband/hw/nes/nes.c
34861+++ b/drivers/infiniband/hw/nes/nes.c
34862@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34863 LIST_HEAD(nes_adapter_list);
34864 static LIST_HEAD(nes_dev_list);
34865
34866-atomic_t qps_destroyed;
34867+atomic_unchecked_t qps_destroyed;
34868
34869 static unsigned int ee_flsh_adapter;
34870 static unsigned int sysfs_nonidx_addr;
34871@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34872 struct nes_adapter *nesadapter = nesdev->nesadapter;
34873 u32 qp_id;
34874
34875- atomic_inc(&qps_destroyed);
34876+ atomic_inc_unchecked(&qps_destroyed);
34877
34878 /* Free the control structures */
34879
34880diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34881index bcc6abc..9c76b2f 100644
34882--- a/drivers/infiniband/hw/nes/nes.h
34883+++ b/drivers/infiniband/hw/nes/nes.h
34884@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
34885 extern unsigned int wqm_quanta;
34886 extern struct list_head nes_adapter_list;
34887
34888-extern atomic_t cm_connects;
34889-extern atomic_t cm_accepts;
34890-extern atomic_t cm_disconnects;
34891-extern atomic_t cm_closes;
34892-extern atomic_t cm_connecteds;
34893-extern atomic_t cm_connect_reqs;
34894-extern atomic_t cm_rejects;
34895-extern atomic_t mod_qp_timouts;
34896-extern atomic_t qps_created;
34897-extern atomic_t qps_destroyed;
34898-extern atomic_t sw_qps_destroyed;
34899+extern atomic_unchecked_t cm_connects;
34900+extern atomic_unchecked_t cm_accepts;
34901+extern atomic_unchecked_t cm_disconnects;
34902+extern atomic_unchecked_t cm_closes;
34903+extern atomic_unchecked_t cm_connecteds;
34904+extern atomic_unchecked_t cm_connect_reqs;
34905+extern atomic_unchecked_t cm_rejects;
34906+extern atomic_unchecked_t mod_qp_timouts;
34907+extern atomic_unchecked_t qps_created;
34908+extern atomic_unchecked_t qps_destroyed;
34909+extern atomic_unchecked_t sw_qps_destroyed;
34910 extern u32 mh_detected;
34911 extern u32 mh_pauses_sent;
34912 extern u32 cm_packets_sent;
34913@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
34914 extern u32 cm_listens_created;
34915 extern u32 cm_listens_destroyed;
34916 extern u32 cm_backlog_drops;
34917-extern atomic_t cm_loopbacks;
34918-extern atomic_t cm_nodes_created;
34919-extern atomic_t cm_nodes_destroyed;
34920-extern atomic_t cm_accel_dropped_pkts;
34921-extern atomic_t cm_resets_recvd;
34922+extern atomic_unchecked_t cm_loopbacks;
34923+extern atomic_unchecked_t cm_nodes_created;
34924+extern atomic_unchecked_t cm_nodes_destroyed;
34925+extern atomic_unchecked_t cm_accel_dropped_pkts;
34926+extern atomic_unchecked_t cm_resets_recvd;
34927
34928 extern u32 int_mod_timer_init;
34929 extern u32 int_mod_cq_depth_256;
34930diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34931index 73473db..5ed06e8 100644
34932--- a/drivers/infiniband/hw/nes/nes_cm.c
34933+++ b/drivers/infiniband/hw/nes/nes_cm.c
34934@@ -69,11 +69,11 @@ u32 cm_packets_received;
34935 u32 cm_listens_created;
34936 u32 cm_listens_destroyed;
34937 u32 cm_backlog_drops;
34938-atomic_t cm_loopbacks;
34939-atomic_t cm_nodes_created;
34940-atomic_t cm_nodes_destroyed;
34941-atomic_t cm_accel_dropped_pkts;
34942-atomic_t cm_resets_recvd;
34943+atomic_unchecked_t cm_loopbacks;
34944+atomic_unchecked_t cm_nodes_created;
34945+atomic_unchecked_t cm_nodes_destroyed;
34946+atomic_unchecked_t cm_accel_dropped_pkts;
34947+atomic_unchecked_t cm_resets_recvd;
34948
34949 static inline int mini_cm_accelerated(struct nes_cm_core *,
34950 struct nes_cm_node *);
34951@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
34952
34953 static struct nes_cm_core *g_cm_core;
34954
34955-atomic_t cm_connects;
34956-atomic_t cm_accepts;
34957-atomic_t cm_disconnects;
34958-atomic_t cm_closes;
34959-atomic_t cm_connecteds;
34960-atomic_t cm_connect_reqs;
34961-atomic_t cm_rejects;
34962+atomic_unchecked_t cm_connects;
34963+atomic_unchecked_t cm_accepts;
34964+atomic_unchecked_t cm_disconnects;
34965+atomic_unchecked_t cm_closes;
34966+atomic_unchecked_t cm_connecteds;
34967+atomic_unchecked_t cm_connect_reqs;
34968+atomic_unchecked_t cm_rejects;
34969
34970
34971 /**
34972@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34973 cm_node->rem_mac);
34974
34975 add_hte_node(cm_core, cm_node);
34976- atomic_inc(&cm_nodes_created);
34977+ atomic_inc_unchecked(&cm_nodes_created);
34978
34979 return cm_node;
34980 }
34981@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34982 }
34983
34984 atomic_dec(&cm_core->node_cnt);
34985- atomic_inc(&cm_nodes_destroyed);
34986+ atomic_inc_unchecked(&cm_nodes_destroyed);
34987 nesqp = cm_node->nesqp;
34988 if (nesqp) {
34989 nesqp->cm_node = NULL;
34990@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34991
34992 static void drop_packet(struct sk_buff *skb)
34993 {
34994- atomic_inc(&cm_accel_dropped_pkts);
34995+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34996 dev_kfree_skb_any(skb);
34997 }
34998
34999@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35000
35001 int reset = 0; /* whether to send reset in case of err.. */
35002 int passive_state;
35003- atomic_inc(&cm_resets_recvd);
35004+ atomic_inc_unchecked(&cm_resets_recvd);
35005 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35006 " refcnt=%d\n", cm_node, cm_node->state,
35007 atomic_read(&cm_node->ref_count));
35008@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35009 rem_ref_cm_node(cm_node->cm_core, cm_node);
35010 return NULL;
35011 }
35012- atomic_inc(&cm_loopbacks);
35013+ atomic_inc_unchecked(&cm_loopbacks);
35014 loopbackremotenode->loopbackpartner = cm_node;
35015 loopbackremotenode->tcp_cntxt.rcv_wscale =
35016 NES_CM_DEFAULT_RCV_WND_SCALE;
35017@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35018 add_ref_cm_node(cm_node);
35019 } else if (cm_node->state == NES_CM_STATE_TSA) {
35020 rem_ref_cm_node(cm_core, cm_node);
35021- atomic_inc(&cm_accel_dropped_pkts);
35022+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35023 dev_kfree_skb_any(skb);
35024 break;
35025 }
35026@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35027
35028 if ((cm_id) && (cm_id->event_handler)) {
35029 if (issue_disconn) {
35030- atomic_inc(&cm_disconnects);
35031+ atomic_inc_unchecked(&cm_disconnects);
35032 cm_event.event = IW_CM_EVENT_DISCONNECT;
35033 cm_event.status = disconn_status;
35034 cm_event.local_addr = cm_id->local_addr;
35035@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35036 }
35037
35038 if (issue_close) {
35039- atomic_inc(&cm_closes);
35040+ atomic_inc_unchecked(&cm_closes);
35041 nes_disconnect(nesqp, 1);
35042
35043 cm_id->provider_data = nesqp;
35044@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35045
35046 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35047 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35048- atomic_inc(&cm_accepts);
35049+ atomic_inc_unchecked(&cm_accepts);
35050
35051 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35052 atomic_read(&nesvnic->netdev->refcnt));
35053@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35054
35055 struct nes_cm_core *cm_core;
35056
35057- atomic_inc(&cm_rejects);
35058+ atomic_inc_unchecked(&cm_rejects);
35059 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35060 loopback = cm_node->loopbackpartner;
35061 cm_core = cm_node->cm_core;
35062@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35063 ntohl(cm_id->local_addr.sin_addr.s_addr),
35064 ntohs(cm_id->local_addr.sin_port));
35065
35066- atomic_inc(&cm_connects);
35067+ atomic_inc_unchecked(&cm_connects);
35068 nesqp->active_conn = 1;
35069
35070 /* cache the cm_id in the qp */
35071@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35072 if (nesqp->destroyed) {
35073 return;
35074 }
35075- atomic_inc(&cm_connecteds);
35076+ atomic_inc_unchecked(&cm_connecteds);
35077 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35078 " local port 0x%04X. jiffies = %lu.\n",
35079 nesqp->hwqp.qp_id,
35080@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35081
35082 ret = cm_id->event_handler(cm_id, &cm_event);
35083 cm_id->add_ref(cm_id);
35084- atomic_inc(&cm_closes);
35085+ atomic_inc_unchecked(&cm_closes);
35086 cm_event.event = IW_CM_EVENT_CLOSE;
35087 cm_event.status = IW_CM_EVENT_STATUS_OK;
35088 cm_event.provider_data = cm_id->provider_data;
35089@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35090 return;
35091 cm_id = cm_node->cm_id;
35092
35093- atomic_inc(&cm_connect_reqs);
35094+ atomic_inc_unchecked(&cm_connect_reqs);
35095 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35096 cm_node, cm_id, jiffies);
35097
35098@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35099 return;
35100 cm_id = cm_node->cm_id;
35101
35102- atomic_inc(&cm_connect_reqs);
35103+ atomic_inc_unchecked(&cm_connect_reqs);
35104 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35105 cm_node, cm_id, jiffies);
35106
35107diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35108index e593af3..870694a 100644
35109--- a/drivers/infiniband/hw/nes/nes_nic.c
35110+++ b/drivers/infiniband/hw/nes/nes_nic.c
35111@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35112 target_stat_values[++index] = mh_detected;
35113 target_stat_values[++index] = mh_pauses_sent;
35114 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35115- target_stat_values[++index] = atomic_read(&cm_connects);
35116- target_stat_values[++index] = atomic_read(&cm_accepts);
35117- target_stat_values[++index] = atomic_read(&cm_disconnects);
35118- target_stat_values[++index] = atomic_read(&cm_connecteds);
35119- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35120- target_stat_values[++index] = atomic_read(&cm_rejects);
35121- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35122- target_stat_values[++index] = atomic_read(&qps_created);
35123- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35124- target_stat_values[++index] = atomic_read(&qps_destroyed);
35125- target_stat_values[++index] = atomic_read(&cm_closes);
35126+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35127+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35128+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35129+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35130+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35131+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35132+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35133+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35134+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35135+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35136+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35137 target_stat_values[++index] = cm_packets_sent;
35138 target_stat_values[++index] = cm_packets_bounced;
35139 target_stat_values[++index] = cm_packets_created;
35140@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35141 target_stat_values[++index] = cm_listens_created;
35142 target_stat_values[++index] = cm_listens_destroyed;
35143 target_stat_values[++index] = cm_backlog_drops;
35144- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35145- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35146- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35147- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35148- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35149+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35150+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35151+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35152+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35153+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35154 target_stat_values[++index] = int_mod_timer_init;
35155 target_stat_values[++index] = int_mod_cq_depth_1;
35156 target_stat_values[++index] = int_mod_cq_depth_4;
35157diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35158index a680c42..f914deb 100644
35159--- a/drivers/infiniband/hw/nes/nes_verbs.c
35160+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35161@@ -45,9 +45,9 @@
35162
35163 #include <rdma/ib_umem.h>
35164
35165-atomic_t mod_qp_timouts;
35166-atomic_t qps_created;
35167-atomic_t sw_qps_destroyed;
35168+atomic_unchecked_t mod_qp_timouts;
35169+atomic_unchecked_t qps_created;
35170+atomic_unchecked_t sw_qps_destroyed;
35171
35172 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35173
35174@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35175 if (init_attr->create_flags)
35176 return ERR_PTR(-EINVAL);
35177
35178- atomic_inc(&qps_created);
35179+ atomic_inc_unchecked(&qps_created);
35180 switch (init_attr->qp_type) {
35181 case IB_QPT_RC:
35182 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35183@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35184 struct iw_cm_event cm_event;
35185 int ret;
35186
35187- atomic_inc(&sw_qps_destroyed);
35188+ atomic_inc_unchecked(&sw_qps_destroyed);
35189 nesqp->destroyed = 1;
35190
35191 /* Blow away the connection if it exists. */
35192diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35193index ac11be0..3883c04 100644
35194--- a/drivers/input/gameport/gameport.c
35195+++ b/drivers/input/gameport/gameport.c
35196@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35197 */
35198 static void gameport_init_port(struct gameport *gameport)
35199 {
35200- static atomic_t gameport_no = ATOMIC_INIT(0);
35201+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35202
35203 __module_get(THIS_MODULE);
35204
35205 mutex_init(&gameport->drv_mutex);
35206 device_initialize(&gameport->dev);
35207- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35208+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35209 gameport->dev.bus = &gameport_bus;
35210 gameport->dev.release = gameport_release_port;
35211 if (gameport->parent)
35212diff --git a/drivers/input/input.c b/drivers/input/input.c
35213index c82ae82..8cfb9cb 100644
35214--- a/drivers/input/input.c
35215+++ b/drivers/input/input.c
35216@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35217 */
35218 int input_register_device(struct input_dev *dev)
35219 {
35220- static atomic_t input_no = ATOMIC_INIT(0);
35221+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35222 struct input_handler *handler;
35223 const char *path;
35224 int error;
35225@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35226 dev->setkeycode = input_default_setkeycode;
35227
35228 dev_set_name(&dev->dev, "input%ld",
35229- (unsigned long) atomic_inc_return(&input_no) - 1);
35230+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35231
35232 error = device_add(&dev->dev);
35233 if (error)
35234diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35235index ca13a6b..b032b0c 100644
35236--- a/drivers/input/joystick/sidewinder.c
35237+++ b/drivers/input/joystick/sidewinder.c
35238@@ -30,6 +30,7 @@
35239 #include <linux/kernel.h>
35240 #include <linux/module.h>
35241 #include <linux/slab.h>
35242+#include <linux/sched.h>
35243 #include <linux/init.h>
35244 #include <linux/input.h>
35245 #include <linux/gameport.h>
35246@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35247 unsigned char buf[SW_LENGTH];
35248 int i;
35249
35250+ pax_track_stack();
35251+
35252 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35253
35254 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35255diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35256index 79e3edc..01412b9 100644
35257--- a/drivers/input/joystick/xpad.c
35258+++ b/drivers/input/joystick/xpad.c
35259@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35260
35261 static int xpad_led_probe(struct usb_xpad *xpad)
35262 {
35263- static atomic_t led_seq = ATOMIC_INIT(0);
35264+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35265 long led_no;
35266 struct xpad_led *led;
35267 struct led_classdev *led_cdev;
35268@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35269 if (!led)
35270 return -ENOMEM;
35271
35272- led_no = (long)atomic_inc_return(&led_seq) - 1;
35273+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35274
35275 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35276 led->xpad = xpad;
35277diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35278index 0236f0d..c7327f1 100644
35279--- a/drivers/input/serio/serio.c
35280+++ b/drivers/input/serio/serio.c
35281@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35282 */
35283 static void serio_init_port(struct serio *serio)
35284 {
35285- static atomic_t serio_no = ATOMIC_INIT(0);
35286+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35287
35288 __module_get(THIS_MODULE);
35289
35290@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35291 mutex_init(&serio->drv_mutex);
35292 device_initialize(&serio->dev);
35293 dev_set_name(&serio->dev, "serio%ld",
35294- (long)atomic_inc_return(&serio_no) - 1);
35295+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35296 serio->dev.bus = &serio_bus;
35297 serio->dev.release = serio_release_port;
35298 if (serio->parent) {
35299diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35300index 33dcd8d..2783d25 100644
35301--- a/drivers/isdn/gigaset/common.c
35302+++ b/drivers/isdn/gigaset/common.c
35303@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35304 cs->commands_pending = 0;
35305 cs->cur_at_seq = 0;
35306 cs->gotfwver = -1;
35307- cs->open_count = 0;
35308+ local_set(&cs->open_count, 0);
35309 cs->dev = NULL;
35310 cs->tty = NULL;
35311 cs->tty_dev = NULL;
35312diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35313index a2f6125..6a70677 100644
35314--- a/drivers/isdn/gigaset/gigaset.h
35315+++ b/drivers/isdn/gigaset/gigaset.h
35316@@ -34,6 +34,7 @@
35317 #include <linux/tty_driver.h>
35318 #include <linux/list.h>
35319 #include <asm/atomic.h>
35320+#include <asm/local.h>
35321
35322 #define GIG_VERSION {0,5,0,0}
35323 #define GIG_COMPAT {0,4,0,0}
35324@@ -446,7 +447,7 @@ struct cardstate {
35325 spinlock_t cmdlock;
35326 unsigned curlen, cmdbytes;
35327
35328- unsigned open_count;
35329+ local_t open_count;
35330 struct tty_struct *tty;
35331 struct tasklet_struct if_wake_tasklet;
35332 unsigned control_state;
35333diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35334index b3065b8..c7e8cc9 100644
35335--- a/drivers/isdn/gigaset/interface.c
35336+++ b/drivers/isdn/gigaset/interface.c
35337@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35338 return -ERESTARTSYS; // FIXME -EINTR?
35339 tty->driver_data = cs;
35340
35341- ++cs->open_count;
35342-
35343- if (cs->open_count == 1) {
35344+ if (local_inc_return(&cs->open_count) == 1) {
35345 spin_lock_irqsave(&cs->lock, flags);
35346 cs->tty = tty;
35347 spin_unlock_irqrestore(&cs->lock, flags);
35348@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35349
35350 if (!cs->connected)
35351 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35352- else if (!cs->open_count)
35353+ else if (!local_read(&cs->open_count))
35354 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35355 else {
35356- if (!--cs->open_count) {
35357+ if (!local_dec_return(&cs->open_count)) {
35358 spin_lock_irqsave(&cs->lock, flags);
35359 cs->tty = NULL;
35360 spin_unlock_irqrestore(&cs->lock, flags);
35361@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35362 if (!cs->connected) {
35363 gig_dbg(DEBUG_IF, "not connected");
35364 retval = -ENODEV;
35365- } else if (!cs->open_count)
35366+ } else if (!local_read(&cs->open_count))
35367 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35368 else {
35369 retval = 0;
35370@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35371 if (!cs->connected) {
35372 gig_dbg(DEBUG_IF, "not connected");
35373 retval = -ENODEV;
35374- } else if (!cs->open_count)
35375+ } else if (!local_read(&cs->open_count))
35376 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35377 else if (cs->mstate != MS_LOCKED) {
35378 dev_warn(cs->dev, "can't write to unlocked device\n");
35379@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35380 if (!cs->connected) {
35381 gig_dbg(DEBUG_IF, "not connected");
35382 retval = -ENODEV;
35383- } else if (!cs->open_count)
35384+ } else if (!local_read(&cs->open_count))
35385 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35386 else if (cs->mstate != MS_LOCKED) {
35387 dev_warn(cs->dev, "can't write to unlocked device\n");
35388@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35389
35390 if (!cs->connected)
35391 gig_dbg(DEBUG_IF, "not connected");
35392- else if (!cs->open_count)
35393+ else if (!local_read(&cs->open_count))
35394 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35395 else if (cs->mstate != MS_LOCKED)
35396 dev_warn(cs->dev, "can't write to unlocked device\n");
35397@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35398
35399 if (!cs->connected)
35400 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35401- else if (!cs->open_count)
35402+ else if (!local_read(&cs->open_count))
35403 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35404 else {
35405 //FIXME
35406@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35407
35408 if (!cs->connected)
35409 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35410- else if (!cs->open_count)
35411+ else if (!local_read(&cs->open_count))
35412 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35413 else {
35414 //FIXME
35415@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35416 goto out;
35417 }
35418
35419- if (!cs->open_count) {
35420+ if (!local_read(&cs->open_count)) {
35421 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35422 goto out;
35423 }
35424diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35425index a7c0083..62a7cb6 100644
35426--- a/drivers/isdn/hardware/avm/b1.c
35427+++ b/drivers/isdn/hardware/avm/b1.c
35428@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35429 }
35430 if (left) {
35431 if (t4file->user) {
35432- if (copy_from_user(buf, dp, left))
35433+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35434 return -EFAULT;
35435 } else {
35436 memcpy(buf, dp, left);
35437@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35438 }
35439 if (left) {
35440 if (config->user) {
35441- if (copy_from_user(buf, dp, left))
35442+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35443 return -EFAULT;
35444 } else {
35445 memcpy(buf, dp, left);
35446diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35447index f130724..c373c68 100644
35448--- a/drivers/isdn/hardware/eicon/capidtmf.c
35449+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35450@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35451 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35452 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35453
35454+ pax_track_stack();
35455
35456 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35457 {
35458diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35459index 4d425c6..a9be6c4 100644
35460--- a/drivers/isdn/hardware/eicon/capifunc.c
35461+++ b/drivers/isdn/hardware/eicon/capifunc.c
35462@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35463 IDI_SYNC_REQ req;
35464 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35465
35466+ pax_track_stack();
35467+
35468 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35469
35470 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35471diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35472index 3029234..ef0d9e2 100644
35473--- a/drivers/isdn/hardware/eicon/diddfunc.c
35474+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35475@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35476 IDI_SYNC_REQ req;
35477 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35478
35479+ pax_track_stack();
35480+
35481 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35482
35483 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35484diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35485index d36a4c0..11e7d1a 100644
35486--- a/drivers/isdn/hardware/eicon/divasfunc.c
35487+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35488@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35489 IDI_SYNC_REQ req;
35490 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35491
35492+ pax_track_stack();
35493+
35494 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35495
35496 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35497diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35498index 85784a7..a19ca98 100644
35499--- a/drivers/isdn/hardware/eicon/divasync.h
35500+++ b/drivers/isdn/hardware/eicon/divasync.h
35501@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35502 } diva_didd_add_adapter_t;
35503 typedef struct _diva_didd_remove_adapter {
35504 IDI_CALL p_request;
35505-} diva_didd_remove_adapter_t;
35506+} __no_const diva_didd_remove_adapter_t;
35507 typedef struct _diva_didd_read_adapter_array {
35508 void * buffer;
35509 dword length;
35510diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35511index db87d51..7d09acf 100644
35512--- a/drivers/isdn/hardware/eicon/idifunc.c
35513+++ b/drivers/isdn/hardware/eicon/idifunc.c
35514@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35515 IDI_SYNC_REQ req;
35516 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35517
35518+ pax_track_stack();
35519+
35520 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35521
35522 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35523diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35524index ae89fb8..0fab299 100644
35525--- a/drivers/isdn/hardware/eicon/message.c
35526+++ b/drivers/isdn/hardware/eicon/message.c
35527@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35528 dword d;
35529 word w;
35530
35531+ pax_track_stack();
35532+
35533 a = plci->adapter;
35534 Id = ((word)plci->Id<<8)|a->Id;
35535 PUT_WORD(&SS_Ind[4],0x0000);
35536@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35537 word j, n, w;
35538 dword d;
35539
35540+ pax_track_stack();
35541+
35542
35543 for(i=0;i<8;i++) bp_parms[i].length = 0;
35544 for(i=0;i<2;i++) global_config[i].length = 0;
35545@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35546 const byte llc3[] = {4,3,2,2,6,6,0};
35547 const byte header[] = {0,2,3,3,0,0,0};
35548
35549+ pax_track_stack();
35550+
35551 for(i=0;i<8;i++) bp_parms[i].length = 0;
35552 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35553 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35554@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35555 word appl_number_group_type[MAX_APPL];
35556 PLCI *auxplci;
35557
35558+ pax_track_stack();
35559+
35560 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35561
35562 if(!a->group_optimization_enabled)
35563diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35564index a564b75..f3cf8b5 100644
35565--- a/drivers/isdn/hardware/eicon/mntfunc.c
35566+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35567@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35568 IDI_SYNC_REQ req;
35569 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35570
35571+ pax_track_stack();
35572+
35573 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35574
35575 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35576diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35577index a3bd163..8956575 100644
35578--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35579+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35580@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35581 typedef struct _diva_os_idi_adapter_interface {
35582 diva_init_card_proc_t cleanup_adapter_proc;
35583 diva_cmd_card_proc_t cmd_proc;
35584-} diva_os_idi_adapter_interface_t;
35585+} __no_const diva_os_idi_adapter_interface_t;
35586
35587 typedef struct _diva_os_xdi_adapter {
35588 struct list_head link;
35589diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35590index adb1e8c..21b590b 100644
35591--- a/drivers/isdn/i4l/isdn_common.c
35592+++ b/drivers/isdn/i4l/isdn_common.c
35593@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35594 } iocpar;
35595 void __user *argp = (void __user *)arg;
35596
35597+ pax_track_stack();
35598+
35599 #define name iocpar.name
35600 #define bname iocpar.bname
35601 #define iocts iocpar.iocts
35602diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35603index bf7997a..cf091db 100644
35604--- a/drivers/isdn/icn/icn.c
35605+++ b/drivers/isdn/icn/icn.c
35606@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35607 if (count > len)
35608 count = len;
35609 if (user) {
35610- if (copy_from_user(msg, buf, count))
35611+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35612 return -EFAULT;
35613 } else
35614 memcpy(msg, buf, count);
35615diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35616index feb0fa4..f76f830 100644
35617--- a/drivers/isdn/mISDN/socket.c
35618+++ b/drivers/isdn/mISDN/socket.c
35619@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35620 if (dev) {
35621 struct mISDN_devinfo di;
35622
35623+ memset(&di, 0, sizeof(di));
35624 di.id = dev->id;
35625 di.Dprotocols = dev->Dprotocols;
35626 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35627@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35628 if (dev) {
35629 struct mISDN_devinfo di;
35630
35631+ memset(&di, 0, sizeof(di));
35632 di.id = dev->id;
35633 di.Dprotocols = dev->Dprotocols;
35634 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35635diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35636index 485be8b..f0225bc 100644
35637--- a/drivers/isdn/sc/interrupt.c
35638+++ b/drivers/isdn/sc/interrupt.c
35639@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35640 }
35641 else if(callid>=0x0000 && callid<=0x7FFF)
35642 {
35643+ int len;
35644+
35645 pr_debug("%s: Got Incoming Call\n",
35646 sc_adapter[card]->devicename);
35647- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35648- strcpy(setup.eazmsn,
35649- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35650+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35651+ sizeof(setup.phone));
35652+ if (len >= sizeof(setup.phone))
35653+ continue;
35654+ len = strlcpy(setup.eazmsn,
35655+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35656+ sizeof(setup.eazmsn));
35657+ if (len >= sizeof(setup.eazmsn))
35658+ continue;
35659 setup.si1 = 7;
35660 setup.si2 = 0;
35661 setup.plan = 0;
35662@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35663 * Handle a GetMyNumber Rsp
35664 */
35665 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35666- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35667+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35668+ rcvmsg.msg_data.byte_array,
35669+ sizeof(rcvmsg.msg_data.byte_array));
35670 continue;
35671 }
35672
35673diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35674index 8744d24..d1f9a9a 100644
35675--- a/drivers/lguest/core.c
35676+++ b/drivers/lguest/core.c
35677@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35678 * it's worked so far. The end address needs +1 because __get_vm_area
35679 * allocates an extra guard page, so we need space for that.
35680 */
35681+
35682+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35683+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35684+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35685+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35686+#else
35687 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35688 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35689 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35690+#endif
35691+
35692 if (!switcher_vma) {
35693 err = -ENOMEM;
35694 printk("lguest: could not map switcher pages high\n");
35695@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35696 * Now the Switcher is mapped at the right address, we can't fail!
35697 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35698 */
35699- memcpy(switcher_vma->addr, start_switcher_text,
35700+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35701 end_switcher_text - start_switcher_text);
35702
35703 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35704diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35705index 6ae3888..8b38145 100644
35706--- a/drivers/lguest/x86/core.c
35707+++ b/drivers/lguest/x86/core.c
35708@@ -59,7 +59,7 @@ static struct {
35709 /* Offset from where switcher.S was compiled to where we've copied it */
35710 static unsigned long switcher_offset(void)
35711 {
35712- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35713+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35714 }
35715
35716 /* This cpu's struct lguest_pages. */
35717@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35718 * These copies are pretty cheap, so we do them unconditionally: */
35719 /* Save the current Host top-level page directory.
35720 */
35721+
35722+#ifdef CONFIG_PAX_PER_CPU_PGD
35723+ pages->state.host_cr3 = read_cr3();
35724+#else
35725 pages->state.host_cr3 = __pa(current->mm->pgd);
35726+#endif
35727+
35728 /*
35729 * Set up the Guest's page tables to see this CPU's pages (and no
35730 * other CPU's pages).
35731@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35732 * compiled-in switcher code and the high-mapped copy we just made.
35733 */
35734 for (i = 0; i < IDT_ENTRIES; i++)
35735- default_idt_entries[i] += switcher_offset();
35736+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35737
35738 /*
35739 * Set up the Switcher's per-cpu areas.
35740@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35741 * it will be undisturbed when we switch. To change %cs and jump we
35742 * need this structure to feed to Intel's "lcall" instruction.
35743 */
35744- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35745+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35746 lguest_entry.segment = LGUEST_CS;
35747
35748 /*
35749diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35750index 40634b0..4f5855e 100644
35751--- a/drivers/lguest/x86/switcher_32.S
35752+++ b/drivers/lguest/x86/switcher_32.S
35753@@ -87,6 +87,7 @@
35754 #include <asm/page.h>
35755 #include <asm/segment.h>
35756 #include <asm/lguest.h>
35757+#include <asm/processor-flags.h>
35758
35759 // We mark the start of the code to copy
35760 // It's placed in .text tho it's never run here
35761@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35762 // Changes type when we load it: damn Intel!
35763 // For after we switch over our page tables
35764 // That entry will be read-only: we'd crash.
35765+
35766+#ifdef CONFIG_PAX_KERNEXEC
35767+ mov %cr0, %edx
35768+ xor $X86_CR0_WP, %edx
35769+ mov %edx, %cr0
35770+#endif
35771+
35772 movl $(GDT_ENTRY_TSS*8), %edx
35773 ltr %dx
35774
35775@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35776 // Let's clear it again for our return.
35777 // The GDT descriptor of the Host
35778 // Points to the table after two "size" bytes
35779- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35780+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35781 // Clear "used" from type field (byte 5, bit 2)
35782- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35783+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35784+
35785+#ifdef CONFIG_PAX_KERNEXEC
35786+ mov %cr0, %eax
35787+ xor $X86_CR0_WP, %eax
35788+ mov %eax, %cr0
35789+#endif
35790
35791 // Once our page table's switched, the Guest is live!
35792 // The Host fades as we run this final step.
35793@@ -295,13 +309,12 @@ deliver_to_host:
35794 // I consulted gcc, and it gave
35795 // These instructions, which I gladly credit:
35796 leal (%edx,%ebx,8), %eax
35797- movzwl (%eax),%edx
35798- movl 4(%eax), %eax
35799- xorw %ax, %ax
35800- orl %eax, %edx
35801+ movl 4(%eax), %edx
35802+ movw (%eax), %dx
35803 // Now the address of the handler's in %edx
35804 // We call it now: its "iret" drops us home.
35805- jmp *%edx
35806+ ljmp $__KERNEL_CS, $1f
35807+1: jmp *%edx
35808
35809 // Every interrupt can come to us here
35810 // But we must truly tell each apart.
35811diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35812index 588a5b0..b71db89 100644
35813--- a/drivers/macintosh/macio_asic.c
35814+++ b/drivers/macintosh/macio_asic.c
35815@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35816 * MacIO is matched against any Apple ID, it's probe() function
35817 * will then decide wether it applies or not
35818 */
35819-static const struct pci_device_id __devinitdata pci_ids [] = { {
35820+static const struct pci_device_id __devinitconst pci_ids [] = { {
35821 .vendor = PCI_VENDOR_ID_APPLE,
35822 .device = PCI_ANY_ID,
35823 .subvendor = PCI_ANY_ID,
35824diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35825index a348bb0..ecd9b3f 100644
35826--- a/drivers/macintosh/via-pmu-backlight.c
35827+++ b/drivers/macintosh/via-pmu-backlight.c
35828@@ -15,7 +15,7 @@
35829
35830 #define MAX_PMU_LEVEL 0xFF
35831
35832-static struct backlight_ops pmu_backlight_data;
35833+static const struct backlight_ops pmu_backlight_data;
35834 static DEFINE_SPINLOCK(pmu_backlight_lock);
35835 static int sleeping, uses_pmu_bl;
35836 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35837@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35838 return bd->props.brightness;
35839 }
35840
35841-static struct backlight_ops pmu_backlight_data = {
35842+static const struct backlight_ops pmu_backlight_data = {
35843 .get_brightness = pmu_backlight_get_brightness,
35844 .update_status = pmu_backlight_update_status,
35845
35846diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35847index 6f308a4..b5f7ff7 100644
35848--- a/drivers/macintosh/via-pmu.c
35849+++ b/drivers/macintosh/via-pmu.c
35850@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35851 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35852 }
35853
35854-static struct platform_suspend_ops pmu_pm_ops = {
35855+static const struct platform_suspend_ops pmu_pm_ops = {
35856 .enter = powerbook_sleep,
35857 .valid = pmu_sleep_valid,
35858 };
35859diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35860index 818b617..4656e38 100644
35861--- a/drivers/md/dm-ioctl.c
35862+++ b/drivers/md/dm-ioctl.c
35863@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35864 cmd == DM_LIST_VERSIONS_CMD)
35865 return 0;
35866
35867- if ((cmd == DM_DEV_CREATE_CMD)) {
35868+ if (cmd == DM_DEV_CREATE_CMD) {
35869 if (!*param->name) {
35870 DMWARN("name not supplied when creating device");
35871 return -EINVAL;
35872diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
35873index 6021d0a..a878643 100644
35874--- a/drivers/md/dm-raid1.c
35875+++ b/drivers/md/dm-raid1.c
35876@@ -41,7 +41,7 @@ enum dm_raid1_error {
35877
35878 struct mirror {
35879 struct mirror_set *ms;
35880- atomic_t error_count;
35881+ atomic_unchecked_t error_count;
35882 unsigned long error_type;
35883 struct dm_dev *dev;
35884 sector_t offset;
35885@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35886 * simple way to tell if a device has encountered
35887 * errors.
35888 */
35889- atomic_inc(&m->error_count);
35890+ atomic_inc_unchecked(&m->error_count);
35891
35892 if (test_and_set_bit(error_type, &m->error_type))
35893 return;
35894@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35895 }
35896
35897 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
35898- if (!atomic_read(&new->error_count)) {
35899+ if (!atomic_read_unchecked(&new->error_count)) {
35900 set_default_mirror(new);
35901 break;
35902 }
35903@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35904 struct mirror *m = get_default_mirror(ms);
35905
35906 do {
35907- if (likely(!atomic_read(&m->error_count)))
35908+ if (likely(!atomic_read_unchecked(&m->error_count)))
35909 return m;
35910
35911 if (m-- == ms->mirror)
35912@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
35913 {
35914 struct mirror *default_mirror = get_default_mirror(m->ms);
35915
35916- return !atomic_read(&default_mirror->error_count);
35917+ return !atomic_read_unchecked(&default_mirror->error_count);
35918 }
35919
35920 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35921@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35922 */
35923 if (likely(region_in_sync(ms, region, 1)))
35924 m = choose_mirror(ms, bio->bi_sector);
35925- else if (m && atomic_read(&m->error_count))
35926+ else if (m && atomic_read_unchecked(&m->error_count))
35927 m = NULL;
35928
35929 if (likely(m))
35930@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35931 }
35932
35933 ms->mirror[mirror].ms = ms;
35934- atomic_set(&(ms->mirror[mirror].error_count), 0);
35935+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35936 ms->mirror[mirror].error_type = 0;
35937 ms->mirror[mirror].offset = offset;
35938
35939@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
35940 */
35941 static char device_status_char(struct mirror *m)
35942 {
35943- if (!atomic_read(&(m->error_count)))
35944+ if (!atomic_read_unchecked(&(m->error_count)))
35945 return 'A';
35946
35947 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
35948diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35949index bd58703..9f26571 100644
35950--- a/drivers/md/dm-stripe.c
35951+++ b/drivers/md/dm-stripe.c
35952@@ -20,7 +20,7 @@ struct stripe {
35953 struct dm_dev *dev;
35954 sector_t physical_start;
35955
35956- atomic_t error_count;
35957+ atomic_unchecked_t error_count;
35958 };
35959
35960 struct stripe_c {
35961@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35962 kfree(sc);
35963 return r;
35964 }
35965- atomic_set(&(sc->stripe[i].error_count), 0);
35966+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35967 }
35968
35969 ti->private = sc;
35970@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
35971 DMEMIT("%d ", sc->stripes);
35972 for (i = 0; i < sc->stripes; i++) {
35973 DMEMIT("%s ", sc->stripe[i].dev->name);
35974- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35975+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35976 'D' : 'A';
35977 }
35978 buffer[i] = '\0';
35979@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35980 */
35981 for (i = 0; i < sc->stripes; i++)
35982 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35983- atomic_inc(&(sc->stripe[i].error_count));
35984- if (atomic_read(&(sc->stripe[i].error_count)) <
35985+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
35986+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35987 DM_IO_ERROR_THRESHOLD)
35988 queue_work(kstriped, &sc->kstriped_ws);
35989 }
35990diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
35991index 4b04590..13a77b2 100644
35992--- a/drivers/md/dm-sysfs.c
35993+++ b/drivers/md/dm-sysfs.c
35994@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
35995 NULL,
35996 };
35997
35998-static struct sysfs_ops dm_sysfs_ops = {
35999+static const struct sysfs_ops dm_sysfs_ops = {
36000 .show = dm_attr_show,
36001 };
36002
36003diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36004index 03345bb..332250d 100644
36005--- a/drivers/md/dm-table.c
36006+++ b/drivers/md/dm-table.c
36007@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36008 if (!dev_size)
36009 return 0;
36010
36011- if ((start >= dev_size) || (start + len > dev_size)) {
36012+ if ((start >= dev_size) || (len > dev_size - start)) {
36013 DMWARN("%s: %s too small for target: "
36014 "start=%llu, len=%llu, dev_size=%llu",
36015 dm_device_name(ti->table->md), bdevname(bdev, b),
36016diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36017index c988ac2..c418141 100644
36018--- a/drivers/md/dm.c
36019+++ b/drivers/md/dm.c
36020@@ -165,9 +165,9 @@ struct mapped_device {
36021 /*
36022 * Event handling.
36023 */
36024- atomic_t event_nr;
36025+ atomic_unchecked_t event_nr;
36026 wait_queue_head_t eventq;
36027- atomic_t uevent_seq;
36028+ atomic_unchecked_t uevent_seq;
36029 struct list_head uevent_list;
36030 spinlock_t uevent_lock; /* Protect access to uevent_list */
36031
36032@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36033 rwlock_init(&md->map_lock);
36034 atomic_set(&md->holders, 1);
36035 atomic_set(&md->open_count, 0);
36036- atomic_set(&md->event_nr, 0);
36037- atomic_set(&md->uevent_seq, 0);
36038+ atomic_set_unchecked(&md->event_nr, 0);
36039+ atomic_set_unchecked(&md->uevent_seq, 0);
36040 INIT_LIST_HEAD(&md->uevent_list);
36041 spin_lock_init(&md->uevent_lock);
36042
36043@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36044
36045 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36046
36047- atomic_inc(&md->event_nr);
36048+ atomic_inc_unchecked(&md->event_nr);
36049 wake_up(&md->eventq);
36050 }
36051
36052@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36053
36054 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36055 {
36056- return atomic_add_return(1, &md->uevent_seq);
36057+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36058 }
36059
36060 uint32_t dm_get_event_nr(struct mapped_device *md)
36061 {
36062- return atomic_read(&md->event_nr);
36063+ return atomic_read_unchecked(&md->event_nr);
36064 }
36065
36066 int dm_wait_event(struct mapped_device *md, int event_nr)
36067 {
36068 return wait_event_interruptible(md->eventq,
36069- (event_nr != atomic_read(&md->event_nr)));
36070+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36071 }
36072
36073 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36074diff --git a/drivers/md/md.c b/drivers/md/md.c
36075index 4ce6e2f..7a9530a 100644
36076--- a/drivers/md/md.c
36077+++ b/drivers/md/md.c
36078@@ -153,10 +153,10 @@ static int start_readonly;
36079 * start build, activate spare
36080 */
36081 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36082-static atomic_t md_event_count;
36083+static atomic_unchecked_t md_event_count;
36084 void md_new_event(mddev_t *mddev)
36085 {
36086- atomic_inc(&md_event_count);
36087+ atomic_inc_unchecked(&md_event_count);
36088 wake_up(&md_event_waiters);
36089 }
36090 EXPORT_SYMBOL_GPL(md_new_event);
36091@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36092 */
36093 static void md_new_event_inintr(mddev_t *mddev)
36094 {
36095- atomic_inc(&md_event_count);
36096+ atomic_inc_unchecked(&md_event_count);
36097 wake_up(&md_event_waiters);
36098 }
36099
36100@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36101
36102 rdev->preferred_minor = 0xffff;
36103 rdev->data_offset = le64_to_cpu(sb->data_offset);
36104- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36105+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36106
36107 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36108 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36109@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36110 else
36111 sb->resync_offset = cpu_to_le64(0);
36112
36113- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36114+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36115
36116 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36117 sb->size = cpu_to_le64(mddev->dev_sectors);
36118@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36119 static ssize_t
36120 errors_show(mdk_rdev_t *rdev, char *page)
36121 {
36122- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36123+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36124 }
36125
36126 static ssize_t
36127@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36128 char *e;
36129 unsigned long n = simple_strtoul(buf, &e, 10);
36130 if (*buf && (*e == 0 || *e == '\n')) {
36131- atomic_set(&rdev->corrected_errors, n);
36132+ atomic_set_unchecked(&rdev->corrected_errors, n);
36133 return len;
36134 }
36135 return -EINVAL;
36136@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36137 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36138 kfree(rdev);
36139 }
36140-static struct sysfs_ops rdev_sysfs_ops = {
36141+static const struct sysfs_ops rdev_sysfs_ops = {
36142 .show = rdev_attr_show,
36143 .store = rdev_attr_store,
36144 };
36145@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36146 rdev->data_offset = 0;
36147 rdev->sb_events = 0;
36148 atomic_set(&rdev->nr_pending, 0);
36149- atomic_set(&rdev->read_errors, 0);
36150- atomic_set(&rdev->corrected_errors, 0);
36151+ atomic_set_unchecked(&rdev->read_errors, 0);
36152+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36153
36154 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36155 if (!size) {
36156@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36157 kfree(mddev);
36158 }
36159
36160-static struct sysfs_ops md_sysfs_ops = {
36161+static const struct sysfs_ops md_sysfs_ops = {
36162 .show = md_attr_show,
36163 .store = md_attr_store,
36164 };
36165@@ -4482,7 +4482,8 @@ out:
36166 err = 0;
36167 blk_integrity_unregister(disk);
36168 md_new_event(mddev);
36169- sysfs_notify_dirent(mddev->sysfs_state);
36170+ if (mddev->sysfs_state)
36171+ sysfs_notify_dirent(mddev->sysfs_state);
36172 return err;
36173 }
36174
36175@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36176
36177 spin_unlock(&pers_lock);
36178 seq_printf(seq, "\n");
36179- mi->event = atomic_read(&md_event_count);
36180+ mi->event = atomic_read_unchecked(&md_event_count);
36181 return 0;
36182 }
36183 if (v == (void*)2) {
36184@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36185 chunk_kb ? "KB" : "B");
36186 if (bitmap->file) {
36187 seq_printf(seq, ", file: ");
36188- seq_path(seq, &bitmap->file->f_path, " \t\n");
36189+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36190 }
36191
36192 seq_printf(seq, "\n");
36193@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36194 else {
36195 struct seq_file *p = file->private_data;
36196 p->private = mi;
36197- mi->event = atomic_read(&md_event_count);
36198+ mi->event = atomic_read_unchecked(&md_event_count);
36199 }
36200 return error;
36201 }
36202@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36203 /* always allow read */
36204 mask = POLLIN | POLLRDNORM;
36205
36206- if (mi->event != atomic_read(&md_event_count))
36207+ if (mi->event != atomic_read_unchecked(&md_event_count))
36208 mask |= POLLERR | POLLPRI;
36209 return mask;
36210 }
36211@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36212 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36213 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36214 (int)part_stat_read(&disk->part0, sectors[1]) -
36215- atomic_read(&disk->sync_io);
36216+ atomic_read_unchecked(&disk->sync_io);
36217 /* sync IO will cause sync_io to increase before the disk_stats
36218 * as sync_io is counted when a request starts, and
36219 * disk_stats is counted when it completes.
36220diff --git a/drivers/md/md.h b/drivers/md/md.h
36221index 87430fe..0024a4c 100644
36222--- a/drivers/md/md.h
36223+++ b/drivers/md/md.h
36224@@ -94,10 +94,10 @@ struct mdk_rdev_s
36225 * only maintained for arrays that
36226 * support hot removal
36227 */
36228- atomic_t read_errors; /* number of consecutive read errors that
36229+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36230 * we have tried to ignore.
36231 */
36232- atomic_t corrected_errors; /* number of corrected read errors,
36233+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36234 * for reporting to userspace and storing
36235 * in superblock.
36236 */
36237@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36238
36239 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36240 {
36241- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36242+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36243 }
36244
36245 struct mdk_personality
36246diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36247index 968cb14..f0ad2e4 100644
36248--- a/drivers/md/raid1.c
36249+++ b/drivers/md/raid1.c
36250@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36251 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36252 continue;
36253 rdev = conf->mirrors[d].rdev;
36254- atomic_add(s, &rdev->corrected_errors);
36255+ atomic_add_unchecked(s, &rdev->corrected_errors);
36256 if (sync_page_io(rdev->bdev,
36257 sect + rdev->data_offset,
36258 s<<9,
36259@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36260 /* Well, this device is dead */
36261 md_error(mddev, rdev);
36262 else {
36263- atomic_add(s, &rdev->corrected_errors);
36264+ atomic_add_unchecked(s, &rdev->corrected_errors);
36265 printk(KERN_INFO
36266 "raid1:%s: read error corrected "
36267 "(%d sectors at %llu on %s)\n",
36268diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36269index 1b4e232..cf0f534 100644
36270--- a/drivers/md/raid10.c
36271+++ b/drivers/md/raid10.c
36272@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36273 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36274 set_bit(R10BIO_Uptodate, &r10_bio->state);
36275 else {
36276- atomic_add(r10_bio->sectors,
36277+ atomic_add_unchecked(r10_bio->sectors,
36278 &conf->mirrors[d].rdev->corrected_errors);
36279 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36280 md_error(r10_bio->mddev,
36281@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36282 test_bit(In_sync, &rdev->flags)) {
36283 atomic_inc(&rdev->nr_pending);
36284 rcu_read_unlock();
36285- atomic_add(s, &rdev->corrected_errors);
36286+ atomic_add_unchecked(s, &rdev->corrected_errors);
36287 if (sync_page_io(rdev->bdev,
36288 r10_bio->devs[sl].addr +
36289 sect + rdev->data_offset,
36290diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36291index 883215d..675bf47 100644
36292--- a/drivers/md/raid5.c
36293+++ b/drivers/md/raid5.c
36294@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36295 bi->bi_next = NULL;
36296 if ((rw & WRITE) &&
36297 test_bit(R5_ReWrite, &sh->dev[i].flags))
36298- atomic_add(STRIPE_SECTORS,
36299+ atomic_add_unchecked(STRIPE_SECTORS,
36300 &rdev->corrected_errors);
36301 generic_make_request(bi);
36302 } else {
36303@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36304 clear_bit(R5_ReadError, &sh->dev[i].flags);
36305 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36306 }
36307- if (atomic_read(&conf->disks[i].rdev->read_errors))
36308- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36309+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36310+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36311 } else {
36312 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36313 int retry = 0;
36314 rdev = conf->disks[i].rdev;
36315
36316 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36317- atomic_inc(&rdev->read_errors);
36318+ atomic_inc_unchecked(&rdev->read_errors);
36319 if (conf->mddev->degraded >= conf->max_degraded)
36320 printk_rl(KERN_WARNING
36321 "raid5:%s: read error not correctable "
36322@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36323 (unsigned long long)(sh->sector
36324 + rdev->data_offset),
36325 bdn);
36326- else if (atomic_read(&rdev->read_errors)
36327+ else if (atomic_read_unchecked(&rdev->read_errors)
36328 > conf->max_nr_stripes)
36329 printk(KERN_WARNING
36330 "raid5:%s: Too many read errors, failing device %s.\n",
36331@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36332 sector_t r_sector;
36333 struct stripe_head sh2;
36334
36335+ pax_track_stack();
36336
36337 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36338 stripe = new_sector;
36339diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36340index 05bde9c..2f31d40 100644
36341--- a/drivers/media/common/saa7146_hlp.c
36342+++ b/drivers/media/common/saa7146_hlp.c
36343@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36344
36345 int x[32], y[32], w[32], h[32];
36346
36347+ pax_track_stack();
36348+
36349 /* clear out memory */
36350 memset(&line_list[0], 0x00, sizeof(u32)*32);
36351 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36352diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36353index cb22da5..82b686e 100644
36354--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36355+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36356@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36357 u8 buf[HOST_LINK_BUF_SIZE];
36358 int i;
36359
36360+ pax_track_stack();
36361+
36362 dprintk("%s\n", __func__);
36363
36364 /* check if we have space for a link buf in the rx_buffer */
36365@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36366 unsigned long timeout;
36367 int written;
36368
36369+ pax_track_stack();
36370+
36371 dprintk("%s\n", __func__);
36372
36373 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36374diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36375index 2fe05d0..a3289c4 100644
36376--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36377+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36378@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36379 union {
36380 dmx_ts_cb ts;
36381 dmx_section_cb sec;
36382- } cb;
36383+ } __no_const cb;
36384
36385 struct dvb_demux *demux;
36386 void *priv;
36387diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36388index 94159b9..376bd8e 100644
36389--- a/drivers/media/dvb/dvb-core/dvbdev.c
36390+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36391@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36392 const struct dvb_device *template, void *priv, int type)
36393 {
36394 struct dvb_device *dvbdev;
36395- struct file_operations *dvbdevfops;
36396+ file_operations_no_const *dvbdevfops;
36397 struct device *clsdev;
36398 int minor;
36399 int id;
36400diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36401index 2a53dd0..db8c07a 100644
36402--- a/drivers/media/dvb/dvb-usb/cxusb.c
36403+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36404@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36405 struct dib0700_adapter_state {
36406 int (*set_param_save) (struct dvb_frontend *,
36407 struct dvb_frontend_parameters *);
36408-};
36409+} __no_const;
36410
36411 static int dib7070_set_param_override(struct dvb_frontend *fe,
36412 struct dvb_frontend_parameters *fep)
36413diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36414index db7f7f7..f55e96f 100644
36415--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36416+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36417@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36418
36419 u8 buf[260];
36420
36421+ pax_track_stack();
36422+
36423 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36424 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36425
36426diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36427index 524acf5..5ffc403 100644
36428--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36429+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36430@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36431
36432 struct dib0700_adapter_state {
36433 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36434-};
36435+} __no_const;
36436
36437 /* Hauppauge Nova-T 500 (aka Bristol)
36438 * has a LNA on GPIO0 which is enabled by setting 1 */
36439diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36440index ba91735..4261d84 100644
36441--- a/drivers/media/dvb/frontends/dib3000.h
36442+++ b/drivers/media/dvb/frontends/dib3000.h
36443@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36444 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36445 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36446 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36447-};
36448+} __no_const;
36449
36450 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36451 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36452diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36453index c709ce6..b3fe620 100644
36454--- a/drivers/media/dvb/frontends/or51211.c
36455+++ b/drivers/media/dvb/frontends/or51211.c
36456@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36457 u8 tudata[585];
36458 int i;
36459
36460+ pax_track_stack();
36461+
36462 dprintk("Firmware is %zd bytes\n",fw->size);
36463
36464 /* Get eprom data */
36465diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36466index 482d0f3..ee1e202 100644
36467--- a/drivers/media/radio/radio-cadet.c
36468+++ b/drivers/media/radio/radio-cadet.c
36469@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36470 while (i < count && dev->rdsin != dev->rdsout)
36471 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36472
36473- if (copy_to_user(data, readbuf, i))
36474+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36475 return -EFAULT;
36476 return i;
36477 }
36478diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36479index 6dd51e2..0359b92 100644
36480--- a/drivers/media/video/cx18/cx18-driver.c
36481+++ b/drivers/media/video/cx18/cx18-driver.c
36482@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36483
36484 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36485
36486-static atomic_t cx18_instance = ATOMIC_INIT(0);
36487+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36488
36489 /* Parameter declarations */
36490 static int cardtype[CX18_MAX_CARDS];
36491@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36492 struct i2c_client c;
36493 u8 eedata[256];
36494
36495+ pax_track_stack();
36496+
36497 memset(&c, 0, sizeof(c));
36498 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36499 c.adapter = &cx->i2c_adap[0];
36500@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36501 struct cx18 *cx;
36502
36503 /* FIXME - module parameter arrays constrain max instances */
36504- i = atomic_inc_return(&cx18_instance) - 1;
36505+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36506 if (i >= CX18_MAX_CARDS) {
36507 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36508 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36509diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36510index 463ec34..2f4625a 100644
36511--- a/drivers/media/video/ivtv/ivtv-driver.c
36512+++ b/drivers/media/video/ivtv/ivtv-driver.c
36513@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36514 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36515
36516 /* ivtv instance counter */
36517-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36518+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36519
36520 /* Parameter declarations */
36521 static int cardtype[IVTV_MAX_CARDS];
36522diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36523index 5fc4ac0..652a54a 100644
36524--- a/drivers/media/video/omap24xxcam.c
36525+++ b/drivers/media/video/omap24xxcam.c
36526@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36527 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36528
36529 do_gettimeofday(&vb->ts);
36530- vb->field_count = atomic_add_return(2, &fh->field_count);
36531+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36532 if (csr & csr_error) {
36533 vb->state = VIDEOBUF_ERROR;
36534 if (!atomic_read(&fh->cam->in_reset)) {
36535diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36536index 2ce67f5..cf26a5b 100644
36537--- a/drivers/media/video/omap24xxcam.h
36538+++ b/drivers/media/video/omap24xxcam.h
36539@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36540 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36541 struct videobuf_queue vbq;
36542 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36543- atomic_t field_count; /* field counter for videobuf_buffer */
36544+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36545 /* accessing cam here doesn't need serialisation: it's constant */
36546 struct omap24xxcam_device *cam;
36547 };
36548diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36549index 299afa4..eb47459 100644
36550--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36551+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36552@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36553 u8 *eeprom;
36554 struct tveeprom tvdata;
36555
36556+ pax_track_stack();
36557+
36558 memset(&tvdata,0,sizeof(tvdata));
36559
36560 eeprom = pvr2_eeprom_fetch(hdw);
36561diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36562index 5b152ff..3320638 100644
36563--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36564+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36565@@ -195,7 +195,7 @@ struct pvr2_hdw {
36566
36567 /* I2C stuff */
36568 struct i2c_adapter i2c_adap;
36569- struct i2c_algorithm i2c_algo;
36570+ i2c_algorithm_no_const i2c_algo;
36571 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36572 int i2c_cx25840_hack_state;
36573 int i2c_linked;
36574diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36575index 1eabff6..8e2313a 100644
36576--- a/drivers/media/video/saa7134/saa6752hs.c
36577+++ b/drivers/media/video/saa7134/saa6752hs.c
36578@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36579 unsigned char localPAT[256];
36580 unsigned char localPMT[256];
36581
36582+ pax_track_stack();
36583+
36584 /* Set video format - must be done first as it resets other settings */
36585 set_reg8(client, 0x41, h->video_format);
36586
36587diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36588index 9c1d3ac..b1b49e9 100644
36589--- a/drivers/media/video/saa7164/saa7164-cmd.c
36590+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36591@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36592 wait_queue_head_t *q = 0;
36593 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36594
36595+ pax_track_stack();
36596+
36597 /* While any outstand message on the bus exists... */
36598 do {
36599
36600@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36601 u8 tmp[512];
36602 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36603
36604+ pax_track_stack();
36605+
36606 while (loop) {
36607
36608 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36609diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36610index b085496..cde0270 100644
36611--- a/drivers/media/video/usbvideo/ibmcam.c
36612+++ b/drivers/media/video/usbvideo/ibmcam.c
36613@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36614 static int __init ibmcam_init(void)
36615 {
36616 struct usbvideo_cb cbTbl;
36617- memset(&cbTbl, 0, sizeof(cbTbl));
36618- cbTbl.probe = ibmcam_probe;
36619- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36620- cbTbl.videoStart = ibmcam_video_start;
36621- cbTbl.videoStop = ibmcam_video_stop;
36622- cbTbl.processData = ibmcam_ProcessIsocData;
36623- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36624- cbTbl.adjustPicture = ibmcam_adjust_picture;
36625- cbTbl.getFPS = ibmcam_calculate_fps;
36626+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36627+ *(void **)&cbTbl.probe = ibmcam_probe;
36628+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36629+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36630+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36631+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36632+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36633+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36634+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36635 return usbvideo_register(
36636 &cams,
36637 MAX_IBMCAM,
36638diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36639index 31d57f2..600b735 100644
36640--- a/drivers/media/video/usbvideo/konicawc.c
36641+++ b/drivers/media/video/usbvideo/konicawc.c
36642@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36643 int error;
36644
36645 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36646- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36647+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36648
36649 cam->input = input_dev = input_allocate_device();
36650 if (!input_dev) {
36651@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36652 struct usbvideo_cb cbTbl;
36653 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36654 DRIVER_DESC "\n");
36655- memset(&cbTbl, 0, sizeof(cbTbl));
36656- cbTbl.probe = konicawc_probe;
36657- cbTbl.setupOnOpen = konicawc_setup_on_open;
36658- cbTbl.processData = konicawc_process_isoc;
36659- cbTbl.getFPS = konicawc_calculate_fps;
36660- cbTbl.setVideoMode = konicawc_set_video_mode;
36661- cbTbl.startDataPump = konicawc_start_data;
36662- cbTbl.stopDataPump = konicawc_stop_data;
36663- cbTbl.adjustPicture = konicawc_adjust_picture;
36664- cbTbl.userFree = konicawc_free_uvd;
36665+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36666+ *(void **)&cbTbl.probe = konicawc_probe;
36667+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36668+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36669+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36670+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36671+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36672+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36673+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36674+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36675 return usbvideo_register(
36676 &cams,
36677 MAX_CAMERAS,
36678diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36679index 803d3e4..c4d1b96 100644
36680--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36681+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36682@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36683 int error;
36684
36685 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36686- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36687+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36688
36689 cam->input = input_dev = input_allocate_device();
36690 if (!input_dev) {
36691diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36692index fbd1b63..292f9f0 100644
36693--- a/drivers/media/video/usbvideo/ultracam.c
36694+++ b/drivers/media/video/usbvideo/ultracam.c
36695@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36696 {
36697 struct usbvideo_cb cbTbl;
36698 memset(&cbTbl, 0, sizeof(cbTbl));
36699- cbTbl.probe = ultracam_probe;
36700- cbTbl.setupOnOpen = ultracam_setup_on_open;
36701- cbTbl.videoStart = ultracam_video_start;
36702- cbTbl.videoStop = ultracam_video_stop;
36703- cbTbl.processData = ultracam_ProcessIsocData;
36704- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36705- cbTbl.adjustPicture = ultracam_adjust_picture;
36706- cbTbl.getFPS = ultracam_calculate_fps;
36707+ *(void **)&cbTbl.probe = ultracam_probe;
36708+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36709+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36710+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36711+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36712+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36713+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36714+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36715 return usbvideo_register(
36716 &cams,
36717 MAX_CAMERAS,
36718diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36719index dea8b32..34f6878 100644
36720--- a/drivers/media/video/usbvideo/usbvideo.c
36721+++ b/drivers/media/video/usbvideo/usbvideo.c
36722@@ -697,15 +697,15 @@ int usbvideo_register(
36723 __func__, cams, base_size, num_cams);
36724
36725 /* Copy callbacks, apply defaults for those that are not set */
36726- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36727+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36728 if (cams->cb.getFrame == NULL)
36729- cams->cb.getFrame = usbvideo_GetFrame;
36730+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36731 if (cams->cb.disconnect == NULL)
36732- cams->cb.disconnect = usbvideo_Disconnect;
36733+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36734 if (cams->cb.startDataPump == NULL)
36735- cams->cb.startDataPump = usbvideo_StartDataPump;
36736+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36737 if (cams->cb.stopDataPump == NULL)
36738- cams->cb.stopDataPump = usbvideo_StopDataPump;
36739+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36740
36741 cams->num_cameras = num_cams;
36742 cams->cam = (struct uvd *) &cams[1];
36743diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36744index c66985b..7fa143a 100644
36745--- a/drivers/media/video/usbvideo/usbvideo.h
36746+++ b/drivers/media/video/usbvideo/usbvideo.h
36747@@ -268,7 +268,7 @@ struct usbvideo_cb {
36748 int (*startDataPump)(struct uvd *uvd);
36749 void (*stopDataPump)(struct uvd *uvd);
36750 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36751-};
36752+} __no_const;
36753
36754 struct usbvideo {
36755 int num_cameras; /* As allocated */
36756diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36757index e0f91e4..37554ea 100644
36758--- a/drivers/media/video/usbvision/usbvision-core.c
36759+++ b/drivers/media/video/usbvision/usbvision-core.c
36760@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36761 unsigned char rv, gv, bv;
36762 static unsigned char *Y, *U, *V;
36763
36764+ pax_track_stack();
36765+
36766 frame = usbvision->curFrame;
36767 imageSize = frame->frmwidth * frame->frmheight;
36768 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36769diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36770index 0d06e7c..3d17d24 100644
36771--- a/drivers/media/video/v4l2-device.c
36772+++ b/drivers/media/video/v4l2-device.c
36773@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36774 EXPORT_SYMBOL_GPL(v4l2_device_register);
36775
36776 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36777- atomic_t *instance)
36778+ atomic_unchecked_t *instance)
36779 {
36780- int num = atomic_inc_return(instance) - 1;
36781+ int num = atomic_inc_return_unchecked(instance) - 1;
36782 int len = strlen(basename);
36783
36784 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36785diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36786index 032ebae..6a3532c 100644
36787--- a/drivers/media/video/videobuf-dma-sg.c
36788+++ b/drivers/media/video/videobuf-dma-sg.c
36789@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36790 {
36791 struct videobuf_queue q;
36792
36793+ pax_track_stack();
36794+
36795 /* Required to make generic handler to call __videobuf_alloc */
36796 q.int_ops = &sg_ops;
36797
36798diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36799index b6992b7..9fa7547 100644
36800--- a/drivers/message/fusion/mptbase.c
36801+++ b/drivers/message/fusion/mptbase.c
36802@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36803 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36804 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36805
36806+#ifdef CONFIG_GRKERNSEC_HIDESYM
36807+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36808+ NULL, NULL);
36809+#else
36810 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36811 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36812+#endif
36813+
36814 /*
36815 * Rounding UP to nearest 4-kB boundary here...
36816 */
36817diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36818index 83873e3..e360e9a 100644
36819--- a/drivers/message/fusion/mptsas.c
36820+++ b/drivers/message/fusion/mptsas.c
36821@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36822 return 0;
36823 }
36824
36825+static inline void
36826+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36827+{
36828+ if (phy_info->port_details) {
36829+ phy_info->port_details->rphy = rphy;
36830+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36831+ ioc->name, rphy));
36832+ }
36833+
36834+ if (rphy) {
36835+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36836+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36837+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36838+ ioc->name, rphy, rphy->dev.release));
36839+ }
36840+}
36841+
36842 /* no mutex */
36843 static void
36844 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36845@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36846 return NULL;
36847 }
36848
36849-static inline void
36850-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36851-{
36852- if (phy_info->port_details) {
36853- phy_info->port_details->rphy = rphy;
36854- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36855- ioc->name, rphy));
36856- }
36857-
36858- if (rphy) {
36859- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36860- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36861- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36862- ioc->name, rphy, rphy->dev.release));
36863- }
36864-}
36865-
36866 static inline struct sas_port *
36867 mptsas_get_port(struct mptsas_phyinfo *phy_info)
36868 {
36869diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
36870index bd096ca..332cf76 100644
36871--- a/drivers/message/fusion/mptscsih.c
36872+++ b/drivers/message/fusion/mptscsih.c
36873@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
36874
36875 h = shost_priv(SChost);
36876
36877- if (h) {
36878- if (h->info_kbuf == NULL)
36879- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36880- return h->info_kbuf;
36881- h->info_kbuf[0] = '\0';
36882+ if (!h)
36883+ return NULL;
36884
36885- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36886- h->info_kbuf[size-1] = '\0';
36887- }
36888+ if (h->info_kbuf == NULL)
36889+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36890+ return h->info_kbuf;
36891+ h->info_kbuf[0] = '\0';
36892+
36893+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36894+ h->info_kbuf[size-1] = '\0';
36895
36896 return h->info_kbuf;
36897 }
36898diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
36899index efba702..59b2c0f 100644
36900--- a/drivers/message/i2o/i2o_config.c
36901+++ b/drivers/message/i2o/i2o_config.c
36902@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
36903 struct i2o_message *msg;
36904 unsigned int iop;
36905
36906+ pax_track_stack();
36907+
36908 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
36909 return -EFAULT;
36910
36911diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
36912index 7045c45..c07b170 100644
36913--- a/drivers/message/i2o/i2o_proc.c
36914+++ b/drivers/message/i2o/i2o_proc.c
36915@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
36916 "Array Controller Device"
36917 };
36918
36919-static char *chtostr(u8 * chars, int n)
36920-{
36921- char tmp[256];
36922- tmp[0] = 0;
36923- return strncat(tmp, (char *)chars, n);
36924-}
36925-
36926 static int i2o_report_query_status(struct seq_file *seq, int block_status,
36927 char *group)
36928 {
36929@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36930
36931 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
36932 seq_printf(seq, "%-#8x", ddm_table.module_id);
36933- seq_printf(seq, "%-29s",
36934- chtostr(ddm_table.module_name_version, 28));
36935+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
36936 seq_printf(seq, "%9d ", ddm_table.data_size);
36937 seq_printf(seq, "%8d", ddm_table.code_size);
36938
36939@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36940
36941 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
36942 seq_printf(seq, "%-#8x", dst->module_id);
36943- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
36944- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
36945+ seq_printf(seq, "%-.28s", dst->module_name_version);
36946+ seq_printf(seq, "%-.8s", dst->date);
36947 seq_printf(seq, "%8d ", dst->module_size);
36948 seq_printf(seq, "%8d ", dst->mpb_size);
36949 seq_printf(seq, "0x%04x", dst->module_flags);
36950@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36951 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
36952 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
36953 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
36954- seq_printf(seq, "Vendor info : %s\n",
36955- chtostr((u8 *) (work32 + 2), 16));
36956- seq_printf(seq, "Product info : %s\n",
36957- chtostr((u8 *) (work32 + 6), 16));
36958- seq_printf(seq, "Description : %s\n",
36959- chtostr((u8 *) (work32 + 10), 16));
36960- seq_printf(seq, "Product rev. : %s\n",
36961- chtostr((u8 *) (work32 + 14), 8));
36962+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
36963+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
36964+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
36965+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
36966
36967 seq_printf(seq, "Serial number : ");
36968 print_serial_number(seq, (u8 *) (work32 + 16),
36969@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36970 }
36971
36972 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
36973- seq_printf(seq, "Module name : %s\n",
36974- chtostr(result.module_name, 24));
36975- seq_printf(seq, "Module revision : %s\n",
36976- chtostr(result.module_rev, 8));
36977+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
36978+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
36979
36980 seq_printf(seq, "Serial number : ");
36981 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
36982@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36983 return 0;
36984 }
36985
36986- seq_printf(seq, "Device name : %s\n",
36987- chtostr(result.device_name, 64));
36988- seq_printf(seq, "Service name : %s\n",
36989- chtostr(result.service_name, 64));
36990- seq_printf(seq, "Physical name : %s\n",
36991- chtostr(result.physical_location, 64));
36992- seq_printf(seq, "Instance number : %s\n",
36993- chtostr(result.instance_number, 4));
36994+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
36995+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
36996+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
36997+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
36998
36999 return 0;
37000 }
37001diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37002index 27cf4af..b1205b8 100644
37003--- a/drivers/message/i2o/iop.c
37004+++ b/drivers/message/i2o/iop.c
37005@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37006
37007 spin_lock_irqsave(&c->context_list_lock, flags);
37008
37009- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37010- atomic_inc(&c->context_list_counter);
37011+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37012+ atomic_inc_unchecked(&c->context_list_counter);
37013
37014- entry->context = atomic_read(&c->context_list_counter);
37015+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37016
37017 list_add(&entry->list, &c->context_list);
37018
37019@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37020
37021 #if BITS_PER_LONG == 64
37022 spin_lock_init(&c->context_list_lock);
37023- atomic_set(&c->context_list_counter, 0);
37024+ atomic_set_unchecked(&c->context_list_counter, 0);
37025 INIT_LIST_HEAD(&c->context_list);
37026 #endif
37027
37028diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37029index 78e3e85..66c9a0d 100644
37030--- a/drivers/mfd/ab3100-core.c
37031+++ b/drivers/mfd/ab3100-core.c
37032@@ -777,7 +777,7 @@ struct ab_family_id {
37033 char *name;
37034 };
37035
37036-static const struct ab_family_id ids[] __initdata = {
37037+static const struct ab_family_id ids[] __initconst = {
37038 /* AB3100 */
37039 {
37040 .id = 0xc0,
37041diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37042index 8d8c932..8104515 100644
37043--- a/drivers/mfd/wm8350-i2c.c
37044+++ b/drivers/mfd/wm8350-i2c.c
37045@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37046 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37047 int ret;
37048
37049+ pax_track_stack();
37050+
37051 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37052 return -EINVAL;
37053
37054diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37055index e4ff50b..4cc3f04 100644
37056--- a/drivers/misc/kgdbts.c
37057+++ b/drivers/misc/kgdbts.c
37058@@ -118,7 +118,7 @@
37059 } while (0)
37060 #define MAX_CONFIG_LEN 40
37061
37062-static struct kgdb_io kgdbts_io_ops;
37063+static const struct kgdb_io kgdbts_io_ops;
37064 static char get_buf[BUFMAX];
37065 static int get_buf_cnt;
37066 static char put_buf[BUFMAX];
37067@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37068 module_put(THIS_MODULE);
37069 }
37070
37071-static struct kgdb_io kgdbts_io_ops = {
37072+static const struct kgdb_io kgdbts_io_ops = {
37073 .name = "kgdbts",
37074 .read_char = kgdbts_get_char,
37075 .write_char = kgdbts_put_char,
37076diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37077index 37e7cfc..67cfb76 100644
37078--- a/drivers/misc/sgi-gru/gruhandles.c
37079+++ b/drivers/misc/sgi-gru/gruhandles.c
37080@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37081
37082 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37083 {
37084- atomic_long_inc(&mcs_op_statistics[op].count);
37085- atomic_long_add(clks, &mcs_op_statistics[op].total);
37086+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37087+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37088 if (mcs_op_statistics[op].max < clks)
37089 mcs_op_statistics[op].max = clks;
37090 }
37091diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37092index 3f2375c..467c6e6 100644
37093--- a/drivers/misc/sgi-gru/gruprocfs.c
37094+++ b/drivers/misc/sgi-gru/gruprocfs.c
37095@@ -32,9 +32,9 @@
37096
37097 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37098
37099-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37100+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37101 {
37102- unsigned long val = atomic_long_read(v);
37103+ unsigned long val = atomic_long_read_unchecked(v);
37104
37105 if (val)
37106 seq_printf(s, "%16lu %s\n", val, id);
37107@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37108 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37109
37110 for (op = 0; op < mcsop_last; op++) {
37111- count = atomic_long_read(&mcs_op_statistics[op].count);
37112- total = atomic_long_read(&mcs_op_statistics[op].total);
37113+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37114+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37115 max = mcs_op_statistics[op].max;
37116 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37117 count ? total / count : 0, max);
37118diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37119index 46990bc..4a251b5 100644
37120--- a/drivers/misc/sgi-gru/grutables.h
37121+++ b/drivers/misc/sgi-gru/grutables.h
37122@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37123 * GRU statistics.
37124 */
37125 struct gru_stats_s {
37126- atomic_long_t vdata_alloc;
37127- atomic_long_t vdata_free;
37128- atomic_long_t gts_alloc;
37129- atomic_long_t gts_free;
37130- atomic_long_t vdata_double_alloc;
37131- atomic_long_t gts_double_allocate;
37132- atomic_long_t assign_context;
37133- atomic_long_t assign_context_failed;
37134- atomic_long_t free_context;
37135- atomic_long_t load_user_context;
37136- atomic_long_t load_kernel_context;
37137- atomic_long_t lock_kernel_context;
37138- atomic_long_t unlock_kernel_context;
37139- atomic_long_t steal_user_context;
37140- atomic_long_t steal_kernel_context;
37141- atomic_long_t steal_context_failed;
37142- atomic_long_t nopfn;
37143- atomic_long_t break_cow;
37144- atomic_long_t asid_new;
37145- atomic_long_t asid_next;
37146- atomic_long_t asid_wrap;
37147- atomic_long_t asid_reuse;
37148- atomic_long_t intr;
37149- atomic_long_t intr_mm_lock_failed;
37150- atomic_long_t call_os;
37151- atomic_long_t call_os_offnode_reference;
37152- atomic_long_t call_os_check_for_bug;
37153- atomic_long_t call_os_wait_queue;
37154- atomic_long_t user_flush_tlb;
37155- atomic_long_t user_unload_context;
37156- atomic_long_t user_exception;
37157- atomic_long_t set_context_option;
37158- atomic_long_t migrate_check;
37159- atomic_long_t migrated_retarget;
37160- atomic_long_t migrated_unload;
37161- atomic_long_t migrated_unload_delay;
37162- atomic_long_t migrated_nopfn_retarget;
37163- atomic_long_t migrated_nopfn_unload;
37164- atomic_long_t tlb_dropin;
37165- atomic_long_t tlb_dropin_fail_no_asid;
37166- atomic_long_t tlb_dropin_fail_upm;
37167- atomic_long_t tlb_dropin_fail_invalid;
37168- atomic_long_t tlb_dropin_fail_range_active;
37169- atomic_long_t tlb_dropin_fail_idle;
37170- atomic_long_t tlb_dropin_fail_fmm;
37171- atomic_long_t tlb_dropin_fail_no_exception;
37172- atomic_long_t tlb_dropin_fail_no_exception_war;
37173- atomic_long_t tfh_stale_on_fault;
37174- atomic_long_t mmu_invalidate_range;
37175- atomic_long_t mmu_invalidate_page;
37176- atomic_long_t mmu_clear_flush_young;
37177- atomic_long_t flush_tlb;
37178- atomic_long_t flush_tlb_gru;
37179- atomic_long_t flush_tlb_gru_tgh;
37180- atomic_long_t flush_tlb_gru_zero_asid;
37181+ atomic_long_unchecked_t vdata_alloc;
37182+ atomic_long_unchecked_t vdata_free;
37183+ atomic_long_unchecked_t gts_alloc;
37184+ atomic_long_unchecked_t gts_free;
37185+ atomic_long_unchecked_t vdata_double_alloc;
37186+ atomic_long_unchecked_t gts_double_allocate;
37187+ atomic_long_unchecked_t assign_context;
37188+ atomic_long_unchecked_t assign_context_failed;
37189+ atomic_long_unchecked_t free_context;
37190+ atomic_long_unchecked_t load_user_context;
37191+ atomic_long_unchecked_t load_kernel_context;
37192+ atomic_long_unchecked_t lock_kernel_context;
37193+ atomic_long_unchecked_t unlock_kernel_context;
37194+ atomic_long_unchecked_t steal_user_context;
37195+ atomic_long_unchecked_t steal_kernel_context;
37196+ atomic_long_unchecked_t steal_context_failed;
37197+ atomic_long_unchecked_t nopfn;
37198+ atomic_long_unchecked_t break_cow;
37199+ atomic_long_unchecked_t asid_new;
37200+ atomic_long_unchecked_t asid_next;
37201+ atomic_long_unchecked_t asid_wrap;
37202+ atomic_long_unchecked_t asid_reuse;
37203+ atomic_long_unchecked_t intr;
37204+ atomic_long_unchecked_t intr_mm_lock_failed;
37205+ atomic_long_unchecked_t call_os;
37206+ atomic_long_unchecked_t call_os_offnode_reference;
37207+ atomic_long_unchecked_t call_os_check_for_bug;
37208+ atomic_long_unchecked_t call_os_wait_queue;
37209+ atomic_long_unchecked_t user_flush_tlb;
37210+ atomic_long_unchecked_t user_unload_context;
37211+ atomic_long_unchecked_t user_exception;
37212+ atomic_long_unchecked_t set_context_option;
37213+ atomic_long_unchecked_t migrate_check;
37214+ atomic_long_unchecked_t migrated_retarget;
37215+ atomic_long_unchecked_t migrated_unload;
37216+ atomic_long_unchecked_t migrated_unload_delay;
37217+ atomic_long_unchecked_t migrated_nopfn_retarget;
37218+ atomic_long_unchecked_t migrated_nopfn_unload;
37219+ atomic_long_unchecked_t tlb_dropin;
37220+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37221+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37222+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37223+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37224+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37225+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37226+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37227+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37228+ atomic_long_unchecked_t tfh_stale_on_fault;
37229+ atomic_long_unchecked_t mmu_invalidate_range;
37230+ atomic_long_unchecked_t mmu_invalidate_page;
37231+ atomic_long_unchecked_t mmu_clear_flush_young;
37232+ atomic_long_unchecked_t flush_tlb;
37233+ atomic_long_unchecked_t flush_tlb_gru;
37234+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37235+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37236
37237- atomic_long_t copy_gpa;
37238+ atomic_long_unchecked_t copy_gpa;
37239
37240- atomic_long_t mesq_receive;
37241- atomic_long_t mesq_receive_none;
37242- atomic_long_t mesq_send;
37243- atomic_long_t mesq_send_failed;
37244- atomic_long_t mesq_noop;
37245- atomic_long_t mesq_send_unexpected_error;
37246- atomic_long_t mesq_send_lb_overflow;
37247- atomic_long_t mesq_send_qlimit_reached;
37248- atomic_long_t mesq_send_amo_nacked;
37249- atomic_long_t mesq_send_put_nacked;
37250- atomic_long_t mesq_qf_not_full;
37251- atomic_long_t mesq_qf_locked;
37252- atomic_long_t mesq_qf_noop_not_full;
37253- atomic_long_t mesq_qf_switch_head_failed;
37254- atomic_long_t mesq_qf_unexpected_error;
37255- atomic_long_t mesq_noop_unexpected_error;
37256- atomic_long_t mesq_noop_lb_overflow;
37257- atomic_long_t mesq_noop_qlimit_reached;
37258- atomic_long_t mesq_noop_amo_nacked;
37259- atomic_long_t mesq_noop_put_nacked;
37260+ atomic_long_unchecked_t mesq_receive;
37261+ atomic_long_unchecked_t mesq_receive_none;
37262+ atomic_long_unchecked_t mesq_send;
37263+ atomic_long_unchecked_t mesq_send_failed;
37264+ atomic_long_unchecked_t mesq_noop;
37265+ atomic_long_unchecked_t mesq_send_unexpected_error;
37266+ atomic_long_unchecked_t mesq_send_lb_overflow;
37267+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37268+ atomic_long_unchecked_t mesq_send_amo_nacked;
37269+ atomic_long_unchecked_t mesq_send_put_nacked;
37270+ atomic_long_unchecked_t mesq_qf_not_full;
37271+ atomic_long_unchecked_t mesq_qf_locked;
37272+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37273+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37274+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37275+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37276+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37277+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37278+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37279+ atomic_long_unchecked_t mesq_noop_put_nacked;
37280
37281 };
37282
37283@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37284 cchop_deallocate, tghop_invalidate, mcsop_last};
37285
37286 struct mcs_op_statistic {
37287- atomic_long_t count;
37288- atomic_long_t total;
37289+ atomic_long_unchecked_t count;
37290+ atomic_long_unchecked_t total;
37291 unsigned long max;
37292 };
37293
37294@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37295
37296 #define STAT(id) do { \
37297 if (gru_options & OPT_STATS) \
37298- atomic_long_inc(&gru_stats.id); \
37299+ atomic_long_inc_unchecked(&gru_stats.id); \
37300 } while (0)
37301
37302 #ifdef CONFIG_SGI_GRU_DEBUG
37303diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37304index 2275126..12a9dbfb 100644
37305--- a/drivers/misc/sgi-xp/xp.h
37306+++ b/drivers/misc/sgi-xp/xp.h
37307@@ -289,7 +289,7 @@ struct xpc_interface {
37308 xpc_notify_func, void *);
37309 void (*received) (short, int, void *);
37310 enum xp_retval (*partid_to_nasids) (short, void *);
37311-};
37312+} __no_const;
37313
37314 extern struct xpc_interface xpc_interface;
37315
37316diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37317index b94d5f7..7f494c5 100644
37318--- a/drivers/misc/sgi-xp/xpc.h
37319+++ b/drivers/misc/sgi-xp/xpc.h
37320@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37321 void (*received_payload) (struct xpc_channel *, void *);
37322 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37323 };
37324+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37325
37326 /* struct xpc_partition act_state values (for XPC HB) */
37327
37328@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37329 /* found in xpc_main.c */
37330 extern struct device *xpc_part;
37331 extern struct device *xpc_chan;
37332-extern struct xpc_arch_operations xpc_arch_ops;
37333+extern xpc_arch_operations_no_const xpc_arch_ops;
37334 extern int xpc_disengage_timelimit;
37335 extern int xpc_disengage_timedout;
37336 extern int xpc_activate_IRQ_rcvd;
37337diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37338index fd3688a..7e211a4 100644
37339--- a/drivers/misc/sgi-xp/xpc_main.c
37340+++ b/drivers/misc/sgi-xp/xpc_main.c
37341@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37342 .notifier_call = xpc_system_die,
37343 };
37344
37345-struct xpc_arch_operations xpc_arch_ops;
37346+xpc_arch_operations_no_const xpc_arch_ops;
37347
37348 /*
37349 * Timer function to enforce the timelimit on the partition disengage.
37350diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37351index 8b70e03..700bda6 100644
37352--- a/drivers/misc/sgi-xp/xpc_sn2.c
37353+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37354@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37355 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37356 }
37357
37358-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37359+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37360 .setup_partitions = xpc_setup_partitions_sn2,
37361 .teardown_partitions = xpc_teardown_partitions_sn2,
37362 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37363@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37364 int ret;
37365 size_t buf_size;
37366
37367- xpc_arch_ops = xpc_arch_ops_sn2;
37368+ pax_open_kernel();
37369+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37370+ pax_close_kernel();
37371
37372 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37373 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37374diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37375index 8e08d71..7cb8c9b 100644
37376--- a/drivers/misc/sgi-xp/xpc_uv.c
37377+++ b/drivers/misc/sgi-xp/xpc_uv.c
37378@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37379 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37380 }
37381
37382-static struct xpc_arch_operations xpc_arch_ops_uv = {
37383+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37384 .setup_partitions = xpc_setup_partitions_uv,
37385 .teardown_partitions = xpc_teardown_partitions_uv,
37386 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37387@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37388 int
37389 xpc_init_uv(void)
37390 {
37391- xpc_arch_ops = xpc_arch_ops_uv;
37392+ pax_open_kernel();
37393+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37394+ pax_close_kernel();
37395
37396 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37397 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37398diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37399index 6fd20b42..650efe3 100644
37400--- a/drivers/mmc/host/sdhci-pci.c
37401+++ b/drivers/mmc/host/sdhci-pci.c
37402@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37403 .probe = via_probe,
37404 };
37405
37406-static const struct pci_device_id pci_ids[] __devinitdata = {
37407+static const struct pci_device_id pci_ids[] __devinitconst = {
37408 {
37409 .vendor = PCI_VENDOR_ID_RICOH,
37410 .device = PCI_DEVICE_ID_RICOH_R5C822,
37411diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37412index e7563a9..5f90ce5 100644
37413--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37414+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37415@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37416 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37417 unsigned long timeo = jiffies + HZ;
37418
37419+ pax_track_stack();
37420+
37421 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37422 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37423 goto sleep;
37424@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37425 unsigned long initial_adr;
37426 int initial_len = len;
37427
37428+ pax_track_stack();
37429+
37430 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37431 adr += chip->start;
37432 initial_adr = adr;
37433@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37434 int retries = 3;
37435 int ret;
37436
37437+ pax_track_stack();
37438+
37439 adr += chip->start;
37440
37441 retry:
37442diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37443index 0667a67..3ab97ed 100644
37444--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37445+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37446@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37447 unsigned long cmd_addr;
37448 struct cfi_private *cfi = map->fldrv_priv;
37449
37450+ pax_track_stack();
37451+
37452 adr += chip->start;
37453
37454 /* Ensure cmd read/writes are aligned. */
37455@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37456 DECLARE_WAITQUEUE(wait, current);
37457 int wbufsize, z;
37458
37459+ pax_track_stack();
37460+
37461 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37462 if (adr & (map_bankwidth(map)-1))
37463 return -EINVAL;
37464@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37465 DECLARE_WAITQUEUE(wait, current);
37466 int ret = 0;
37467
37468+ pax_track_stack();
37469+
37470 adr += chip->start;
37471
37472 /* Let's determine this according to the interleave only once */
37473@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37474 unsigned long timeo = jiffies + HZ;
37475 DECLARE_WAITQUEUE(wait, current);
37476
37477+ pax_track_stack();
37478+
37479 adr += chip->start;
37480
37481 /* Let's determine this according to the interleave only once */
37482@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37483 unsigned long timeo = jiffies + HZ;
37484 DECLARE_WAITQUEUE(wait, current);
37485
37486+ pax_track_stack();
37487+
37488 adr += chip->start;
37489
37490 /* Let's determine this according to the interleave only once */
37491diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37492index 5bf5f46..c5de373 100644
37493--- a/drivers/mtd/devices/doc2000.c
37494+++ b/drivers/mtd/devices/doc2000.c
37495@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37496
37497 /* The ECC will not be calculated correctly if less than 512 is written */
37498 /* DBB-
37499- if (len != 0x200 && eccbuf)
37500+ if (len != 0x200)
37501 printk(KERN_WARNING
37502 "ECC needs a full sector write (adr: %lx size %lx)\n",
37503 (long) to, (long) len);
37504diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37505index 0990f78..bb4e8a4 100644
37506--- a/drivers/mtd/devices/doc2001.c
37507+++ b/drivers/mtd/devices/doc2001.c
37508@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37509 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37510
37511 /* Don't allow read past end of device */
37512- if (from >= this->totlen)
37513+ if (from >= this->totlen || !len)
37514 return -EINVAL;
37515
37516 /* Don't allow a single read to cross a 512-byte block boundary */
37517diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37518index e56d6b4..f07e6cf 100644
37519--- a/drivers/mtd/ftl.c
37520+++ b/drivers/mtd/ftl.c
37521@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37522 loff_t offset;
37523 uint16_t srcunitswap = cpu_to_le16(srcunit);
37524
37525+ pax_track_stack();
37526+
37527 eun = &part->EUNInfo[srcunit];
37528 xfer = &part->XferInfo[xferunit];
37529 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37530diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37531index 8aca552..146446e 100755
37532--- a/drivers/mtd/inftlcore.c
37533+++ b/drivers/mtd/inftlcore.c
37534@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37535 struct inftl_oob oob;
37536 size_t retlen;
37537
37538+ pax_track_stack();
37539+
37540 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37541 "pending=%d)\n", inftl, thisVUC, pendingblock);
37542
37543diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37544index 32e82ae..ed50953 100644
37545--- a/drivers/mtd/inftlmount.c
37546+++ b/drivers/mtd/inftlmount.c
37547@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37548 struct INFTLPartition *ip;
37549 size_t retlen;
37550
37551+ pax_track_stack();
37552+
37553 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37554
37555 /*
37556diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37557index 79bf40f..fe5f8fd 100644
37558--- a/drivers/mtd/lpddr/qinfo_probe.c
37559+++ b/drivers/mtd/lpddr/qinfo_probe.c
37560@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37561 {
37562 map_word pfow_val[4];
37563
37564+ pax_track_stack();
37565+
37566 /* Check identification string */
37567 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37568 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37569diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37570index 726a1b8..f46b460 100644
37571--- a/drivers/mtd/mtdchar.c
37572+++ b/drivers/mtd/mtdchar.c
37573@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37574 u_long size;
37575 struct mtd_info_user info;
37576
37577+ pax_track_stack();
37578+
37579 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37580
37581 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37582diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37583index 1002e18..26d82d5 100644
37584--- a/drivers/mtd/nftlcore.c
37585+++ b/drivers/mtd/nftlcore.c
37586@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37587 int inplace = 1;
37588 size_t retlen;
37589
37590+ pax_track_stack();
37591+
37592 memset(BlockMap, 0xff, sizeof(BlockMap));
37593 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37594
37595diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37596index 8b22b18..6fada85 100644
37597--- a/drivers/mtd/nftlmount.c
37598+++ b/drivers/mtd/nftlmount.c
37599@@ -23,6 +23,7 @@
37600 #include <asm/errno.h>
37601 #include <linux/delay.h>
37602 #include <linux/slab.h>
37603+#include <linux/sched.h>
37604 #include <linux/mtd/mtd.h>
37605 #include <linux/mtd/nand.h>
37606 #include <linux/mtd/nftl.h>
37607@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37608 struct mtd_info *mtd = nftl->mbd.mtd;
37609 unsigned int i;
37610
37611+ pax_track_stack();
37612+
37613 /* Assume logical EraseSize == physical erasesize for starting the scan.
37614 We'll sort it out later if we find a MediaHeader which says otherwise */
37615 /* Actually, we won't. The new DiskOnChip driver has already scanned
37616diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37617index 14cec04..d775b87 100644
37618--- a/drivers/mtd/ubi/build.c
37619+++ b/drivers/mtd/ubi/build.c
37620@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37621 static int __init bytes_str_to_int(const char *str)
37622 {
37623 char *endp;
37624- unsigned long result;
37625+ unsigned long result, scale = 1;
37626
37627 result = simple_strtoul(str, &endp, 0);
37628 if (str == endp || result >= INT_MAX) {
37629@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37630
37631 switch (*endp) {
37632 case 'G':
37633- result *= 1024;
37634+ scale *= 1024;
37635 case 'M':
37636- result *= 1024;
37637+ scale *= 1024;
37638 case 'K':
37639- result *= 1024;
37640+ scale *= 1024;
37641 if (endp[1] == 'i' && endp[2] == 'B')
37642 endp += 2;
37643 case '\0':
37644@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37645 return -EINVAL;
37646 }
37647
37648- return result;
37649+ if ((intoverflow_t)result*scale >= INT_MAX) {
37650+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37651+ str);
37652+ return -EINVAL;
37653+ }
37654+
37655+ return result*scale;
37656 }
37657
37658 /**
37659diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37660index ab68886..ca405e8 100644
37661--- a/drivers/net/atlx/atl2.c
37662+++ b/drivers/net/atlx/atl2.c
37663@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37664 */
37665
37666 #define ATL2_PARAM(X, desc) \
37667- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37668+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37669 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37670 MODULE_PARM_DESC(X, desc);
37671 #else
37672diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37673index 4874b2b..67f8526 100644
37674--- a/drivers/net/bnx2.c
37675+++ b/drivers/net/bnx2.c
37676@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37677 int rc = 0;
37678 u32 magic, csum;
37679
37680+ pax_track_stack();
37681+
37682 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37683 goto test_nvram_done;
37684
37685diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37686index fd3eb07..8a6978d 100644
37687--- a/drivers/net/cxgb3/l2t.h
37688+++ b/drivers/net/cxgb3/l2t.h
37689@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37690 */
37691 struct l2t_skb_cb {
37692 arp_failure_handler_func arp_failure_handler;
37693-};
37694+} __no_const;
37695
37696 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37697
37698diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37699index 032cfe0..411af379 100644
37700--- a/drivers/net/cxgb3/t3_hw.c
37701+++ b/drivers/net/cxgb3/t3_hw.c
37702@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37703 int i, addr, ret;
37704 struct t3_vpd vpd;
37705
37706+ pax_track_stack();
37707+
37708 /*
37709 * Card information is normally at VPD_BASE but some early cards had
37710 * it at 0.
37711diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37712index d1e0563..b9e129c 100644
37713--- a/drivers/net/e1000e/82571.c
37714+++ b/drivers/net/e1000e/82571.c
37715@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37716 {
37717 struct e1000_hw *hw = &adapter->hw;
37718 struct e1000_mac_info *mac = &hw->mac;
37719- struct e1000_mac_operations *func = &mac->ops;
37720+ e1000_mac_operations_no_const *func = &mac->ops;
37721 u32 swsm = 0;
37722 u32 swsm2 = 0;
37723 bool force_clear_smbi = false;
37724@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37725 temp = er32(ICRXDMTC);
37726 }
37727
37728-static struct e1000_mac_operations e82571_mac_ops = {
37729+static const struct e1000_mac_operations e82571_mac_ops = {
37730 /* .check_mng_mode: mac type dependent */
37731 /* .check_for_link: media type dependent */
37732 .id_led_init = e1000e_id_led_init,
37733@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37734 .setup_led = e1000e_setup_led_generic,
37735 };
37736
37737-static struct e1000_phy_operations e82_phy_ops_igp = {
37738+static const struct e1000_phy_operations e82_phy_ops_igp = {
37739 .acquire_phy = e1000_get_hw_semaphore_82571,
37740 .check_reset_block = e1000e_check_reset_block_generic,
37741 .commit_phy = NULL,
37742@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37743 .cfg_on_link_up = NULL,
37744 };
37745
37746-static struct e1000_phy_operations e82_phy_ops_m88 = {
37747+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37748 .acquire_phy = e1000_get_hw_semaphore_82571,
37749 .check_reset_block = e1000e_check_reset_block_generic,
37750 .commit_phy = e1000e_phy_sw_reset,
37751@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37752 .cfg_on_link_up = NULL,
37753 };
37754
37755-static struct e1000_phy_operations e82_phy_ops_bm = {
37756+static const struct e1000_phy_operations e82_phy_ops_bm = {
37757 .acquire_phy = e1000_get_hw_semaphore_82571,
37758 .check_reset_block = e1000e_check_reset_block_generic,
37759 .commit_phy = e1000e_phy_sw_reset,
37760@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37761 .cfg_on_link_up = NULL,
37762 };
37763
37764-static struct e1000_nvm_operations e82571_nvm_ops = {
37765+static const struct e1000_nvm_operations e82571_nvm_ops = {
37766 .acquire_nvm = e1000_acquire_nvm_82571,
37767 .read_nvm = e1000e_read_nvm_eerd,
37768 .release_nvm = e1000_release_nvm_82571,
37769diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37770index 47db9bd..fa58ccd 100644
37771--- a/drivers/net/e1000e/e1000.h
37772+++ b/drivers/net/e1000e/e1000.h
37773@@ -375,9 +375,9 @@ struct e1000_info {
37774 u32 pba;
37775 u32 max_hw_frame_size;
37776 s32 (*get_variants)(struct e1000_adapter *);
37777- struct e1000_mac_operations *mac_ops;
37778- struct e1000_phy_operations *phy_ops;
37779- struct e1000_nvm_operations *nvm_ops;
37780+ const struct e1000_mac_operations *mac_ops;
37781+ const struct e1000_phy_operations *phy_ops;
37782+ const struct e1000_nvm_operations *nvm_ops;
37783 };
37784
37785 /* hardware capability, feature, and workaround flags */
37786diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37787index ae5d736..e9a93a1 100644
37788--- a/drivers/net/e1000e/es2lan.c
37789+++ b/drivers/net/e1000e/es2lan.c
37790@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37791 {
37792 struct e1000_hw *hw = &adapter->hw;
37793 struct e1000_mac_info *mac = &hw->mac;
37794- struct e1000_mac_operations *func = &mac->ops;
37795+ e1000_mac_operations_no_const *func = &mac->ops;
37796
37797 /* Set media type */
37798 switch (adapter->pdev->device) {
37799@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37800 temp = er32(ICRXDMTC);
37801 }
37802
37803-static struct e1000_mac_operations es2_mac_ops = {
37804+static const struct e1000_mac_operations es2_mac_ops = {
37805 .id_led_init = e1000e_id_led_init,
37806 .check_mng_mode = e1000e_check_mng_mode_generic,
37807 /* check_for_link dependent on media type */
37808@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37809 .setup_led = e1000e_setup_led_generic,
37810 };
37811
37812-static struct e1000_phy_operations es2_phy_ops = {
37813+static const struct e1000_phy_operations es2_phy_ops = {
37814 .acquire_phy = e1000_acquire_phy_80003es2lan,
37815 .check_reset_block = e1000e_check_reset_block_generic,
37816 .commit_phy = e1000e_phy_sw_reset,
37817@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37818 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37819 };
37820
37821-static struct e1000_nvm_operations es2_nvm_ops = {
37822+static const struct e1000_nvm_operations es2_nvm_ops = {
37823 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37824 .read_nvm = e1000e_read_nvm_eerd,
37825 .release_nvm = e1000_release_nvm_80003es2lan,
37826diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37827index 11f3b7c..6381887 100644
37828--- a/drivers/net/e1000e/hw.h
37829+++ b/drivers/net/e1000e/hw.h
37830@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37831 s32 (*setup_physical_interface)(struct e1000_hw *);
37832 s32 (*setup_led)(struct e1000_hw *);
37833 };
37834+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37835
37836 /* Function pointers for the PHY. */
37837 struct e1000_phy_operations {
37838@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37839 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37840 s32 (*cfg_on_link_up)(struct e1000_hw *);
37841 };
37842+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37843
37844 /* Function pointers for the NVM. */
37845 struct e1000_nvm_operations {
37846@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37847 s32 (*validate_nvm)(struct e1000_hw *);
37848 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37849 };
37850+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37851
37852 struct e1000_mac_info {
37853- struct e1000_mac_operations ops;
37854+ e1000_mac_operations_no_const ops;
37855
37856 u8 addr[6];
37857 u8 perm_addr[6];
37858@@ -823,7 +826,7 @@ struct e1000_mac_info {
37859 };
37860
37861 struct e1000_phy_info {
37862- struct e1000_phy_operations ops;
37863+ e1000_phy_operations_no_const ops;
37864
37865 enum e1000_phy_type type;
37866
37867@@ -857,7 +860,7 @@ struct e1000_phy_info {
37868 };
37869
37870 struct e1000_nvm_info {
37871- struct e1000_nvm_operations ops;
37872+ e1000_nvm_operations_no_const ops;
37873
37874 enum e1000_nvm_type type;
37875 enum e1000_nvm_override override;
37876diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
37877index de39f9a..e28d3e0 100644
37878--- a/drivers/net/e1000e/ich8lan.c
37879+++ b/drivers/net/e1000e/ich8lan.c
37880@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
37881 }
37882 }
37883
37884-static struct e1000_mac_operations ich8_mac_ops = {
37885+static const struct e1000_mac_operations ich8_mac_ops = {
37886 .id_led_init = e1000e_id_led_init,
37887 .check_mng_mode = e1000_check_mng_mode_ich8lan,
37888 .check_for_link = e1000_check_for_copper_link_ich8lan,
37889@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
37890 /* id_led_init dependent on mac type */
37891 };
37892
37893-static struct e1000_phy_operations ich8_phy_ops = {
37894+static const struct e1000_phy_operations ich8_phy_ops = {
37895 .acquire_phy = e1000_acquire_swflag_ich8lan,
37896 .check_reset_block = e1000_check_reset_block_ich8lan,
37897 .commit_phy = NULL,
37898@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
37899 .write_phy_reg = e1000e_write_phy_reg_igp,
37900 };
37901
37902-static struct e1000_nvm_operations ich8_nvm_ops = {
37903+static const struct e1000_nvm_operations ich8_nvm_ops = {
37904 .acquire_nvm = e1000_acquire_nvm_ich8lan,
37905 .read_nvm = e1000_read_nvm_ich8lan,
37906 .release_nvm = e1000_release_nvm_ich8lan,
37907diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
37908index 18d5fbb..542d96d 100644
37909--- a/drivers/net/fealnx.c
37910+++ b/drivers/net/fealnx.c
37911@@ -151,7 +151,7 @@ struct chip_info {
37912 int flags;
37913 };
37914
37915-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
37916+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
37917 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37918 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
37919 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37920diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
37921index 0e5b54b..b503f82 100644
37922--- a/drivers/net/hamradio/6pack.c
37923+++ b/drivers/net/hamradio/6pack.c
37924@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
37925 unsigned char buf[512];
37926 int count1;
37927
37928+ pax_track_stack();
37929+
37930 if (!count)
37931 return;
37932
37933diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
37934index 5862282..7cce8cb 100644
37935--- a/drivers/net/ibmveth.c
37936+++ b/drivers/net/ibmveth.c
37937@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
37938 NULL,
37939 };
37940
37941-static struct sysfs_ops veth_pool_ops = {
37942+static const struct sysfs_ops veth_pool_ops = {
37943 .show = veth_pool_show,
37944 .store = veth_pool_store,
37945 };
37946diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
37947index d617f2d..57b5309 100644
37948--- a/drivers/net/igb/e1000_82575.c
37949+++ b/drivers/net/igb/e1000_82575.c
37950@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
37951 wr32(E1000_VT_CTL, vt_ctl);
37952 }
37953
37954-static struct e1000_mac_operations e1000_mac_ops_82575 = {
37955+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
37956 .reset_hw = igb_reset_hw_82575,
37957 .init_hw = igb_init_hw_82575,
37958 .check_for_link = igb_check_for_link_82575,
37959@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
37960 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
37961 };
37962
37963-static struct e1000_phy_operations e1000_phy_ops_82575 = {
37964+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
37965 .acquire = igb_acquire_phy_82575,
37966 .get_cfg_done = igb_get_cfg_done_82575,
37967 .release = igb_release_phy_82575,
37968 };
37969
37970-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37971+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37972 .acquire = igb_acquire_nvm_82575,
37973 .read = igb_read_nvm_eerd,
37974 .release = igb_release_nvm_82575,
37975diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
37976index 72081df..d855cf5 100644
37977--- a/drivers/net/igb/e1000_hw.h
37978+++ b/drivers/net/igb/e1000_hw.h
37979@@ -288,6 +288,7 @@ struct e1000_mac_operations {
37980 s32 (*read_mac_addr)(struct e1000_hw *);
37981 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
37982 };
37983+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37984
37985 struct e1000_phy_operations {
37986 s32 (*acquire)(struct e1000_hw *);
37987@@ -303,6 +304,7 @@ struct e1000_phy_operations {
37988 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
37989 s32 (*write_reg)(struct e1000_hw *, u32, u16);
37990 };
37991+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37992
37993 struct e1000_nvm_operations {
37994 s32 (*acquire)(struct e1000_hw *);
37995@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
37996 void (*release)(struct e1000_hw *);
37997 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
37998 };
37999+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38000
38001 struct e1000_info {
38002 s32 (*get_invariants)(struct e1000_hw *);
38003@@ -321,7 +324,7 @@ struct e1000_info {
38004 extern const struct e1000_info e1000_82575_info;
38005
38006 struct e1000_mac_info {
38007- struct e1000_mac_operations ops;
38008+ e1000_mac_operations_no_const ops;
38009
38010 u8 addr[6];
38011 u8 perm_addr[6];
38012@@ -365,7 +368,7 @@ struct e1000_mac_info {
38013 };
38014
38015 struct e1000_phy_info {
38016- struct e1000_phy_operations ops;
38017+ e1000_phy_operations_no_const ops;
38018
38019 enum e1000_phy_type type;
38020
38021@@ -400,7 +403,7 @@ struct e1000_phy_info {
38022 };
38023
38024 struct e1000_nvm_info {
38025- struct e1000_nvm_operations ops;
38026+ e1000_nvm_operations_no_const ops;
38027
38028 enum e1000_nvm_type type;
38029 enum e1000_nvm_override override;
38030@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38031 s32 (*check_for_ack)(struct e1000_hw *, u16);
38032 s32 (*check_for_rst)(struct e1000_hw *, u16);
38033 };
38034+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38035
38036 struct e1000_mbx_stats {
38037 u32 msgs_tx;
38038@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38039 };
38040
38041 struct e1000_mbx_info {
38042- struct e1000_mbx_operations ops;
38043+ e1000_mbx_operations_no_const ops;
38044 struct e1000_mbx_stats stats;
38045 u32 timeout;
38046 u32 usec_delay;
38047diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38048index 1e8ce37..549c453 100644
38049--- a/drivers/net/igbvf/vf.h
38050+++ b/drivers/net/igbvf/vf.h
38051@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38052 s32 (*read_mac_addr)(struct e1000_hw *);
38053 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38054 };
38055+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38056
38057 struct e1000_mac_info {
38058- struct e1000_mac_operations ops;
38059+ e1000_mac_operations_no_const ops;
38060 u8 addr[6];
38061 u8 perm_addr[6];
38062
38063@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38064 s32 (*check_for_ack)(struct e1000_hw *);
38065 s32 (*check_for_rst)(struct e1000_hw *);
38066 };
38067+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38068
38069 struct e1000_mbx_stats {
38070 u32 msgs_tx;
38071@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38072 };
38073
38074 struct e1000_mbx_info {
38075- struct e1000_mbx_operations ops;
38076+ e1000_mbx_operations_no_const ops;
38077 struct e1000_mbx_stats stats;
38078 u32 timeout;
38079 u32 usec_delay;
38080diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38081index aa7286b..a61394f 100644
38082--- a/drivers/net/iseries_veth.c
38083+++ b/drivers/net/iseries_veth.c
38084@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38085 NULL
38086 };
38087
38088-static struct sysfs_ops veth_cnx_sysfs_ops = {
38089+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38090 .show = veth_cnx_attribute_show
38091 };
38092
38093@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38094 NULL
38095 };
38096
38097-static struct sysfs_ops veth_port_sysfs_ops = {
38098+static const struct sysfs_ops veth_port_sysfs_ops = {
38099 .show = veth_port_attribute_show
38100 };
38101
38102diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38103index 8aa44dc..fa1e797 100644
38104--- a/drivers/net/ixgb/ixgb_main.c
38105+++ b/drivers/net/ixgb/ixgb_main.c
38106@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38107 u32 rctl;
38108 int i;
38109
38110+ pax_track_stack();
38111+
38112 /* Check for Promiscuous and All Multicast modes */
38113
38114 rctl = IXGB_READ_REG(hw, RCTL);
38115diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38116index af35e1d..8781785 100644
38117--- a/drivers/net/ixgb/ixgb_param.c
38118+++ b/drivers/net/ixgb/ixgb_param.c
38119@@ -260,6 +260,9 @@ void __devinit
38120 ixgb_check_options(struct ixgb_adapter *adapter)
38121 {
38122 int bd = adapter->bd_number;
38123+
38124+ pax_track_stack();
38125+
38126 if (bd >= IXGB_MAX_NIC) {
38127 printk(KERN_NOTICE
38128 "Warning: no configuration for board #%i\n", bd);
38129diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38130index b17aa73..ed74540 100644
38131--- a/drivers/net/ixgbe/ixgbe_type.h
38132+++ b/drivers/net/ixgbe/ixgbe_type.h
38133@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38134 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38135 s32 (*update_checksum)(struct ixgbe_hw *);
38136 };
38137+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38138
38139 struct ixgbe_mac_operations {
38140 s32 (*init_hw)(struct ixgbe_hw *);
38141@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38142 /* Flow Control */
38143 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38144 };
38145+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38146
38147 struct ixgbe_phy_operations {
38148 s32 (*identify)(struct ixgbe_hw *);
38149@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38150 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38151 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38152 };
38153+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38154
38155 struct ixgbe_eeprom_info {
38156- struct ixgbe_eeprom_operations ops;
38157+ ixgbe_eeprom_operations_no_const ops;
38158 enum ixgbe_eeprom_type type;
38159 u32 semaphore_delay;
38160 u16 word_size;
38161@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38162 };
38163
38164 struct ixgbe_mac_info {
38165- struct ixgbe_mac_operations ops;
38166+ ixgbe_mac_operations_no_const ops;
38167 enum ixgbe_mac_type type;
38168 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38169 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38170@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38171 };
38172
38173 struct ixgbe_phy_info {
38174- struct ixgbe_phy_operations ops;
38175+ ixgbe_phy_operations_no_const ops;
38176 struct mdio_if_info mdio;
38177 enum ixgbe_phy_type type;
38178 u32 id;
38179diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38180index 291a505..2543756 100644
38181--- a/drivers/net/mlx4/main.c
38182+++ b/drivers/net/mlx4/main.c
38183@@ -38,6 +38,7 @@
38184 #include <linux/errno.h>
38185 #include <linux/pci.h>
38186 #include <linux/dma-mapping.h>
38187+#include <linux/sched.h>
38188
38189 #include <linux/mlx4/device.h>
38190 #include <linux/mlx4/doorbell.h>
38191@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38192 u64 icm_size;
38193 int err;
38194
38195+ pax_track_stack();
38196+
38197 err = mlx4_QUERY_FW(dev);
38198 if (err) {
38199 if (err == -EACCES)
38200diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38201index 2dce134..fa5ce75 100644
38202--- a/drivers/net/niu.c
38203+++ b/drivers/net/niu.c
38204@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38205 int i, num_irqs, err;
38206 u8 first_ldg;
38207
38208+ pax_track_stack();
38209+
38210 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38211 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38212 ldg_num_map[i] = first_ldg + i;
38213diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38214index c1b3f09..97cd8c4 100644
38215--- a/drivers/net/pcnet32.c
38216+++ b/drivers/net/pcnet32.c
38217@@ -79,7 +79,7 @@ static int cards_found;
38218 /*
38219 * VLB I/O addresses
38220 */
38221-static unsigned int pcnet32_portlist[] __initdata =
38222+static unsigned int pcnet32_portlist[] __devinitdata =
38223 { 0x300, 0x320, 0x340, 0x360, 0 };
38224
38225 static int pcnet32_debug = 0;
38226@@ -267,7 +267,7 @@ struct pcnet32_private {
38227 struct sk_buff **rx_skbuff;
38228 dma_addr_t *tx_dma_addr;
38229 dma_addr_t *rx_dma_addr;
38230- struct pcnet32_access a;
38231+ struct pcnet32_access *a;
38232 spinlock_t lock; /* Guard lock */
38233 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38234 unsigned int rx_ring_size; /* current rx ring size */
38235@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38236 u16 val;
38237
38238 netif_wake_queue(dev);
38239- val = lp->a.read_csr(ioaddr, CSR3);
38240+ val = lp->a->read_csr(ioaddr, CSR3);
38241 val &= 0x00ff;
38242- lp->a.write_csr(ioaddr, CSR3, val);
38243+ lp->a->write_csr(ioaddr, CSR3, val);
38244 napi_enable(&lp->napi);
38245 }
38246
38247@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38248 r = mii_link_ok(&lp->mii_if);
38249 } else if (lp->chip_version >= PCNET32_79C970A) {
38250 ulong ioaddr = dev->base_addr; /* card base I/O address */
38251- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38252+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38253 } else { /* can not detect link on really old chips */
38254 r = 1;
38255 }
38256@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38257 pcnet32_netif_stop(dev);
38258
38259 spin_lock_irqsave(&lp->lock, flags);
38260- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38261+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38262
38263 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38264
38265@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38266 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38267 {
38268 struct pcnet32_private *lp = netdev_priv(dev);
38269- struct pcnet32_access *a = &lp->a; /* access to registers */
38270+ struct pcnet32_access *a = lp->a; /* access to registers */
38271 ulong ioaddr = dev->base_addr; /* card base I/O address */
38272 struct sk_buff *skb; /* sk buff */
38273 int x, i; /* counters */
38274@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38275 pcnet32_netif_stop(dev);
38276
38277 spin_lock_irqsave(&lp->lock, flags);
38278- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38279+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38280
38281 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38282
38283 /* Reset the PCNET32 */
38284- lp->a.reset(ioaddr);
38285- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38286+ lp->a->reset(ioaddr);
38287+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38288
38289 /* switch pcnet32 to 32bit mode */
38290- lp->a.write_bcr(ioaddr, 20, 2);
38291+ lp->a->write_bcr(ioaddr, 20, 2);
38292
38293 /* purge & init rings but don't actually restart */
38294 pcnet32_restart(dev, 0x0000);
38295
38296- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38297+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38298
38299 /* Initialize Transmit buffers. */
38300 size = data_len + 15;
38301@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38302
38303 /* set int loopback in CSR15 */
38304 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38305- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38306+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38307
38308 teststatus = cpu_to_le16(0x8000);
38309- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38310+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38311
38312 /* Check status of descriptors */
38313 for (x = 0; x < numbuffs; x++) {
38314@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38315 }
38316 }
38317
38318- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38319+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38320 wmb();
38321 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38322 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38323@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38324 pcnet32_restart(dev, CSR0_NORMAL);
38325 } else {
38326 pcnet32_purge_rx_ring(dev);
38327- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38328+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38329 }
38330 spin_unlock_irqrestore(&lp->lock, flags);
38331
38332@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38333 static void pcnet32_led_blink_callback(struct net_device *dev)
38334 {
38335 struct pcnet32_private *lp = netdev_priv(dev);
38336- struct pcnet32_access *a = &lp->a;
38337+ struct pcnet32_access *a = lp->a;
38338 ulong ioaddr = dev->base_addr;
38339 unsigned long flags;
38340 int i;
38341@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38342 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38343 {
38344 struct pcnet32_private *lp = netdev_priv(dev);
38345- struct pcnet32_access *a = &lp->a;
38346+ struct pcnet32_access *a = lp->a;
38347 ulong ioaddr = dev->base_addr;
38348 unsigned long flags;
38349 int i, regs[4];
38350@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38351 {
38352 int csr5;
38353 struct pcnet32_private *lp = netdev_priv(dev);
38354- struct pcnet32_access *a = &lp->a;
38355+ struct pcnet32_access *a = lp->a;
38356 ulong ioaddr = dev->base_addr;
38357 int ticks;
38358
38359@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38360 spin_lock_irqsave(&lp->lock, flags);
38361 if (pcnet32_tx(dev)) {
38362 /* reset the chip to clear the error condition, then restart */
38363- lp->a.reset(ioaddr);
38364- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38365+ lp->a->reset(ioaddr);
38366+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38367 pcnet32_restart(dev, CSR0_START);
38368 netif_wake_queue(dev);
38369 }
38370@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38371 __napi_complete(napi);
38372
38373 /* clear interrupt masks */
38374- val = lp->a.read_csr(ioaddr, CSR3);
38375+ val = lp->a->read_csr(ioaddr, CSR3);
38376 val &= 0x00ff;
38377- lp->a.write_csr(ioaddr, CSR3, val);
38378+ lp->a->write_csr(ioaddr, CSR3, val);
38379
38380 /* Set interrupt enable. */
38381- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38382+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38383
38384 spin_unlock_irqrestore(&lp->lock, flags);
38385 }
38386@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38387 int i, csr0;
38388 u16 *buff = ptr;
38389 struct pcnet32_private *lp = netdev_priv(dev);
38390- struct pcnet32_access *a = &lp->a;
38391+ struct pcnet32_access *a = lp->a;
38392 ulong ioaddr = dev->base_addr;
38393 unsigned long flags;
38394
38395@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38396 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38397 if (lp->phymask & (1 << j)) {
38398 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38399- lp->a.write_bcr(ioaddr, 33,
38400+ lp->a->write_bcr(ioaddr, 33,
38401 (j << 5) | i);
38402- *buff++ = lp->a.read_bcr(ioaddr, 34);
38403+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38404 }
38405 }
38406 }
38407@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38408 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38409 lp->options |= PCNET32_PORT_FD;
38410
38411- lp->a = *a;
38412+ lp->a = a;
38413
38414 /* prior to register_netdev, dev->name is not yet correct */
38415 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38416@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38417 if (lp->mii) {
38418 /* lp->phycount and lp->phymask are set to 0 by memset above */
38419
38420- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38421+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38422 /* scan for PHYs */
38423 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38424 unsigned short id1, id2;
38425@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38426 "Found PHY %04x:%04x at address %d.\n",
38427 id1, id2, i);
38428 }
38429- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38430+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38431 if (lp->phycount > 1) {
38432 lp->options |= PCNET32_PORT_MII;
38433 }
38434@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38435 }
38436
38437 /* Reset the PCNET32 */
38438- lp->a.reset(ioaddr);
38439+ lp->a->reset(ioaddr);
38440
38441 /* switch pcnet32 to 32bit mode */
38442- lp->a.write_bcr(ioaddr, 20, 2);
38443+ lp->a->write_bcr(ioaddr, 20, 2);
38444
38445 if (netif_msg_ifup(lp))
38446 printk(KERN_DEBUG
38447@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38448 (u32) (lp->init_dma_addr));
38449
38450 /* set/reset autoselect bit */
38451- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38452+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38453 if (lp->options & PCNET32_PORT_ASEL)
38454 val |= 2;
38455- lp->a.write_bcr(ioaddr, 2, val);
38456+ lp->a->write_bcr(ioaddr, 2, val);
38457
38458 /* handle full duplex setting */
38459 if (lp->mii_if.full_duplex) {
38460- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38461+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38462 if (lp->options & PCNET32_PORT_FD) {
38463 val |= 1;
38464 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38465@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38466 if (lp->chip_version == 0x2627)
38467 val |= 3;
38468 }
38469- lp->a.write_bcr(ioaddr, 9, val);
38470+ lp->a->write_bcr(ioaddr, 9, val);
38471 }
38472
38473 /* set/reset GPSI bit in test register */
38474- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38475+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38476 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38477 val |= 0x10;
38478- lp->a.write_csr(ioaddr, 124, val);
38479+ lp->a->write_csr(ioaddr, 124, val);
38480
38481 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38482 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38483@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38484 * duplex, and/or enable auto negotiation, and clear DANAS
38485 */
38486 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38487- lp->a.write_bcr(ioaddr, 32,
38488- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38489+ lp->a->write_bcr(ioaddr, 32,
38490+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38491 /* disable Auto Negotiation, set 10Mpbs, HD */
38492- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38493+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38494 if (lp->options & PCNET32_PORT_FD)
38495 val |= 0x10;
38496 if (lp->options & PCNET32_PORT_100)
38497 val |= 0x08;
38498- lp->a.write_bcr(ioaddr, 32, val);
38499+ lp->a->write_bcr(ioaddr, 32, val);
38500 } else {
38501 if (lp->options & PCNET32_PORT_ASEL) {
38502- lp->a.write_bcr(ioaddr, 32,
38503- lp->a.read_bcr(ioaddr,
38504+ lp->a->write_bcr(ioaddr, 32,
38505+ lp->a->read_bcr(ioaddr,
38506 32) | 0x0080);
38507 /* enable auto negotiate, setup, disable fd */
38508- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38509+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38510 val |= 0x20;
38511- lp->a.write_bcr(ioaddr, 32, val);
38512+ lp->a->write_bcr(ioaddr, 32, val);
38513 }
38514 }
38515 } else {
38516@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38517 * There is really no good other way to handle multiple PHYs
38518 * other than turning off all automatics
38519 */
38520- val = lp->a.read_bcr(ioaddr, 2);
38521- lp->a.write_bcr(ioaddr, 2, val & ~2);
38522- val = lp->a.read_bcr(ioaddr, 32);
38523- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38524+ val = lp->a->read_bcr(ioaddr, 2);
38525+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38526+ val = lp->a->read_bcr(ioaddr, 32);
38527+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38528
38529 if (!(lp->options & PCNET32_PORT_ASEL)) {
38530 /* setup ecmd */
38531@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38532 ecmd.speed =
38533 lp->
38534 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38535- bcr9 = lp->a.read_bcr(ioaddr, 9);
38536+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38537
38538 if (lp->options & PCNET32_PORT_FD) {
38539 ecmd.duplex = DUPLEX_FULL;
38540@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38541 ecmd.duplex = DUPLEX_HALF;
38542 bcr9 |= ~(1 << 0);
38543 }
38544- lp->a.write_bcr(ioaddr, 9, bcr9);
38545+ lp->a->write_bcr(ioaddr, 9, bcr9);
38546 }
38547
38548 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38549@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38550
38551 #ifdef DO_DXSUFLO
38552 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38553- val = lp->a.read_csr(ioaddr, CSR3);
38554+ val = lp->a->read_csr(ioaddr, CSR3);
38555 val |= 0x40;
38556- lp->a.write_csr(ioaddr, CSR3, val);
38557+ lp->a->write_csr(ioaddr, CSR3, val);
38558 }
38559 #endif
38560
38561@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38562 napi_enable(&lp->napi);
38563
38564 /* Re-initialize the PCNET32, and start it when done. */
38565- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38566- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38567+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38568+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38569
38570- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38571- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38572+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38573+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38574
38575 netif_start_queue(dev);
38576
38577@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38578
38579 i = 0;
38580 while (i++ < 100)
38581- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38582+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38583 break;
38584 /*
38585 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38586 * reports that doing so triggers a bug in the '974.
38587 */
38588- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38589+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38590
38591 if (netif_msg_ifup(lp))
38592 printk(KERN_DEBUG
38593 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38594 dev->name, i,
38595 (u32) (lp->init_dma_addr),
38596- lp->a.read_csr(ioaddr, CSR0));
38597+ lp->a->read_csr(ioaddr, CSR0));
38598
38599 spin_unlock_irqrestore(&lp->lock, flags);
38600
38601@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38602 * Switch back to 16bit mode to avoid problems with dumb
38603 * DOS packet driver after a warm reboot
38604 */
38605- lp->a.write_bcr(ioaddr, 20, 4);
38606+ lp->a->write_bcr(ioaddr, 20, 4);
38607
38608 err_free_irq:
38609 spin_unlock_irqrestore(&lp->lock, flags);
38610@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38611
38612 /* wait for stop */
38613 for (i = 0; i < 100; i++)
38614- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38615+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38616 break;
38617
38618 if (i >= 100 && netif_msg_drv(lp))
38619@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38620 return;
38621
38622 /* ReInit Ring */
38623- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38624+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38625 i = 0;
38626 while (i++ < 1000)
38627- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38628+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38629 break;
38630
38631- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38632+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38633 }
38634
38635 static void pcnet32_tx_timeout(struct net_device *dev)
38636@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38637 if (pcnet32_debug & NETIF_MSG_DRV)
38638 printk(KERN_ERR
38639 "%s: transmit timed out, status %4.4x, resetting.\n",
38640- dev->name, lp->a.read_csr(ioaddr, CSR0));
38641- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38642+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38643+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38644 dev->stats.tx_errors++;
38645 if (netif_msg_tx_err(lp)) {
38646 int i;
38647@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38648 if (netif_msg_tx_queued(lp)) {
38649 printk(KERN_DEBUG
38650 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38651- dev->name, lp->a.read_csr(ioaddr, CSR0));
38652+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38653 }
38654
38655 /* Default status -- will not enable Successful-TxDone
38656@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38657 dev->stats.tx_bytes += skb->len;
38658
38659 /* Trigger an immediate send poll. */
38660- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38661+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38662
38663 dev->trans_start = jiffies;
38664
38665@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38666
38667 spin_lock(&lp->lock);
38668
38669- csr0 = lp->a.read_csr(ioaddr, CSR0);
38670+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38671 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38672 if (csr0 == 0xffff) {
38673 break; /* PCMCIA remove happened */
38674 }
38675 /* Acknowledge all of the current interrupt sources ASAP. */
38676- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38677+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38678
38679 if (netif_msg_intr(lp))
38680 printk(KERN_DEBUG
38681 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38682- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38683+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38684
38685 /* Log misc errors. */
38686 if (csr0 & 0x4000)
38687@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38688 if (napi_schedule_prep(&lp->napi)) {
38689 u16 val;
38690 /* set interrupt masks */
38691- val = lp->a.read_csr(ioaddr, CSR3);
38692+ val = lp->a->read_csr(ioaddr, CSR3);
38693 val |= 0x5f00;
38694- lp->a.write_csr(ioaddr, CSR3, val);
38695+ lp->a->write_csr(ioaddr, CSR3, val);
38696
38697 __napi_schedule(&lp->napi);
38698 break;
38699 }
38700- csr0 = lp->a.read_csr(ioaddr, CSR0);
38701+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38702 }
38703
38704 if (netif_msg_intr(lp))
38705 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38706- dev->name, lp->a.read_csr(ioaddr, CSR0));
38707+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38708
38709 spin_unlock(&lp->lock);
38710
38711@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38712
38713 spin_lock_irqsave(&lp->lock, flags);
38714
38715- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38716+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38717
38718 if (netif_msg_ifdown(lp))
38719 printk(KERN_DEBUG
38720 "%s: Shutting down ethercard, status was %2.2x.\n",
38721- dev->name, lp->a.read_csr(ioaddr, CSR0));
38722+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38723
38724 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38725- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38726+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38727
38728 /*
38729 * Switch back to 16bit mode to avoid problems with dumb
38730 * DOS packet driver after a warm reboot
38731 */
38732- lp->a.write_bcr(ioaddr, 20, 4);
38733+ lp->a->write_bcr(ioaddr, 20, 4);
38734
38735 spin_unlock_irqrestore(&lp->lock, flags);
38736
38737@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38738 unsigned long flags;
38739
38740 spin_lock_irqsave(&lp->lock, flags);
38741- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38742+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38743 spin_unlock_irqrestore(&lp->lock, flags);
38744
38745 return &dev->stats;
38746@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38747 if (dev->flags & IFF_ALLMULTI) {
38748 ib->filter[0] = cpu_to_le32(~0U);
38749 ib->filter[1] = cpu_to_le32(~0U);
38750- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38751- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38752- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38753- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38754+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38755+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38756+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38757+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38758 return;
38759 }
38760 /* clear the multicast filter */
38761@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38762 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38763 }
38764 for (i = 0; i < 4; i++)
38765- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38766+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38767 le16_to_cpu(mcast_table[i]));
38768 return;
38769 }
38770@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38771
38772 spin_lock_irqsave(&lp->lock, flags);
38773 suspended = pcnet32_suspend(dev, &flags, 0);
38774- csr15 = lp->a.read_csr(ioaddr, CSR15);
38775+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38776 if (dev->flags & IFF_PROMISC) {
38777 /* Log any net taps. */
38778 if (netif_msg_hw(lp))
38779@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38780 lp->init_block->mode =
38781 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38782 7);
38783- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38784+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38785 } else {
38786 lp->init_block->mode =
38787 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38788- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38789+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38790 pcnet32_load_multicast(dev);
38791 }
38792
38793 if (suspended) {
38794 int csr5;
38795 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38796- csr5 = lp->a.read_csr(ioaddr, CSR5);
38797- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38798+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38799+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38800 } else {
38801- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38802+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38803 pcnet32_restart(dev, CSR0_NORMAL);
38804 netif_wake_queue(dev);
38805 }
38806@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38807 if (!lp->mii)
38808 return 0;
38809
38810- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38811- val_out = lp->a.read_bcr(ioaddr, 34);
38812+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38813+ val_out = lp->a->read_bcr(ioaddr, 34);
38814
38815 return val_out;
38816 }
38817@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38818 if (!lp->mii)
38819 return;
38820
38821- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38822- lp->a.write_bcr(ioaddr, 34, val);
38823+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38824+ lp->a->write_bcr(ioaddr, 34, val);
38825 }
38826
38827 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38828@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38829 curr_link = mii_link_ok(&lp->mii_if);
38830 } else {
38831 ulong ioaddr = dev->base_addr; /* card base I/O address */
38832- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38833+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38834 }
38835 if (!curr_link) {
38836 if (prev_link || verbose) {
38837@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38838 (ecmd.duplex ==
38839 DUPLEX_FULL) ? "full" : "half");
38840 }
38841- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38842+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38843 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38844 if (lp->mii_if.full_duplex)
38845 bcr9 |= (1 << 0);
38846 else
38847 bcr9 &= ~(1 << 0);
38848- lp->a.write_bcr(dev->base_addr, 9, bcr9);
38849+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
38850 }
38851 } else {
38852 if (netif_msg_link(lp))
38853diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38854index 7cc9898..6eb50d3 100644
38855--- a/drivers/net/sis190.c
38856+++ b/drivers/net/sis190.c
38857@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38858 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38859 struct net_device *dev)
38860 {
38861- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38862+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38863 struct sis190_private *tp = netdev_priv(dev);
38864 struct pci_dev *isa_bridge;
38865 u8 reg, tmp8;
38866diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
38867index e13685a..60c948c 100644
38868--- a/drivers/net/sundance.c
38869+++ b/drivers/net/sundance.c
38870@@ -225,7 +225,7 @@ enum {
38871 struct pci_id_info {
38872 const char *name;
38873 };
38874-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38875+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38876 {"D-Link DFE-550TX FAST Ethernet Adapter"},
38877 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
38878 {"D-Link DFE-580TX 4 port Server Adapter"},
38879diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
38880index 529f55a..cccaa18 100644
38881--- a/drivers/net/tg3.h
38882+++ b/drivers/net/tg3.h
38883@@ -95,6 +95,7 @@
38884 #define CHIPREV_ID_5750_A0 0x4000
38885 #define CHIPREV_ID_5750_A1 0x4001
38886 #define CHIPREV_ID_5750_A3 0x4003
38887+#define CHIPREV_ID_5750_C1 0x4201
38888 #define CHIPREV_ID_5750_C2 0x4202
38889 #define CHIPREV_ID_5752_A0_HW 0x5000
38890 #define CHIPREV_ID_5752_A0 0x6000
38891diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
38892index b9db1b5..720f9ce 100644
38893--- a/drivers/net/tokenring/abyss.c
38894+++ b/drivers/net/tokenring/abyss.c
38895@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
38896
38897 static int __init abyss_init (void)
38898 {
38899- abyss_netdev_ops = tms380tr_netdev_ops;
38900+ pax_open_kernel();
38901+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38902
38903- abyss_netdev_ops.ndo_open = abyss_open;
38904- abyss_netdev_ops.ndo_stop = abyss_close;
38905+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
38906+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
38907+ pax_close_kernel();
38908
38909 return pci_register_driver(&abyss_driver);
38910 }
38911diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
38912index 456f8bf..373e56d 100644
38913--- a/drivers/net/tokenring/madgemc.c
38914+++ b/drivers/net/tokenring/madgemc.c
38915@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
38916
38917 static int __init madgemc_init (void)
38918 {
38919- madgemc_netdev_ops = tms380tr_netdev_ops;
38920- madgemc_netdev_ops.ndo_open = madgemc_open;
38921- madgemc_netdev_ops.ndo_stop = madgemc_close;
38922+ pax_open_kernel();
38923+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38924+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
38925+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
38926+ pax_close_kernel();
38927
38928 return mca_register_driver (&madgemc_driver);
38929 }
38930diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
38931index 16e8783..925bd49 100644
38932--- a/drivers/net/tokenring/proteon.c
38933+++ b/drivers/net/tokenring/proteon.c
38934@@ -353,9 +353,11 @@ static int __init proteon_init(void)
38935 struct platform_device *pdev;
38936 int i, num = 0, err = 0;
38937
38938- proteon_netdev_ops = tms380tr_netdev_ops;
38939- proteon_netdev_ops.ndo_open = proteon_open;
38940- proteon_netdev_ops.ndo_stop = tms380tr_close;
38941+ pax_open_kernel();
38942+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38943+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
38944+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
38945+ pax_close_kernel();
38946
38947 err = platform_driver_register(&proteon_driver);
38948 if (err)
38949diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
38950index 46db5c5..37c1536 100644
38951--- a/drivers/net/tokenring/skisa.c
38952+++ b/drivers/net/tokenring/skisa.c
38953@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
38954 struct platform_device *pdev;
38955 int i, num = 0, err = 0;
38956
38957- sk_isa_netdev_ops = tms380tr_netdev_ops;
38958- sk_isa_netdev_ops.ndo_open = sk_isa_open;
38959- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38960+ pax_open_kernel();
38961+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38962+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
38963+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38964+ pax_close_kernel();
38965
38966 err = platform_driver_register(&sk_isa_driver);
38967 if (err)
38968diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
38969index 74e5ba4..5cf6bc9 100644
38970--- a/drivers/net/tulip/de2104x.c
38971+++ b/drivers/net/tulip/de2104x.c
38972@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
38973 struct de_srom_info_leaf *il;
38974 void *bufp;
38975
38976+ pax_track_stack();
38977+
38978 /* download entire eeprom */
38979 for (i = 0; i < DE_EEPROM_WORDS; i++)
38980 ((__le16 *)ee_data)[i] =
38981diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
38982index a8349b7..90f9dfe 100644
38983--- a/drivers/net/tulip/de4x5.c
38984+++ b/drivers/net/tulip/de4x5.c
38985@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38986 for (i=0; i<ETH_ALEN; i++) {
38987 tmp.addr[i] = dev->dev_addr[i];
38988 }
38989- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38990+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38991 break;
38992
38993 case DE4X5_SET_HWADDR: /* Set the hardware address */
38994@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38995 spin_lock_irqsave(&lp->lock, flags);
38996 memcpy(&statbuf, &lp->pktStats, ioc->len);
38997 spin_unlock_irqrestore(&lp->lock, flags);
38998- if (copy_to_user(ioc->data, &statbuf, ioc->len))
38999+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39000 return -EFAULT;
39001 break;
39002 }
39003diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39004index 391acd3..56d11cd 100644
39005--- a/drivers/net/tulip/eeprom.c
39006+++ b/drivers/net/tulip/eeprom.c
39007@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39008 {NULL}};
39009
39010
39011-static const char *block_name[] __devinitdata = {
39012+static const char *block_name[] __devinitconst = {
39013 "21140 non-MII",
39014 "21140 MII PHY",
39015 "21142 Serial PHY",
39016diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39017index b38d3b7..b1cff23 100644
39018--- a/drivers/net/tulip/winbond-840.c
39019+++ b/drivers/net/tulip/winbond-840.c
39020@@ -235,7 +235,7 @@ struct pci_id_info {
39021 int drv_flags; /* Driver use, intended as capability flags. */
39022 };
39023
39024-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39025+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39026 { /* Sometime a Level-One switch card. */
39027 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39028 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39029diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39030index f450bc9..2b747c8 100644
39031--- a/drivers/net/usb/hso.c
39032+++ b/drivers/net/usb/hso.c
39033@@ -71,7 +71,7 @@
39034 #include <asm/byteorder.h>
39035 #include <linux/serial_core.h>
39036 #include <linux/serial.h>
39037-
39038+#include <asm/local.h>
39039
39040 #define DRIVER_VERSION "1.2"
39041 #define MOD_AUTHOR "Option Wireless"
39042@@ -258,7 +258,7 @@ struct hso_serial {
39043
39044 /* from usb_serial_port */
39045 struct tty_struct *tty;
39046- int open_count;
39047+ local_t open_count;
39048 spinlock_t serial_lock;
39049
39050 int (*write_data) (struct hso_serial *serial);
39051@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39052 struct urb *urb;
39053
39054 urb = serial->rx_urb[0];
39055- if (serial->open_count > 0) {
39056+ if (local_read(&serial->open_count) > 0) {
39057 count = put_rxbuf_data(urb, serial);
39058 if (count == -1)
39059 return;
39060@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39061 DUMP1(urb->transfer_buffer, urb->actual_length);
39062
39063 /* Anyone listening? */
39064- if (serial->open_count == 0)
39065+ if (local_read(&serial->open_count) == 0)
39066 return;
39067
39068 if (status == 0) {
39069@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39070 spin_unlock_irq(&serial->serial_lock);
39071
39072 /* check for port already opened, if not set the termios */
39073- serial->open_count++;
39074- if (serial->open_count == 1) {
39075+ if (local_inc_return(&serial->open_count) == 1) {
39076 tty->low_latency = 1;
39077 serial->rx_state = RX_IDLE;
39078 /* Force default termio settings */
39079@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39080 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39081 if (result) {
39082 hso_stop_serial_device(serial->parent);
39083- serial->open_count--;
39084+ local_dec(&serial->open_count);
39085 kref_put(&serial->parent->ref, hso_serial_ref_free);
39086 }
39087 } else {
39088@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39089
39090 /* reset the rts and dtr */
39091 /* do the actual close */
39092- serial->open_count--;
39093+ local_dec(&serial->open_count);
39094
39095- if (serial->open_count <= 0) {
39096- serial->open_count = 0;
39097+ if (local_read(&serial->open_count) <= 0) {
39098+ local_set(&serial->open_count, 0);
39099 spin_lock_irq(&serial->serial_lock);
39100 if (serial->tty == tty) {
39101 serial->tty->driver_data = NULL;
39102@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39103
39104 /* the actual setup */
39105 spin_lock_irqsave(&serial->serial_lock, flags);
39106- if (serial->open_count)
39107+ if (local_read(&serial->open_count))
39108 _hso_serial_set_termios(tty, old);
39109 else
39110 tty->termios = old;
39111@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39112 /* Start all serial ports */
39113 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39114 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39115- if (dev2ser(serial_table[i])->open_count) {
39116+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39117 result =
39118 hso_start_serial_device(serial_table[i], GFP_NOIO);
39119 hso_kick_transmit(dev2ser(serial_table[i]));
39120diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39121index 3e94f0c..ffdd926 100644
39122--- a/drivers/net/vxge/vxge-config.h
39123+++ b/drivers/net/vxge/vxge-config.h
39124@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39125 void (*link_down)(struct __vxge_hw_device *devh);
39126 void (*crit_err)(struct __vxge_hw_device *devh,
39127 enum vxge_hw_event type, u64 ext_data);
39128-};
39129+} __no_const;
39130
39131 /*
39132 * struct __vxge_hw_blockpool_entry - Block private data structure
39133diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39134index 068d7a9..35293de 100644
39135--- a/drivers/net/vxge/vxge-main.c
39136+++ b/drivers/net/vxge/vxge-main.c
39137@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39138 struct sk_buff *completed[NR_SKB_COMPLETED];
39139 int more;
39140
39141+ pax_track_stack();
39142+
39143 do {
39144 more = 0;
39145 skb_ptr = completed;
39146@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39147 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39148 int index;
39149
39150+ pax_track_stack();
39151+
39152 /*
39153 * Filling
39154 * - itable with bucket numbers
39155diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39156index 461742b..81be42e 100644
39157--- a/drivers/net/vxge/vxge-traffic.h
39158+++ b/drivers/net/vxge/vxge-traffic.h
39159@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39160 struct vxge_hw_mempool_dma *dma_object,
39161 u32 index,
39162 u32 is_last);
39163-};
39164+} __no_const;
39165
39166 void
39167 __vxge_hw_mempool_destroy(
39168diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39169index cd8cb95..4153b79 100644
39170--- a/drivers/net/wan/cycx_x25.c
39171+++ b/drivers/net/wan/cycx_x25.c
39172@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39173 unsigned char hex[1024],
39174 * phex = hex;
39175
39176+ pax_track_stack();
39177+
39178 if (len >= (sizeof(hex) / 2))
39179 len = (sizeof(hex) / 2) - 1;
39180
39181diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39182index aa9248f..a4e3c3b 100644
39183--- a/drivers/net/wan/hdlc_x25.c
39184+++ b/drivers/net/wan/hdlc_x25.c
39185@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39186
39187 static int x25_open(struct net_device *dev)
39188 {
39189- struct lapb_register_struct cb;
39190+ static struct lapb_register_struct cb = {
39191+ .connect_confirmation = x25_connected,
39192+ .connect_indication = x25_connected,
39193+ .disconnect_confirmation = x25_disconnected,
39194+ .disconnect_indication = x25_disconnected,
39195+ .data_indication = x25_data_indication,
39196+ .data_transmit = x25_data_transmit
39197+ };
39198 int result;
39199
39200- cb.connect_confirmation = x25_connected;
39201- cb.connect_indication = x25_connected;
39202- cb.disconnect_confirmation = x25_disconnected;
39203- cb.disconnect_indication = x25_disconnected;
39204- cb.data_indication = x25_data_indication;
39205- cb.data_transmit = x25_data_transmit;
39206-
39207 result = lapb_register(dev, &cb);
39208 if (result != LAPB_OK)
39209 return result;
39210diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39211index 5ad287c..783b020 100644
39212--- a/drivers/net/wimax/i2400m/usb-fw.c
39213+++ b/drivers/net/wimax/i2400m/usb-fw.c
39214@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39215 int do_autopm = 1;
39216 DECLARE_COMPLETION_ONSTACK(notif_completion);
39217
39218+ pax_track_stack();
39219+
39220 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39221 i2400m, ack, ack_size);
39222 BUG_ON(_ack == i2400m->bm_ack_buf);
39223diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39224index 6c26840..62c97c3 100644
39225--- a/drivers/net/wireless/airo.c
39226+++ b/drivers/net/wireless/airo.c
39227@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39228 BSSListElement * loop_net;
39229 BSSListElement * tmp_net;
39230
39231+ pax_track_stack();
39232+
39233 /* Blow away current list of scan results */
39234 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39235 list_move_tail (&loop_net->list, &ai->network_free_list);
39236@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39237 WepKeyRid wkr;
39238 int rc;
39239
39240+ pax_track_stack();
39241+
39242 memset( &mySsid, 0, sizeof( mySsid ) );
39243 kfree (ai->flash);
39244 ai->flash = NULL;
39245@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39246 __le32 *vals = stats.vals;
39247 int len;
39248
39249+ pax_track_stack();
39250+
39251 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39252 return -ENOMEM;
39253 data = (struct proc_data *)file->private_data;
39254@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39255 /* If doLoseSync is not 1, we won't do a Lose Sync */
39256 int doLoseSync = -1;
39257
39258+ pax_track_stack();
39259+
39260 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39261 return -ENOMEM;
39262 data = (struct proc_data *)file->private_data;
39263@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39264 int i;
39265 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39266
39267+ pax_track_stack();
39268+
39269 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39270 if (!qual)
39271 return -ENOMEM;
39272@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39273 CapabilityRid cap_rid;
39274 __le32 *vals = stats_rid.vals;
39275
39276+ pax_track_stack();
39277+
39278 /* Get stats out of the card */
39279 clear_bit(JOB_WSTATS, &local->jobs);
39280 if (local->power.event) {
39281diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39282index 747508c..82e965d 100644
39283--- a/drivers/net/wireless/ath/ath5k/debug.c
39284+++ b/drivers/net/wireless/ath/ath5k/debug.c
39285@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39286 unsigned int v;
39287 u64 tsf;
39288
39289+ pax_track_stack();
39290+
39291 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39292 len += snprintf(buf+len, sizeof(buf)-len,
39293 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39294@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39295 unsigned int len = 0;
39296 unsigned int i;
39297
39298+ pax_track_stack();
39299+
39300 len += snprintf(buf+len, sizeof(buf)-len,
39301 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39302
39303diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39304index 2be4c22..593b1eb 100644
39305--- a/drivers/net/wireless/ath/ath9k/debug.c
39306+++ b/drivers/net/wireless/ath/ath9k/debug.c
39307@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39308 char buf[512];
39309 unsigned int len = 0;
39310
39311+ pax_track_stack();
39312+
39313 len += snprintf(buf + len, sizeof(buf) - len,
39314 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39315 len += snprintf(buf + len, sizeof(buf) - len,
39316@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39317 int i;
39318 u8 addr[ETH_ALEN];
39319
39320+ pax_track_stack();
39321+
39322 len += snprintf(buf + len, sizeof(buf) - len,
39323 "primary: %s (%s chan=%d ht=%d)\n",
39324 wiphy_name(sc->pri_wiphy->hw->wiphy),
39325diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39326index 80b19a4..dab3a45 100644
39327--- a/drivers/net/wireless/b43/debugfs.c
39328+++ b/drivers/net/wireless/b43/debugfs.c
39329@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39330 struct b43_debugfs_fops {
39331 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39332 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39333- struct file_operations fops;
39334+ const struct file_operations fops;
39335 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39336 size_t file_struct_offset;
39337 };
39338diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39339index 1f85ac5..c99b4b4 100644
39340--- a/drivers/net/wireless/b43legacy/debugfs.c
39341+++ b/drivers/net/wireless/b43legacy/debugfs.c
39342@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39343 struct b43legacy_debugfs_fops {
39344 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39345 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39346- struct file_operations fops;
39347+ const struct file_operations fops;
39348 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39349 size_t file_struct_offset;
39350 /* Take wl->irq_lock before calling read/write? */
39351diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39352index 43102bf..3b569c3 100644
39353--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39354+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39355@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39356 int err;
39357 DECLARE_SSID_BUF(ssid);
39358
39359+ pax_track_stack();
39360+
39361 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39362
39363 if (ssid_len)
39364@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39365 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39366 int err;
39367
39368+ pax_track_stack();
39369+
39370 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39371 idx, keylen, len);
39372
39373diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39374index 282b1f7..169f0cf 100644
39375--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39376+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39377@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39378 unsigned long flags;
39379 DECLARE_SSID_BUF(ssid);
39380
39381+ pax_track_stack();
39382+
39383 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39384 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39385 print_ssid(ssid, info_element->data, info_element->len),
39386diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39387index 950267a..80d5fd2 100644
39388--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39389+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39390@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39391 },
39392 };
39393
39394-static struct iwl_ops iwl1000_ops = {
39395+static const struct iwl_ops iwl1000_ops = {
39396 .ucode = &iwl5000_ucode,
39397 .lib = &iwl1000_lib,
39398 .hcmd = &iwl5000_hcmd,
39399diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39400index 56bfcc3..b348020 100644
39401--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39402+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39403@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39404 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39405 };
39406
39407-static struct iwl_ops iwl3945_ops = {
39408+static const struct iwl_ops iwl3945_ops = {
39409 .ucode = &iwl3945_ucode,
39410 .lib = &iwl3945_lib,
39411 .hcmd = &iwl3945_hcmd,
39412diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39413index 585b8d4..e142963 100644
39414--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39415+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39416@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39417 },
39418 };
39419
39420-static struct iwl_ops iwl4965_ops = {
39421+static const struct iwl_ops iwl4965_ops = {
39422 .ucode = &iwl4965_ucode,
39423 .lib = &iwl4965_lib,
39424 .hcmd = &iwl4965_hcmd,
39425diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39426index 1f423f2..e37c192 100644
39427--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39428+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39429@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39430 },
39431 };
39432
39433-struct iwl_ops iwl5000_ops = {
39434+const struct iwl_ops iwl5000_ops = {
39435 .ucode = &iwl5000_ucode,
39436 .lib = &iwl5000_lib,
39437 .hcmd = &iwl5000_hcmd,
39438 .utils = &iwl5000_hcmd_utils,
39439 };
39440
39441-static struct iwl_ops iwl5150_ops = {
39442+static const struct iwl_ops iwl5150_ops = {
39443 .ucode = &iwl5000_ucode,
39444 .lib = &iwl5150_lib,
39445 .hcmd = &iwl5000_hcmd,
39446diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39447index 1473452..f07d5e1 100644
39448--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39449+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39450@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39451 .calc_rssi = iwl5000_calc_rssi,
39452 };
39453
39454-static struct iwl_ops iwl6000_ops = {
39455+static const struct iwl_ops iwl6000_ops = {
39456 .ucode = &iwl5000_ucode,
39457 .lib = &iwl6000_lib,
39458 .hcmd = &iwl5000_hcmd,
39459diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39460index 1a3dfa2..b3e0a61 100644
39461--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39462+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39463@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39464 u8 active_index = 0;
39465 s32 tpt = 0;
39466
39467+ pax_track_stack();
39468+
39469 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39470
39471 if (!ieee80211_is_data(hdr->frame_control) ||
39472@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39473 u8 valid_tx_ant = 0;
39474 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39475
39476+ pax_track_stack();
39477+
39478 /* Override starting rate (index 0) if needed for debug purposes */
39479 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39480
39481diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39482index 0e56d78..6a3c107 100644
39483--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39484+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39485@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39486 if (iwl_debug_level & IWL_DL_INFO)
39487 dev_printk(KERN_DEBUG, &(pdev->dev),
39488 "Disabling hw_scan\n");
39489- iwl_hw_ops.hw_scan = NULL;
39490+ pax_open_kernel();
39491+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39492+ pax_close_kernel();
39493 }
39494
39495 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39496diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39497index cbc6290..eb323d7 100644
39498--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39499+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39500@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39501 #endif
39502
39503 #else
39504-#define IWL_DEBUG(__priv, level, fmt, args...)
39505-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39506+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39507+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39508 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39509 void *p, u32 len)
39510 {}
39511diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39512index a198bcf..8e68233 100644
39513--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39514+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39515@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39516 int pos = 0;
39517 const size_t bufsz = sizeof(buf);
39518
39519+ pax_track_stack();
39520+
39521 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39522 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39523 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39524@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39525 const size_t bufsz = sizeof(buf);
39526 ssize_t ret;
39527
39528+ pax_track_stack();
39529+
39530 for (i = 0; i < AC_NUM; i++) {
39531 pos += scnprintf(buf + pos, bufsz - pos,
39532 "\tcw_min\tcw_max\taifsn\ttxop\n");
39533diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39534index 3539ea4..b174bfa 100644
39535--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39536+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39537@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39538
39539 /* shared structures from iwl-5000.c */
39540 extern struct iwl_mod_params iwl50_mod_params;
39541-extern struct iwl_ops iwl5000_ops;
39542+extern const struct iwl_ops iwl5000_ops;
39543 extern struct iwl_ucode_ops iwl5000_ucode;
39544 extern struct iwl_lib_ops iwl5000_lib;
39545 extern struct iwl_hcmd_ops iwl5000_hcmd;
39546diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39547index 619590d..69235ee 100644
39548--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39549+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39550@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39551 */
39552 if (iwl3945_mod_params.disable_hw_scan) {
39553 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39554- iwl3945_hw_ops.hw_scan = NULL;
39555+ pax_open_kernel();
39556+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39557+ pax_close_kernel();
39558 }
39559
39560
39561diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39562index 1465379..fe4d78b 100644
39563--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39564+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39565@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39566 int buf_len = 512;
39567 size_t len = 0;
39568
39569+ pax_track_stack();
39570+
39571 if (*ppos != 0)
39572 return 0;
39573 if (count < sizeof(buf))
39574diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39575index 893a55c..7f66a50 100644
39576--- a/drivers/net/wireless/libertas/debugfs.c
39577+++ b/drivers/net/wireless/libertas/debugfs.c
39578@@ -708,7 +708,7 @@ out_unlock:
39579 struct lbs_debugfs_files {
39580 const char *name;
39581 int perm;
39582- struct file_operations fops;
39583+ const struct file_operations fops;
39584 };
39585
39586 static const struct lbs_debugfs_files debugfs_files[] = {
39587diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39588index 2ecbedb..42704f0 100644
39589--- a/drivers/net/wireless/rndis_wlan.c
39590+++ b/drivers/net/wireless/rndis_wlan.c
39591@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39592
39593 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39594
39595- if (rts_threshold < 0 || rts_threshold > 2347)
39596+ if (rts_threshold > 2347)
39597 rts_threshold = 2347;
39598
39599 tmp = cpu_to_le32(rts_threshold);
39600diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39601index 5c4df24..3b42925 100644
39602--- a/drivers/oprofile/buffer_sync.c
39603+++ b/drivers/oprofile/buffer_sync.c
39604@@ -341,7 +341,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39605 if (cookie == NO_COOKIE)
39606 offset = pc;
39607 if (cookie == INVALID_COOKIE) {
39608- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39609+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39610 offset = pc;
39611 }
39612 if (cookie != last_cookie) {
39613@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39614 /* add userspace sample */
39615
39616 if (!mm) {
39617- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39618+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39619 return 0;
39620 }
39621
39622 cookie = lookup_dcookie(mm, s->eip, &offset);
39623
39624 if (cookie == INVALID_COOKIE) {
39625- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39626+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39627 return 0;
39628 }
39629
39630@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
39631 /* ignore backtraces if failed to add a sample */
39632 if (state == sb_bt_start) {
39633 state = sb_bt_ignore;
39634- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39635+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39636 }
39637 }
39638 release_mm(mm);
39639diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39640index 5df60a6..72f5c1c 100644
39641--- a/drivers/oprofile/event_buffer.c
39642+++ b/drivers/oprofile/event_buffer.c
39643@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39644 }
39645
39646 if (buffer_pos == buffer_size) {
39647- atomic_inc(&oprofile_stats.event_lost_overflow);
39648+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39649 return;
39650 }
39651
39652diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39653index dc8a042..fe5f315 100644
39654--- a/drivers/oprofile/oprof.c
39655+++ b/drivers/oprofile/oprof.c
39656@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39657 if (oprofile_ops.switch_events())
39658 return;
39659
39660- atomic_inc(&oprofile_stats.multiplex_counter);
39661+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39662 start_switch_worker();
39663 }
39664
39665diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39666index 61689e8..387f7f8 100644
39667--- a/drivers/oprofile/oprofile_stats.c
39668+++ b/drivers/oprofile/oprofile_stats.c
39669@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39670 cpu_buf->sample_invalid_eip = 0;
39671 }
39672
39673- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39674- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39675- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39676- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39677- atomic_set(&oprofile_stats.multiplex_counter, 0);
39678+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39679+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39680+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39681+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39682+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39683 }
39684
39685
39686diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39687index 0b54e46..a37c527 100644
39688--- a/drivers/oprofile/oprofile_stats.h
39689+++ b/drivers/oprofile/oprofile_stats.h
39690@@ -13,11 +13,11 @@
39691 #include <asm/atomic.h>
39692
39693 struct oprofile_stat_struct {
39694- atomic_t sample_lost_no_mm;
39695- atomic_t sample_lost_no_mapping;
39696- atomic_t bt_lost_no_mapping;
39697- atomic_t event_lost_overflow;
39698- atomic_t multiplex_counter;
39699+ atomic_unchecked_t sample_lost_no_mm;
39700+ atomic_unchecked_t sample_lost_no_mapping;
39701+ atomic_unchecked_t bt_lost_no_mapping;
39702+ atomic_unchecked_t event_lost_overflow;
39703+ atomic_unchecked_t multiplex_counter;
39704 };
39705
39706 extern struct oprofile_stat_struct oprofile_stats;
39707diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39708index 2766a6d..80c77e2 100644
39709--- a/drivers/oprofile/oprofilefs.c
39710+++ b/drivers/oprofile/oprofilefs.c
39711@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39712
39713
39714 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39715- char const *name, atomic_t *val)
39716+ char const *name, atomic_unchecked_t *val)
39717 {
39718 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39719 &atomic_ro_fops, 0444);
39720diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39721index 13a64bc..ad62835 100644
39722--- a/drivers/parisc/pdc_stable.c
39723+++ b/drivers/parisc/pdc_stable.c
39724@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39725 return ret;
39726 }
39727
39728-static struct sysfs_ops pdcspath_attr_ops = {
39729+static const struct sysfs_ops pdcspath_attr_ops = {
39730 .show = pdcspath_attr_show,
39731 .store = pdcspath_attr_store,
39732 };
39733diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39734index 8eefe56..40751a7 100644
39735--- a/drivers/parport/procfs.c
39736+++ b/drivers/parport/procfs.c
39737@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39738
39739 *ppos += len;
39740
39741- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39742+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39743 }
39744
39745 #ifdef CONFIG_PARPORT_1284
39746@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39747
39748 *ppos += len;
39749
39750- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39751+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39752 }
39753 #endif /* IEEE1284.3 support. */
39754
39755diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39756index 73e7d8e..c80f3d2 100644
39757--- a/drivers/pci/hotplug/acpiphp_glue.c
39758+++ b/drivers/pci/hotplug/acpiphp_glue.c
39759@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39760 }
39761
39762
39763-static struct acpi_dock_ops acpiphp_dock_ops = {
39764+static const struct acpi_dock_ops acpiphp_dock_ops = {
39765 .handler = handle_hotplug_event_func,
39766 };
39767
39768diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39769index 9fff878..ad0ad53 100644
39770--- a/drivers/pci/hotplug/cpci_hotplug.h
39771+++ b/drivers/pci/hotplug/cpci_hotplug.h
39772@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39773 int (*hardware_test) (struct slot* slot, u32 value);
39774 u8 (*get_power) (struct slot* slot);
39775 int (*set_power) (struct slot* slot, int value);
39776-};
39777+} __no_const;
39778
39779 struct cpci_hp_controller {
39780 unsigned int irq;
39781diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39782index 76ba8a1..20ca857 100644
39783--- a/drivers/pci/hotplug/cpqphp_nvram.c
39784+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39785@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39786
39787 void compaq_nvram_init (void __iomem *rom_start)
39788 {
39789+
39790+#ifndef CONFIG_PAX_KERNEXEC
39791 if (rom_start) {
39792 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39793 }
39794+#endif
39795+
39796 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39797
39798 /* initialize our int15 lock */
39799diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39800index 6151389..0a894ef 100644
39801--- a/drivers/pci/hotplug/fakephp.c
39802+++ b/drivers/pci/hotplug/fakephp.c
39803@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39804 }
39805
39806 static struct kobj_type legacy_ktype = {
39807- .sysfs_ops = &(struct sysfs_ops){
39808+ .sysfs_ops = &(const struct sysfs_ops){
39809 .store = legacy_store, .show = legacy_show
39810 },
39811 .release = &legacy_release,
39812diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39813index 5b680df..fe05b7e 100644
39814--- a/drivers/pci/intel-iommu.c
39815+++ b/drivers/pci/intel-iommu.c
39816@@ -2643,7 +2643,7 @@ error:
39817 return 0;
39818 }
39819
39820-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39821+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39822 unsigned long offset, size_t size,
39823 enum dma_data_direction dir,
39824 struct dma_attrs *attrs)
39825@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39826 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39827 }
39828
39829-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39830+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39831 size_t size, enum dma_data_direction dir,
39832 struct dma_attrs *attrs)
39833 {
39834@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39835 }
39836 }
39837
39838-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39839+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39840 dma_addr_t *dma_handle, gfp_t flags)
39841 {
39842 void *vaddr;
39843@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39844 return NULL;
39845 }
39846
39847-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39848+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39849 dma_addr_t dma_handle)
39850 {
39851 int order;
39852@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39853 free_pages((unsigned long)vaddr, order);
39854 }
39855
39856-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39857+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39858 int nelems, enum dma_data_direction dir,
39859 struct dma_attrs *attrs)
39860 {
39861@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39862 return nelems;
39863 }
39864
39865-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39866+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39867 enum dma_data_direction dir, struct dma_attrs *attrs)
39868 {
39869 int i;
39870@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
39871 return nelems;
39872 }
39873
39874-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39875+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39876 {
39877 return !dma_addr;
39878 }
39879
39880-struct dma_map_ops intel_dma_ops = {
39881+const struct dma_map_ops intel_dma_ops = {
39882 .alloc_coherent = intel_alloc_coherent,
39883 .free_coherent = intel_free_coherent,
39884 .map_sg = intel_map_sg,
39885diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
39886index 5b7056c..607bc94 100644
39887--- a/drivers/pci/pcie/aspm.c
39888+++ b/drivers/pci/pcie/aspm.c
39889@@ -27,9 +27,9 @@
39890 #define MODULE_PARAM_PREFIX "pcie_aspm."
39891
39892 /* Note: those are not register definitions */
39893-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
39894-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
39895-#define ASPM_STATE_L1 (4) /* L1 state */
39896+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
39897+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
39898+#define ASPM_STATE_L1 (4U) /* L1 state */
39899 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
39900 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
39901
39902diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
39903index 8105e32..ca10419 100644
39904--- a/drivers/pci/probe.c
39905+++ b/drivers/pci/probe.c
39906@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
39907 return ret;
39908 }
39909
39910-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
39911+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
39912 struct device_attribute *attr,
39913 char *buf)
39914 {
39915 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
39916 }
39917
39918-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
39919+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
39920 struct device_attribute *attr,
39921 char *buf)
39922 {
39923diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
39924index a03ad8c..024b0da 100644
39925--- a/drivers/pci/proc.c
39926+++ b/drivers/pci/proc.c
39927@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
39928 static int __init pci_proc_init(void)
39929 {
39930 struct pci_dev *dev = NULL;
39931+
39932+#ifdef CONFIG_GRKERNSEC_PROC_ADD
39933+#ifdef CONFIG_GRKERNSEC_PROC_USER
39934+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
39935+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39936+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
39937+#endif
39938+#else
39939 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
39940+#endif
39941 proc_create("devices", 0, proc_bus_pci_dir,
39942 &proc_bus_pci_dev_operations);
39943 proc_initialized = 1;
39944diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
39945index 8c02b6c..5584d8e 100644
39946--- a/drivers/pci/slot.c
39947+++ b/drivers/pci/slot.c
39948@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
39949 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
39950 }
39951
39952-static struct sysfs_ops pci_slot_sysfs_ops = {
39953+static const struct sysfs_ops pci_slot_sysfs_ops = {
39954 .show = pci_slot_attr_show,
39955 .store = pci_slot_attr_store,
39956 };
39957diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
39958index 30cf71d2..50938f1 100644
39959--- a/drivers/pcmcia/pcmcia_ioctl.c
39960+++ b/drivers/pcmcia/pcmcia_ioctl.c
39961@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
39962 return -EFAULT;
39963 }
39964 }
39965- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39966+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39967 if (!buf)
39968 return -ENOMEM;
39969
39970diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
39971index 52183c4..b224c69 100644
39972--- a/drivers/platform/x86/acer-wmi.c
39973+++ b/drivers/platform/x86/acer-wmi.c
39974@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
39975 return 0;
39976 }
39977
39978-static struct backlight_ops acer_bl_ops = {
39979+static const struct backlight_ops acer_bl_ops = {
39980 .get_brightness = read_brightness,
39981 .update_status = update_bl_status,
39982 };
39983diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
39984index 767cb61..a87380b 100644
39985--- a/drivers/platform/x86/asus-laptop.c
39986+++ b/drivers/platform/x86/asus-laptop.c
39987@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
39988 */
39989 static int read_brightness(struct backlight_device *bd);
39990 static int update_bl_status(struct backlight_device *bd);
39991-static struct backlight_ops asusbl_ops = {
39992+static const struct backlight_ops asusbl_ops = {
39993 .get_brightness = read_brightness,
39994 .update_status = update_bl_status,
39995 };
39996diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
39997index d66c07a..a4abaac 100644
39998--- a/drivers/platform/x86/asus_acpi.c
39999+++ b/drivers/platform/x86/asus_acpi.c
40000@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40001 return 0;
40002 }
40003
40004-static struct backlight_ops asus_backlight_data = {
40005+static const struct backlight_ops asus_backlight_data = {
40006 .get_brightness = read_brightness,
40007 .update_status = set_brightness_status,
40008 };
40009diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40010index 11003bb..550ff1b 100644
40011--- a/drivers/platform/x86/compal-laptop.c
40012+++ b/drivers/platform/x86/compal-laptop.c
40013@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40014 return set_lcd_level(b->props.brightness);
40015 }
40016
40017-static struct backlight_ops compalbl_ops = {
40018+static const struct backlight_ops compalbl_ops = {
40019 .get_brightness = bl_get_brightness,
40020 .update_status = bl_update_status,
40021 };
40022diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40023index 07a74da..9dc99fa 100644
40024--- a/drivers/platform/x86/dell-laptop.c
40025+++ b/drivers/platform/x86/dell-laptop.c
40026@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40027 return buffer.output[1];
40028 }
40029
40030-static struct backlight_ops dell_ops = {
40031+static const struct backlight_ops dell_ops = {
40032 .get_brightness = dell_get_intensity,
40033 .update_status = dell_send_intensity,
40034 };
40035diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40036index c533b1c..5c81f22 100644
40037--- a/drivers/platform/x86/eeepc-laptop.c
40038+++ b/drivers/platform/x86/eeepc-laptop.c
40039@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40040 */
40041 static int read_brightness(struct backlight_device *bd);
40042 static int update_bl_status(struct backlight_device *bd);
40043-static struct backlight_ops eeepcbl_ops = {
40044+static const struct backlight_ops eeepcbl_ops = {
40045 .get_brightness = read_brightness,
40046 .update_status = update_bl_status,
40047 };
40048diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40049index bcd4ba8..a249b35 100644
40050--- a/drivers/platform/x86/fujitsu-laptop.c
40051+++ b/drivers/platform/x86/fujitsu-laptop.c
40052@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40053 return ret;
40054 }
40055
40056-static struct backlight_ops fujitsubl_ops = {
40057+static const struct backlight_ops fujitsubl_ops = {
40058 .get_brightness = bl_get_brightness,
40059 .update_status = bl_update_status,
40060 };
40061diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40062index 759763d..1093ba2 100644
40063--- a/drivers/platform/x86/msi-laptop.c
40064+++ b/drivers/platform/x86/msi-laptop.c
40065@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40066 return set_lcd_level(b->props.brightness);
40067 }
40068
40069-static struct backlight_ops msibl_ops = {
40070+static const struct backlight_ops msibl_ops = {
40071 .get_brightness = bl_get_brightness,
40072 .update_status = bl_update_status,
40073 };
40074diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40075index fe7cf01..9012d8d 100644
40076--- a/drivers/platform/x86/panasonic-laptop.c
40077+++ b/drivers/platform/x86/panasonic-laptop.c
40078@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40079 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40080 }
40081
40082-static struct backlight_ops pcc_backlight_ops = {
40083+static const struct backlight_ops pcc_backlight_ops = {
40084 .get_brightness = bl_get,
40085 .update_status = bl_set_status,
40086 };
40087diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40088index a2a742c..b37e25e 100644
40089--- a/drivers/platform/x86/sony-laptop.c
40090+++ b/drivers/platform/x86/sony-laptop.c
40091@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40092 }
40093
40094 static struct backlight_device *sony_backlight_device;
40095-static struct backlight_ops sony_backlight_ops = {
40096+static const struct backlight_ops sony_backlight_ops = {
40097 .update_status = sony_backlight_update_status,
40098 .get_brightness = sony_backlight_get_brightness,
40099 };
40100diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40101index 68271ae..5e8fb10 100644
40102--- a/drivers/platform/x86/thinkpad_acpi.c
40103+++ b/drivers/platform/x86/thinkpad_acpi.c
40104@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40105 return 0;
40106 }
40107
40108-void static hotkey_mask_warn_incomplete_mask(void)
40109+static void hotkey_mask_warn_incomplete_mask(void)
40110 {
40111 /* log only what the user can fix... */
40112 const u32 wantedmask = hotkey_driver_mask &
40113@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40114 BACKLIGHT_UPDATE_HOTKEY);
40115 }
40116
40117-static struct backlight_ops ibm_backlight_data = {
40118+static const struct backlight_ops ibm_backlight_data = {
40119 .get_brightness = brightness_get,
40120 .update_status = brightness_update_status,
40121 };
40122diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40123index 51c0a8b..0786629 100644
40124--- a/drivers/platform/x86/toshiba_acpi.c
40125+++ b/drivers/platform/x86/toshiba_acpi.c
40126@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40127 return AE_OK;
40128 }
40129
40130-static struct backlight_ops toshiba_backlight_data = {
40131+static const struct backlight_ops toshiba_backlight_data = {
40132 .get_brightness = get_lcd,
40133 .update_status = set_lcd_status,
40134 };
40135diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40136index fc83783c..cf370d7 100644
40137--- a/drivers/pnp/pnpbios/bioscalls.c
40138+++ b/drivers/pnp/pnpbios/bioscalls.c
40139@@ -60,7 +60,7 @@ do { \
40140 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40141 } while(0)
40142
40143-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40144+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40145 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40146
40147 /*
40148@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40149
40150 cpu = get_cpu();
40151 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40152+
40153+ pax_open_kernel();
40154 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40155+ pax_close_kernel();
40156
40157 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40158 spin_lock_irqsave(&pnp_bios_lock, flags);
40159@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40160 :"memory");
40161 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40162
40163+ pax_open_kernel();
40164 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40165+ pax_close_kernel();
40166+
40167 put_cpu();
40168
40169 /* If we get here and this is set then the PnP BIOS faulted on us. */
40170@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40171 return status;
40172 }
40173
40174-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40175+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40176 {
40177 int i;
40178
40179@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40180 pnp_bios_callpoint.offset = header->fields.pm16offset;
40181 pnp_bios_callpoint.segment = PNP_CS16;
40182
40183+ pax_open_kernel();
40184+
40185 for_each_possible_cpu(i) {
40186 struct desc_struct *gdt = get_cpu_gdt_table(i);
40187 if (!gdt)
40188@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40189 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40190 (unsigned long)__va(header->fields.pm16dseg));
40191 }
40192+
40193+ pax_close_kernel();
40194 }
40195diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40196index ba97654..66b99d4 100644
40197--- a/drivers/pnp/resource.c
40198+++ b/drivers/pnp/resource.c
40199@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40200 return 1;
40201
40202 /* check if the resource is valid */
40203- if (*irq < 0 || *irq > 15)
40204+ if (*irq > 15)
40205 return 0;
40206
40207 /* check if the resource is reserved */
40208@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40209 return 1;
40210
40211 /* check if the resource is valid */
40212- if (*dma < 0 || *dma == 4 || *dma > 7)
40213+ if (*dma == 4 || *dma > 7)
40214 return 0;
40215
40216 /* check if the resource is reserved */
40217diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40218index 62bb981..24a2dc9 100644
40219--- a/drivers/power/bq27x00_battery.c
40220+++ b/drivers/power/bq27x00_battery.c
40221@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40222 struct bq27x00_access_methods {
40223 int (*read)(u8 reg, int *rt_value, int b_single,
40224 struct bq27x00_device_info *di);
40225-};
40226+} __no_const;
40227
40228 struct bq27x00_device_info {
40229 struct device *dev;
40230diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40231index 62227cd..b5b538b 100644
40232--- a/drivers/rtc/rtc-dev.c
40233+++ b/drivers/rtc/rtc-dev.c
40234@@ -14,6 +14,7 @@
40235 #include <linux/module.h>
40236 #include <linux/rtc.h>
40237 #include <linux/sched.h>
40238+#include <linux/grsecurity.h>
40239 #include "rtc-core.h"
40240
40241 static dev_t rtc_devt;
40242@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40243 if (copy_from_user(&tm, uarg, sizeof(tm)))
40244 return -EFAULT;
40245
40246+ gr_log_timechange();
40247+
40248 return rtc_set_time(rtc, &tm);
40249
40250 case RTC_PIE_ON:
40251diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40252index 968e3c7..fbc637a 100644
40253--- a/drivers/s390/cio/qdio_perf.c
40254+++ b/drivers/s390/cio/qdio_perf.c
40255@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40256 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40257 {
40258 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40259- (long)atomic_long_read(&perf_stats.qdio_int));
40260+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40261 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40262- (long)atomic_long_read(&perf_stats.pci_int));
40263+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40264 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40265- (long)atomic_long_read(&perf_stats.thin_int));
40266+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40267 seq_printf(m, "\n");
40268 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40269- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40270+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40271 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40272- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40273+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40274 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40275- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40276- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40277+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40278+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40279 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40280- (long)atomic_long_read(&perf_stats.thinint_inbound),
40281- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40282+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40283+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40284 seq_printf(m, "\n");
40285 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40286- (long)atomic_long_read(&perf_stats.siga_in));
40287+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40288 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40289- (long)atomic_long_read(&perf_stats.siga_out));
40290+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40291 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40292- (long)atomic_long_read(&perf_stats.siga_sync));
40293+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40294 seq_printf(m, "\n");
40295 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40296- (long)atomic_long_read(&perf_stats.inbound_handler));
40297+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40298 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40299- (long)atomic_long_read(&perf_stats.outbound_handler));
40300+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40301 seq_printf(m, "\n");
40302 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40303- (long)atomic_long_read(&perf_stats.fast_requeue));
40304+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40305 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40306- (long)atomic_long_read(&perf_stats.outbound_target_full));
40307+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40308 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40309- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40310+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40311 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40312- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40313+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40314 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40315- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40316+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40317 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40318- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40319- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40320+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40321+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40322 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40323- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40324- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40325+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40326+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40327 seq_printf(m, "\n");
40328 return 0;
40329 }
40330diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40331index ff4504c..b3604c3 100644
40332--- a/drivers/s390/cio/qdio_perf.h
40333+++ b/drivers/s390/cio/qdio_perf.h
40334@@ -13,46 +13,46 @@
40335
40336 struct qdio_perf_stats {
40337 /* interrupt handler calls */
40338- atomic_long_t qdio_int;
40339- atomic_long_t pci_int;
40340- atomic_long_t thin_int;
40341+ atomic_long_unchecked_t qdio_int;
40342+ atomic_long_unchecked_t pci_int;
40343+ atomic_long_unchecked_t thin_int;
40344
40345 /* tasklet runs */
40346- atomic_long_t tasklet_inbound;
40347- atomic_long_t tasklet_outbound;
40348- atomic_long_t tasklet_thinint;
40349- atomic_long_t tasklet_thinint_loop;
40350- atomic_long_t thinint_inbound;
40351- atomic_long_t thinint_inbound_loop;
40352- atomic_long_t thinint_inbound_loop2;
40353+ atomic_long_unchecked_t tasklet_inbound;
40354+ atomic_long_unchecked_t tasklet_outbound;
40355+ atomic_long_unchecked_t tasklet_thinint;
40356+ atomic_long_unchecked_t tasklet_thinint_loop;
40357+ atomic_long_unchecked_t thinint_inbound;
40358+ atomic_long_unchecked_t thinint_inbound_loop;
40359+ atomic_long_unchecked_t thinint_inbound_loop2;
40360
40361 /* signal adapter calls */
40362- atomic_long_t siga_out;
40363- atomic_long_t siga_in;
40364- atomic_long_t siga_sync;
40365+ atomic_long_unchecked_t siga_out;
40366+ atomic_long_unchecked_t siga_in;
40367+ atomic_long_unchecked_t siga_sync;
40368
40369 /* misc */
40370- atomic_long_t inbound_handler;
40371- atomic_long_t outbound_handler;
40372- atomic_long_t fast_requeue;
40373- atomic_long_t outbound_target_full;
40374+ atomic_long_unchecked_t inbound_handler;
40375+ atomic_long_unchecked_t outbound_handler;
40376+ atomic_long_unchecked_t fast_requeue;
40377+ atomic_long_unchecked_t outbound_target_full;
40378
40379 /* for debugging */
40380- atomic_long_t debug_tl_out_timer;
40381- atomic_long_t debug_stop_polling;
40382- atomic_long_t debug_eqbs_all;
40383- atomic_long_t debug_eqbs_incomplete;
40384- atomic_long_t debug_sqbs_all;
40385- atomic_long_t debug_sqbs_incomplete;
40386+ atomic_long_unchecked_t debug_tl_out_timer;
40387+ atomic_long_unchecked_t debug_stop_polling;
40388+ atomic_long_unchecked_t debug_eqbs_all;
40389+ atomic_long_unchecked_t debug_eqbs_incomplete;
40390+ atomic_long_unchecked_t debug_sqbs_all;
40391+ atomic_long_unchecked_t debug_sqbs_incomplete;
40392 };
40393
40394 extern struct qdio_perf_stats perf_stats;
40395 extern int qdio_performance_stats;
40396
40397-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40398+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40399 {
40400 if (qdio_performance_stats)
40401- atomic_long_inc(count);
40402+ atomic_long_inc_unchecked(count);
40403 }
40404
40405 int qdio_setup_perf_stats(void);
40406diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40407index 1ddcf40..a85f062 100644
40408--- a/drivers/scsi/BusLogic.c
40409+++ b/drivers/scsi/BusLogic.c
40410@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40411 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40412 *PrototypeHostAdapter)
40413 {
40414+ pax_track_stack();
40415+
40416 /*
40417 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40418 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40419diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40420index cdbdec9..b7d560b 100644
40421--- a/drivers/scsi/aacraid/aacraid.h
40422+++ b/drivers/scsi/aacraid/aacraid.h
40423@@ -471,7 +471,7 @@ struct adapter_ops
40424 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40425 /* Administrative operations */
40426 int (*adapter_comm)(struct aac_dev * dev, int comm);
40427-};
40428+} __no_const;
40429
40430 /*
40431 * Define which interrupt handler needs to be installed
40432diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40433index a5b8e7b..a6a0e43 100644
40434--- a/drivers/scsi/aacraid/commctrl.c
40435+++ b/drivers/scsi/aacraid/commctrl.c
40436@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40437 u32 actual_fibsize64, actual_fibsize = 0;
40438 int i;
40439
40440+ pax_track_stack();
40441
40442 if (dev->in_reset) {
40443 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40444diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40445index 9b97c3e..f099725 100644
40446--- a/drivers/scsi/aacraid/linit.c
40447+++ b/drivers/scsi/aacraid/linit.c
40448@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40449 #elif defined(__devinitconst)
40450 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40451 #else
40452-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40453+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40454 #endif
40455 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40456 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40457diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40458index 996f722..9127845 100644
40459--- a/drivers/scsi/aic94xx/aic94xx_init.c
40460+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40461@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40462 flash_error_table[i].reason);
40463 }
40464
40465-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40466+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40467 asd_show_update_bios, asd_store_update_bios);
40468
40469 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40470@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40471 .lldd_control_phy = asd_control_phy,
40472 };
40473
40474-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40475+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40476 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40477 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40478 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40479diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40480index 58efd4b..cb48dc7 100644
40481--- a/drivers/scsi/bfa/bfa_ioc.h
40482+++ b/drivers/scsi/bfa/bfa_ioc.h
40483@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40484 bfa_ioc_disable_cbfn_t disable_cbfn;
40485 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40486 bfa_ioc_reset_cbfn_t reset_cbfn;
40487-};
40488+} __no_const;
40489
40490 /**
40491 * Heartbeat failure notification queue element.
40492diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40493index 7ad177e..5503586 100644
40494--- a/drivers/scsi/bfa/bfa_iocfc.h
40495+++ b/drivers/scsi/bfa/bfa_iocfc.h
40496@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40497 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40498 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40499 u32 *nvecs, u32 *maxvec);
40500-};
40501+} __no_const;
40502 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40503
40504 struct bfa_iocfc_s {
40505diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40506index 4967643..cbec06b 100644
40507--- a/drivers/scsi/dpt_i2o.c
40508+++ b/drivers/scsi/dpt_i2o.c
40509@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40510 dma_addr_t addr;
40511 ulong flags = 0;
40512
40513+ pax_track_stack();
40514+
40515 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40516 // get user msg size in u32s
40517 if(get_user(size, &user_msg[0])){
40518@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40519 s32 rcode;
40520 dma_addr_t addr;
40521
40522+ pax_track_stack();
40523+
40524 memset(msg, 0 , sizeof(msg));
40525 len = scsi_bufflen(cmd);
40526 direction = 0x00000000;
40527diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40528index c7076ce..e20c67c 100644
40529--- a/drivers/scsi/eata.c
40530+++ b/drivers/scsi/eata.c
40531@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40532 struct hostdata *ha;
40533 char name[16];
40534
40535+ pax_track_stack();
40536+
40537 sprintf(name, "%s%d", driver_name, j);
40538
40539 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40540diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40541index 11ae5c9..891daec 100644
40542--- a/drivers/scsi/fcoe/libfcoe.c
40543+++ b/drivers/scsi/fcoe/libfcoe.c
40544@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40545 size_t rlen;
40546 size_t dlen;
40547
40548+ pax_track_stack();
40549+
40550 fiph = (struct fip_header *)skb->data;
40551 sub = fiph->fip_subcode;
40552 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40553diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40554index 71c7bbe..e93088a 100644
40555--- a/drivers/scsi/fnic/fnic_main.c
40556+++ b/drivers/scsi/fnic/fnic_main.c
40557@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40558 /* Start local port initiatialization */
40559
40560 lp->link_up = 0;
40561- lp->tt = fnic_transport_template;
40562+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40563
40564 lp->max_retry_count = fnic->config.flogi_retries;
40565 lp->max_rport_retry_count = fnic->config.plogi_retries;
40566diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40567index bb96d74..9ec3ce4 100644
40568--- a/drivers/scsi/gdth.c
40569+++ b/drivers/scsi/gdth.c
40570@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40571 ulong flags;
40572 gdth_ha_str *ha;
40573
40574+ pax_track_stack();
40575+
40576 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40577 return -EFAULT;
40578 ha = gdth_find_ha(ldrv.ionode);
40579@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40580 gdth_ha_str *ha;
40581 int rval;
40582
40583+ pax_track_stack();
40584+
40585 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40586 res.number >= MAX_HDRIVES)
40587 return -EFAULT;
40588@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40589 gdth_ha_str *ha;
40590 int rval;
40591
40592+ pax_track_stack();
40593+
40594 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40595 return -EFAULT;
40596 ha = gdth_find_ha(gen.ionode);
40597@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40598 int i;
40599 gdth_cmd_str gdtcmd;
40600 char cmnd[MAX_COMMAND_SIZE];
40601+
40602+ pax_track_stack();
40603+
40604 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40605
40606 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40607diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40608index 1258da3..20d8ae6 100644
40609--- a/drivers/scsi/gdth_proc.c
40610+++ b/drivers/scsi/gdth_proc.c
40611@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40612 ulong64 paddr;
40613
40614 char cmnd[MAX_COMMAND_SIZE];
40615+
40616+ pax_track_stack();
40617+
40618 memset(cmnd, 0xff, 12);
40619 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40620
40621@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40622 gdth_hget_str *phg;
40623 char cmnd[MAX_COMMAND_SIZE];
40624
40625+ pax_track_stack();
40626+
40627 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40628 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40629 if (!gdtcmd || !estr)
40630diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40631index d03a926..f324286 100644
40632--- a/drivers/scsi/hosts.c
40633+++ b/drivers/scsi/hosts.c
40634@@ -40,7 +40,7 @@
40635 #include "scsi_logging.h"
40636
40637
40638-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40639+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40640
40641
40642 static void scsi_host_cls_release(struct device *dev)
40643@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40644 * subtract one because we increment first then return, but we need to
40645 * know what the next host number was before increment
40646 */
40647- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40648+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40649 shost->dma_channel = 0xff;
40650
40651 /* These three are default values which can be overridden */
40652diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40653index a601159..55e19d2 100644
40654--- a/drivers/scsi/ipr.c
40655+++ b/drivers/scsi/ipr.c
40656@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40657 return true;
40658 }
40659
40660-static struct ata_port_operations ipr_sata_ops = {
40661+static const struct ata_port_operations ipr_sata_ops = {
40662 .phy_reset = ipr_ata_phy_reset,
40663 .hardreset = ipr_sata_reset,
40664 .post_internal_cmd = ipr_ata_post_internal,
40665diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40666index 4e49fbc..97907ff 100644
40667--- a/drivers/scsi/ips.h
40668+++ b/drivers/scsi/ips.h
40669@@ -1027,7 +1027,7 @@ typedef struct {
40670 int (*intr)(struct ips_ha *);
40671 void (*enableint)(struct ips_ha *);
40672 uint32_t (*statupd)(struct ips_ha *);
40673-} ips_hw_func_t;
40674+} __no_const ips_hw_func_t;
40675
40676 typedef struct ips_ha {
40677 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40678diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40679index c1c1574..a9c9348 100644
40680--- a/drivers/scsi/libfc/fc_exch.c
40681+++ b/drivers/scsi/libfc/fc_exch.c
40682@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40683 * all together if not used XXX
40684 */
40685 struct {
40686- atomic_t no_free_exch;
40687- atomic_t no_free_exch_xid;
40688- atomic_t xid_not_found;
40689- atomic_t xid_busy;
40690- atomic_t seq_not_found;
40691- atomic_t non_bls_resp;
40692+ atomic_unchecked_t no_free_exch;
40693+ atomic_unchecked_t no_free_exch_xid;
40694+ atomic_unchecked_t xid_not_found;
40695+ atomic_unchecked_t xid_busy;
40696+ atomic_unchecked_t seq_not_found;
40697+ atomic_unchecked_t non_bls_resp;
40698 } stats;
40699 };
40700 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40701@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40702 /* allocate memory for exchange */
40703 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40704 if (!ep) {
40705- atomic_inc(&mp->stats.no_free_exch);
40706+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40707 goto out;
40708 }
40709 memset(ep, 0, sizeof(*ep));
40710@@ -557,7 +557,7 @@ out:
40711 return ep;
40712 err:
40713 spin_unlock_bh(&pool->lock);
40714- atomic_inc(&mp->stats.no_free_exch_xid);
40715+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40716 mempool_free(ep, mp->ep_pool);
40717 return NULL;
40718 }
40719@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40720 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40721 ep = fc_exch_find(mp, xid);
40722 if (!ep) {
40723- atomic_inc(&mp->stats.xid_not_found);
40724+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40725 reject = FC_RJT_OX_ID;
40726 goto out;
40727 }
40728@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40729 ep = fc_exch_find(mp, xid);
40730 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40731 if (ep) {
40732- atomic_inc(&mp->stats.xid_busy);
40733+ atomic_inc_unchecked(&mp->stats.xid_busy);
40734 reject = FC_RJT_RX_ID;
40735 goto rel;
40736 }
40737@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40738 }
40739 xid = ep->xid; /* get our XID */
40740 } else if (!ep) {
40741- atomic_inc(&mp->stats.xid_not_found);
40742+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40743 reject = FC_RJT_RX_ID; /* XID not found */
40744 goto out;
40745 }
40746@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40747 } else {
40748 sp = &ep->seq;
40749 if (sp->id != fh->fh_seq_id) {
40750- atomic_inc(&mp->stats.seq_not_found);
40751+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40752 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40753 goto rel;
40754 }
40755@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40756
40757 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40758 if (!ep) {
40759- atomic_inc(&mp->stats.xid_not_found);
40760+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40761 goto out;
40762 }
40763 if (ep->esb_stat & ESB_ST_COMPLETE) {
40764- atomic_inc(&mp->stats.xid_not_found);
40765+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40766 goto out;
40767 }
40768 if (ep->rxid == FC_XID_UNKNOWN)
40769 ep->rxid = ntohs(fh->fh_rx_id);
40770 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40771- atomic_inc(&mp->stats.xid_not_found);
40772+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40773 goto rel;
40774 }
40775 if (ep->did != ntoh24(fh->fh_s_id) &&
40776 ep->did != FC_FID_FLOGI) {
40777- atomic_inc(&mp->stats.xid_not_found);
40778+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40779 goto rel;
40780 }
40781 sof = fr_sof(fp);
40782@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40783 } else {
40784 sp = &ep->seq;
40785 if (sp->id != fh->fh_seq_id) {
40786- atomic_inc(&mp->stats.seq_not_found);
40787+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40788 goto rel;
40789 }
40790 }
40791@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40792 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40793
40794 if (!sp)
40795- atomic_inc(&mp->stats.xid_not_found);
40796+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40797 else
40798- atomic_inc(&mp->stats.non_bls_resp);
40799+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40800
40801 fc_frame_free(fp);
40802 }
40803diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40804index 0ee989f..a582241 100644
40805--- a/drivers/scsi/libsas/sas_ata.c
40806+++ b/drivers/scsi/libsas/sas_ata.c
40807@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40808 }
40809 }
40810
40811-static struct ata_port_operations sas_sata_ops = {
40812+static const struct ata_port_operations sas_sata_ops = {
40813 .phy_reset = sas_ata_phy_reset,
40814 .post_internal_cmd = sas_ata_post_internal,
40815 .qc_defer = ata_std_qc_defer,
40816diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40817index aa10f79..5cc79e4 100644
40818--- a/drivers/scsi/lpfc/lpfc.h
40819+++ b/drivers/scsi/lpfc/lpfc.h
40820@@ -400,7 +400,7 @@ struct lpfc_vport {
40821 struct dentry *debug_nodelist;
40822 struct dentry *vport_debugfs_root;
40823 struct lpfc_debugfs_trc *disc_trc;
40824- atomic_t disc_trc_cnt;
40825+ atomic_unchecked_t disc_trc_cnt;
40826 #endif
40827 uint8_t stat_data_enabled;
40828 uint8_t stat_data_blocked;
40829@@ -725,8 +725,8 @@ struct lpfc_hba {
40830 struct timer_list fabric_block_timer;
40831 unsigned long bit_flags;
40832 #define FABRIC_COMANDS_BLOCKED 0
40833- atomic_t num_rsrc_err;
40834- atomic_t num_cmd_success;
40835+ atomic_unchecked_t num_rsrc_err;
40836+ atomic_unchecked_t num_cmd_success;
40837 unsigned long last_rsrc_error_time;
40838 unsigned long last_ramp_down_time;
40839 unsigned long last_ramp_up_time;
40840@@ -740,7 +740,7 @@ struct lpfc_hba {
40841 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40842 struct dentry *debug_slow_ring_trc;
40843 struct lpfc_debugfs_trc *slow_ring_trc;
40844- atomic_t slow_ring_trc_cnt;
40845+ atomic_unchecked_t slow_ring_trc_cnt;
40846 #endif
40847
40848 /* Used for deferred freeing of ELS data buffers */
40849diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40850index 8d0f0de..7c77a62 100644
40851--- a/drivers/scsi/lpfc/lpfc_debugfs.c
40852+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40853@@ -124,7 +124,7 @@ struct lpfc_debug {
40854 int len;
40855 };
40856
40857-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40858+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40859 static unsigned long lpfc_debugfs_start_time = 0L;
40860
40861 /**
40862@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40863 lpfc_debugfs_enable = 0;
40864
40865 len = 0;
40866- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
40867+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
40868 (lpfc_debugfs_max_disc_trc - 1);
40869 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
40870 dtp = vport->disc_trc + i;
40871@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
40872 lpfc_debugfs_enable = 0;
40873
40874 len = 0;
40875- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
40876+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
40877 (lpfc_debugfs_max_slow_ring_trc - 1);
40878 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
40879 dtp = phba->slow_ring_trc + i;
40880@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
40881 uint32_t *ptr;
40882 char buffer[1024];
40883
40884+ pax_track_stack();
40885+
40886 off = 0;
40887 spin_lock_irq(&phba->hbalock);
40888
40889@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
40890 !vport || !vport->disc_trc)
40891 return;
40892
40893- index = atomic_inc_return(&vport->disc_trc_cnt) &
40894+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
40895 (lpfc_debugfs_max_disc_trc - 1);
40896 dtp = vport->disc_trc + index;
40897 dtp->fmt = fmt;
40898 dtp->data1 = data1;
40899 dtp->data2 = data2;
40900 dtp->data3 = data3;
40901- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40902+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40903 dtp->jif = jiffies;
40904 #endif
40905 return;
40906@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
40907 !phba || !phba->slow_ring_trc)
40908 return;
40909
40910- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
40911+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
40912 (lpfc_debugfs_max_slow_ring_trc - 1);
40913 dtp = phba->slow_ring_trc + index;
40914 dtp->fmt = fmt;
40915 dtp->data1 = data1;
40916 dtp->data2 = data2;
40917 dtp->data3 = data3;
40918- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40919+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40920 dtp->jif = jiffies;
40921 #endif
40922 return;
40923@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40924 "slow_ring buffer\n");
40925 goto debug_failed;
40926 }
40927- atomic_set(&phba->slow_ring_trc_cnt, 0);
40928+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
40929 memset(phba->slow_ring_trc, 0,
40930 (sizeof(struct lpfc_debugfs_trc) *
40931 lpfc_debugfs_max_slow_ring_trc));
40932@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40933 "buffer\n");
40934 goto debug_failed;
40935 }
40936- atomic_set(&vport->disc_trc_cnt, 0);
40937+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
40938
40939 snprintf(name, sizeof(name), "discovery_trace");
40940 vport->debug_disc_trc =
40941diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
40942index 549bc7d..8189dbb 100644
40943--- a/drivers/scsi/lpfc/lpfc_init.c
40944+++ b/drivers/scsi/lpfc/lpfc_init.c
40945@@ -8021,8 +8021,10 @@ lpfc_init(void)
40946 printk(LPFC_COPYRIGHT "\n");
40947
40948 if (lpfc_enable_npiv) {
40949- lpfc_transport_functions.vport_create = lpfc_vport_create;
40950- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40951+ pax_open_kernel();
40952+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
40953+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40954+ pax_close_kernel();
40955 }
40956 lpfc_transport_template =
40957 fc_attach_transport(&lpfc_transport_functions);
40958diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
40959index c88f59f..ff2a42f 100644
40960--- a/drivers/scsi/lpfc/lpfc_scsi.c
40961+++ b/drivers/scsi/lpfc/lpfc_scsi.c
40962@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
40963 uint32_t evt_posted;
40964
40965 spin_lock_irqsave(&phba->hbalock, flags);
40966- atomic_inc(&phba->num_rsrc_err);
40967+ atomic_inc_unchecked(&phba->num_rsrc_err);
40968 phba->last_rsrc_error_time = jiffies;
40969
40970 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
40971@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
40972 unsigned long flags;
40973 struct lpfc_hba *phba = vport->phba;
40974 uint32_t evt_posted;
40975- atomic_inc(&phba->num_cmd_success);
40976+ atomic_inc_unchecked(&phba->num_cmd_success);
40977
40978 if (vport->cfg_lun_queue_depth <= queue_depth)
40979 return;
40980@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40981 int i;
40982 struct lpfc_rport_data *rdata;
40983
40984- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
40985- num_cmd_success = atomic_read(&phba->num_cmd_success);
40986+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
40987+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
40988
40989 vports = lpfc_create_vport_work_array(phba);
40990 if (vports != NULL)
40991@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40992 }
40993 }
40994 lpfc_destroy_vport_work_array(phba, vports);
40995- atomic_set(&phba->num_rsrc_err, 0);
40996- atomic_set(&phba->num_cmd_success, 0);
40997+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
40998+ atomic_set_unchecked(&phba->num_cmd_success, 0);
40999 }
41000
41001 /**
41002@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41003 }
41004 }
41005 lpfc_destroy_vport_work_array(phba, vports);
41006- atomic_set(&phba->num_rsrc_err, 0);
41007- atomic_set(&phba->num_cmd_success, 0);
41008+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41009+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41010 }
41011
41012 /**
41013diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41014index 234f0b7..3020aea 100644
41015--- a/drivers/scsi/megaraid/megaraid_mbox.c
41016+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41017@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41018 int rval;
41019 int i;
41020
41021+ pax_track_stack();
41022+
41023 // Allocate memory for the base list of scb for management module.
41024 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41025
41026diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41027index 7a117c1..ee01e9e 100644
41028--- a/drivers/scsi/osd/osd_initiator.c
41029+++ b/drivers/scsi/osd/osd_initiator.c
41030@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41031 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41032 int ret;
41033
41034+ pax_track_stack();
41035+
41036 or = osd_start_request(od, GFP_KERNEL);
41037 if (!or)
41038 return -ENOMEM;
41039diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41040index 9ab8c86..9425ad3 100644
41041--- a/drivers/scsi/pmcraid.c
41042+++ b/drivers/scsi/pmcraid.c
41043@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41044 res->scsi_dev = scsi_dev;
41045 scsi_dev->hostdata = res;
41046 res->change_detected = 0;
41047- atomic_set(&res->read_failures, 0);
41048- atomic_set(&res->write_failures, 0);
41049+ atomic_set_unchecked(&res->read_failures, 0);
41050+ atomic_set_unchecked(&res->write_failures, 0);
41051 rc = 0;
41052 }
41053 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41054@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41055
41056 /* If this was a SCSI read/write command keep count of errors */
41057 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41058- atomic_inc(&res->read_failures);
41059+ atomic_inc_unchecked(&res->read_failures);
41060 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41061- atomic_inc(&res->write_failures);
41062+ atomic_inc_unchecked(&res->write_failures);
41063
41064 if (!RES_IS_GSCSI(res->cfg_entry) &&
41065 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41066@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41067
41068 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41069 /* add resources only after host is added into system */
41070- if (!atomic_read(&pinstance->expose_resources))
41071+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41072 return;
41073
41074 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41075@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41076 init_waitqueue_head(&pinstance->reset_wait_q);
41077
41078 atomic_set(&pinstance->outstanding_cmds, 0);
41079- atomic_set(&pinstance->expose_resources, 0);
41080+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41081
41082 INIT_LIST_HEAD(&pinstance->free_res_q);
41083 INIT_LIST_HEAD(&pinstance->used_res_q);
41084@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41085 /* Schedule worker thread to handle CCN and take care of adding and
41086 * removing devices to OS
41087 */
41088- atomic_set(&pinstance->expose_resources, 1);
41089+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41090 schedule_work(&pinstance->worker_q);
41091 return rc;
41092
41093diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41094index 3441b3f..6cbe8f7 100644
41095--- a/drivers/scsi/pmcraid.h
41096+++ b/drivers/scsi/pmcraid.h
41097@@ -690,7 +690,7 @@ struct pmcraid_instance {
41098 atomic_t outstanding_cmds;
41099
41100 /* should add/delete resources to mid-layer now ?*/
41101- atomic_t expose_resources;
41102+ atomic_unchecked_t expose_resources;
41103
41104 /* Tasklet to handle deferred processing */
41105 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41106@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41107 struct list_head queue; /* link to "to be exposed" resources */
41108 struct pmcraid_config_table_entry cfg_entry;
41109 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41110- atomic_t read_failures; /* count of failed READ commands */
41111- atomic_t write_failures; /* count of failed WRITE commands */
41112+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41113+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41114
41115 /* To indicate add/delete/modify during CCN */
41116 u8 change_detected;
41117diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41118index 2150618..7034215 100644
41119--- a/drivers/scsi/qla2xxx/qla_def.h
41120+++ b/drivers/scsi/qla2xxx/qla_def.h
41121@@ -2089,7 +2089,7 @@ struct isp_operations {
41122
41123 int (*get_flash_version) (struct scsi_qla_host *, void *);
41124 int (*start_scsi) (srb_t *);
41125-};
41126+} __no_const;
41127
41128 /* MSI-X Support *************************************************************/
41129
41130diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41131index 81b5f29..2ae1fad 100644
41132--- a/drivers/scsi/qla4xxx/ql4_def.h
41133+++ b/drivers/scsi/qla4xxx/ql4_def.h
41134@@ -240,7 +240,7 @@ struct ddb_entry {
41135 atomic_t retry_relogin_timer; /* Min Time between relogins
41136 * (4000 only) */
41137 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41138- atomic_t relogin_retry_count; /* Num of times relogin has been
41139+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41140 * retried */
41141
41142 uint16_t port;
41143diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41144index af8c323..515dd51 100644
41145--- a/drivers/scsi/qla4xxx/ql4_init.c
41146+++ b/drivers/scsi/qla4xxx/ql4_init.c
41147@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41148 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41149 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41150 atomic_set(&ddb_entry->relogin_timer, 0);
41151- atomic_set(&ddb_entry->relogin_retry_count, 0);
41152+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41153 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41154 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41155 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41156@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41157 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41158 atomic_set(&ddb_entry->port_down_timer,
41159 ha->port_down_retry_count);
41160- atomic_set(&ddb_entry->relogin_retry_count, 0);
41161+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41162 atomic_set(&ddb_entry->relogin_timer, 0);
41163 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41164 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41165diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41166index 83c8b5e..a82b348 100644
41167--- a/drivers/scsi/qla4xxx/ql4_os.c
41168+++ b/drivers/scsi/qla4xxx/ql4_os.c
41169@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41170 ddb_entry->fw_ddb_device_state ==
41171 DDB_DS_SESSION_FAILED) {
41172 /* Reset retry relogin timer */
41173- atomic_inc(&ddb_entry->relogin_retry_count);
41174+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41175 DEBUG2(printk("scsi%ld: index[%d] relogin"
41176 " timed out-retrying"
41177 " relogin (%d)\n",
41178 ha->host_no,
41179 ddb_entry->fw_ddb_index,
41180- atomic_read(&ddb_entry->
41181+ atomic_read_unchecked(&ddb_entry->
41182 relogin_retry_count))
41183 );
41184 start_dpc++;
41185diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41186index dd098ca..686ce01 100644
41187--- a/drivers/scsi/scsi.c
41188+++ b/drivers/scsi/scsi.c
41189@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41190 unsigned long timeout;
41191 int rtn = 0;
41192
41193- atomic_inc(&cmd->device->iorequest_cnt);
41194+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41195
41196 /* check if the device is still usable */
41197 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41198diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41199index bc3e363..e1a8e50 100644
41200--- a/drivers/scsi/scsi_debug.c
41201+++ b/drivers/scsi/scsi_debug.c
41202@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41203 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41204 unsigned char *cmd = (unsigned char *)scp->cmnd;
41205
41206+ pax_track_stack();
41207+
41208 if ((errsts = check_readiness(scp, 1, devip)))
41209 return errsts;
41210 memset(arr, 0, sizeof(arr));
41211@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41212 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41213 unsigned char *cmd = (unsigned char *)scp->cmnd;
41214
41215+ pax_track_stack();
41216+
41217 if ((errsts = check_readiness(scp, 1, devip)))
41218 return errsts;
41219 memset(arr, 0, sizeof(arr));
41220diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41221index 8df12522..c4c1472 100644
41222--- a/drivers/scsi/scsi_lib.c
41223+++ b/drivers/scsi/scsi_lib.c
41224@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41225 shost = sdev->host;
41226 scsi_init_cmd_errh(cmd);
41227 cmd->result = DID_NO_CONNECT << 16;
41228- atomic_inc(&cmd->device->iorequest_cnt);
41229+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41230
41231 /*
41232 * SCSI request completion path will do scsi_device_unbusy(),
41233@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41234 */
41235 cmd->serial_number = 0;
41236
41237- atomic_inc(&cmd->device->iodone_cnt);
41238+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41239 if (cmd->result)
41240- atomic_inc(&cmd->device->ioerr_cnt);
41241+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41242
41243 disposition = scsi_decide_disposition(cmd);
41244 if (disposition != SUCCESS &&
41245diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41246index 91a93e0..eae0fe3 100644
41247--- a/drivers/scsi/scsi_sysfs.c
41248+++ b/drivers/scsi/scsi_sysfs.c
41249@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41250 char *buf) \
41251 { \
41252 struct scsi_device *sdev = to_scsi_device(dev); \
41253- unsigned long long count = atomic_read(&sdev->field); \
41254+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41255 return snprintf(buf, 20, "0x%llx\n", count); \
41256 } \
41257 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41258diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41259index 1030327..f91fd30 100644
41260--- a/drivers/scsi/scsi_tgt_lib.c
41261+++ b/drivers/scsi/scsi_tgt_lib.c
41262@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41263 int err;
41264
41265 dprintk("%lx %u\n", uaddr, len);
41266- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41267+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41268 if (err) {
41269 /*
41270 * TODO: need to fixup sg_tablesize, max_segment_size,
41271diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41272index db02e31..1b42ea9 100644
41273--- a/drivers/scsi/scsi_transport_fc.c
41274+++ b/drivers/scsi/scsi_transport_fc.c
41275@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41276 * Netlink Infrastructure
41277 */
41278
41279-static atomic_t fc_event_seq;
41280+static atomic_unchecked_t fc_event_seq;
41281
41282 /**
41283 * fc_get_event_number - Obtain the next sequential FC event number
41284@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41285 u32
41286 fc_get_event_number(void)
41287 {
41288- return atomic_add_return(1, &fc_event_seq);
41289+ return atomic_add_return_unchecked(1, &fc_event_seq);
41290 }
41291 EXPORT_SYMBOL(fc_get_event_number);
41292
41293@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41294 {
41295 int error;
41296
41297- atomic_set(&fc_event_seq, 0);
41298+ atomic_set_unchecked(&fc_event_seq, 0);
41299
41300 error = transport_class_register(&fc_host_class);
41301 if (error)
41302diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41303index de2f8c4..63c5278 100644
41304--- a/drivers/scsi/scsi_transport_iscsi.c
41305+++ b/drivers/scsi/scsi_transport_iscsi.c
41306@@ -81,7 +81,7 @@ struct iscsi_internal {
41307 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41308 };
41309
41310-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41311+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41312 static struct workqueue_struct *iscsi_eh_timer_workq;
41313
41314 /*
41315@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41316 int err;
41317
41318 ihost = shost->shost_data;
41319- session->sid = atomic_add_return(1, &iscsi_session_nr);
41320+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41321
41322 if (id == ISCSI_MAX_TARGET) {
41323 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41324@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41325 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41326 ISCSI_TRANSPORT_VERSION);
41327
41328- atomic_set(&iscsi_session_nr, 0);
41329+ atomic_set_unchecked(&iscsi_session_nr, 0);
41330
41331 err = class_register(&iscsi_transport_class);
41332 if (err)
41333diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41334index 21a045e..ec89e03 100644
41335--- a/drivers/scsi/scsi_transport_srp.c
41336+++ b/drivers/scsi/scsi_transport_srp.c
41337@@ -33,7 +33,7 @@
41338 #include "scsi_transport_srp_internal.h"
41339
41340 struct srp_host_attrs {
41341- atomic_t next_port_id;
41342+ atomic_unchecked_t next_port_id;
41343 };
41344 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41345
41346@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41347 struct Scsi_Host *shost = dev_to_shost(dev);
41348 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41349
41350- atomic_set(&srp_host->next_port_id, 0);
41351+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41352 return 0;
41353 }
41354
41355@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41356 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41357 rport->roles = ids->roles;
41358
41359- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41360+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41361 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41362
41363 transport_setup_device(&rport->dev);
41364diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41365index 040f751..98a5ed2 100644
41366--- a/drivers/scsi/sg.c
41367+++ b/drivers/scsi/sg.c
41368@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41369 sdp->disk->disk_name,
41370 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41371 NULL,
41372- (char *)arg);
41373+ (char __user *)arg);
41374 case BLKTRACESTART:
41375 return blk_trace_startstop(sdp->device->request_queue, 1);
41376 case BLKTRACESTOP:
41377@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41378 const struct file_operations * fops;
41379 };
41380
41381-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41382+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41383 {"allow_dio", &adio_fops},
41384 {"debug", &debug_fops},
41385 {"def_reserved_size", &dressz_fops},
41386@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41387 {
41388 int k, mask;
41389 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41390- struct sg_proc_leaf * leaf;
41391+ const struct sg_proc_leaf * leaf;
41392
41393 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41394 if (!sg_proc_sgp)
41395diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41396index 45374d6..61ee484 100644
41397--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41398+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41399@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41400 int do_iounmap = 0;
41401 int do_disable_device = 1;
41402
41403+ pax_track_stack();
41404+
41405 memset(&sym_dev, 0, sizeof(sym_dev));
41406 memset(&nvram, 0, sizeof(nvram));
41407 sym_dev.pdev = pdev;
41408diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41409index eadc1ab..2d81457 100644
41410--- a/drivers/serial/kgdboc.c
41411+++ b/drivers/serial/kgdboc.c
41412@@ -18,7 +18,7 @@
41413
41414 #define MAX_CONFIG_LEN 40
41415
41416-static struct kgdb_io kgdboc_io_ops;
41417+static const struct kgdb_io kgdboc_io_ops;
41418
41419 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41420 static int configured = -1;
41421@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41422 module_put(THIS_MODULE);
41423 }
41424
41425-static struct kgdb_io kgdboc_io_ops = {
41426+static const struct kgdb_io kgdboc_io_ops = {
41427 .name = "kgdboc",
41428 .read_char = kgdboc_get_char,
41429 .write_char = kgdboc_put_char,
41430diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41431index b76f246..7f41af7 100644
41432--- a/drivers/spi/spi.c
41433+++ b/drivers/spi/spi.c
41434@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41435 EXPORT_SYMBOL_GPL(spi_sync);
41436
41437 /* portable code must never pass more than 32 bytes */
41438-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41439+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41440
41441 static u8 *buf;
41442
41443diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41444index 99010d4..6bad87b 100644
41445--- a/drivers/staging/android/binder.c
41446+++ b/drivers/staging/android/binder.c
41447@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41448 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41449 }
41450
41451-static struct vm_operations_struct binder_vm_ops = {
41452+static const struct vm_operations_struct binder_vm_ops = {
41453 .open = binder_vma_open,
41454 .close = binder_vma_close,
41455 };
41456diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41457index cda26bb..39fed3f 100644
41458--- a/drivers/staging/b3dfg/b3dfg.c
41459+++ b/drivers/staging/b3dfg/b3dfg.c
41460@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41461 return VM_FAULT_NOPAGE;
41462 }
41463
41464-static struct vm_operations_struct b3dfg_vm_ops = {
41465+static const struct vm_operations_struct b3dfg_vm_ops = {
41466 .fault = b3dfg_vma_fault,
41467 };
41468
41469@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41470 return r;
41471 }
41472
41473-static struct file_operations b3dfg_fops = {
41474+static const struct file_operations b3dfg_fops = {
41475 .owner = THIS_MODULE,
41476 .open = b3dfg_open,
41477 .release = b3dfg_release,
41478diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41479index 908f25a..c9a579b 100644
41480--- a/drivers/staging/comedi/comedi_fops.c
41481+++ b/drivers/staging/comedi/comedi_fops.c
41482@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41483 mutex_unlock(&dev->mutex);
41484 }
41485
41486-static struct vm_operations_struct comedi_vm_ops = {
41487+static const struct vm_operations_struct comedi_vm_ops = {
41488 .close = comedi_unmap,
41489 };
41490
41491diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41492index e55a0db..577b776 100644
41493--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41494+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41495@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41496 static dev_t adsp_devno;
41497 static struct class *adsp_class;
41498
41499-static struct file_operations adsp_fops = {
41500+static const struct file_operations adsp_fops = {
41501 .owner = THIS_MODULE,
41502 .open = adsp_open,
41503 .unlocked_ioctl = adsp_ioctl,
41504diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41505index ad2390f..4116ee8 100644
41506--- a/drivers/staging/dream/qdsp5/audio_aac.c
41507+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41508@@ -1022,7 +1022,7 @@ done:
41509 return rc;
41510 }
41511
41512-static struct file_operations audio_aac_fops = {
41513+static const struct file_operations audio_aac_fops = {
41514 .owner = THIS_MODULE,
41515 .open = audio_open,
41516 .release = audio_release,
41517diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41518index cd818a5..870b37b 100644
41519--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41520+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41521@@ -833,7 +833,7 @@ done:
41522 return rc;
41523 }
41524
41525-static struct file_operations audio_amrnb_fops = {
41526+static const struct file_operations audio_amrnb_fops = {
41527 .owner = THIS_MODULE,
41528 .open = audamrnb_open,
41529 .release = audamrnb_release,
41530diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41531index 4b43e18..cedafda 100644
41532--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41533+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41534@@ -805,7 +805,7 @@ dma_fail:
41535 return rc;
41536 }
41537
41538-static struct file_operations audio_evrc_fops = {
41539+static const struct file_operations audio_evrc_fops = {
41540 .owner = THIS_MODULE,
41541 .open = audevrc_open,
41542 .release = audevrc_release,
41543diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41544index 3d950a2..9431118 100644
41545--- a/drivers/staging/dream/qdsp5/audio_in.c
41546+++ b/drivers/staging/dream/qdsp5/audio_in.c
41547@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41548 return 0;
41549 }
41550
41551-static struct file_operations audio_fops = {
41552+static const struct file_operations audio_fops = {
41553 .owner = THIS_MODULE,
41554 .open = audio_in_open,
41555 .release = audio_in_release,
41556@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41557 .unlocked_ioctl = audio_in_ioctl,
41558 };
41559
41560-static struct file_operations audpre_fops = {
41561+static const struct file_operations audpre_fops = {
41562 .owner = THIS_MODULE,
41563 .open = audpre_open,
41564 .unlocked_ioctl = audpre_ioctl,
41565diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41566index b95574f..286c2f4 100644
41567--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41568+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41569@@ -941,7 +941,7 @@ done:
41570 return rc;
41571 }
41572
41573-static struct file_operations audio_mp3_fops = {
41574+static const struct file_operations audio_mp3_fops = {
41575 .owner = THIS_MODULE,
41576 .open = audio_open,
41577 .release = audio_release,
41578diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41579index d1adcf6..f8f9833 100644
41580--- a/drivers/staging/dream/qdsp5/audio_out.c
41581+++ b/drivers/staging/dream/qdsp5/audio_out.c
41582@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41583 return 0;
41584 }
41585
41586-static struct file_operations audio_fops = {
41587+static const struct file_operations audio_fops = {
41588 .owner = THIS_MODULE,
41589 .open = audio_open,
41590 .release = audio_release,
41591@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41592 .unlocked_ioctl = audio_ioctl,
41593 };
41594
41595-static struct file_operations audpp_fops = {
41596+static const struct file_operations audpp_fops = {
41597 .owner = THIS_MODULE,
41598 .open = audpp_open,
41599 .unlocked_ioctl = audpp_ioctl,
41600diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41601index f0f50e3..f6b9dbc 100644
41602--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41603+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41604@@ -816,7 +816,7 @@ err:
41605 return rc;
41606 }
41607
41608-static struct file_operations audio_qcelp_fops = {
41609+static const struct file_operations audio_qcelp_fops = {
41610 .owner = THIS_MODULE,
41611 .open = audqcelp_open,
41612 .release = audqcelp_release,
41613diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41614index 037d7ff..5469ec3 100644
41615--- a/drivers/staging/dream/qdsp5/snd.c
41616+++ b/drivers/staging/dream/qdsp5/snd.c
41617@@ -242,7 +242,7 @@ err:
41618 return rc;
41619 }
41620
41621-static struct file_operations snd_fops = {
41622+static const struct file_operations snd_fops = {
41623 .owner = THIS_MODULE,
41624 .open = snd_open,
41625 .release = snd_release,
41626diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41627index d4e7d88..0ea632a 100644
41628--- a/drivers/staging/dream/smd/smd_qmi.c
41629+++ b/drivers/staging/dream/smd/smd_qmi.c
41630@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41631 return 0;
41632 }
41633
41634-static struct file_operations qmi_fops = {
41635+static const struct file_operations qmi_fops = {
41636 .owner = THIS_MODULE,
41637 .read = qmi_read,
41638 .write = qmi_write,
41639diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41640index cd3910b..ff053d3 100644
41641--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41642+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41643@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41644 return rc;
41645 }
41646
41647-static struct file_operations rpcrouter_server_fops = {
41648+static const struct file_operations rpcrouter_server_fops = {
41649 .owner = THIS_MODULE,
41650 .open = rpcrouter_open,
41651 .release = rpcrouter_release,
41652@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41653 .unlocked_ioctl = rpcrouter_ioctl,
41654 };
41655
41656-static struct file_operations rpcrouter_router_fops = {
41657+static const struct file_operations rpcrouter_router_fops = {
41658 .owner = THIS_MODULE,
41659 .open = rpcrouter_open,
41660 .release = rpcrouter_release,
41661diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41662index c24e4e0..07665be 100644
41663--- a/drivers/staging/dst/dcore.c
41664+++ b/drivers/staging/dst/dcore.c
41665@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41666 return 0;
41667 }
41668
41669-static struct block_device_operations dst_blk_ops = {
41670+static const struct block_device_operations dst_blk_ops = {
41671 .open = dst_bdev_open,
41672 .release = dst_bdev_release,
41673 .owner = THIS_MODULE,
41674@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41675 n->size = ctl->size;
41676
41677 atomic_set(&n->refcnt, 1);
41678- atomic_long_set(&n->gen, 0);
41679+ atomic_long_set_unchecked(&n->gen, 0);
41680 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41681
41682 err = dst_node_sysfs_init(n);
41683diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41684index 557d372..8d84422 100644
41685--- a/drivers/staging/dst/trans.c
41686+++ b/drivers/staging/dst/trans.c
41687@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41688 t->error = 0;
41689 t->retries = 0;
41690 atomic_set(&t->refcnt, 1);
41691- t->gen = atomic_long_inc_return(&n->gen);
41692+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41693
41694 t->enc = bio_data_dir(bio);
41695 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41696diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41697index 94f7752..d051514 100644
41698--- a/drivers/staging/et131x/et1310_tx.c
41699+++ b/drivers/staging/et131x/et1310_tx.c
41700@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41701 struct net_device_stats *stats = &etdev->net_stats;
41702
41703 if (pMpTcb->Flags & fMP_DEST_BROAD)
41704- atomic_inc(&etdev->Stats.brdcstxmt);
41705+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41706 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41707- atomic_inc(&etdev->Stats.multixmt);
41708+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41709 else
41710- atomic_inc(&etdev->Stats.unixmt);
41711+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41712
41713 if (pMpTcb->Packet) {
41714 stats->tx_bytes += pMpTcb->Packet->len;
41715diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41716index 1dfe06f..f469b4d 100644
41717--- a/drivers/staging/et131x/et131x_adapter.h
41718+++ b/drivers/staging/et131x/et131x_adapter.h
41719@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41720 * operations
41721 */
41722 u32 unircv; /* # multicast packets received */
41723- atomic_t unixmt; /* # multicast packets for Tx */
41724+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41725 u32 multircv; /* # multicast packets received */
41726- atomic_t multixmt; /* # multicast packets for Tx */
41727+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41728 u32 brdcstrcv; /* # broadcast packets received */
41729- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41730+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41731 u32 norcvbuf; /* # Rx packets discarded */
41732 u32 noxmtbuf; /* # Tx packets discarded */
41733
41734diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41735index 4bd353a..e28f455 100644
41736--- a/drivers/staging/go7007/go7007-v4l2.c
41737+++ b/drivers/staging/go7007/go7007-v4l2.c
41738@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41739 return 0;
41740 }
41741
41742-static struct vm_operations_struct go7007_vm_ops = {
41743+static const struct vm_operations_struct go7007_vm_ops = {
41744 .open = go7007_vm_open,
41745 .close = go7007_vm_close,
41746 .fault = go7007_vm_fault,
41747diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41748index 366dc95..b974d87 100644
41749--- a/drivers/staging/hv/Channel.c
41750+++ b/drivers/staging/hv/Channel.c
41751@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41752
41753 DPRINT_ENTER(VMBUS);
41754
41755- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41756- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41757+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41758+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41759
41760 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41761 ASSERT(msgInfo != NULL);
41762diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41763index b12237f..01ae28a 100644
41764--- a/drivers/staging/hv/Hv.c
41765+++ b/drivers/staging/hv/Hv.c
41766@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41767 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41768 u32 outputAddressHi = outputAddress >> 32;
41769 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41770- volatile void *hypercallPage = gHvContext.HypercallPage;
41771+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41772
41773 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41774 Control, Input, Output);
41775diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41776index d089bb1..2ebc158 100644
41777--- a/drivers/staging/hv/VmbusApi.h
41778+++ b/drivers/staging/hv/VmbusApi.h
41779@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41780 u32 *GpadlHandle);
41781 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41782 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41783-};
41784+} __no_const;
41785
41786 /* Base driver object */
41787 struct hv_driver {
41788diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41789index 5a37cce..6ecc88c 100644
41790--- a/drivers/staging/hv/VmbusPrivate.h
41791+++ b/drivers/staging/hv/VmbusPrivate.h
41792@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41793 struct VMBUS_CONNECTION {
41794 enum VMBUS_CONNECT_STATE ConnectState;
41795
41796- atomic_t NextGpadlHandle;
41797+ atomic_unchecked_t NextGpadlHandle;
41798
41799 /*
41800 * Represents channel interrupts. Each bit position represents a
41801diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41802index 871a202..ca50ddf 100644
41803--- a/drivers/staging/hv/blkvsc_drv.c
41804+++ b/drivers/staging/hv/blkvsc_drv.c
41805@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41806 /* The one and only one */
41807 static struct blkvsc_driver_context g_blkvsc_drv;
41808
41809-static struct block_device_operations block_ops = {
41810+static const struct block_device_operations block_ops = {
41811 .owner = THIS_MODULE,
41812 .open = blkvsc_open,
41813 .release = blkvsc_release,
41814diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41815index 6acc49a..fbc8d46 100644
41816--- a/drivers/staging/hv/vmbus_drv.c
41817+++ b/drivers/staging/hv/vmbus_drv.c
41818@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41819 to_device_context(root_device_obj);
41820 struct device_context *child_device_ctx =
41821 to_device_context(child_device_obj);
41822- static atomic_t device_num = ATOMIC_INIT(0);
41823+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41824
41825 DPRINT_ENTER(VMBUS_DRV);
41826
41827@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41828
41829 /* Set the device name. Otherwise, device_register() will fail. */
41830 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41831- atomic_inc_return(&device_num));
41832+ atomic_inc_return_unchecked(&device_num));
41833
41834 /* The new device belongs to this bus */
41835 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41836diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41837index d926189..17b19fd 100644
41838--- a/drivers/staging/iio/ring_generic.h
41839+++ b/drivers/staging/iio/ring_generic.h
41840@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41841
41842 int (*is_enabled)(struct iio_ring_buffer *ring);
41843 int (*enable)(struct iio_ring_buffer *ring);
41844-};
41845+} __no_const;
41846
41847 /**
41848 * struct iio_ring_buffer - general ring buffer structure
41849diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41850index 1b237b7..88c624e 100644
41851--- a/drivers/staging/octeon/ethernet-rx.c
41852+++ b/drivers/staging/octeon/ethernet-rx.c
41853@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41854 /* Increment RX stats for virtual ports */
41855 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41856 #ifdef CONFIG_64BIT
41857- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41858- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41859+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41860+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41861 #else
41862- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41863- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41864+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41865+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41866 #endif
41867 }
41868 netif_receive_skb(skb);
41869@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41870 dev->name);
41871 */
41872 #ifdef CONFIG_64BIT
41873- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41874+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
41875 #else
41876- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41877+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
41878 #endif
41879 dev_kfree_skb_irq(skb);
41880 }
41881diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41882index 492c502..d9909f1 100644
41883--- a/drivers/staging/octeon/ethernet.c
41884+++ b/drivers/staging/octeon/ethernet.c
41885@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41886 * since the RX tasklet also increments it.
41887 */
41888 #ifdef CONFIG_64BIT
41889- atomic64_add(rx_status.dropped_packets,
41890- (atomic64_t *)&priv->stats.rx_dropped);
41891+ atomic64_add_unchecked(rx_status.dropped_packets,
41892+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41893 #else
41894- atomic_add(rx_status.dropped_packets,
41895- (atomic_t *)&priv->stats.rx_dropped);
41896+ atomic_add_unchecked(rx_status.dropped_packets,
41897+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
41898 #endif
41899 }
41900
41901diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
41902index a35bd5d..28fff45 100644
41903--- a/drivers/staging/otus/80211core/pub_zfi.h
41904+++ b/drivers/staging/otus/80211core/pub_zfi.h
41905@@ -531,7 +531,7 @@ struct zsCbFuncTbl
41906 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
41907
41908 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
41909-};
41910+} __no_const;
41911
41912 extern void zfZeroMemory(u8_t* va, u16_t length);
41913 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
41914diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
41915index c39a25f..696f5aa 100644
41916--- a/drivers/staging/panel/panel.c
41917+++ b/drivers/staging/panel/panel.c
41918@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
41919 return 0;
41920 }
41921
41922-static struct file_operations lcd_fops = {
41923+static const struct file_operations lcd_fops = {
41924 .write = lcd_write,
41925 .open = lcd_open,
41926 .release = lcd_release,
41927@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
41928 return 0;
41929 }
41930
41931-static struct file_operations keypad_fops = {
41932+static const struct file_operations keypad_fops = {
41933 .read = keypad_read, /* read */
41934 .open = keypad_open, /* open */
41935 .release = keypad_release, /* close */
41936diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
41937index 270ebcb..37e46af 100644
41938--- a/drivers/staging/phison/phison.c
41939+++ b/drivers/staging/phison/phison.c
41940@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
41941 ATA_BMDMA_SHT(DRV_NAME),
41942 };
41943
41944-static struct ata_port_operations phison_ops = {
41945+static const struct ata_port_operations phison_ops = {
41946 .inherits = &ata_bmdma_port_ops,
41947 .prereset = phison_pre_reset,
41948 };
41949diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
41950index 2eb8e3d..57616a7 100644
41951--- a/drivers/staging/poch/poch.c
41952+++ b/drivers/staging/poch/poch.c
41953@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
41954 return 0;
41955 }
41956
41957-static struct file_operations poch_fops = {
41958+static const struct file_operations poch_fops = {
41959 .owner = THIS_MODULE,
41960 .open = poch_open,
41961 .release = poch_release,
41962diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
41963index c94de31..19402bc 100644
41964--- a/drivers/staging/pohmelfs/inode.c
41965+++ b/drivers/staging/pohmelfs/inode.c
41966@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41967 mutex_init(&psb->mcache_lock);
41968 psb->mcache_root = RB_ROOT;
41969 psb->mcache_timeout = msecs_to_jiffies(5000);
41970- atomic_long_set(&psb->mcache_gen, 0);
41971+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
41972
41973 psb->trans_max_pages = 100;
41974
41975@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41976 INIT_LIST_HEAD(&psb->crypto_ready_list);
41977 INIT_LIST_HEAD(&psb->crypto_active_list);
41978
41979- atomic_set(&psb->trans_gen, 1);
41980+ atomic_set_unchecked(&psb->trans_gen, 1);
41981 atomic_long_set(&psb->total_inodes, 0);
41982
41983 mutex_init(&psb->state_lock);
41984diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
41985index e22665c..a2a9390 100644
41986--- a/drivers/staging/pohmelfs/mcache.c
41987+++ b/drivers/staging/pohmelfs/mcache.c
41988@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
41989 m->data = data;
41990 m->start = start;
41991 m->size = size;
41992- m->gen = atomic_long_inc_return(&psb->mcache_gen);
41993+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
41994
41995 mutex_lock(&psb->mcache_lock);
41996 err = pohmelfs_mcache_insert(psb, m);
41997diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
41998index 623a07d..4035c19 100644
41999--- a/drivers/staging/pohmelfs/netfs.h
42000+++ b/drivers/staging/pohmelfs/netfs.h
42001@@ -570,14 +570,14 @@ struct pohmelfs_config;
42002 struct pohmelfs_sb {
42003 struct rb_root mcache_root;
42004 struct mutex mcache_lock;
42005- atomic_long_t mcache_gen;
42006+ atomic_long_unchecked_t mcache_gen;
42007 unsigned long mcache_timeout;
42008
42009 unsigned int idx;
42010
42011 unsigned int trans_retries;
42012
42013- atomic_t trans_gen;
42014+ atomic_unchecked_t trans_gen;
42015
42016 unsigned int crypto_attached_size;
42017 unsigned int crypto_align_size;
42018diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42019index 36a2535..0591bf4 100644
42020--- a/drivers/staging/pohmelfs/trans.c
42021+++ b/drivers/staging/pohmelfs/trans.c
42022@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42023 int err;
42024 struct netfs_cmd *cmd = t->iovec.iov_base;
42025
42026- t->gen = atomic_inc_return(&psb->trans_gen);
42027+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42028
42029 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42030 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42031diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42032index f890a16..509ece8 100644
42033--- a/drivers/staging/sep/sep_driver.c
42034+++ b/drivers/staging/sep/sep_driver.c
42035@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42036 static dev_t sep_devno;
42037
42038 /* the files operations structure of the driver */
42039-static struct file_operations sep_file_operations = {
42040+static const struct file_operations sep_file_operations = {
42041 .owner = THIS_MODULE,
42042 .ioctl = sep_ioctl,
42043 .poll = sep_poll,
42044diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42045index 5e16bc3..7655b10 100644
42046--- a/drivers/staging/usbip/usbip_common.h
42047+++ b/drivers/staging/usbip/usbip_common.h
42048@@ -374,7 +374,7 @@ struct usbip_device {
42049 void (*shutdown)(struct usbip_device *);
42050 void (*reset)(struct usbip_device *);
42051 void (*unusable)(struct usbip_device *);
42052- } eh_ops;
42053+ } __no_const eh_ops;
42054 };
42055
42056
42057diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42058index 57f7946..d9df23d 100644
42059--- a/drivers/staging/usbip/vhci.h
42060+++ b/drivers/staging/usbip/vhci.h
42061@@ -92,7 +92,7 @@ struct vhci_hcd {
42062 unsigned resuming:1;
42063 unsigned long re_timeout;
42064
42065- atomic_t seqnum;
42066+ atomic_unchecked_t seqnum;
42067
42068 /*
42069 * NOTE:
42070diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42071index 20cd7db..c2693ff 100644
42072--- a/drivers/staging/usbip/vhci_hcd.c
42073+++ b/drivers/staging/usbip/vhci_hcd.c
42074@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42075 return;
42076 }
42077
42078- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42079+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42080 if (priv->seqnum == 0xffff)
42081 usbip_uinfo("seqnum max\n");
42082
42083@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42084 return -ENOMEM;
42085 }
42086
42087- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42088+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42089 if (unlink->seqnum == 0xffff)
42090 usbip_uinfo("seqnum max\n");
42091
42092@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42093 vdev->rhport = rhport;
42094 }
42095
42096- atomic_set(&vhci->seqnum, 0);
42097+ atomic_set_unchecked(&vhci->seqnum, 0);
42098 spin_lock_init(&vhci->lock);
42099
42100
42101diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42102index 7fd76fe..673695a 100644
42103--- a/drivers/staging/usbip/vhci_rx.c
42104+++ b/drivers/staging/usbip/vhci_rx.c
42105@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42106 usbip_uerr("cannot find a urb of seqnum %u\n",
42107 pdu->base.seqnum);
42108 usbip_uinfo("max seqnum %d\n",
42109- atomic_read(&the_controller->seqnum));
42110+ atomic_read_unchecked(&the_controller->seqnum));
42111 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42112 return;
42113 }
42114diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42115index 7891288..8e31300 100644
42116--- a/drivers/staging/vme/devices/vme_user.c
42117+++ b/drivers/staging/vme/devices/vme_user.c
42118@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42119 static int __init vme_user_probe(struct device *, int, int);
42120 static int __exit vme_user_remove(struct device *, int, int);
42121
42122-static struct file_operations vme_user_fops = {
42123+static const struct file_operations vme_user_fops = {
42124 .open = vme_user_open,
42125 .release = vme_user_release,
42126 .read = vme_user_read,
42127diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42128index 58abf44..00c1fc8 100644
42129--- a/drivers/staging/vt6655/hostap.c
42130+++ b/drivers/staging/vt6655/hostap.c
42131@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42132 PSDevice apdev_priv;
42133 struct net_device *dev = pDevice->dev;
42134 int ret;
42135- const struct net_device_ops apdev_netdev_ops = {
42136+ net_device_ops_no_const apdev_netdev_ops = {
42137 .ndo_start_xmit = pDevice->tx_80211,
42138 };
42139
42140diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42141index 0c8267a..db1f363 100644
42142--- a/drivers/staging/vt6656/hostap.c
42143+++ b/drivers/staging/vt6656/hostap.c
42144@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42145 PSDevice apdev_priv;
42146 struct net_device *dev = pDevice->dev;
42147 int ret;
42148- const struct net_device_ops apdev_netdev_ops = {
42149+ net_device_ops_no_const apdev_netdev_ops = {
42150 .ndo_start_xmit = pDevice->tx_80211,
42151 };
42152
42153diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42154index 925678b..da7f5ed 100644
42155--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42156+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42157@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42158
42159 struct usbctlx_completor {
42160 int (*complete) (struct usbctlx_completor *);
42161-};
42162+} __no_const;
42163 typedef struct usbctlx_completor usbctlx_completor_t;
42164
42165 static int
42166diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42167index 40de151..924f268 100644
42168--- a/drivers/telephony/ixj.c
42169+++ b/drivers/telephony/ixj.c
42170@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42171 bool mContinue;
42172 char *pIn, *pOut;
42173
42174+ pax_track_stack();
42175+
42176 if (!SCI_Prepare(j))
42177 return 0;
42178
42179diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42180index e941367..b631f5a 100644
42181--- a/drivers/uio/uio.c
42182+++ b/drivers/uio/uio.c
42183@@ -23,6 +23,7 @@
42184 #include <linux/string.h>
42185 #include <linux/kobject.h>
42186 #include <linux/uio_driver.h>
42187+#include <asm/local.h>
42188
42189 #define UIO_MAX_DEVICES 255
42190
42191@@ -30,10 +31,10 @@ struct uio_device {
42192 struct module *owner;
42193 struct device *dev;
42194 int minor;
42195- atomic_t event;
42196+ atomic_unchecked_t event;
42197 struct fasync_struct *async_queue;
42198 wait_queue_head_t wait;
42199- int vma_count;
42200+ local_t vma_count;
42201 struct uio_info *info;
42202 struct kobject *map_dir;
42203 struct kobject *portio_dir;
42204@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42205 return entry->show(mem, buf);
42206 }
42207
42208-static struct sysfs_ops map_sysfs_ops = {
42209+static const struct sysfs_ops map_sysfs_ops = {
42210 .show = map_type_show,
42211 };
42212
42213@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42214 return entry->show(port, buf);
42215 }
42216
42217-static struct sysfs_ops portio_sysfs_ops = {
42218+static const struct sysfs_ops portio_sysfs_ops = {
42219 .show = portio_type_show,
42220 };
42221
42222@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42223 struct uio_device *idev = dev_get_drvdata(dev);
42224 if (idev)
42225 return sprintf(buf, "%u\n",
42226- (unsigned int)atomic_read(&idev->event));
42227+ (unsigned int)atomic_read_unchecked(&idev->event));
42228 else
42229 return -ENODEV;
42230 }
42231@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42232 {
42233 struct uio_device *idev = info->uio_dev;
42234
42235- atomic_inc(&idev->event);
42236+ atomic_inc_unchecked(&idev->event);
42237 wake_up_interruptible(&idev->wait);
42238 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42239 }
42240@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42241 }
42242
42243 listener->dev = idev;
42244- listener->event_count = atomic_read(&idev->event);
42245+ listener->event_count = atomic_read_unchecked(&idev->event);
42246 filep->private_data = listener;
42247
42248 if (idev->info->open) {
42249@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42250 return -EIO;
42251
42252 poll_wait(filep, &idev->wait, wait);
42253- if (listener->event_count != atomic_read(&idev->event))
42254+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42255 return POLLIN | POLLRDNORM;
42256 return 0;
42257 }
42258@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42259 do {
42260 set_current_state(TASK_INTERRUPTIBLE);
42261
42262- event_count = atomic_read(&idev->event);
42263+ event_count = atomic_read_unchecked(&idev->event);
42264 if (event_count != listener->event_count) {
42265 if (copy_to_user(buf, &event_count, count))
42266 retval = -EFAULT;
42267@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42268 static void uio_vma_open(struct vm_area_struct *vma)
42269 {
42270 struct uio_device *idev = vma->vm_private_data;
42271- idev->vma_count++;
42272+ local_inc(&idev->vma_count);
42273 }
42274
42275 static void uio_vma_close(struct vm_area_struct *vma)
42276 {
42277 struct uio_device *idev = vma->vm_private_data;
42278- idev->vma_count--;
42279+ local_dec(&idev->vma_count);
42280 }
42281
42282 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42283@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42284 idev->owner = owner;
42285 idev->info = info;
42286 init_waitqueue_head(&idev->wait);
42287- atomic_set(&idev->event, 0);
42288+ atomic_set_unchecked(&idev->event, 0);
42289
42290 ret = uio_get_minor(idev);
42291 if (ret)
42292diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42293index fbea856..06efea6 100644
42294--- a/drivers/usb/atm/usbatm.c
42295+++ b/drivers/usb/atm/usbatm.c
42296@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42297 if (printk_ratelimit())
42298 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42299 __func__, vpi, vci);
42300- atomic_inc(&vcc->stats->rx_err);
42301+ atomic_inc_unchecked(&vcc->stats->rx_err);
42302 return;
42303 }
42304
42305@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42306 if (length > ATM_MAX_AAL5_PDU) {
42307 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42308 __func__, length, vcc);
42309- atomic_inc(&vcc->stats->rx_err);
42310+ atomic_inc_unchecked(&vcc->stats->rx_err);
42311 goto out;
42312 }
42313
42314@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42315 if (sarb->len < pdu_length) {
42316 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42317 __func__, pdu_length, sarb->len, vcc);
42318- atomic_inc(&vcc->stats->rx_err);
42319+ atomic_inc_unchecked(&vcc->stats->rx_err);
42320 goto out;
42321 }
42322
42323 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42324 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42325 __func__, vcc);
42326- atomic_inc(&vcc->stats->rx_err);
42327+ atomic_inc_unchecked(&vcc->stats->rx_err);
42328 goto out;
42329 }
42330
42331@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42332 if (printk_ratelimit())
42333 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42334 __func__, length);
42335- atomic_inc(&vcc->stats->rx_drop);
42336+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42337 goto out;
42338 }
42339
42340@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42341
42342 vcc->push(vcc, skb);
42343
42344- atomic_inc(&vcc->stats->rx);
42345+ atomic_inc_unchecked(&vcc->stats->rx);
42346 out:
42347 skb_trim(sarb, 0);
42348 }
42349@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42350 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42351
42352 usbatm_pop(vcc, skb);
42353- atomic_inc(&vcc->stats->tx);
42354+ atomic_inc_unchecked(&vcc->stats->tx);
42355
42356 skb = skb_dequeue(&instance->sndqueue);
42357 }
42358@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42359 if (!left--)
42360 return sprintf(page,
42361 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42362- atomic_read(&atm_dev->stats.aal5.tx),
42363- atomic_read(&atm_dev->stats.aal5.tx_err),
42364- atomic_read(&atm_dev->stats.aal5.rx),
42365- atomic_read(&atm_dev->stats.aal5.rx_err),
42366- atomic_read(&atm_dev->stats.aal5.rx_drop));
42367+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42368+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42369+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42370+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42371+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42372
42373 if (!left--) {
42374 if (instance->disconnected)
42375diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
42376index 3e564bf..949b448 100644
42377--- a/drivers/usb/class/cdc-wdm.c
42378+++ b/drivers/usb/class/cdc-wdm.c
42379@@ -314,7 +314,7 @@ static ssize_t wdm_write
42380 if (r < 0)
42381 goto outnp;
42382
42383- if (!file->f_flags && O_NONBLOCK)
42384+ if (!(file->f_flags & O_NONBLOCK))
42385 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
42386 &desc->flags));
42387 else
42388diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42389index 24e6205..fe5a5d4 100644
42390--- a/drivers/usb/core/hcd.c
42391+++ b/drivers/usb/core/hcd.c
42392@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42393
42394 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42395
42396-struct usb_mon_operations *mon_ops;
42397+const struct usb_mon_operations *mon_ops;
42398
42399 /*
42400 * The registration is unlocked.
42401@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42402 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42403 */
42404
42405-int usb_mon_register (struct usb_mon_operations *ops)
42406+int usb_mon_register (const struct usb_mon_operations *ops)
42407 {
42408
42409 if (mon_ops)
42410diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42411index bcbe104..9cfd1c6 100644
42412--- a/drivers/usb/core/hcd.h
42413+++ b/drivers/usb/core/hcd.h
42414@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42415 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42416
42417 struct usb_mon_operations {
42418- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42419- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42420- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42421+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42422+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42423+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42424 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42425 };
42426
42427-extern struct usb_mon_operations *mon_ops;
42428+extern const struct usb_mon_operations *mon_ops;
42429
42430 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42431 {
42432@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42433 (*mon_ops->urb_complete)(bus, urb, status);
42434 }
42435
42436-int usb_mon_register(struct usb_mon_operations *ops);
42437+int usb_mon_register(const struct usb_mon_operations *ops);
42438 void usb_mon_deregister(void);
42439
42440 #else
42441diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42442index 409cc94..a673bad 100644
42443--- a/drivers/usb/core/message.c
42444+++ b/drivers/usb/core/message.c
42445@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42446 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42447 if (buf) {
42448 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42449- if (len > 0) {
42450- smallbuf = kmalloc(++len, GFP_NOIO);
42451+ if (len++ > 0) {
42452+ smallbuf = kmalloc(len, GFP_NOIO);
42453 if (!smallbuf)
42454 return buf;
42455 memcpy(smallbuf, buf, len);
42456diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42457index 62ff5e7..530b74e 100644
42458--- a/drivers/usb/misc/appledisplay.c
42459+++ b/drivers/usb/misc/appledisplay.c
42460@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42461 return pdata->msgdata[1];
42462 }
42463
42464-static struct backlight_ops appledisplay_bl_data = {
42465+static const struct backlight_ops appledisplay_bl_data = {
42466 .get_brightness = appledisplay_bl_get_brightness,
42467 .update_status = appledisplay_bl_update_status,
42468 };
42469diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42470index e0c2db3..bd8cb66 100644
42471--- a/drivers/usb/mon/mon_main.c
42472+++ b/drivers/usb/mon/mon_main.c
42473@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42474 /*
42475 * Ops
42476 */
42477-static struct usb_mon_operations mon_ops_0 = {
42478+static const struct usb_mon_operations mon_ops_0 = {
42479 .urb_submit = mon_submit,
42480 .urb_submit_error = mon_submit_error,
42481 .urb_complete = mon_complete,
42482diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42483index d6bea3e..60b250e 100644
42484--- a/drivers/usb/wusbcore/wa-hc.h
42485+++ b/drivers/usb/wusbcore/wa-hc.h
42486@@ -192,7 +192,7 @@ struct wahc {
42487 struct list_head xfer_delayed_list;
42488 spinlock_t xfer_list_lock;
42489 struct work_struct xfer_work;
42490- atomic_t xfer_id_count;
42491+ atomic_unchecked_t xfer_id_count;
42492 };
42493
42494
42495@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42496 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42497 spin_lock_init(&wa->xfer_list_lock);
42498 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42499- atomic_set(&wa->xfer_id_count, 1);
42500+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42501 }
42502
42503 /**
42504diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42505index 613a5fc..3174865 100644
42506--- a/drivers/usb/wusbcore/wa-xfer.c
42507+++ b/drivers/usb/wusbcore/wa-xfer.c
42508@@ -293,7 +293,7 @@ out:
42509 */
42510 static void wa_xfer_id_init(struct wa_xfer *xfer)
42511 {
42512- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42513+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42514 }
42515
42516 /*
42517diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42518index aa42fce..f8a828c 100644
42519--- a/drivers/uwb/wlp/messages.c
42520+++ b/drivers/uwb/wlp/messages.c
42521@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42522 size_t len = skb->len;
42523 size_t used;
42524 ssize_t result;
42525- struct wlp_nonce enonce, rnonce;
42526+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42527 enum wlp_assc_error assc_err;
42528 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42529 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42530diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42531index 0370399..6627c94 100644
42532--- a/drivers/uwb/wlp/sysfs.c
42533+++ b/drivers/uwb/wlp/sysfs.c
42534@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42535 return ret;
42536 }
42537
42538-static
42539-struct sysfs_ops wss_sysfs_ops = {
42540+static const struct sysfs_ops wss_sysfs_ops = {
42541 .show = wlp_wss_attr_show,
42542 .store = wlp_wss_attr_store,
42543 };
42544diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42545index d5e8010..5687b56 100644
42546--- a/drivers/video/atmel_lcdfb.c
42547+++ b/drivers/video/atmel_lcdfb.c
42548@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42549 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42550 }
42551
42552-static struct backlight_ops atmel_lcdc_bl_ops = {
42553+static const struct backlight_ops atmel_lcdc_bl_ops = {
42554 .update_status = atmel_bl_update_status,
42555 .get_brightness = atmel_bl_get_brightness,
42556 };
42557diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42558index e4e4d43..66bcbcc 100644
42559--- a/drivers/video/aty/aty128fb.c
42560+++ b/drivers/video/aty/aty128fb.c
42561@@ -149,7 +149,7 @@ enum {
42562 };
42563
42564 /* Must match above enum */
42565-static const char *r128_family[] __devinitdata = {
42566+static const char *r128_family[] __devinitconst = {
42567 "AGP",
42568 "PCI",
42569 "PRO AGP",
42570@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42571 return bd->props.brightness;
42572 }
42573
42574-static struct backlight_ops aty128_bl_data = {
42575+static const struct backlight_ops aty128_bl_data = {
42576 .get_brightness = aty128_bl_get_brightness,
42577 .update_status = aty128_bl_update_status,
42578 };
42579diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42580index 913b4a4..9295a38 100644
42581--- a/drivers/video/aty/atyfb_base.c
42582+++ b/drivers/video/aty/atyfb_base.c
42583@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42584 return bd->props.brightness;
42585 }
42586
42587-static struct backlight_ops aty_bl_data = {
42588+static const struct backlight_ops aty_bl_data = {
42589 .get_brightness = aty_bl_get_brightness,
42590 .update_status = aty_bl_update_status,
42591 };
42592diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42593index 1a056ad..221bd6a 100644
42594--- a/drivers/video/aty/radeon_backlight.c
42595+++ b/drivers/video/aty/radeon_backlight.c
42596@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42597 return bd->props.brightness;
42598 }
42599
42600-static struct backlight_ops radeon_bl_data = {
42601+static const struct backlight_ops radeon_bl_data = {
42602 .get_brightness = radeon_bl_get_brightness,
42603 .update_status = radeon_bl_update_status,
42604 };
42605diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42606index ad05da5..3cb2cb9 100644
42607--- a/drivers/video/backlight/adp5520_bl.c
42608+++ b/drivers/video/backlight/adp5520_bl.c
42609@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42610 return error ? data->current_brightness : reg_val;
42611 }
42612
42613-static struct backlight_ops adp5520_bl_ops = {
42614+static const struct backlight_ops adp5520_bl_ops = {
42615 .update_status = adp5520_bl_update_status,
42616 .get_brightness = adp5520_bl_get_brightness,
42617 };
42618diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42619index 2c3bdfc..d769b0b 100644
42620--- a/drivers/video/backlight/adx_bl.c
42621+++ b/drivers/video/backlight/adx_bl.c
42622@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42623 return 1;
42624 }
42625
42626-static struct backlight_ops adx_backlight_ops = {
42627+static const struct backlight_ops adx_backlight_ops = {
42628 .options = 0,
42629 .update_status = adx_backlight_update_status,
42630 .get_brightness = adx_backlight_get_brightness,
42631diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42632index 505c082..6b6b3cc 100644
42633--- a/drivers/video/backlight/atmel-pwm-bl.c
42634+++ b/drivers/video/backlight/atmel-pwm-bl.c
42635@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42636 return pwm_channel_enable(&pwmbl->pwmc);
42637 }
42638
42639-static struct backlight_ops atmel_pwm_bl_ops = {
42640+static const struct backlight_ops atmel_pwm_bl_ops = {
42641 .get_brightness = atmel_pwm_bl_get_intensity,
42642 .update_status = atmel_pwm_bl_set_intensity,
42643 };
42644diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42645index 5e20e6e..89025e6 100644
42646--- a/drivers/video/backlight/backlight.c
42647+++ b/drivers/video/backlight/backlight.c
42648@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42649 * ERR_PTR() or a pointer to the newly allocated device.
42650 */
42651 struct backlight_device *backlight_device_register(const char *name,
42652- struct device *parent, void *devdata, struct backlight_ops *ops)
42653+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42654 {
42655 struct backlight_device *new_bd;
42656 int rc;
42657diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42658index 9677494..b4bcf80 100644
42659--- a/drivers/video/backlight/corgi_lcd.c
42660+++ b/drivers/video/backlight/corgi_lcd.c
42661@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42662 }
42663 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42664
42665-static struct backlight_ops corgi_bl_ops = {
42666+static const struct backlight_ops corgi_bl_ops = {
42667 .get_brightness = corgi_bl_get_intensity,
42668 .update_status = corgi_bl_update_status,
42669 };
42670diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42671index b9fe62b..2914bf1 100644
42672--- a/drivers/video/backlight/cr_bllcd.c
42673+++ b/drivers/video/backlight/cr_bllcd.c
42674@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42675 return intensity;
42676 }
42677
42678-static struct backlight_ops cr_backlight_ops = {
42679+static const struct backlight_ops cr_backlight_ops = {
42680 .get_brightness = cr_backlight_get_intensity,
42681 .update_status = cr_backlight_set_intensity,
42682 };
42683diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42684index 701a108..feacfd5 100644
42685--- a/drivers/video/backlight/da903x_bl.c
42686+++ b/drivers/video/backlight/da903x_bl.c
42687@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42688 return data->current_brightness;
42689 }
42690
42691-static struct backlight_ops da903x_backlight_ops = {
42692+static const struct backlight_ops da903x_backlight_ops = {
42693 .update_status = da903x_backlight_update_status,
42694 .get_brightness = da903x_backlight_get_brightness,
42695 };
42696diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42697index 6d27f62..e6d348e 100644
42698--- a/drivers/video/backlight/generic_bl.c
42699+++ b/drivers/video/backlight/generic_bl.c
42700@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42701 }
42702 EXPORT_SYMBOL(corgibl_limit_intensity);
42703
42704-static struct backlight_ops genericbl_ops = {
42705+static const struct backlight_ops genericbl_ops = {
42706 .options = BL_CORE_SUSPENDRESUME,
42707 .get_brightness = genericbl_get_intensity,
42708 .update_status = genericbl_send_intensity,
42709diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42710index 7fb4eef..f7cc528 100644
42711--- a/drivers/video/backlight/hp680_bl.c
42712+++ b/drivers/video/backlight/hp680_bl.c
42713@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42714 return current_intensity;
42715 }
42716
42717-static struct backlight_ops hp680bl_ops = {
42718+static const struct backlight_ops hp680bl_ops = {
42719 .get_brightness = hp680bl_get_intensity,
42720 .update_status = hp680bl_set_intensity,
42721 };
42722diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42723index 7aed256..db9071f 100644
42724--- a/drivers/video/backlight/jornada720_bl.c
42725+++ b/drivers/video/backlight/jornada720_bl.c
42726@@ -93,7 +93,7 @@ out:
42727 return ret;
42728 }
42729
42730-static struct backlight_ops jornada_bl_ops = {
42731+static const struct backlight_ops jornada_bl_ops = {
42732 .get_brightness = jornada_bl_get_brightness,
42733 .update_status = jornada_bl_update_status,
42734 .options = BL_CORE_SUSPENDRESUME,
42735diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42736index a38fda1..939e7b8 100644
42737--- a/drivers/video/backlight/kb3886_bl.c
42738+++ b/drivers/video/backlight/kb3886_bl.c
42739@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42740 return kb3886bl_intensity;
42741 }
42742
42743-static struct backlight_ops kb3886bl_ops = {
42744+static const struct backlight_ops kb3886bl_ops = {
42745 .get_brightness = kb3886bl_get_intensity,
42746 .update_status = kb3886bl_send_intensity,
42747 };
42748diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42749index 6b488b8..00a9591 100644
42750--- a/drivers/video/backlight/locomolcd.c
42751+++ b/drivers/video/backlight/locomolcd.c
42752@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42753 return current_intensity;
42754 }
42755
42756-static struct backlight_ops locomobl_data = {
42757+static const struct backlight_ops locomobl_data = {
42758 .get_brightness = locomolcd_get_intensity,
42759 .update_status = locomolcd_set_intensity,
42760 };
42761diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42762index 99bdfa8..3dac448 100644
42763--- a/drivers/video/backlight/mbp_nvidia_bl.c
42764+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42765@@ -33,7 +33,7 @@ struct dmi_match_data {
42766 unsigned long iostart;
42767 unsigned long iolen;
42768 /* Backlight operations structure. */
42769- struct backlight_ops backlight_ops;
42770+ const struct backlight_ops backlight_ops;
42771 };
42772
42773 /* Module parameters. */
42774diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42775index cbad67e..3cf900e 100644
42776--- a/drivers/video/backlight/omap1_bl.c
42777+++ b/drivers/video/backlight/omap1_bl.c
42778@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42779 return bl->current_intensity;
42780 }
42781
42782-static struct backlight_ops omapbl_ops = {
42783+static const struct backlight_ops omapbl_ops = {
42784 .get_brightness = omapbl_get_intensity,
42785 .update_status = omapbl_update_status,
42786 };
42787diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42788index 9edaf24..075786e 100644
42789--- a/drivers/video/backlight/progear_bl.c
42790+++ b/drivers/video/backlight/progear_bl.c
42791@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42792 return intensity - HW_LEVEL_MIN;
42793 }
42794
42795-static struct backlight_ops progearbl_ops = {
42796+static const struct backlight_ops progearbl_ops = {
42797 .get_brightness = progearbl_get_intensity,
42798 .update_status = progearbl_set_intensity,
42799 };
42800diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42801index 8871662..df9e0b3 100644
42802--- a/drivers/video/backlight/pwm_bl.c
42803+++ b/drivers/video/backlight/pwm_bl.c
42804@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42805 return bl->props.brightness;
42806 }
42807
42808-static struct backlight_ops pwm_backlight_ops = {
42809+static const struct backlight_ops pwm_backlight_ops = {
42810 .update_status = pwm_backlight_update_status,
42811 .get_brightness = pwm_backlight_get_brightness,
42812 };
42813diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42814index 43edbad..e14ce4d 100644
42815--- a/drivers/video/backlight/tosa_bl.c
42816+++ b/drivers/video/backlight/tosa_bl.c
42817@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42818 return props->brightness;
42819 }
42820
42821-static struct backlight_ops bl_ops = {
42822+static const struct backlight_ops bl_ops = {
42823 .get_brightness = tosa_bl_get_brightness,
42824 .update_status = tosa_bl_update_status,
42825 };
42826diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42827index 467bdb7..e32add3 100644
42828--- a/drivers/video/backlight/wm831x_bl.c
42829+++ b/drivers/video/backlight/wm831x_bl.c
42830@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42831 return data->current_brightness;
42832 }
42833
42834-static struct backlight_ops wm831x_backlight_ops = {
42835+static const struct backlight_ops wm831x_backlight_ops = {
42836 .options = BL_CORE_SUSPENDRESUME,
42837 .update_status = wm831x_backlight_update_status,
42838 .get_brightness = wm831x_backlight_get_brightness,
42839diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42840index e49ae5e..db4e6f7 100644
42841--- a/drivers/video/bf54x-lq043fb.c
42842+++ b/drivers/video/bf54x-lq043fb.c
42843@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42844 return 0;
42845 }
42846
42847-static struct backlight_ops bfin_lq043fb_bl_ops = {
42848+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42849 .get_brightness = bl_get_brightness,
42850 };
42851
42852diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42853index 2c72a7c..d523e52 100644
42854--- a/drivers/video/bfin-t350mcqb-fb.c
42855+++ b/drivers/video/bfin-t350mcqb-fb.c
42856@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42857 return 0;
42858 }
42859
42860-static struct backlight_ops bfin_lq043fb_bl_ops = {
42861+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42862 .get_brightness = bl_get_brightness,
42863 };
42864
42865diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42866index f53b9f1..958bf4e 100644
42867--- a/drivers/video/fbcmap.c
42868+++ b/drivers/video/fbcmap.c
42869@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42870 rc = -ENODEV;
42871 goto out;
42872 }
42873- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42874- !info->fbops->fb_setcmap)) {
42875+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42876 rc = -EINVAL;
42877 goto out1;
42878 }
42879diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
42880index 99bbd28..ad3829e 100644
42881--- a/drivers/video/fbmem.c
42882+++ b/drivers/video/fbmem.c
42883@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42884 image->dx += image->width + 8;
42885 }
42886 } else if (rotate == FB_ROTATE_UD) {
42887- for (x = 0; x < num && image->dx >= 0; x++) {
42888+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
42889 info->fbops->fb_imageblit(info, image);
42890 image->dx -= image->width + 8;
42891 }
42892@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42893 image->dy += image->height + 8;
42894 }
42895 } else if (rotate == FB_ROTATE_CCW) {
42896- for (x = 0; x < num && image->dy >= 0; x++) {
42897+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
42898 info->fbops->fb_imageblit(info, image);
42899 image->dy -= image->height + 8;
42900 }
42901@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
42902 int flags = info->flags;
42903 int ret = 0;
42904
42905+ pax_track_stack();
42906+
42907 if (var->activate & FB_ACTIVATE_INV_MODE) {
42908 struct fb_videomode mode1, mode2;
42909
42910@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42911 void __user *argp = (void __user *)arg;
42912 long ret = 0;
42913
42914+ pax_track_stack();
42915+
42916 switch (cmd) {
42917 case FBIOGET_VSCREENINFO:
42918 if (!lock_fb_info(info))
42919@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42920 return -EFAULT;
42921 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
42922 return -EINVAL;
42923- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
42924+ if (con2fb.framebuffer >= FB_MAX)
42925 return -EINVAL;
42926 if (!registered_fb[con2fb.framebuffer])
42927 request_module("fb%d", con2fb.framebuffer);
42928diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
42929index f20eff8..3e4f622 100644
42930--- a/drivers/video/geode/gx1fb_core.c
42931+++ b/drivers/video/geode/gx1fb_core.c
42932@@ -30,7 +30,7 @@ static int crt_option = 1;
42933 static char panel_option[32] = "";
42934
42935 /* Modes relevant to the GX1 (taken from modedb.c) */
42936-static const struct fb_videomode __initdata gx1_modedb[] = {
42937+static const struct fb_videomode __initconst gx1_modedb[] = {
42938 /* 640x480-60 VESA */
42939 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
42940 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
42941diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
42942index 896e53d..4d87d0b 100644
42943--- a/drivers/video/gxt4500.c
42944+++ b/drivers/video/gxt4500.c
42945@@ -156,7 +156,7 @@ struct gxt4500_par {
42946 static char *mode_option;
42947
42948 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
42949-static const struct fb_videomode defaultmode __devinitdata = {
42950+static const struct fb_videomode defaultmode __devinitconst = {
42951 .refresh = 60,
42952 .xres = 1280,
42953 .yres = 1024,
42954@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
42955 return 0;
42956 }
42957
42958-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
42959+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
42960 .id = "IBM GXT4500P",
42961 .type = FB_TYPE_PACKED_PIXELS,
42962 .visual = FB_VISUAL_PSEUDOCOLOR,
42963diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
42964index f5bedee..28c6028 100644
42965--- a/drivers/video/i810/i810_accel.c
42966+++ b/drivers/video/i810/i810_accel.c
42967@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
42968 }
42969 }
42970 printk("ringbuffer lockup!!!\n");
42971+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
42972 i810_report_error(mmio);
42973 par->dev_flags |= LOCKUP;
42974 info->pixmap.scan_align = 1;
42975diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
42976index 5743ea2..457f82c 100644
42977--- a/drivers/video/i810/i810_main.c
42978+++ b/drivers/video/i810/i810_main.c
42979@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
42980 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
42981
42982 /* PCI */
42983-static const char *i810_pci_list[] __devinitdata = {
42984+static const char *i810_pci_list[] __devinitconst = {
42985 "Intel(R) 810 Framebuffer Device" ,
42986 "Intel(R) 810-DC100 Framebuffer Device" ,
42987 "Intel(R) 810E Framebuffer Device" ,
42988diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
42989index 3c14e43..eafa544 100644
42990--- a/drivers/video/logo/logo_linux_clut224.ppm
42991+++ b/drivers/video/logo/logo_linux_clut224.ppm
42992@@ -1,1604 +1,1123 @@
42993 P3
42994-# Standard 224-color Linux logo
42995 80 80
42996 255
42997- 0 0 0 0 0 0 0 0 0 0 0 0
42998- 0 0 0 0 0 0 0 0 0 0 0 0
42999- 0 0 0 0 0 0 0 0 0 0 0 0
43000- 0 0 0 0 0 0 0 0 0 0 0 0
43001- 0 0 0 0 0 0 0 0 0 0 0 0
43002- 0 0 0 0 0 0 0 0 0 0 0 0
43003- 0 0 0 0 0 0 0 0 0 0 0 0
43004- 0 0 0 0 0 0 0 0 0 0 0 0
43005- 0 0 0 0 0 0 0 0 0 0 0 0
43006- 6 6 6 6 6 6 10 10 10 10 10 10
43007- 10 10 10 6 6 6 6 6 6 6 6 6
43008- 0 0 0 0 0 0 0 0 0 0 0 0
43009- 0 0 0 0 0 0 0 0 0 0 0 0
43010- 0 0 0 0 0 0 0 0 0 0 0 0
43011- 0 0 0 0 0 0 0 0 0 0 0 0
43012- 0 0 0 0 0 0 0 0 0 0 0 0
43013- 0 0 0 0 0 0 0 0 0 0 0 0
43014- 0 0 0 0 0 0 0 0 0 0 0 0
43015- 0 0 0 0 0 0 0 0 0 0 0 0
43016- 0 0 0 0 0 0 0 0 0 0 0 0
43017- 0 0 0 0 0 0 0 0 0 0 0 0
43018- 0 0 0 0 0 0 0 0 0 0 0 0
43019- 0 0 0 0 0 0 0 0 0 0 0 0
43020- 0 0 0 0 0 0 0 0 0 0 0 0
43021- 0 0 0 0 0 0 0 0 0 0 0 0
43022- 0 0 0 0 0 0 0 0 0 0 0 0
43023- 0 0 0 0 0 0 0 0 0 0 0 0
43024- 0 0 0 0 0 0 0 0 0 0 0 0
43025- 0 0 0 6 6 6 10 10 10 14 14 14
43026- 22 22 22 26 26 26 30 30 30 34 34 34
43027- 30 30 30 30 30 30 26 26 26 18 18 18
43028- 14 14 14 10 10 10 6 6 6 0 0 0
43029- 0 0 0 0 0 0 0 0 0 0 0 0
43030- 0 0 0 0 0 0 0 0 0 0 0 0
43031- 0 0 0 0 0 0 0 0 0 0 0 0
43032- 0 0 0 0 0 0 0 0 0 0 0 0
43033- 0 0 0 0 0 0 0 0 0 0 0 0
43034- 0 0 0 0 0 0 0 0 0 0 0 0
43035- 0 0 0 0 0 0 0 0 0 0 0 0
43036- 0 0 0 0 0 0 0 0 0 0 0 0
43037- 0 0 0 0 0 0 0 0 0 0 0 0
43038- 0 0 0 0 0 1 0 0 1 0 0 0
43039- 0 0 0 0 0 0 0 0 0 0 0 0
43040- 0 0 0 0 0 0 0 0 0 0 0 0
43041- 0 0 0 0 0 0 0 0 0 0 0 0
43042- 0 0 0 0 0 0 0 0 0 0 0 0
43043- 0 0 0 0 0 0 0 0 0 0 0 0
43044- 0 0 0 0 0 0 0 0 0 0 0 0
43045- 6 6 6 14 14 14 26 26 26 42 42 42
43046- 54 54 54 66 66 66 78 78 78 78 78 78
43047- 78 78 78 74 74 74 66 66 66 54 54 54
43048- 42 42 42 26 26 26 18 18 18 10 10 10
43049- 6 6 6 0 0 0 0 0 0 0 0 0
43050- 0 0 0 0 0 0 0 0 0 0 0 0
43051- 0 0 0 0 0 0 0 0 0 0 0 0
43052- 0 0 0 0 0 0 0 0 0 0 0 0
43053- 0 0 0 0 0 0 0 0 0 0 0 0
43054- 0 0 0 0 0 0 0 0 0 0 0 0
43055- 0 0 0 0 0 0 0 0 0 0 0 0
43056- 0 0 0 0 0 0 0 0 0 0 0 0
43057- 0 0 0 0 0 0 0 0 0 0 0 0
43058- 0 0 1 0 0 0 0 0 0 0 0 0
43059- 0 0 0 0 0 0 0 0 0 0 0 0
43060- 0 0 0 0 0 0 0 0 0 0 0 0
43061- 0 0 0 0 0 0 0 0 0 0 0 0
43062- 0 0 0 0 0 0 0 0 0 0 0 0
43063- 0 0 0 0 0 0 0 0 0 0 0 0
43064- 0 0 0 0 0 0 0 0 0 10 10 10
43065- 22 22 22 42 42 42 66 66 66 86 86 86
43066- 66 66 66 38 38 38 38 38 38 22 22 22
43067- 26 26 26 34 34 34 54 54 54 66 66 66
43068- 86 86 86 70 70 70 46 46 46 26 26 26
43069- 14 14 14 6 6 6 0 0 0 0 0 0
43070- 0 0 0 0 0 0 0 0 0 0 0 0
43071- 0 0 0 0 0 0 0 0 0 0 0 0
43072- 0 0 0 0 0 0 0 0 0 0 0 0
43073- 0 0 0 0 0 0 0 0 0 0 0 0
43074- 0 0 0 0 0 0 0 0 0 0 0 0
43075- 0 0 0 0 0 0 0 0 0 0 0 0
43076- 0 0 0 0 0 0 0 0 0 0 0 0
43077- 0 0 0 0 0 0 0 0 0 0 0 0
43078- 0 0 1 0 0 1 0 0 1 0 0 0
43079- 0 0 0 0 0 0 0 0 0 0 0 0
43080- 0 0 0 0 0 0 0 0 0 0 0 0
43081- 0 0 0 0 0 0 0 0 0 0 0 0
43082- 0 0 0 0 0 0 0 0 0 0 0 0
43083- 0 0 0 0 0 0 0 0 0 0 0 0
43084- 0 0 0 0 0 0 10 10 10 26 26 26
43085- 50 50 50 82 82 82 58 58 58 6 6 6
43086- 2 2 6 2 2 6 2 2 6 2 2 6
43087- 2 2 6 2 2 6 2 2 6 2 2 6
43088- 6 6 6 54 54 54 86 86 86 66 66 66
43089- 38 38 38 18 18 18 6 6 6 0 0 0
43090- 0 0 0 0 0 0 0 0 0 0 0 0
43091- 0 0 0 0 0 0 0 0 0 0 0 0
43092- 0 0 0 0 0 0 0 0 0 0 0 0
43093- 0 0 0 0 0 0 0 0 0 0 0 0
43094- 0 0 0 0 0 0 0 0 0 0 0 0
43095- 0 0 0 0 0 0 0 0 0 0 0 0
43096- 0 0 0 0 0 0 0 0 0 0 0 0
43097- 0 0 0 0 0 0 0 0 0 0 0 0
43098- 0 0 0 0 0 0 0 0 0 0 0 0
43099- 0 0 0 0 0 0 0 0 0 0 0 0
43100- 0 0 0 0 0 0 0 0 0 0 0 0
43101- 0 0 0 0 0 0 0 0 0 0 0 0
43102- 0 0 0 0 0 0 0 0 0 0 0 0
43103- 0 0 0 0 0 0 0 0 0 0 0 0
43104- 0 0 0 6 6 6 22 22 22 50 50 50
43105- 78 78 78 34 34 34 2 2 6 2 2 6
43106- 2 2 6 2 2 6 2 2 6 2 2 6
43107- 2 2 6 2 2 6 2 2 6 2 2 6
43108- 2 2 6 2 2 6 6 6 6 70 70 70
43109- 78 78 78 46 46 46 22 22 22 6 6 6
43110- 0 0 0 0 0 0 0 0 0 0 0 0
43111- 0 0 0 0 0 0 0 0 0 0 0 0
43112- 0 0 0 0 0 0 0 0 0 0 0 0
43113- 0 0 0 0 0 0 0 0 0 0 0 0
43114- 0 0 0 0 0 0 0 0 0 0 0 0
43115- 0 0 0 0 0 0 0 0 0 0 0 0
43116- 0 0 0 0 0 0 0 0 0 0 0 0
43117- 0 0 0 0 0 0 0 0 0 0 0 0
43118- 0 0 1 0 0 1 0 0 1 0 0 0
43119- 0 0 0 0 0 0 0 0 0 0 0 0
43120- 0 0 0 0 0 0 0 0 0 0 0 0
43121- 0 0 0 0 0 0 0 0 0 0 0 0
43122- 0 0 0 0 0 0 0 0 0 0 0 0
43123- 0 0 0 0 0 0 0 0 0 0 0 0
43124- 6 6 6 18 18 18 42 42 42 82 82 82
43125- 26 26 26 2 2 6 2 2 6 2 2 6
43126- 2 2 6 2 2 6 2 2 6 2 2 6
43127- 2 2 6 2 2 6 2 2 6 14 14 14
43128- 46 46 46 34 34 34 6 6 6 2 2 6
43129- 42 42 42 78 78 78 42 42 42 18 18 18
43130- 6 6 6 0 0 0 0 0 0 0 0 0
43131- 0 0 0 0 0 0 0 0 0 0 0 0
43132- 0 0 0 0 0 0 0 0 0 0 0 0
43133- 0 0 0 0 0 0 0 0 0 0 0 0
43134- 0 0 0 0 0 0 0 0 0 0 0 0
43135- 0 0 0 0 0 0 0 0 0 0 0 0
43136- 0 0 0 0 0 0 0 0 0 0 0 0
43137- 0 0 0 0 0 0 0 0 0 0 0 0
43138- 0 0 1 0 0 0 0 0 1 0 0 0
43139- 0 0 0 0 0 0 0 0 0 0 0 0
43140- 0 0 0 0 0 0 0 0 0 0 0 0
43141- 0 0 0 0 0 0 0 0 0 0 0 0
43142- 0 0 0 0 0 0 0 0 0 0 0 0
43143- 0 0 0 0 0 0 0 0 0 0 0 0
43144- 10 10 10 30 30 30 66 66 66 58 58 58
43145- 2 2 6 2 2 6 2 2 6 2 2 6
43146- 2 2 6 2 2 6 2 2 6 2 2 6
43147- 2 2 6 2 2 6 2 2 6 26 26 26
43148- 86 86 86 101 101 101 46 46 46 10 10 10
43149- 2 2 6 58 58 58 70 70 70 34 34 34
43150- 10 10 10 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 0 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 1 0 0 1 0 0 1 0 0 0
43159- 0 0 0 0 0 0 0 0 0 0 0 0
43160- 0 0 0 0 0 0 0 0 0 0 0 0
43161- 0 0 0 0 0 0 0 0 0 0 0 0
43162- 0 0 0 0 0 0 0 0 0 0 0 0
43163- 0 0 0 0 0 0 0 0 0 0 0 0
43164- 14 14 14 42 42 42 86 86 86 10 10 10
43165- 2 2 6 2 2 6 2 2 6 2 2 6
43166- 2 2 6 2 2 6 2 2 6 2 2 6
43167- 2 2 6 2 2 6 2 2 6 30 30 30
43168- 94 94 94 94 94 94 58 58 58 26 26 26
43169- 2 2 6 6 6 6 78 78 78 54 54 54
43170- 22 22 22 6 6 6 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 0 0 0 0 0 0 0 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 0 0 0
43179- 0 0 0 0 0 0 0 0 0 0 0 0
43180- 0 0 0 0 0 0 0 0 0 0 0 0
43181- 0 0 0 0 0 0 0 0 0 0 0 0
43182- 0 0 0 0 0 0 0 0 0 0 0 0
43183- 0 0 0 0 0 0 0 0 0 6 6 6
43184- 22 22 22 62 62 62 62 62 62 2 2 6
43185- 2 2 6 2 2 6 2 2 6 2 2 6
43186- 2 2 6 2 2 6 2 2 6 2 2 6
43187- 2 2 6 2 2 6 2 2 6 26 26 26
43188- 54 54 54 38 38 38 18 18 18 10 10 10
43189- 2 2 6 2 2 6 34 34 34 82 82 82
43190- 38 38 38 14 14 14 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 1 0 0 1 0 0 0
43199- 0 0 0 0 0 0 0 0 0 0 0 0
43200- 0 0 0 0 0 0 0 0 0 0 0 0
43201- 0 0 0 0 0 0 0 0 0 0 0 0
43202- 0 0 0 0 0 0 0 0 0 0 0 0
43203- 0 0 0 0 0 0 0 0 0 6 6 6
43204- 30 30 30 78 78 78 30 30 30 2 2 6
43205- 2 2 6 2 2 6 2 2 6 2 2 6
43206- 2 2 6 2 2 6 2 2 6 2 2 6
43207- 2 2 6 2 2 6 2 2 6 10 10 10
43208- 10 10 10 2 2 6 2 2 6 2 2 6
43209- 2 2 6 2 2 6 2 2 6 78 78 78
43210- 50 50 50 18 18 18 6 6 6 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 0 0 0 0
43213- 0 0 0 0 0 0 0 0 0 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 1 0 0 0 0 0 0 0 0 0
43219- 0 0 0 0 0 0 0 0 0 0 0 0
43220- 0 0 0 0 0 0 0 0 0 0 0 0
43221- 0 0 0 0 0 0 0 0 0 0 0 0
43222- 0 0 0 0 0 0 0 0 0 0 0 0
43223- 0 0 0 0 0 0 0 0 0 10 10 10
43224- 38 38 38 86 86 86 14 14 14 2 2 6
43225- 2 2 6 2 2 6 2 2 6 2 2 6
43226- 2 2 6 2 2 6 2 2 6 2 2 6
43227- 2 2 6 2 2 6 2 2 6 2 2 6
43228- 2 2 6 2 2 6 2 2 6 2 2 6
43229- 2 2 6 2 2 6 2 2 6 54 54 54
43230- 66 66 66 26 26 26 6 6 6 0 0 0
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 1 0 0 1 0 0 0
43239- 0 0 0 0 0 0 0 0 0 0 0 0
43240- 0 0 0 0 0 0 0 0 0 0 0 0
43241- 0 0 0 0 0 0 0 0 0 0 0 0
43242- 0 0 0 0 0 0 0 0 0 0 0 0
43243- 0 0 0 0 0 0 0 0 0 14 14 14
43244- 42 42 42 82 82 82 2 2 6 2 2 6
43245- 2 2 6 6 6 6 10 10 10 2 2 6
43246- 2 2 6 2 2 6 2 2 6 2 2 6
43247- 2 2 6 2 2 6 2 2 6 6 6 6
43248- 14 14 14 10 10 10 2 2 6 2 2 6
43249- 2 2 6 2 2 6 2 2 6 18 18 18
43250- 82 82 82 34 34 34 10 10 10 0 0 0
43251- 0 0 0 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 0 0 0 0
43253- 0 0 0 0 0 0 0 0 0 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 1 0 0 0 0 0 0 0 0 0
43259- 0 0 0 0 0 0 0 0 0 0 0 0
43260- 0 0 0 0 0 0 0 0 0 0 0 0
43261- 0 0 0 0 0 0 0 0 0 0 0 0
43262- 0 0 0 0 0 0 0 0 0 0 0 0
43263- 0 0 0 0 0 0 0 0 0 14 14 14
43264- 46 46 46 86 86 86 2 2 6 2 2 6
43265- 6 6 6 6 6 6 22 22 22 34 34 34
43266- 6 6 6 2 2 6 2 2 6 2 2 6
43267- 2 2 6 2 2 6 18 18 18 34 34 34
43268- 10 10 10 50 50 50 22 22 22 2 2 6
43269- 2 2 6 2 2 6 2 2 6 10 10 10
43270- 86 86 86 42 42 42 14 14 14 0 0 0
43271- 0 0 0 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 0 0 0 0
43273- 0 0 0 0 0 0 0 0 0 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 1 0 0 1 0 0 1 0 0 0
43279- 0 0 0 0 0 0 0 0 0 0 0 0
43280- 0 0 0 0 0 0 0 0 0 0 0 0
43281- 0 0 0 0 0 0 0 0 0 0 0 0
43282- 0 0 0 0 0 0 0 0 0 0 0 0
43283- 0 0 0 0 0 0 0 0 0 14 14 14
43284- 46 46 46 86 86 86 2 2 6 2 2 6
43285- 38 38 38 116 116 116 94 94 94 22 22 22
43286- 22 22 22 2 2 6 2 2 6 2 2 6
43287- 14 14 14 86 86 86 138 138 138 162 162 162
43288-154 154 154 38 38 38 26 26 26 6 6 6
43289- 2 2 6 2 2 6 2 2 6 2 2 6
43290- 86 86 86 46 46 46 14 14 14 0 0 0
43291- 0 0 0 0 0 0 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 0 0 0 0
43293- 0 0 0 0 0 0 0 0 0 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 0 0 0
43299- 0 0 0 0 0 0 0 0 0 0 0 0
43300- 0 0 0 0 0 0 0 0 0 0 0 0
43301- 0 0 0 0 0 0 0 0 0 0 0 0
43302- 0 0 0 0 0 0 0 0 0 0 0 0
43303- 0 0 0 0 0 0 0 0 0 14 14 14
43304- 46 46 46 86 86 86 2 2 6 14 14 14
43305-134 134 134 198 198 198 195 195 195 116 116 116
43306- 10 10 10 2 2 6 2 2 6 6 6 6
43307-101 98 89 187 187 187 210 210 210 218 218 218
43308-214 214 214 134 134 134 14 14 14 6 6 6
43309- 2 2 6 2 2 6 2 2 6 2 2 6
43310- 86 86 86 50 50 50 18 18 18 6 6 6
43311- 0 0 0 0 0 0 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 1 0 0 0
43318- 0 0 1 0 0 1 0 0 1 0 0 0
43319- 0 0 0 0 0 0 0 0 0 0 0 0
43320- 0 0 0 0 0 0 0 0 0 0 0 0
43321- 0 0 0 0 0 0 0 0 0 0 0 0
43322- 0 0 0 0 0 0 0 0 0 0 0 0
43323- 0 0 0 0 0 0 0 0 0 14 14 14
43324- 46 46 46 86 86 86 2 2 6 54 54 54
43325-218 218 218 195 195 195 226 226 226 246 246 246
43326- 58 58 58 2 2 6 2 2 6 30 30 30
43327-210 210 210 253 253 253 174 174 174 123 123 123
43328-221 221 221 234 234 234 74 74 74 2 2 6
43329- 2 2 6 2 2 6 2 2 6 2 2 6
43330- 70 70 70 58 58 58 22 22 22 6 6 6
43331- 0 0 0 0 0 0 0 0 0 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 0 0 0
43339- 0 0 0 0 0 0 0 0 0 0 0 0
43340- 0 0 0 0 0 0 0 0 0 0 0 0
43341- 0 0 0 0 0 0 0 0 0 0 0 0
43342- 0 0 0 0 0 0 0 0 0 0 0 0
43343- 0 0 0 0 0 0 0 0 0 14 14 14
43344- 46 46 46 82 82 82 2 2 6 106 106 106
43345-170 170 170 26 26 26 86 86 86 226 226 226
43346-123 123 123 10 10 10 14 14 14 46 46 46
43347-231 231 231 190 190 190 6 6 6 70 70 70
43348- 90 90 90 238 238 238 158 158 158 2 2 6
43349- 2 2 6 2 2 6 2 2 6 2 2 6
43350- 70 70 70 58 58 58 22 22 22 6 6 6
43351- 0 0 0 0 0 0 0 0 0 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 1 0 0 0
43358- 0 0 1 0 0 1 0 0 1 0 0 0
43359- 0 0 0 0 0 0 0 0 0 0 0 0
43360- 0 0 0 0 0 0 0 0 0 0 0 0
43361- 0 0 0 0 0 0 0 0 0 0 0 0
43362- 0 0 0 0 0 0 0 0 0 0 0 0
43363- 0 0 0 0 0 0 0 0 0 14 14 14
43364- 42 42 42 86 86 86 6 6 6 116 116 116
43365-106 106 106 6 6 6 70 70 70 149 149 149
43366-128 128 128 18 18 18 38 38 38 54 54 54
43367-221 221 221 106 106 106 2 2 6 14 14 14
43368- 46 46 46 190 190 190 198 198 198 2 2 6
43369- 2 2 6 2 2 6 2 2 6 2 2 6
43370- 74 74 74 62 62 62 22 22 22 6 6 6
43371- 0 0 0 0 0 0 0 0 0 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 1 0 0 0
43378- 0 0 1 0 0 0 0 0 1 0 0 0
43379- 0 0 0 0 0 0 0 0 0 0 0 0
43380- 0 0 0 0 0 0 0 0 0 0 0 0
43381- 0 0 0 0 0 0 0 0 0 0 0 0
43382- 0 0 0 0 0 0 0 0 0 0 0 0
43383- 0 0 0 0 0 0 0 0 0 14 14 14
43384- 42 42 42 94 94 94 14 14 14 101 101 101
43385-128 128 128 2 2 6 18 18 18 116 116 116
43386-118 98 46 121 92 8 121 92 8 98 78 10
43387-162 162 162 106 106 106 2 2 6 2 2 6
43388- 2 2 6 195 195 195 195 195 195 6 6 6
43389- 2 2 6 2 2 6 2 2 6 2 2 6
43390- 74 74 74 62 62 62 22 22 22 6 6 6
43391- 0 0 0 0 0 0 0 0 0 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 1 0 0 1
43398- 0 0 1 0 0 0 0 0 1 0 0 0
43399- 0 0 0 0 0 0 0 0 0 0 0 0
43400- 0 0 0 0 0 0 0 0 0 0 0 0
43401- 0 0 0 0 0 0 0 0 0 0 0 0
43402- 0 0 0 0 0 0 0 0 0 0 0 0
43403- 0 0 0 0 0 0 0 0 0 10 10 10
43404- 38 38 38 90 90 90 14 14 14 58 58 58
43405-210 210 210 26 26 26 54 38 6 154 114 10
43406-226 170 11 236 186 11 225 175 15 184 144 12
43407-215 174 15 175 146 61 37 26 9 2 2 6
43408- 70 70 70 246 246 246 138 138 138 2 2 6
43409- 2 2 6 2 2 6 2 2 6 2 2 6
43410- 70 70 70 66 66 66 26 26 26 6 6 6
43411- 0 0 0 0 0 0 0 0 0 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 0 0 0 0 0 0 0 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 10 10 10
43424- 38 38 38 86 86 86 14 14 14 10 10 10
43425-195 195 195 188 164 115 192 133 9 225 175 15
43426-239 182 13 234 190 10 232 195 16 232 200 30
43427-245 207 45 241 208 19 232 195 16 184 144 12
43428-218 194 134 211 206 186 42 42 42 2 2 6
43429- 2 2 6 2 2 6 2 2 6 2 2 6
43430- 50 50 50 74 74 74 30 30 30 6 6 6
43431- 0 0 0 0 0 0 0 0 0 0 0 0
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 0 0 0
43439- 0 0 0 0 0 0 0 0 0 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 10 10 10
43444- 34 34 34 86 86 86 14 14 14 2 2 6
43445-121 87 25 192 133 9 219 162 10 239 182 13
43446-236 186 11 232 195 16 241 208 19 244 214 54
43447-246 218 60 246 218 38 246 215 20 241 208 19
43448-241 208 19 226 184 13 121 87 25 2 2 6
43449- 2 2 6 2 2 6 2 2 6 2 2 6
43450- 50 50 50 82 82 82 34 34 34 10 10 10
43451- 0 0 0 0 0 0 0 0 0 0 0 0
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 0 0 0 0
43459- 0 0 0 0 0 0 0 0 0 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 10 10 10
43464- 34 34 34 82 82 82 30 30 30 61 42 6
43465-180 123 7 206 145 10 230 174 11 239 182 13
43466-234 190 10 238 202 15 241 208 19 246 218 74
43467-246 218 38 246 215 20 246 215 20 246 215 20
43468-226 184 13 215 174 15 184 144 12 6 6 6
43469- 2 2 6 2 2 6 2 2 6 2 2 6
43470- 26 26 26 94 94 94 42 42 42 14 14 14
43471- 0 0 0 0 0 0 0 0 0 0 0 0
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 0 0 0 0
43479- 0 0 0 0 0 0 0 0 0 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 10 10 10
43484- 30 30 30 78 78 78 50 50 50 104 69 6
43485-192 133 9 216 158 10 236 178 12 236 186 11
43486-232 195 16 241 208 19 244 214 54 245 215 43
43487-246 215 20 246 215 20 241 208 19 198 155 10
43488-200 144 11 216 158 10 156 118 10 2 2 6
43489- 2 2 6 2 2 6 2 2 6 2 2 6
43490- 6 6 6 90 90 90 54 54 54 18 18 18
43491- 6 6 6 0 0 0 0 0 0 0 0 0
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 0 0 0 0 0 0 0 0 0
43499- 0 0 0 0 0 0 0 0 0 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 10 10 10
43504- 30 30 30 78 78 78 46 46 46 22 22 22
43505-137 92 6 210 162 10 239 182 13 238 190 10
43506-238 202 15 241 208 19 246 215 20 246 215 20
43507-241 208 19 203 166 17 185 133 11 210 150 10
43508-216 158 10 210 150 10 102 78 10 2 2 6
43509- 6 6 6 54 54 54 14 14 14 2 2 6
43510- 2 2 6 62 62 62 74 74 74 30 30 30
43511- 10 10 10 0 0 0 0 0 0 0 0 0
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 0 0 0 0 0 0 0 0 0 0 0 0
43519- 0 0 0 0 0 0 0 0 0 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 10 10 10
43524- 34 34 34 78 78 78 50 50 50 6 6 6
43525- 94 70 30 139 102 15 190 146 13 226 184 13
43526-232 200 30 232 195 16 215 174 15 190 146 13
43527-168 122 10 192 133 9 210 150 10 213 154 11
43528-202 150 34 182 157 106 101 98 89 2 2 6
43529- 2 2 6 78 78 78 116 116 116 58 58 58
43530- 2 2 6 22 22 22 90 90 90 46 46 46
43531- 18 18 18 6 6 6 0 0 0 0 0 0
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 0 0 0
43538- 0 0 0 0 0 0 0 0 0 0 0 0
43539- 0 0 0 0 0 0 0 0 0 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 10 10 10
43544- 38 38 38 86 86 86 50 50 50 6 6 6
43545-128 128 128 174 154 114 156 107 11 168 122 10
43546-198 155 10 184 144 12 197 138 11 200 144 11
43547-206 145 10 206 145 10 197 138 11 188 164 115
43548-195 195 195 198 198 198 174 174 174 14 14 14
43549- 2 2 6 22 22 22 116 116 116 116 116 116
43550- 22 22 22 2 2 6 74 74 74 70 70 70
43551- 30 30 30 10 10 10 0 0 0 0 0 0
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 0 0 0
43558- 0 0 0 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 6 6 6 18 18 18
43564- 50 50 50 101 101 101 26 26 26 10 10 10
43565-138 138 138 190 190 190 174 154 114 156 107 11
43566-197 138 11 200 144 11 197 138 11 192 133 9
43567-180 123 7 190 142 34 190 178 144 187 187 187
43568-202 202 202 221 221 221 214 214 214 66 66 66
43569- 2 2 6 2 2 6 50 50 50 62 62 62
43570- 6 6 6 2 2 6 10 10 10 90 90 90
43571- 50 50 50 18 18 18 6 6 6 0 0 0
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 0 0 0 0 0 0
43578- 0 0 0 0 0 0 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 10 10 10 34 34 34
43584- 74 74 74 74 74 74 2 2 6 6 6 6
43585-144 144 144 198 198 198 190 190 190 178 166 146
43586-154 121 60 156 107 11 156 107 11 168 124 44
43587-174 154 114 187 187 187 190 190 190 210 210 210
43588-246 246 246 253 253 253 253 253 253 182 182 182
43589- 6 6 6 2 2 6 2 2 6 2 2 6
43590- 2 2 6 2 2 6 2 2 6 62 62 62
43591- 74 74 74 34 34 34 14 14 14 0 0 0
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 0 0 0 0 0 0 0 0 0
43598- 0 0 0 0 0 0 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 10 10 10 22 22 22 54 54 54
43604- 94 94 94 18 18 18 2 2 6 46 46 46
43605-234 234 234 221 221 221 190 190 190 190 190 190
43606-190 190 190 187 187 187 187 187 187 190 190 190
43607-190 190 190 195 195 195 214 214 214 242 242 242
43608-253 253 253 253 253 253 253 253 253 253 253 253
43609- 82 82 82 2 2 6 2 2 6 2 2 6
43610- 2 2 6 2 2 6 2 2 6 14 14 14
43611- 86 86 86 54 54 54 22 22 22 6 6 6
43612- 0 0 0 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 0 0 0 0 0 0 0 0 0 0 0 0
43618- 0 0 0 0 0 0 0 0 0 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 6 6 6 18 18 18 46 46 46 90 90 90
43624- 46 46 46 18 18 18 6 6 6 182 182 182
43625-253 253 253 246 246 246 206 206 206 190 190 190
43626-190 190 190 190 190 190 190 190 190 190 190 190
43627-206 206 206 231 231 231 250 250 250 253 253 253
43628-253 253 253 253 253 253 253 253 253 253 253 253
43629-202 202 202 14 14 14 2 2 6 2 2 6
43630- 2 2 6 2 2 6 2 2 6 2 2 6
43631- 42 42 42 86 86 86 42 42 42 18 18 18
43632- 6 6 6 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 0 0 0 0 0 0 0 0 0 0 0 0
43638- 0 0 0 0 0 0 0 0 0 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 6 6 6
43643- 14 14 14 38 38 38 74 74 74 66 66 66
43644- 2 2 6 6 6 6 90 90 90 250 250 250
43645-253 253 253 253 253 253 238 238 238 198 198 198
43646-190 190 190 190 190 190 195 195 195 221 221 221
43647-246 246 246 253 253 253 253 253 253 253 253 253
43648-253 253 253 253 253 253 253 253 253 253 253 253
43649-253 253 253 82 82 82 2 2 6 2 2 6
43650- 2 2 6 2 2 6 2 2 6 2 2 6
43651- 2 2 6 78 78 78 70 70 70 34 34 34
43652- 14 14 14 6 6 6 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 0 0 0 0 0 0 0 0 0 0 0 0
43658- 0 0 0 0 0 0 0 0 0 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 14 14 14
43663- 34 34 34 66 66 66 78 78 78 6 6 6
43664- 2 2 6 18 18 18 218 218 218 253 253 253
43665-253 253 253 253 253 253 253 253 253 246 246 246
43666-226 226 226 231 231 231 246 246 246 253 253 253
43667-253 253 253 253 253 253 253 253 253 253 253 253
43668-253 253 253 253 253 253 253 253 253 253 253 253
43669-253 253 253 178 178 178 2 2 6 2 2 6
43670- 2 2 6 2 2 6 2 2 6 2 2 6
43671- 2 2 6 18 18 18 90 90 90 62 62 62
43672- 30 30 30 10 10 10 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 0 0 0
43677- 0 0 0 0 0 0 0 0 0 0 0 0
43678- 0 0 0 0 0 0 0 0 0 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 10 10 10 26 26 26
43683- 58 58 58 90 90 90 18 18 18 2 2 6
43684- 2 2 6 110 110 110 253 253 253 253 253 253
43685-253 253 253 253 253 253 253 253 253 253 253 253
43686-250 250 250 253 253 253 253 253 253 253 253 253
43687-253 253 253 253 253 253 253 253 253 253 253 253
43688-253 253 253 253 253 253 253 253 253 253 253 253
43689-253 253 253 231 231 231 18 18 18 2 2 6
43690- 2 2 6 2 2 6 2 2 6 2 2 6
43691- 2 2 6 2 2 6 18 18 18 94 94 94
43692- 54 54 54 26 26 26 10 10 10 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 0 0 0
43697- 0 0 0 0 0 0 0 0 0 0 0 0
43698- 0 0 0 0 0 0 0 0 0 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 6 6 6 22 22 22 50 50 50
43703- 90 90 90 26 26 26 2 2 6 2 2 6
43704- 14 14 14 195 195 195 250 250 250 253 253 253
43705-253 253 253 253 253 253 253 253 253 253 253 253
43706-253 253 253 253 253 253 253 253 253 253 253 253
43707-253 253 253 253 253 253 253 253 253 253 253 253
43708-253 253 253 253 253 253 253 253 253 253 253 253
43709-250 250 250 242 242 242 54 54 54 2 2 6
43710- 2 2 6 2 2 6 2 2 6 2 2 6
43711- 2 2 6 2 2 6 2 2 6 38 38 38
43712- 86 86 86 50 50 50 22 22 22 6 6 6
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 0 0 0
43717- 0 0 0 0 0 0 0 0 0 0 0 0
43718- 0 0 0 0 0 0 0 0 0 0 0 0
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 6 6 6 14 14 14 38 38 38 82 82 82
43723- 34 34 34 2 2 6 2 2 6 2 2 6
43724- 42 42 42 195 195 195 246 246 246 253 253 253
43725-253 253 253 253 253 253 253 253 253 250 250 250
43726-242 242 242 242 242 242 250 250 250 253 253 253
43727-253 253 253 253 253 253 253 253 253 253 253 253
43728-253 253 253 250 250 250 246 246 246 238 238 238
43729-226 226 226 231 231 231 101 101 101 6 6 6
43730- 2 2 6 2 2 6 2 2 6 2 2 6
43731- 2 2 6 2 2 6 2 2 6 2 2 6
43732- 38 38 38 82 82 82 42 42 42 14 14 14
43733- 6 6 6 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 0 0 0 0 0 0
43737- 0 0 0 0 0 0 0 0 0 0 0 0
43738- 0 0 0 0 0 0 0 0 0 0 0 0
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 10 10 10 26 26 26 62 62 62 66 66 66
43743- 2 2 6 2 2 6 2 2 6 6 6 6
43744- 70 70 70 170 170 170 206 206 206 234 234 234
43745-246 246 246 250 250 250 250 250 250 238 238 238
43746-226 226 226 231 231 231 238 238 238 250 250 250
43747-250 250 250 250 250 250 246 246 246 231 231 231
43748-214 214 214 206 206 206 202 202 202 202 202 202
43749-198 198 198 202 202 202 182 182 182 18 18 18
43750- 2 2 6 2 2 6 2 2 6 2 2 6
43751- 2 2 6 2 2 6 2 2 6 2 2 6
43752- 2 2 6 62 62 62 66 66 66 30 30 30
43753- 10 10 10 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 0 0 0 0 0 0
43757- 0 0 0 0 0 0 0 0 0 0 0 0
43758- 0 0 0 0 0 0 0 0 0 0 0 0
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 14 14 14 42 42 42 82 82 82 18 18 18
43763- 2 2 6 2 2 6 2 2 6 10 10 10
43764- 94 94 94 182 182 182 218 218 218 242 242 242
43765-250 250 250 253 253 253 253 253 253 250 250 250
43766-234 234 234 253 253 253 253 253 253 253 253 253
43767-253 253 253 253 253 253 253 253 253 246 246 246
43768-238 238 238 226 226 226 210 210 210 202 202 202
43769-195 195 195 195 195 195 210 210 210 158 158 158
43770- 6 6 6 14 14 14 50 50 50 14 14 14
43771- 2 2 6 2 2 6 2 2 6 2 2 6
43772- 2 2 6 6 6 6 86 86 86 46 46 46
43773- 18 18 18 6 6 6 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 0 0 0 0 0 0 0 0 0
43777- 0 0 0 0 0 0 0 0 0 0 0 0
43778- 0 0 0 0 0 0 0 0 0 0 0 0
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 6 6 6
43782- 22 22 22 54 54 54 70 70 70 2 2 6
43783- 2 2 6 10 10 10 2 2 6 22 22 22
43784-166 166 166 231 231 231 250 250 250 253 253 253
43785-253 253 253 253 253 253 253 253 253 250 250 250
43786-242 242 242 253 253 253 253 253 253 253 253 253
43787-253 253 253 253 253 253 253 253 253 253 253 253
43788-253 253 253 253 253 253 253 253 253 246 246 246
43789-231 231 231 206 206 206 198 198 198 226 226 226
43790- 94 94 94 2 2 6 6 6 6 38 38 38
43791- 30 30 30 2 2 6 2 2 6 2 2 6
43792- 2 2 6 2 2 6 62 62 62 66 66 66
43793- 26 26 26 10 10 10 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 0 0 0 0 0 0 0 0 0
43797- 0 0 0 0 0 0 0 0 0 0 0 0
43798- 0 0 0 0 0 0 0 0 0 0 0 0
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 10 10 10
43802- 30 30 30 74 74 74 50 50 50 2 2 6
43803- 26 26 26 26 26 26 2 2 6 106 106 106
43804-238 238 238 253 253 253 253 253 253 253 253 253
43805-253 253 253 253 253 253 253 253 253 253 253 253
43806-253 253 253 253 253 253 253 253 253 253 253 253
43807-253 253 253 253 253 253 253 253 253 253 253 253
43808-253 253 253 253 253 253 253 253 253 253 253 253
43809-253 253 253 246 246 246 218 218 218 202 202 202
43810-210 210 210 14 14 14 2 2 6 2 2 6
43811- 30 30 30 22 22 22 2 2 6 2 2 6
43812- 2 2 6 2 2 6 18 18 18 86 86 86
43813- 42 42 42 14 14 14 0 0 0 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 0 0 0 0 0 0 0 0 0 0 0 0
43817- 0 0 0 0 0 0 0 0 0 0 0 0
43818- 0 0 0 0 0 0 0 0 0 0 0 0
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 14 14 14
43822- 42 42 42 90 90 90 22 22 22 2 2 6
43823- 42 42 42 2 2 6 18 18 18 218 218 218
43824-253 253 253 253 253 253 253 253 253 253 253 253
43825-253 253 253 253 253 253 253 253 253 253 253 253
43826-253 253 253 253 253 253 253 253 253 253 253 253
43827-253 253 253 253 253 253 253 253 253 253 253 253
43828-253 253 253 253 253 253 253 253 253 253 253 253
43829-253 253 253 253 253 253 250 250 250 221 221 221
43830-218 218 218 101 101 101 2 2 6 14 14 14
43831- 18 18 18 38 38 38 10 10 10 2 2 6
43832- 2 2 6 2 2 6 2 2 6 78 78 78
43833- 58 58 58 22 22 22 6 6 6 0 0 0
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 0 0 0 0 0 0 0 0 0 0 0 0
43837- 0 0 0 0 0 0 0 0 0 0 0 0
43838- 0 0 0 0 0 0 0 0 0 0 0 0
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 6 6 6 18 18 18
43842- 54 54 54 82 82 82 2 2 6 26 26 26
43843- 22 22 22 2 2 6 123 123 123 253 253 253
43844-253 253 253 253 253 253 253 253 253 253 253 253
43845-253 253 253 253 253 253 253 253 253 253 253 253
43846-253 253 253 253 253 253 253 253 253 253 253 253
43847-253 253 253 253 253 253 253 253 253 253 253 253
43848-253 253 253 253 253 253 253 253 253 253 253 253
43849-253 253 253 253 253 253 253 253 253 250 250 250
43850-238 238 238 198 198 198 6 6 6 38 38 38
43851- 58 58 58 26 26 26 38 38 38 2 2 6
43852- 2 2 6 2 2 6 2 2 6 46 46 46
43853- 78 78 78 30 30 30 10 10 10 0 0 0
43854- 0 0 0 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 0 0 0
43856- 0 0 0 0 0 0 0 0 0 0 0 0
43857- 0 0 0 0 0 0 0 0 0 0 0 0
43858- 0 0 0 0 0 0 0 0 0 0 0 0
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 10 10 10 30 30 30
43862- 74 74 74 58 58 58 2 2 6 42 42 42
43863- 2 2 6 22 22 22 231 231 231 253 253 253
43864-253 253 253 253 253 253 253 253 253 253 253 253
43865-253 253 253 253 253 253 253 253 253 250 250 250
43866-253 253 253 253 253 253 253 253 253 253 253 253
43867-253 253 253 253 253 253 253 253 253 253 253 253
43868-253 253 253 253 253 253 253 253 253 253 253 253
43869-253 253 253 253 253 253 253 253 253 253 253 253
43870-253 253 253 246 246 246 46 46 46 38 38 38
43871- 42 42 42 14 14 14 38 38 38 14 14 14
43872- 2 2 6 2 2 6 2 2 6 6 6 6
43873- 86 86 86 46 46 46 14 14 14 0 0 0
43874- 0 0 0 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 0 0 0
43876- 0 0 0 0 0 0 0 0 0 0 0 0
43877- 0 0 0 0 0 0 0 0 0 0 0 0
43878- 0 0 0 0 0 0 0 0 0 0 0 0
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 6 6 6 14 14 14 42 42 42
43882- 90 90 90 18 18 18 18 18 18 26 26 26
43883- 2 2 6 116 116 116 253 253 253 253 253 253
43884-253 253 253 253 253 253 253 253 253 253 253 253
43885-253 253 253 253 253 253 250 250 250 238 238 238
43886-253 253 253 253 253 253 253 253 253 253 253 253
43887-253 253 253 253 253 253 253 253 253 253 253 253
43888-253 253 253 253 253 253 253 253 253 253 253 253
43889-253 253 253 253 253 253 253 253 253 253 253 253
43890-253 253 253 253 253 253 94 94 94 6 6 6
43891- 2 2 6 2 2 6 10 10 10 34 34 34
43892- 2 2 6 2 2 6 2 2 6 2 2 6
43893- 74 74 74 58 58 58 22 22 22 6 6 6
43894- 0 0 0 0 0 0 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 0 0 0
43896- 0 0 0 0 0 0 0 0 0 0 0 0
43897- 0 0 0 0 0 0 0 0 0 0 0 0
43898- 0 0 0 0 0 0 0 0 0 0 0 0
43899- 0 0 0 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 10 10 10 26 26 26 66 66 66
43902- 82 82 82 2 2 6 38 38 38 6 6 6
43903- 14 14 14 210 210 210 253 253 253 253 253 253
43904-253 253 253 253 253 253 253 253 253 253 253 253
43905-253 253 253 253 253 253 246 246 246 242 242 242
43906-253 253 253 253 253 253 253 253 253 253 253 253
43907-253 253 253 253 253 253 253 253 253 253 253 253
43908-253 253 253 253 253 253 253 253 253 253 253 253
43909-253 253 253 253 253 253 253 253 253 253 253 253
43910-253 253 253 253 253 253 144 144 144 2 2 6
43911- 2 2 6 2 2 6 2 2 6 46 46 46
43912- 2 2 6 2 2 6 2 2 6 2 2 6
43913- 42 42 42 74 74 74 30 30 30 10 10 10
43914- 0 0 0 0 0 0 0 0 0 0 0 0
43915- 0 0 0 0 0 0 0 0 0 0 0 0
43916- 0 0 0 0 0 0 0 0 0 0 0 0
43917- 0 0 0 0 0 0 0 0 0 0 0 0
43918- 0 0 0 0 0 0 0 0 0 0 0 0
43919- 0 0 0 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 6 6 6 14 14 14 42 42 42 90 90 90
43922- 26 26 26 6 6 6 42 42 42 2 2 6
43923- 74 74 74 250 250 250 253 253 253 253 253 253
43924-253 253 253 253 253 253 253 253 253 253 253 253
43925-253 253 253 253 253 253 242 242 242 242 242 242
43926-253 253 253 253 253 253 253 253 253 253 253 253
43927-253 253 253 253 253 253 253 253 253 253 253 253
43928-253 253 253 253 253 253 253 253 253 253 253 253
43929-253 253 253 253 253 253 253 253 253 253 253 253
43930-253 253 253 253 253 253 182 182 182 2 2 6
43931- 2 2 6 2 2 6 2 2 6 46 46 46
43932- 2 2 6 2 2 6 2 2 6 2 2 6
43933- 10 10 10 86 86 86 38 38 38 10 10 10
43934- 0 0 0 0 0 0 0 0 0 0 0 0
43935- 0 0 0 0 0 0 0 0 0 0 0 0
43936- 0 0 0 0 0 0 0 0 0 0 0 0
43937- 0 0 0 0 0 0 0 0 0 0 0 0
43938- 0 0 0 0 0 0 0 0 0 0 0 0
43939- 0 0 0 0 0 0 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 10 10 10 26 26 26 66 66 66 82 82 82
43942- 2 2 6 22 22 22 18 18 18 2 2 6
43943-149 149 149 253 253 253 253 253 253 253 253 253
43944-253 253 253 253 253 253 253 253 253 253 253 253
43945-253 253 253 253 253 253 234 234 234 242 242 242
43946-253 253 253 253 253 253 253 253 253 253 253 253
43947-253 253 253 253 253 253 253 253 253 253 253 253
43948-253 253 253 253 253 253 253 253 253 253 253 253
43949-253 253 253 253 253 253 253 253 253 253 253 253
43950-253 253 253 253 253 253 206 206 206 2 2 6
43951- 2 2 6 2 2 6 2 2 6 38 38 38
43952- 2 2 6 2 2 6 2 2 6 2 2 6
43953- 6 6 6 86 86 86 46 46 46 14 14 14
43954- 0 0 0 0 0 0 0 0 0 0 0 0
43955- 0 0 0 0 0 0 0 0 0 0 0 0
43956- 0 0 0 0 0 0 0 0 0 0 0 0
43957- 0 0 0 0 0 0 0 0 0 0 0 0
43958- 0 0 0 0 0 0 0 0 0 0 0 0
43959- 0 0 0 0 0 0 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 6 6 6
43961- 18 18 18 46 46 46 86 86 86 18 18 18
43962- 2 2 6 34 34 34 10 10 10 6 6 6
43963-210 210 210 253 253 253 253 253 253 253 253 253
43964-253 253 253 253 253 253 253 253 253 253 253 253
43965-253 253 253 253 253 253 234 234 234 242 242 242
43966-253 253 253 253 253 253 253 253 253 253 253 253
43967-253 253 253 253 253 253 253 253 253 253 253 253
43968-253 253 253 253 253 253 253 253 253 253 253 253
43969-253 253 253 253 253 253 253 253 253 253 253 253
43970-253 253 253 253 253 253 221 221 221 6 6 6
43971- 2 2 6 2 2 6 6 6 6 30 30 30
43972- 2 2 6 2 2 6 2 2 6 2 2 6
43973- 2 2 6 82 82 82 54 54 54 18 18 18
43974- 6 6 6 0 0 0 0 0 0 0 0 0
43975- 0 0 0 0 0 0 0 0 0 0 0 0
43976- 0 0 0 0 0 0 0 0 0 0 0 0
43977- 0 0 0 0 0 0 0 0 0 0 0 0
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 10 10 10
43981- 26 26 26 66 66 66 62 62 62 2 2 6
43982- 2 2 6 38 38 38 10 10 10 26 26 26
43983-238 238 238 253 253 253 253 253 253 253 253 253
43984-253 253 253 253 253 253 253 253 253 253 253 253
43985-253 253 253 253 253 253 231 231 231 238 238 238
43986-253 253 253 253 253 253 253 253 253 253 253 253
43987-253 253 253 253 253 253 253 253 253 253 253 253
43988-253 253 253 253 253 253 253 253 253 253 253 253
43989-253 253 253 253 253 253 253 253 253 253 253 253
43990-253 253 253 253 253 253 231 231 231 6 6 6
43991- 2 2 6 2 2 6 10 10 10 30 30 30
43992- 2 2 6 2 2 6 2 2 6 2 2 6
43993- 2 2 6 66 66 66 58 58 58 22 22 22
43994- 6 6 6 0 0 0 0 0 0 0 0 0
43995- 0 0 0 0 0 0 0 0 0 0 0 0
43996- 0 0 0 0 0 0 0 0 0 0 0 0
43997- 0 0 0 0 0 0 0 0 0 0 0 0
43998- 0 0 0 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 10 10 10
44001- 38 38 38 78 78 78 6 6 6 2 2 6
44002- 2 2 6 46 46 46 14 14 14 42 42 42
44003-246 246 246 253 253 253 253 253 253 253 253 253
44004-253 253 253 253 253 253 253 253 253 253 253 253
44005-253 253 253 253 253 253 231 231 231 242 242 242
44006-253 253 253 253 253 253 253 253 253 253 253 253
44007-253 253 253 253 253 253 253 253 253 253 253 253
44008-253 253 253 253 253 253 253 253 253 253 253 253
44009-253 253 253 253 253 253 253 253 253 253 253 253
44010-253 253 253 253 253 253 234 234 234 10 10 10
44011- 2 2 6 2 2 6 22 22 22 14 14 14
44012- 2 2 6 2 2 6 2 2 6 2 2 6
44013- 2 2 6 66 66 66 62 62 62 22 22 22
44014- 6 6 6 0 0 0 0 0 0 0 0 0
44015- 0 0 0 0 0 0 0 0 0 0 0 0
44016- 0 0 0 0 0 0 0 0 0 0 0 0
44017- 0 0 0 0 0 0 0 0 0 0 0 0
44018- 0 0 0 0 0 0 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 6 6 6 18 18 18
44021- 50 50 50 74 74 74 2 2 6 2 2 6
44022- 14 14 14 70 70 70 34 34 34 62 62 62
44023-250 250 250 253 253 253 253 253 253 253 253 253
44024-253 253 253 253 253 253 253 253 253 253 253 253
44025-253 253 253 253 253 253 231 231 231 246 246 246
44026-253 253 253 253 253 253 253 253 253 253 253 253
44027-253 253 253 253 253 253 253 253 253 253 253 253
44028-253 253 253 253 253 253 253 253 253 253 253 253
44029-253 253 253 253 253 253 253 253 253 253 253 253
44030-253 253 253 253 253 253 234 234 234 14 14 14
44031- 2 2 6 2 2 6 30 30 30 2 2 6
44032- 2 2 6 2 2 6 2 2 6 2 2 6
44033- 2 2 6 66 66 66 62 62 62 22 22 22
44034- 6 6 6 0 0 0 0 0 0 0 0 0
44035- 0 0 0 0 0 0 0 0 0 0 0 0
44036- 0 0 0 0 0 0 0 0 0 0 0 0
44037- 0 0 0 0 0 0 0 0 0 0 0 0
44038- 0 0 0 0 0 0 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 6 6 6 18 18 18
44041- 54 54 54 62 62 62 2 2 6 2 2 6
44042- 2 2 6 30 30 30 46 46 46 70 70 70
44043-250 250 250 253 253 253 253 253 253 253 253 253
44044-253 253 253 253 253 253 253 253 253 253 253 253
44045-253 253 253 253 253 253 231 231 231 246 246 246
44046-253 253 253 253 253 253 253 253 253 253 253 253
44047-253 253 253 253 253 253 253 253 253 253 253 253
44048-253 253 253 253 253 253 253 253 253 253 253 253
44049-253 253 253 253 253 253 253 253 253 253 253 253
44050-253 253 253 253 253 253 226 226 226 10 10 10
44051- 2 2 6 6 6 6 30 30 30 2 2 6
44052- 2 2 6 2 2 6 2 2 6 2 2 6
44053- 2 2 6 66 66 66 58 58 58 22 22 22
44054- 6 6 6 0 0 0 0 0 0 0 0 0
44055- 0 0 0 0 0 0 0 0 0 0 0 0
44056- 0 0 0 0 0 0 0 0 0 0 0 0
44057- 0 0 0 0 0 0 0 0 0 0 0 0
44058- 0 0 0 0 0 0 0 0 0 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 6 6 6 22 22 22
44061- 58 58 58 62 62 62 2 2 6 2 2 6
44062- 2 2 6 2 2 6 30 30 30 78 78 78
44063-250 250 250 253 253 253 253 253 253 253 253 253
44064-253 253 253 253 253 253 253 253 253 253 253 253
44065-253 253 253 253 253 253 231 231 231 246 246 246
44066-253 253 253 253 253 253 253 253 253 253 253 253
44067-253 253 253 253 253 253 253 253 253 253 253 253
44068-253 253 253 253 253 253 253 253 253 253 253 253
44069-253 253 253 253 253 253 253 253 253 253 253 253
44070-253 253 253 253 253 253 206 206 206 2 2 6
44071- 22 22 22 34 34 34 18 14 6 22 22 22
44072- 26 26 26 18 18 18 6 6 6 2 2 6
44073- 2 2 6 82 82 82 54 54 54 18 18 18
44074- 6 6 6 0 0 0 0 0 0 0 0 0
44075- 0 0 0 0 0 0 0 0 0 0 0 0
44076- 0 0 0 0 0 0 0 0 0 0 0 0
44077- 0 0 0 0 0 0 0 0 0 0 0 0
44078- 0 0 0 0 0 0 0 0 0 0 0 0
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 6 6 6 26 26 26
44081- 62 62 62 106 106 106 74 54 14 185 133 11
44082-210 162 10 121 92 8 6 6 6 62 62 62
44083-238 238 238 253 253 253 253 253 253 253 253 253
44084-253 253 253 253 253 253 253 253 253 253 253 253
44085-253 253 253 253 253 253 231 231 231 246 246 246
44086-253 253 253 253 253 253 253 253 253 253 253 253
44087-253 253 253 253 253 253 253 253 253 253 253 253
44088-253 253 253 253 253 253 253 253 253 253 253 253
44089-253 253 253 253 253 253 253 253 253 253 253 253
44090-253 253 253 253 253 253 158 158 158 18 18 18
44091- 14 14 14 2 2 6 2 2 6 2 2 6
44092- 6 6 6 18 18 18 66 66 66 38 38 38
44093- 6 6 6 94 94 94 50 50 50 18 18 18
44094- 6 6 6 0 0 0 0 0 0 0 0 0
44095- 0 0 0 0 0 0 0 0 0 0 0 0
44096- 0 0 0 0 0 0 0 0 0 0 0 0
44097- 0 0 0 0 0 0 0 0 0 0 0 0
44098- 0 0 0 0 0 0 0 0 0 0 0 0
44099- 0 0 0 0 0 0 0 0 0 6 6 6
44100- 10 10 10 10 10 10 18 18 18 38 38 38
44101- 78 78 78 142 134 106 216 158 10 242 186 14
44102-246 190 14 246 190 14 156 118 10 10 10 10
44103- 90 90 90 238 238 238 253 253 253 253 253 253
44104-253 253 253 253 253 253 253 253 253 253 253 253
44105-253 253 253 253 253 253 231 231 231 250 250 250
44106-253 253 253 253 253 253 253 253 253 253 253 253
44107-253 253 253 253 253 253 253 253 253 253 253 253
44108-253 253 253 253 253 253 253 253 253 253 253 253
44109-253 253 253 253 253 253 253 253 253 246 230 190
44110-238 204 91 238 204 91 181 142 44 37 26 9
44111- 2 2 6 2 2 6 2 2 6 2 2 6
44112- 2 2 6 2 2 6 38 38 38 46 46 46
44113- 26 26 26 106 106 106 54 54 54 18 18 18
44114- 6 6 6 0 0 0 0 0 0 0 0 0
44115- 0 0 0 0 0 0 0 0 0 0 0 0
44116- 0 0 0 0 0 0 0 0 0 0 0 0
44117- 0 0 0 0 0 0 0 0 0 0 0 0
44118- 0 0 0 0 0 0 0 0 0 0 0 0
44119- 0 0 0 6 6 6 14 14 14 22 22 22
44120- 30 30 30 38 38 38 50 50 50 70 70 70
44121-106 106 106 190 142 34 226 170 11 242 186 14
44122-246 190 14 246 190 14 246 190 14 154 114 10
44123- 6 6 6 74 74 74 226 226 226 253 253 253
44124-253 253 253 253 253 253 253 253 253 253 253 253
44125-253 253 253 253 253 253 231 231 231 250 250 250
44126-253 253 253 253 253 253 253 253 253 253 253 253
44127-253 253 253 253 253 253 253 253 253 253 253 253
44128-253 253 253 253 253 253 253 253 253 253 253 253
44129-253 253 253 253 253 253 253 253 253 228 184 62
44130-241 196 14 241 208 19 232 195 16 38 30 10
44131- 2 2 6 2 2 6 2 2 6 2 2 6
44132- 2 2 6 6 6 6 30 30 30 26 26 26
44133-203 166 17 154 142 90 66 66 66 26 26 26
44134- 6 6 6 0 0 0 0 0 0 0 0 0
44135- 0 0 0 0 0 0 0 0 0 0 0 0
44136- 0 0 0 0 0 0 0 0 0 0 0 0
44137- 0 0 0 0 0 0 0 0 0 0 0 0
44138- 0 0 0 0 0 0 0 0 0 0 0 0
44139- 6 6 6 18 18 18 38 38 38 58 58 58
44140- 78 78 78 86 86 86 101 101 101 123 123 123
44141-175 146 61 210 150 10 234 174 13 246 186 14
44142-246 190 14 246 190 14 246 190 14 238 190 10
44143-102 78 10 2 2 6 46 46 46 198 198 198
44144-253 253 253 253 253 253 253 253 253 253 253 253
44145-253 253 253 253 253 253 234 234 234 242 242 242
44146-253 253 253 253 253 253 253 253 253 253 253 253
44147-253 253 253 253 253 253 253 253 253 253 253 253
44148-253 253 253 253 253 253 253 253 253 253 253 253
44149-253 253 253 253 253 253 253 253 253 224 178 62
44150-242 186 14 241 196 14 210 166 10 22 18 6
44151- 2 2 6 2 2 6 2 2 6 2 2 6
44152- 2 2 6 2 2 6 6 6 6 121 92 8
44153-238 202 15 232 195 16 82 82 82 34 34 34
44154- 10 10 10 0 0 0 0 0 0 0 0 0
44155- 0 0 0 0 0 0 0 0 0 0 0 0
44156- 0 0 0 0 0 0 0 0 0 0 0 0
44157- 0 0 0 0 0 0 0 0 0 0 0 0
44158- 0 0 0 0 0 0 0 0 0 0 0 0
44159- 14 14 14 38 38 38 70 70 70 154 122 46
44160-190 142 34 200 144 11 197 138 11 197 138 11
44161-213 154 11 226 170 11 242 186 14 246 190 14
44162-246 190 14 246 190 14 246 190 14 246 190 14
44163-225 175 15 46 32 6 2 2 6 22 22 22
44164-158 158 158 250 250 250 253 253 253 253 253 253
44165-253 253 253 253 253 253 253 253 253 253 253 253
44166-253 253 253 253 253 253 253 253 253 253 253 253
44167-253 253 253 253 253 253 253 253 253 253 253 253
44168-253 253 253 253 253 253 253 253 253 253 253 253
44169-253 253 253 250 250 250 242 242 242 224 178 62
44170-239 182 13 236 186 11 213 154 11 46 32 6
44171- 2 2 6 2 2 6 2 2 6 2 2 6
44172- 2 2 6 2 2 6 61 42 6 225 175 15
44173-238 190 10 236 186 11 112 100 78 42 42 42
44174- 14 14 14 0 0 0 0 0 0 0 0 0
44175- 0 0 0 0 0 0 0 0 0 0 0 0
44176- 0 0 0 0 0 0 0 0 0 0 0 0
44177- 0 0 0 0 0 0 0 0 0 0 0 0
44178- 0 0 0 0 0 0 0 0 0 6 6 6
44179- 22 22 22 54 54 54 154 122 46 213 154 11
44180-226 170 11 230 174 11 226 170 11 226 170 11
44181-236 178 12 242 186 14 246 190 14 246 190 14
44182-246 190 14 246 190 14 246 190 14 246 190 14
44183-241 196 14 184 144 12 10 10 10 2 2 6
44184- 6 6 6 116 116 116 242 242 242 253 253 253
44185-253 253 253 253 253 253 253 253 253 253 253 253
44186-253 253 253 253 253 253 253 253 253 253 253 253
44187-253 253 253 253 253 253 253 253 253 253 253 253
44188-253 253 253 253 253 253 253 253 253 253 253 253
44189-253 253 253 231 231 231 198 198 198 214 170 54
44190-236 178 12 236 178 12 210 150 10 137 92 6
44191- 18 14 6 2 2 6 2 2 6 2 2 6
44192- 6 6 6 70 47 6 200 144 11 236 178 12
44193-239 182 13 239 182 13 124 112 88 58 58 58
44194- 22 22 22 6 6 6 0 0 0 0 0 0
44195- 0 0 0 0 0 0 0 0 0 0 0 0
44196- 0 0 0 0 0 0 0 0 0 0 0 0
44197- 0 0 0 0 0 0 0 0 0 0 0 0
44198- 0 0 0 0 0 0 0 0 0 10 10 10
44199- 30 30 30 70 70 70 180 133 36 226 170 11
44200-239 182 13 242 186 14 242 186 14 246 186 14
44201-246 190 14 246 190 14 246 190 14 246 190 14
44202-246 190 14 246 190 14 246 190 14 246 190 14
44203-246 190 14 232 195 16 98 70 6 2 2 6
44204- 2 2 6 2 2 6 66 66 66 221 221 221
44205-253 253 253 253 253 253 253 253 253 253 253 253
44206-253 253 253 253 253 253 253 253 253 253 253 253
44207-253 253 253 253 253 253 253 253 253 253 253 253
44208-253 253 253 253 253 253 253 253 253 253 253 253
44209-253 253 253 206 206 206 198 198 198 214 166 58
44210-230 174 11 230 174 11 216 158 10 192 133 9
44211-163 110 8 116 81 8 102 78 10 116 81 8
44212-167 114 7 197 138 11 226 170 11 239 182 13
44213-242 186 14 242 186 14 162 146 94 78 78 78
44214- 34 34 34 14 14 14 6 6 6 0 0 0
44215- 0 0 0 0 0 0 0 0 0 0 0 0
44216- 0 0 0 0 0 0 0 0 0 0 0 0
44217- 0 0 0 0 0 0 0 0 0 0 0 0
44218- 0 0 0 0 0 0 0 0 0 6 6 6
44219- 30 30 30 78 78 78 190 142 34 226 170 11
44220-239 182 13 246 190 14 246 190 14 246 190 14
44221-246 190 14 246 190 14 246 190 14 246 190 14
44222-246 190 14 246 190 14 246 190 14 246 190 14
44223-246 190 14 241 196 14 203 166 17 22 18 6
44224- 2 2 6 2 2 6 2 2 6 38 38 38
44225-218 218 218 253 253 253 253 253 253 253 253 253
44226-253 253 253 253 253 253 253 253 253 253 253 253
44227-253 253 253 253 253 253 253 253 253 253 253 253
44228-253 253 253 253 253 253 253 253 253 253 253 253
44229-250 250 250 206 206 206 198 198 198 202 162 69
44230-226 170 11 236 178 12 224 166 10 210 150 10
44231-200 144 11 197 138 11 192 133 9 197 138 11
44232-210 150 10 226 170 11 242 186 14 246 190 14
44233-246 190 14 246 186 14 225 175 15 124 112 88
44234- 62 62 62 30 30 30 14 14 14 6 6 6
44235- 0 0 0 0 0 0 0 0 0 0 0 0
44236- 0 0 0 0 0 0 0 0 0 0 0 0
44237- 0 0 0 0 0 0 0 0 0 0 0 0
44238- 0 0 0 0 0 0 0 0 0 10 10 10
44239- 30 30 30 78 78 78 174 135 50 224 166 10
44240-239 182 13 246 190 14 246 190 14 246 190 14
44241-246 190 14 246 190 14 246 190 14 246 190 14
44242-246 190 14 246 190 14 246 190 14 246 190 14
44243-246 190 14 246 190 14 241 196 14 139 102 15
44244- 2 2 6 2 2 6 2 2 6 2 2 6
44245- 78 78 78 250 250 250 253 253 253 253 253 253
44246-253 253 253 253 253 253 253 253 253 253 253 253
44247-253 253 253 253 253 253 253 253 253 253 253 253
44248-253 253 253 253 253 253 253 253 253 253 253 253
44249-250 250 250 214 214 214 198 198 198 190 150 46
44250-219 162 10 236 178 12 234 174 13 224 166 10
44251-216 158 10 213 154 11 213 154 11 216 158 10
44252-226 170 11 239 182 13 246 190 14 246 190 14
44253-246 190 14 246 190 14 242 186 14 206 162 42
44254-101 101 101 58 58 58 30 30 30 14 14 14
44255- 6 6 6 0 0 0 0 0 0 0 0 0
44256- 0 0 0 0 0 0 0 0 0 0 0 0
44257- 0 0 0 0 0 0 0 0 0 0 0 0
44258- 0 0 0 0 0 0 0 0 0 10 10 10
44259- 30 30 30 74 74 74 174 135 50 216 158 10
44260-236 178 12 246 190 14 246 190 14 246 190 14
44261-246 190 14 246 190 14 246 190 14 246 190 14
44262-246 190 14 246 190 14 246 190 14 246 190 14
44263-246 190 14 246 190 14 241 196 14 226 184 13
44264- 61 42 6 2 2 6 2 2 6 2 2 6
44265- 22 22 22 238 238 238 253 253 253 253 253 253
44266-253 253 253 253 253 253 253 253 253 253 253 253
44267-253 253 253 253 253 253 253 253 253 253 253 253
44268-253 253 253 253 253 253 253 253 253 253 253 253
44269-253 253 253 226 226 226 187 187 187 180 133 36
44270-216 158 10 236 178 12 239 182 13 236 178 12
44271-230 174 11 226 170 11 226 170 11 230 174 11
44272-236 178 12 242 186 14 246 190 14 246 190 14
44273-246 190 14 246 190 14 246 186 14 239 182 13
44274-206 162 42 106 106 106 66 66 66 34 34 34
44275- 14 14 14 6 6 6 0 0 0 0 0 0
44276- 0 0 0 0 0 0 0 0 0 0 0 0
44277- 0 0 0 0 0 0 0 0 0 0 0 0
44278- 0 0 0 0 0 0 0 0 0 6 6 6
44279- 26 26 26 70 70 70 163 133 67 213 154 11
44280-236 178 12 246 190 14 246 190 14 246 190 14
44281-246 190 14 246 190 14 246 190 14 246 190 14
44282-246 190 14 246 190 14 246 190 14 246 190 14
44283-246 190 14 246 190 14 246 190 14 241 196 14
44284-190 146 13 18 14 6 2 2 6 2 2 6
44285- 46 46 46 246 246 246 253 253 253 253 253 253
44286-253 253 253 253 253 253 253 253 253 253 253 253
44287-253 253 253 253 253 253 253 253 253 253 253 253
44288-253 253 253 253 253 253 253 253 253 253 253 253
44289-253 253 253 221 221 221 86 86 86 156 107 11
44290-216 158 10 236 178 12 242 186 14 246 186 14
44291-242 186 14 239 182 13 239 182 13 242 186 14
44292-242 186 14 246 186 14 246 190 14 246 190 14
44293-246 190 14 246 190 14 246 190 14 246 190 14
44294-242 186 14 225 175 15 142 122 72 66 66 66
44295- 30 30 30 10 10 10 0 0 0 0 0 0
44296- 0 0 0 0 0 0 0 0 0 0 0 0
44297- 0 0 0 0 0 0 0 0 0 0 0 0
44298- 0 0 0 0 0 0 0 0 0 6 6 6
44299- 26 26 26 70 70 70 163 133 67 210 150 10
44300-236 178 12 246 190 14 246 190 14 246 190 14
44301-246 190 14 246 190 14 246 190 14 246 190 14
44302-246 190 14 246 190 14 246 190 14 246 190 14
44303-246 190 14 246 190 14 246 190 14 246 190 14
44304-232 195 16 121 92 8 34 34 34 106 106 106
44305-221 221 221 253 253 253 253 253 253 253 253 253
44306-253 253 253 253 253 253 253 253 253 253 253 253
44307-253 253 253 253 253 253 253 253 253 253 253 253
44308-253 253 253 253 253 253 253 253 253 253 253 253
44309-242 242 242 82 82 82 18 14 6 163 110 8
44310-216 158 10 236 178 12 242 186 14 246 190 14
44311-246 190 14 246 190 14 246 190 14 246 190 14
44312-246 190 14 246 190 14 246 190 14 246 190 14
44313-246 190 14 246 190 14 246 190 14 246 190 14
44314-246 190 14 246 190 14 242 186 14 163 133 67
44315- 46 46 46 18 18 18 6 6 6 0 0 0
44316- 0 0 0 0 0 0 0 0 0 0 0 0
44317- 0 0 0 0 0 0 0 0 0 0 0 0
44318- 0 0 0 0 0 0 0 0 0 10 10 10
44319- 30 30 30 78 78 78 163 133 67 210 150 10
44320-236 178 12 246 186 14 246 190 14 246 190 14
44321-246 190 14 246 190 14 246 190 14 246 190 14
44322-246 190 14 246 190 14 246 190 14 246 190 14
44323-246 190 14 246 190 14 246 190 14 246 190 14
44324-241 196 14 215 174 15 190 178 144 253 253 253
44325-253 253 253 253 253 253 253 253 253 253 253 253
44326-253 253 253 253 253 253 253 253 253 253 253 253
44327-253 253 253 253 253 253 253 253 253 253 253 253
44328-253 253 253 253 253 253 253 253 253 218 218 218
44329- 58 58 58 2 2 6 22 18 6 167 114 7
44330-216 158 10 236 178 12 246 186 14 246 190 14
44331-246 190 14 246 190 14 246 190 14 246 190 14
44332-246 190 14 246 190 14 246 190 14 246 190 14
44333-246 190 14 246 190 14 246 190 14 246 190 14
44334-246 190 14 246 186 14 242 186 14 190 150 46
44335- 54 54 54 22 22 22 6 6 6 0 0 0
44336- 0 0 0 0 0 0 0 0 0 0 0 0
44337- 0 0 0 0 0 0 0 0 0 0 0 0
44338- 0 0 0 0 0 0 0 0 0 14 14 14
44339- 38 38 38 86 86 86 180 133 36 213 154 11
44340-236 178 12 246 186 14 246 190 14 246 190 14
44341-246 190 14 246 190 14 246 190 14 246 190 14
44342-246 190 14 246 190 14 246 190 14 246 190 14
44343-246 190 14 246 190 14 246 190 14 246 190 14
44344-246 190 14 232 195 16 190 146 13 214 214 214
44345-253 253 253 253 253 253 253 253 253 253 253 253
44346-253 253 253 253 253 253 253 253 253 253 253 253
44347-253 253 253 253 253 253 253 253 253 253 253 253
44348-253 253 253 250 250 250 170 170 170 26 26 26
44349- 2 2 6 2 2 6 37 26 9 163 110 8
44350-219 162 10 239 182 13 246 186 14 246 190 14
44351-246 190 14 246 190 14 246 190 14 246 190 14
44352-246 190 14 246 190 14 246 190 14 246 190 14
44353-246 190 14 246 190 14 246 190 14 246 190 14
44354-246 186 14 236 178 12 224 166 10 142 122 72
44355- 46 46 46 18 18 18 6 6 6 0 0 0
44356- 0 0 0 0 0 0 0 0 0 0 0 0
44357- 0 0 0 0 0 0 0 0 0 0 0 0
44358- 0 0 0 0 0 0 6 6 6 18 18 18
44359- 50 50 50 109 106 95 192 133 9 224 166 10
44360-242 186 14 246 190 14 246 190 14 246 190 14
44361-246 190 14 246 190 14 246 190 14 246 190 14
44362-246 190 14 246 190 14 246 190 14 246 190 14
44363-246 190 14 246 190 14 246 190 14 246 190 14
44364-242 186 14 226 184 13 210 162 10 142 110 46
44365-226 226 226 253 253 253 253 253 253 253 253 253
44366-253 253 253 253 253 253 253 253 253 253 253 253
44367-253 253 253 253 253 253 253 253 253 253 253 253
44368-198 198 198 66 66 66 2 2 6 2 2 6
44369- 2 2 6 2 2 6 50 34 6 156 107 11
44370-219 162 10 239 182 13 246 186 14 246 190 14
44371-246 190 14 246 190 14 246 190 14 246 190 14
44372-246 190 14 246 190 14 246 190 14 246 190 14
44373-246 190 14 246 190 14 246 190 14 242 186 14
44374-234 174 13 213 154 11 154 122 46 66 66 66
44375- 30 30 30 10 10 10 0 0 0 0 0 0
44376- 0 0 0 0 0 0 0 0 0 0 0 0
44377- 0 0 0 0 0 0 0 0 0 0 0 0
44378- 0 0 0 0 0 0 6 6 6 22 22 22
44379- 58 58 58 154 121 60 206 145 10 234 174 13
44380-242 186 14 246 186 14 246 190 14 246 190 14
44381-246 190 14 246 190 14 246 190 14 246 190 14
44382-246 190 14 246 190 14 246 190 14 246 190 14
44383-246 190 14 246 190 14 246 190 14 246 190 14
44384-246 186 14 236 178 12 210 162 10 163 110 8
44385- 61 42 6 138 138 138 218 218 218 250 250 250
44386-253 253 253 253 253 253 253 253 253 250 250 250
44387-242 242 242 210 210 210 144 144 144 66 66 66
44388- 6 6 6 2 2 6 2 2 6 2 2 6
44389- 2 2 6 2 2 6 61 42 6 163 110 8
44390-216 158 10 236 178 12 246 190 14 246 190 14
44391-246 190 14 246 190 14 246 190 14 246 190 14
44392-246 190 14 246 190 14 246 190 14 246 190 14
44393-246 190 14 239 182 13 230 174 11 216 158 10
44394-190 142 34 124 112 88 70 70 70 38 38 38
44395- 18 18 18 6 6 6 0 0 0 0 0 0
44396- 0 0 0 0 0 0 0 0 0 0 0 0
44397- 0 0 0 0 0 0 0 0 0 0 0 0
44398- 0 0 0 0 0 0 6 6 6 22 22 22
44399- 62 62 62 168 124 44 206 145 10 224 166 10
44400-236 178 12 239 182 13 242 186 14 242 186 14
44401-246 186 14 246 190 14 246 190 14 246 190 14
44402-246 190 14 246 190 14 246 190 14 246 190 14
44403-246 190 14 246 190 14 246 190 14 246 190 14
44404-246 190 14 236 178 12 216 158 10 175 118 6
44405- 80 54 7 2 2 6 6 6 6 30 30 30
44406- 54 54 54 62 62 62 50 50 50 38 38 38
44407- 14 14 14 2 2 6 2 2 6 2 2 6
44408- 2 2 6 2 2 6 2 2 6 2 2 6
44409- 2 2 6 6 6 6 80 54 7 167 114 7
44410-213 154 11 236 178 12 246 190 14 246 190 14
44411-246 190 14 246 190 14 246 190 14 246 190 14
44412-246 190 14 242 186 14 239 182 13 239 182 13
44413-230 174 11 210 150 10 174 135 50 124 112 88
44414- 82 82 82 54 54 54 34 34 34 18 18 18
44415- 6 6 6 0 0 0 0 0 0 0 0 0
44416- 0 0 0 0 0 0 0 0 0 0 0 0
44417- 0 0 0 0 0 0 0 0 0 0 0 0
44418- 0 0 0 0 0 0 6 6 6 18 18 18
44419- 50 50 50 158 118 36 192 133 9 200 144 11
44420-216 158 10 219 162 10 224 166 10 226 170 11
44421-230 174 11 236 178 12 239 182 13 239 182 13
44422-242 186 14 246 186 14 246 190 14 246 190 14
44423-246 190 14 246 190 14 246 190 14 246 190 14
44424-246 186 14 230 174 11 210 150 10 163 110 8
44425-104 69 6 10 10 10 2 2 6 2 2 6
44426- 2 2 6 2 2 6 2 2 6 2 2 6
44427- 2 2 6 2 2 6 2 2 6 2 2 6
44428- 2 2 6 2 2 6 2 2 6 2 2 6
44429- 2 2 6 6 6 6 91 60 6 167 114 7
44430-206 145 10 230 174 11 242 186 14 246 190 14
44431-246 190 14 246 190 14 246 186 14 242 186 14
44432-239 182 13 230 174 11 224 166 10 213 154 11
44433-180 133 36 124 112 88 86 86 86 58 58 58
44434- 38 38 38 22 22 22 10 10 10 6 6 6
44435- 0 0 0 0 0 0 0 0 0 0 0 0
44436- 0 0 0 0 0 0 0 0 0 0 0 0
44437- 0 0 0 0 0 0 0 0 0 0 0 0
44438- 0 0 0 0 0 0 0 0 0 14 14 14
44439- 34 34 34 70 70 70 138 110 50 158 118 36
44440-167 114 7 180 123 7 192 133 9 197 138 11
44441-200 144 11 206 145 10 213 154 11 219 162 10
44442-224 166 10 230 174 11 239 182 13 242 186 14
44443-246 186 14 246 186 14 246 186 14 246 186 14
44444-239 182 13 216 158 10 185 133 11 152 99 6
44445-104 69 6 18 14 6 2 2 6 2 2 6
44446- 2 2 6 2 2 6 2 2 6 2 2 6
44447- 2 2 6 2 2 6 2 2 6 2 2 6
44448- 2 2 6 2 2 6 2 2 6 2 2 6
44449- 2 2 6 6 6 6 80 54 7 152 99 6
44450-192 133 9 219 162 10 236 178 12 239 182 13
44451-246 186 14 242 186 14 239 182 13 236 178 12
44452-224 166 10 206 145 10 192 133 9 154 121 60
44453- 94 94 94 62 62 62 42 42 42 22 22 22
44454- 14 14 14 6 6 6 0 0 0 0 0 0
44455- 0 0 0 0 0 0 0 0 0 0 0 0
44456- 0 0 0 0 0 0 0 0 0 0 0 0
44457- 0 0 0 0 0 0 0 0 0 0 0 0
44458- 0 0 0 0 0 0 0 0 0 6 6 6
44459- 18 18 18 34 34 34 58 58 58 78 78 78
44460-101 98 89 124 112 88 142 110 46 156 107 11
44461-163 110 8 167 114 7 175 118 6 180 123 7
44462-185 133 11 197 138 11 210 150 10 219 162 10
44463-226 170 11 236 178 12 236 178 12 234 174 13
44464-219 162 10 197 138 11 163 110 8 130 83 6
44465- 91 60 6 10 10 10 2 2 6 2 2 6
44466- 18 18 18 38 38 38 38 38 38 38 38 38
44467- 38 38 38 38 38 38 38 38 38 38 38 38
44468- 38 38 38 38 38 38 26 26 26 2 2 6
44469- 2 2 6 6 6 6 70 47 6 137 92 6
44470-175 118 6 200 144 11 219 162 10 230 174 11
44471-234 174 13 230 174 11 219 162 10 210 150 10
44472-192 133 9 163 110 8 124 112 88 82 82 82
44473- 50 50 50 30 30 30 14 14 14 6 6 6
44474- 0 0 0 0 0 0 0 0 0 0 0 0
44475- 0 0 0 0 0 0 0 0 0 0 0 0
44476- 0 0 0 0 0 0 0 0 0 0 0 0
44477- 0 0 0 0 0 0 0 0 0 0 0 0
44478- 0 0 0 0 0 0 0 0 0 0 0 0
44479- 6 6 6 14 14 14 22 22 22 34 34 34
44480- 42 42 42 58 58 58 74 74 74 86 86 86
44481-101 98 89 122 102 70 130 98 46 121 87 25
44482-137 92 6 152 99 6 163 110 8 180 123 7
44483-185 133 11 197 138 11 206 145 10 200 144 11
44484-180 123 7 156 107 11 130 83 6 104 69 6
44485- 50 34 6 54 54 54 110 110 110 101 98 89
44486- 86 86 86 82 82 82 78 78 78 78 78 78
44487- 78 78 78 78 78 78 78 78 78 78 78 78
44488- 78 78 78 82 82 82 86 86 86 94 94 94
44489-106 106 106 101 101 101 86 66 34 124 80 6
44490-156 107 11 180 123 7 192 133 9 200 144 11
44491-206 145 10 200 144 11 192 133 9 175 118 6
44492-139 102 15 109 106 95 70 70 70 42 42 42
44493- 22 22 22 10 10 10 0 0 0 0 0 0
44494- 0 0 0 0 0 0 0 0 0 0 0 0
44495- 0 0 0 0 0 0 0 0 0 0 0 0
44496- 0 0 0 0 0 0 0 0 0 0 0 0
44497- 0 0 0 0 0 0 0 0 0 0 0 0
44498- 0 0 0 0 0 0 0 0 0 0 0 0
44499- 0 0 0 0 0 0 6 6 6 10 10 10
44500- 14 14 14 22 22 22 30 30 30 38 38 38
44501- 50 50 50 62 62 62 74 74 74 90 90 90
44502-101 98 89 112 100 78 121 87 25 124 80 6
44503-137 92 6 152 99 6 152 99 6 152 99 6
44504-138 86 6 124 80 6 98 70 6 86 66 30
44505-101 98 89 82 82 82 58 58 58 46 46 46
44506- 38 38 38 34 34 34 34 34 34 34 34 34
44507- 34 34 34 34 34 34 34 34 34 34 34 34
44508- 34 34 34 34 34 34 38 38 38 42 42 42
44509- 54 54 54 82 82 82 94 86 76 91 60 6
44510-134 86 6 156 107 11 167 114 7 175 118 6
44511-175 118 6 167 114 7 152 99 6 121 87 25
44512-101 98 89 62 62 62 34 34 34 18 18 18
44513- 6 6 6 0 0 0 0 0 0 0 0 0
44514- 0 0 0 0 0 0 0 0 0 0 0 0
44515- 0 0 0 0 0 0 0 0 0 0 0 0
44516- 0 0 0 0 0 0 0 0 0 0 0 0
44517- 0 0 0 0 0 0 0 0 0 0 0 0
44518- 0 0 0 0 0 0 0 0 0 0 0 0
44519- 0 0 0 0 0 0 0 0 0 0 0 0
44520- 0 0 0 6 6 6 6 6 6 10 10 10
44521- 18 18 18 22 22 22 30 30 30 42 42 42
44522- 50 50 50 66 66 66 86 86 86 101 98 89
44523-106 86 58 98 70 6 104 69 6 104 69 6
44524-104 69 6 91 60 6 82 62 34 90 90 90
44525- 62 62 62 38 38 38 22 22 22 14 14 14
44526- 10 10 10 10 10 10 10 10 10 10 10 10
44527- 10 10 10 10 10 10 6 6 6 10 10 10
44528- 10 10 10 10 10 10 10 10 10 14 14 14
44529- 22 22 22 42 42 42 70 70 70 89 81 66
44530- 80 54 7 104 69 6 124 80 6 137 92 6
44531-134 86 6 116 81 8 100 82 52 86 86 86
44532- 58 58 58 30 30 30 14 14 14 6 6 6
44533- 0 0 0 0 0 0 0 0 0 0 0 0
44534- 0 0 0 0 0 0 0 0 0 0 0 0
44535- 0 0 0 0 0 0 0 0 0 0 0 0
44536- 0 0 0 0 0 0 0 0 0 0 0 0
44537- 0 0 0 0 0 0 0 0 0 0 0 0
44538- 0 0 0 0 0 0 0 0 0 0 0 0
44539- 0 0 0 0 0 0 0 0 0 0 0 0
44540- 0 0 0 0 0 0 0 0 0 0 0 0
44541- 0 0 0 6 6 6 10 10 10 14 14 14
44542- 18 18 18 26 26 26 38 38 38 54 54 54
44543- 70 70 70 86 86 86 94 86 76 89 81 66
44544- 89 81 66 86 86 86 74 74 74 50 50 50
44545- 30 30 30 14 14 14 6 6 6 0 0 0
44546- 0 0 0 0 0 0 0 0 0 0 0 0
44547- 0 0 0 0 0 0 0 0 0 0 0 0
44548- 0 0 0 0 0 0 0 0 0 0 0 0
44549- 6 6 6 18 18 18 34 34 34 58 58 58
44550- 82 82 82 89 81 66 89 81 66 89 81 66
44551- 94 86 66 94 86 76 74 74 74 50 50 50
44552- 26 26 26 14 14 14 6 6 6 0 0 0
44553- 0 0 0 0 0 0 0 0 0 0 0 0
44554- 0 0 0 0 0 0 0 0 0 0 0 0
44555- 0 0 0 0 0 0 0 0 0 0 0 0
44556- 0 0 0 0 0 0 0 0 0 0 0 0
44557- 0 0 0 0 0 0 0 0 0 0 0 0
44558- 0 0 0 0 0 0 0 0 0 0 0 0
44559- 0 0 0 0 0 0 0 0 0 0 0 0
44560- 0 0 0 0 0 0 0 0 0 0 0 0
44561- 0 0 0 0 0 0 0 0 0 0 0 0
44562- 6 6 6 6 6 6 14 14 14 18 18 18
44563- 30 30 30 38 38 38 46 46 46 54 54 54
44564- 50 50 50 42 42 42 30 30 30 18 18 18
44565- 10 10 10 0 0 0 0 0 0 0 0 0
44566- 0 0 0 0 0 0 0 0 0 0 0 0
44567- 0 0 0 0 0 0 0 0 0 0 0 0
44568- 0 0 0 0 0 0 0 0 0 0 0 0
44569- 0 0 0 6 6 6 14 14 14 26 26 26
44570- 38 38 38 50 50 50 58 58 58 58 58 58
44571- 54 54 54 42 42 42 30 30 30 18 18 18
44572- 10 10 10 0 0 0 0 0 0 0 0 0
44573- 0 0 0 0 0 0 0 0 0 0 0 0
44574- 0 0 0 0 0 0 0 0 0 0 0 0
44575- 0 0 0 0 0 0 0 0 0 0 0 0
44576- 0 0 0 0 0 0 0 0 0 0 0 0
44577- 0 0 0 0 0 0 0 0 0 0 0 0
44578- 0 0 0 0 0 0 0 0 0 0 0 0
44579- 0 0 0 0 0 0 0 0 0 0 0 0
44580- 0 0 0 0 0 0 0 0 0 0 0 0
44581- 0 0 0 0 0 0 0 0 0 0 0 0
44582- 0 0 0 0 0 0 0 0 0 6 6 6
44583- 6 6 6 10 10 10 14 14 14 18 18 18
44584- 18 18 18 14 14 14 10 10 10 6 6 6
44585- 0 0 0 0 0 0 0 0 0 0 0 0
44586- 0 0 0 0 0 0 0 0 0 0 0 0
44587- 0 0 0 0 0 0 0 0 0 0 0 0
44588- 0 0 0 0 0 0 0 0 0 0 0 0
44589- 0 0 0 0 0 0 0 0 0 6 6 6
44590- 14 14 14 18 18 18 22 22 22 22 22 22
44591- 18 18 18 14 14 14 10 10 10 6 6 6
44592- 0 0 0 0 0 0 0 0 0 0 0 0
44593- 0 0 0 0 0 0 0 0 0 0 0 0
44594- 0 0 0 0 0 0 0 0 0 0 0 0
44595- 0 0 0 0 0 0 0 0 0 0 0 0
44596- 0 0 0 0 0 0 0 0 0 0 0 0
44597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44610+4 4 4 4 4 4
44611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44624+4 4 4 4 4 4
44625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44638+4 4 4 4 4 4
44639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44652+4 4 4 4 4 4
44653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44666+4 4 4 4 4 4
44667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44680+4 4 4 4 4 4
44681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44685+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44686+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44690+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44691+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44692+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44694+4 4 4 4 4 4
44695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44699+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44700+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44701+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44704+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44705+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44706+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44707+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44708+4 4 4 4 4 4
44709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44713+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44714+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44715+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44718+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44719+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44720+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44721+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44722+4 4 4 4 4 4
44723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44726+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44727+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44728+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44729+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44732+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44733+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44734+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44735+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44736+4 4 4 4 4 4
44737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44741+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44742+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44743+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44744+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44745+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44746+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44747+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44748+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44749+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44750+4 4 4 4 4 4
44751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44754+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44755+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44756+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44757+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44758+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44759+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44760+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44761+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44762+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44763+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44764+4 4 4 4 4 4
44765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44767+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44768+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44769+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44770+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44771+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44772+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44773+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44774+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44775+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44776+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44777+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44778+4 4 4 4 4 4
44779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44781+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44782+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44783+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44784+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44785+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44786+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44787+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44788+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44789+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44790+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44791+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44792+4 4 4 4 4 4
44793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44795+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44796+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44797+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44798+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44799+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44800+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44801+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44802+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44803+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44804+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44805+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44806+4 4 4 4 4 4
44807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44809+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44810+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44811+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44812+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44813+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44814+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44815+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44816+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44817+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44818+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44819+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44820+4 4 4 4 4 4
44821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44822+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44823+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44824+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44825+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44826+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44827+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44828+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44829+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44830+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44831+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44832+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44833+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44834+4 4 4 4 4 4
44835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44836+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44837+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44838+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44839+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44840+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44841+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44842+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44843+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44844+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44845+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44846+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44847+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44848+0 0 0 4 4 4
44849+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44850+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44851+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44852+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44853+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44854+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44855+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44856+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44857+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44858+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44859+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44860+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44861+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44862+2 0 0 0 0 0
44863+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44864+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44865+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44866+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44867+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44868+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44869+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44870+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44871+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44872+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44873+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44874+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44875+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44876+37 38 37 0 0 0
44877+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44878+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
44879+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
44880+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
44881+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
44882+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
44883+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
44884+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
44885+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
44886+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
44887+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
44888+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
44889+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
44890+85 115 134 4 0 0
44891+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
44892+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
44893+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
44894+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
44895+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
44896+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
44897+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
44898+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
44899+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
44900+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
44901+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
44902+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
44903+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
44904+60 73 81 4 0 0
44905+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
44906+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
44907+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
44908+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
44909+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
44910+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
44911+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
44912+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
44913+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
44914+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
44915+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
44916+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
44917+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
44918+16 19 21 4 0 0
44919+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
44920+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
44921+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
44922+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
44923+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
44924+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
44925+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
44926+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
44927+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
44928+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
44929+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
44930+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
44931+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
44932+4 0 0 4 3 3
44933+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
44934+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
44935+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
44936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
44937+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
44938+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
44939+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
44940+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
44941+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
44942+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
44943+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
44944+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
44945+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
44946+3 2 2 4 4 4
44947+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
44948+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
44949+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
44950+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44951+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
44952+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
44953+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
44954+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
44955+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
44956+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
44957+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
44958+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
44959+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
44960+4 4 4 4 4 4
44961+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
44962+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
44963+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
44964+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
44965+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
44966+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
44967+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
44968+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
44969+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
44970+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
44971+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
44972+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
44973+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
44974+4 4 4 4 4 4
44975+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
44976+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
44977+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
44978+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
44979+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
44980+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44981+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
44982+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
44983+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
44984+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
44985+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
44986+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
44987+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
44988+5 5 5 5 5 5
44989+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
44990+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
44991+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
44992+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
44993+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
44994+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44995+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
44996+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
44997+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
44998+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
44999+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45000+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45001+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45002+5 5 5 4 4 4
45003+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45004+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45005+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45006+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45007+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45008+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45009+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45010+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45011+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45012+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45013+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45014+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016+4 4 4 4 4 4
45017+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45018+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45019+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45020+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45021+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45022+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45023+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45024+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45025+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45026+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45027+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45028+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030+4 4 4 4 4 4
45031+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45032+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45033+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45034+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45035+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45036+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45037+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45038+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45039+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45040+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45041+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044+4 4 4 4 4 4
45045+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45046+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45047+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45048+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45049+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45050+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45051+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45052+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45053+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45054+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45055+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058+4 4 4 4 4 4
45059+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45060+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45061+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45062+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45063+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45064+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45065+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45066+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45067+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45068+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45069+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072+4 4 4 4 4 4
45073+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45074+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45075+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45076+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45077+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45078+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45079+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45080+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45081+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45082+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45083+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086+4 4 4 4 4 4
45087+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45088+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45089+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45090+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45091+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45092+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45093+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45094+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45095+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45096+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45097+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45100+4 4 4 4 4 4
45101+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45102+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45103+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45104+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45105+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45106+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45107+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45108+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45109+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45110+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45111+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45114+4 4 4 4 4 4
45115+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45116+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45117+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45118+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45119+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45120+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45121+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45122+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45123+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45124+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45125+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45128+4 4 4 4 4 4
45129+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45130+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45131+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45132+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45133+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45134+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45135+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45136+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45137+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45138+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45139+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45142+4 4 4 4 4 4
45143+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45144+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45145+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45146+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45147+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45148+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45149+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45150+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45151+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45152+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45153+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45156+4 4 4 4 4 4
45157+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45158+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45159+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45160+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45161+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45162+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45163+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45164+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45165+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45166+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45167+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45170+4 4 4 4 4 4
45171+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45172+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45173+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45174+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45175+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45176+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45177+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45178+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45179+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45180+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45181+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45184+4 4 4 4 4 4
45185+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45186+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45187+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45188+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45189+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45190+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45191+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45192+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45193+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45194+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45195+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45198+4 4 4 4 4 4
45199+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45200+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45201+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45202+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45203+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45204+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45205+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45206+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45207+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45208+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45209+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45212+4 4 4 4 4 4
45213+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45214+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45215+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45216+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45217+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45218+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45219+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45220+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45221+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45222+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45223+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45226+4 4 4 4 4 4
45227+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45228+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45229+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45230+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45231+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45232+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45233+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45234+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45235+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45236+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45237+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45240+4 4 4 4 4 4
45241+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45242+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45243+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45244+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45245+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45246+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45247+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45248+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45249+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45250+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45251+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45254+4 4 4 4 4 4
45255+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45256+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45257+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45258+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45259+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45260+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45261+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45262+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45263+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45264+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45265+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45268+4 4 4 4 4 4
45269+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45270+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45271+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45272+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45273+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45274+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45275+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45276+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45277+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45278+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45279+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45282+4 4 4 4 4 4
45283+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45284+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45285+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45286+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45287+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45288+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45289+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45290+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45291+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45292+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45293+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45296+4 4 4 4 4 4
45297+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45298+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45299+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45300+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45301+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45302+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45303+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45304+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45305+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45306+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45307+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45310+4 4 4 4 4 4
45311+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45312+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45313+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45314+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45315+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45316+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45317+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45318+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45319+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45320+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45321+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45324+4 4 4 4 4 4
45325+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45326+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45327+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45328+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45329+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45330+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45331+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45332+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45333+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45334+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45335+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45338+4 4 4 4 4 4
45339+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45340+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45341+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45342+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45343+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45344+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45345+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45346+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45347+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45348+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45349+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45352+4 4 4 4 4 4
45353+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45354+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45355+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45356+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45357+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45358+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45359+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45360+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45361+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45362+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45363+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45366+4 4 4 4 4 4
45367+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45368+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45369+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45370+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45371+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45372+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45373+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45374+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45375+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45376+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45377+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45380+4 4 4 4 4 4
45381+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45382+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45383+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45384+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45385+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45386+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45387+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45388+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45389+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45390+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45391+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45394+4 4 4 4 4 4
45395+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45396+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45397+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45398+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45399+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45400+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45401+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45402+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45403+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45404+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45405+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45408+4 4 4 4 4 4
45409+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45410+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45411+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45412+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45413+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45414+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45415+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45416+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45417+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45418+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45422+4 4 4 4 4 4
45423+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45424+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45425+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45426+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45427+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45428+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45429+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45430+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45431+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45432+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45436+4 4 4 4 4 4
45437+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45438+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45439+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45440+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45441+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45442+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45443+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45444+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45445+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45446+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45450+4 4 4 4 4 4
45451+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45452+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45453+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45454+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45455+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45456+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45457+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45458+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45459+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45460+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45464+4 4 4 4 4 4
45465+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45466+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45467+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45468+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45469+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45470+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45471+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45472+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45473+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45478+4 4 4 4 4 4
45479+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45480+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45481+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45482+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45483+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45484+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45485+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45486+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45487+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45492+4 4 4 4 4 4
45493+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45494+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45495+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45496+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45497+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45498+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45499+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45500+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45501+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45506+4 4 4 4 4 4
45507+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45508+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45509+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45510+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45511+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45512+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45513+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45514+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45520+4 4 4 4 4 4
45521+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45522+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45523+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45524+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45525+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45526+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45527+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45528+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45534+4 4 4 4 4 4
45535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45536+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45537+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45538+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45539+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45540+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45541+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45542+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45548+4 4 4 4 4 4
45549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45551+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45552+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45553+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45554+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45555+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45556+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45562+4 4 4 4 4 4
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45565+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45566+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45567+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45568+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45569+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45570+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45576+4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45580+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45581+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45582+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45583+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45584+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45594+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45595+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45596+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45597+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45609+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45610+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45611+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4
45619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45622+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45623+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45624+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45625+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4
45633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45636+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45637+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45638+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45639+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4
45647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45651+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45652+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45653+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4
45661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45666+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45667+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4
45675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45679+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45680+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45681+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4
45689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45693+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45694+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45702+4 4 4 4 4 4
45703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45707+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45708+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45716+4 4 4 4 4 4
45717diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45718index 443e3c8..c443d6a 100644
45719--- a/drivers/video/nvidia/nv_backlight.c
45720+++ b/drivers/video/nvidia/nv_backlight.c
45721@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45722 return bd->props.brightness;
45723 }
45724
45725-static struct backlight_ops nvidia_bl_ops = {
45726+static const struct backlight_ops nvidia_bl_ops = {
45727 .get_brightness = nvidia_bl_get_brightness,
45728 .update_status = nvidia_bl_update_status,
45729 };
45730diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45731index d94c57f..912984c 100644
45732--- a/drivers/video/riva/fbdev.c
45733+++ b/drivers/video/riva/fbdev.c
45734@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45735 return bd->props.brightness;
45736 }
45737
45738-static struct backlight_ops riva_bl_ops = {
45739+static const struct backlight_ops riva_bl_ops = {
45740 .get_brightness = riva_bl_get_brightness,
45741 .update_status = riva_bl_update_status,
45742 };
45743diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45744index 54fbb29..2c108fc 100644
45745--- a/drivers/video/uvesafb.c
45746+++ b/drivers/video/uvesafb.c
45747@@ -18,6 +18,7 @@
45748 #include <linux/fb.h>
45749 #include <linux/io.h>
45750 #include <linux/mutex.h>
45751+#include <linux/moduleloader.h>
45752 #include <video/edid.h>
45753 #include <video/uvesafb.h>
45754 #ifdef CONFIG_X86
45755@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45756 NULL,
45757 };
45758
45759- return call_usermodehelper(v86d_path, argv, envp, 1);
45760+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45761 }
45762
45763 /*
45764@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45765 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45766 par->pmi_setpal = par->ypan = 0;
45767 } else {
45768+
45769+#ifdef CONFIG_PAX_KERNEXEC
45770+#ifdef CONFIG_MODULES
45771+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45772+#endif
45773+ if (!par->pmi_code) {
45774+ par->pmi_setpal = par->ypan = 0;
45775+ return 0;
45776+ }
45777+#endif
45778+
45779 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45780 + task->t.regs.edi);
45781+
45782+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45783+ pax_open_kernel();
45784+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45785+ pax_close_kernel();
45786+
45787+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45788+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45789+#else
45790 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45791 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45792+#endif
45793+
45794 printk(KERN_INFO "uvesafb: protected mode interface info at "
45795 "%04x:%04x\n",
45796 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45797@@ -1799,6 +1822,11 @@ out:
45798 if (par->vbe_modes)
45799 kfree(par->vbe_modes);
45800
45801+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45802+ if (par->pmi_code)
45803+ module_free_exec(NULL, par->pmi_code);
45804+#endif
45805+
45806 framebuffer_release(info);
45807 return err;
45808 }
45809@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45810 kfree(par->vbe_state_orig);
45811 if (par->vbe_state_saved)
45812 kfree(par->vbe_state_saved);
45813+
45814+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45815+ if (par->pmi_code)
45816+ module_free_exec(NULL, par->pmi_code);
45817+#endif
45818+
45819 }
45820
45821 framebuffer_release(info);
45822diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45823index bd37ee1..cb827e8 100644
45824--- a/drivers/video/vesafb.c
45825+++ b/drivers/video/vesafb.c
45826@@ -9,6 +9,7 @@
45827 */
45828
45829 #include <linux/module.h>
45830+#include <linux/moduleloader.h>
45831 #include <linux/kernel.h>
45832 #include <linux/errno.h>
45833 #include <linux/string.h>
45834@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45835 static int vram_total __initdata; /* Set total amount of memory */
45836 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45837 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45838-static void (*pmi_start)(void) __read_mostly;
45839-static void (*pmi_pal) (void) __read_mostly;
45840+static void (*pmi_start)(void) __read_only;
45841+static void (*pmi_pal) (void) __read_only;
45842 static int depth __read_mostly;
45843 static int vga_compat __read_mostly;
45844 /* --------------------------------------------------------------------- */
45845@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45846 unsigned int size_vmode;
45847 unsigned int size_remap;
45848 unsigned int size_total;
45849+ void *pmi_code = NULL;
45850
45851 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45852 return -ENODEV;
45853@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45854 size_remap = size_total;
45855 vesafb_fix.smem_len = size_remap;
45856
45857-#ifndef __i386__
45858- screen_info.vesapm_seg = 0;
45859-#endif
45860-
45861 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45862 printk(KERN_WARNING
45863 "vesafb: cannot reserve video memory at 0x%lx\n",
45864@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45865 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45866 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45867
45868+#ifdef __i386__
45869+
45870+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45871+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
45872+ if (!pmi_code)
45873+#elif !defined(CONFIG_PAX_KERNEXEC)
45874+ if (0)
45875+#endif
45876+
45877+#endif
45878+ screen_info.vesapm_seg = 0;
45879+
45880 if (screen_info.vesapm_seg) {
45881- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
45882- screen_info.vesapm_seg,screen_info.vesapm_off);
45883+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
45884+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
45885 }
45886
45887 if (screen_info.vesapm_seg < 0xc000)
45888@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
45889
45890 if (ypan || pmi_setpal) {
45891 unsigned short *pmi_base;
45892+
45893 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
45894- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
45895- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
45896+
45897+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45898+ pax_open_kernel();
45899+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
45900+#else
45901+ pmi_code = pmi_base;
45902+#endif
45903+
45904+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
45905+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
45906+
45907+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45908+ pmi_start = ktva_ktla(pmi_start);
45909+ pmi_pal = ktva_ktla(pmi_pal);
45910+ pax_close_kernel();
45911+#endif
45912+
45913 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
45914 if (pmi_base[3]) {
45915 printk(KERN_INFO "vesafb: pmi: ports = ");
45916@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
45917 info->node, info->fix.id);
45918 return 0;
45919 err:
45920+
45921+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45922+ module_free_exec(NULL, pmi_code);
45923+#endif
45924+
45925 if (info->screen_base)
45926 iounmap(info->screen_base);
45927 framebuffer_release(info);
45928diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
45929index 88a60e0..6783cc2 100644
45930--- a/drivers/xen/sys-hypervisor.c
45931+++ b/drivers/xen/sys-hypervisor.c
45932@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
45933 return 0;
45934 }
45935
45936-static struct sysfs_ops hyp_sysfs_ops = {
45937+static const struct sysfs_ops hyp_sysfs_ops = {
45938 .show = hyp_sysfs_show,
45939 .store = hyp_sysfs_store,
45940 };
45941diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
45942index 18f74ec..3227009 100644
45943--- a/fs/9p/vfs_inode.c
45944+++ b/fs/9p/vfs_inode.c
45945@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
45946 static void
45947 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45948 {
45949- char *s = nd_get_link(nd);
45950+ const char *s = nd_get_link(nd);
45951
45952 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
45953 IS_ERR(s) ? "<error>" : s);
45954diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
45955index bb4cc5b..df5eaa0 100644
45956--- a/fs/Kconfig.binfmt
45957+++ b/fs/Kconfig.binfmt
45958@@ -86,7 +86,7 @@ config HAVE_AOUT
45959
45960 config BINFMT_AOUT
45961 tristate "Kernel support for a.out and ECOFF binaries"
45962- depends on HAVE_AOUT
45963+ depends on HAVE_AOUT && BROKEN
45964 ---help---
45965 A.out (Assembler.OUTput) is a set of formats for libraries and
45966 executables used in the earliest versions of UNIX. Linux used
45967diff --git a/fs/aio.c b/fs/aio.c
45968index 22a19ad..d484e5b 100644
45969--- a/fs/aio.c
45970+++ b/fs/aio.c
45971@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
45972 size += sizeof(struct io_event) * nr_events;
45973 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
45974
45975- if (nr_pages < 0)
45976+ if (nr_pages <= 0)
45977 return -EINVAL;
45978
45979 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
45980@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
45981 struct aio_timeout to;
45982 int retry = 0;
45983
45984+ pax_track_stack();
45985+
45986 /* needed to zero any padding within an entry (there shouldn't be
45987 * any, but C is fun!
45988 */
45989@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
45990 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
45991 {
45992 ssize_t ret;
45993+ struct iovec iovstack;
45994
45995 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
45996 kiocb->ki_nbytes, 1,
45997- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
45998+ &iovstack, &kiocb->ki_iovec);
45999 if (ret < 0)
46000 goto out;
46001
46002+ if (kiocb->ki_iovec == &iovstack) {
46003+ kiocb->ki_inline_vec = iovstack;
46004+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46005+ }
46006 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46007 kiocb->ki_cur_seg = 0;
46008 /* ki_nbytes/left now reflect bytes instead of segs */
46009diff --git a/fs/attr.c b/fs/attr.c
46010index 96d394b..33cf5b4 100644
46011--- a/fs/attr.c
46012+++ b/fs/attr.c
46013@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46014 unsigned long limit;
46015
46016 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46017+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46018 if (limit != RLIM_INFINITY && offset > limit)
46019 goto out_sig;
46020 if (offset > inode->i_sb->s_maxbytes)
46021diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46022index 4a1401c..05eb5ca 100644
46023--- a/fs/autofs/root.c
46024+++ b/fs/autofs/root.c
46025@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46026 set_bit(n,sbi->symlink_bitmap);
46027 sl = &sbi->symlink[n];
46028 sl->len = strlen(symname);
46029- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46030+ slsize = sl->len+1;
46031+ sl->data = kmalloc(slsize, GFP_KERNEL);
46032 if (!sl->data) {
46033 clear_bit(n,sbi->symlink_bitmap);
46034 unlock_kernel();
46035diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46036index b4ea829..e63ef18 100644
46037--- a/fs/autofs4/symlink.c
46038+++ b/fs/autofs4/symlink.c
46039@@ -15,7 +15,7 @@
46040 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46041 {
46042 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46043- nd_set_link(nd, (char *)ino->u.symlink);
46044+ nd_set_link(nd, ino->u.symlink);
46045 return NULL;
46046 }
46047
46048diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46049index 2341375..df9d1c2 100644
46050--- a/fs/autofs4/waitq.c
46051+++ b/fs/autofs4/waitq.c
46052@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46053 {
46054 unsigned long sigpipe, flags;
46055 mm_segment_t fs;
46056- const char *data = (const char *)addr;
46057+ const char __user *data = (const char __force_user *)addr;
46058 ssize_t wr = 0;
46059
46060 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46061diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46062index 9158c07..3f06659 100644
46063--- a/fs/befs/linuxvfs.c
46064+++ b/fs/befs/linuxvfs.c
46065@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46066 {
46067 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46068 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46069- char *link = nd_get_link(nd);
46070+ const char *link = nd_get_link(nd);
46071 if (!IS_ERR(link))
46072 kfree(link);
46073 }
46074diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46075index 0133b5a..b3baa9f 100644
46076--- a/fs/binfmt_aout.c
46077+++ b/fs/binfmt_aout.c
46078@@ -16,6 +16,7 @@
46079 #include <linux/string.h>
46080 #include <linux/fs.h>
46081 #include <linux/file.h>
46082+#include <linux/security.h>
46083 #include <linux/stat.h>
46084 #include <linux/fcntl.h>
46085 #include <linux/ptrace.h>
46086@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46087 #endif
46088 # define START_STACK(u) (u.start_stack)
46089
46090+ memset(&dump, 0, sizeof(dump));
46091+
46092 fs = get_fs();
46093 set_fs(KERNEL_DS);
46094 has_dumped = 1;
46095@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46096
46097 /* If the size of the dump file exceeds the rlimit, then see what would happen
46098 if we wrote the stack, but not the data area. */
46099+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46100 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46101 dump.u_dsize = 0;
46102
46103 /* Make sure we have enough room to write the stack and data areas. */
46104+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46105 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46106 dump.u_ssize = 0;
46107
46108@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46109 dump_size = dump.u_ssize << PAGE_SHIFT;
46110 DUMP_WRITE(dump_start,dump_size);
46111 }
46112-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46113- set_fs(KERNEL_DS);
46114- DUMP_WRITE(current,sizeof(*current));
46115+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46116 end_coredump:
46117 set_fs(fs);
46118 return has_dumped;
46119@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46120 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46121 if (rlim >= RLIM_INFINITY)
46122 rlim = ~0;
46123+
46124+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46125 if (ex.a_data + ex.a_bss > rlim)
46126 return -ENOMEM;
46127
46128@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46129 install_exec_creds(bprm);
46130 current->flags &= ~PF_FORKNOEXEC;
46131
46132+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46133+ current->mm->pax_flags = 0UL;
46134+#endif
46135+
46136+#ifdef CONFIG_PAX_PAGEEXEC
46137+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46138+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46139+
46140+#ifdef CONFIG_PAX_EMUTRAMP
46141+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46142+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46143+#endif
46144+
46145+#ifdef CONFIG_PAX_MPROTECT
46146+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46147+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46148+#endif
46149+
46150+ }
46151+#endif
46152+
46153 if (N_MAGIC(ex) == OMAGIC) {
46154 unsigned long text_addr, map_size;
46155 loff_t pos;
46156@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46157
46158 down_write(&current->mm->mmap_sem);
46159 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46160- PROT_READ | PROT_WRITE | PROT_EXEC,
46161+ PROT_READ | PROT_WRITE,
46162 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46163 fd_offset + ex.a_text);
46164 up_write(&current->mm->mmap_sem);
46165diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46166index 1ed37ba..32cc555 100644
46167--- a/fs/binfmt_elf.c
46168+++ b/fs/binfmt_elf.c
46169@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46170 #define elf_core_dump NULL
46171 #endif
46172
46173+#ifdef CONFIG_PAX_MPROTECT
46174+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46175+#endif
46176+
46177 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46178 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46179 #else
46180@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format = {
46181 .load_binary = load_elf_binary,
46182 .load_shlib = load_elf_library,
46183 .core_dump = elf_core_dump,
46184+
46185+#ifdef CONFIG_PAX_MPROTECT
46186+ .handle_mprotect= elf_handle_mprotect,
46187+#endif
46188+
46189 .min_coredump = ELF_EXEC_PAGESIZE,
46190 .hasvdso = 1
46191 };
46192@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
46193
46194 static int set_brk(unsigned long start, unsigned long end)
46195 {
46196+ unsigned long e = end;
46197+
46198 start = ELF_PAGEALIGN(start);
46199 end = ELF_PAGEALIGN(end);
46200 if (end > start) {
46201@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
46202 if (BAD_ADDR(addr))
46203 return addr;
46204 }
46205- current->mm->start_brk = current->mm->brk = end;
46206+ current->mm->start_brk = current->mm->brk = e;
46207 return 0;
46208 }
46209
46210@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46211 elf_addr_t __user *u_rand_bytes;
46212 const char *k_platform = ELF_PLATFORM;
46213 const char *k_base_platform = ELF_BASE_PLATFORM;
46214- unsigned char k_rand_bytes[16];
46215+ u32 k_rand_bytes[4];
46216 int items;
46217 elf_addr_t *elf_info;
46218 int ei_index = 0;
46219 const struct cred *cred = current_cred();
46220 struct vm_area_struct *vma;
46221+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46222+
46223+ pax_track_stack();
46224
46225 /*
46226 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46227@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46228 * Generate 16 random bytes for userspace PRNG seeding.
46229 */
46230 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46231- u_rand_bytes = (elf_addr_t __user *)
46232- STACK_ALLOC(p, sizeof(k_rand_bytes));
46233+ srandom32(k_rand_bytes[0] ^ random32());
46234+ srandom32(k_rand_bytes[1] ^ random32());
46235+ srandom32(k_rand_bytes[2] ^ random32());
46236+ srandom32(k_rand_bytes[3] ^ random32());
46237+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46238+ u_rand_bytes = (elf_addr_t __user *) p;
46239 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46240 return -EFAULT;
46241
46242@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46243 return -EFAULT;
46244 current->mm->env_end = p;
46245
46246+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46247+
46248 /* Put the elf_info on the stack in the right place. */
46249 sp = (elf_addr_t __user *)envp + 1;
46250- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46251+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46252 return -EFAULT;
46253 return 0;
46254 }
46255@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46256 {
46257 struct elf_phdr *elf_phdata;
46258 struct elf_phdr *eppnt;
46259- unsigned long load_addr = 0;
46260+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46261 int load_addr_set = 0;
46262 unsigned long last_bss = 0, elf_bss = 0;
46263- unsigned long error = ~0UL;
46264+ unsigned long error = -EINVAL;
46265 unsigned long total_size;
46266 int retval, i, size;
46267
46268@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46269 goto out_close;
46270 }
46271
46272+#ifdef CONFIG_PAX_SEGMEXEC
46273+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46274+ pax_task_size = SEGMEXEC_TASK_SIZE;
46275+#endif
46276+
46277 eppnt = elf_phdata;
46278 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46279 if (eppnt->p_type == PT_LOAD) {
46280@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46281 k = load_addr + eppnt->p_vaddr;
46282 if (BAD_ADDR(k) ||
46283 eppnt->p_filesz > eppnt->p_memsz ||
46284- eppnt->p_memsz > TASK_SIZE ||
46285- TASK_SIZE - eppnt->p_memsz < k) {
46286+ eppnt->p_memsz > pax_task_size ||
46287+ pax_task_size - eppnt->p_memsz < k) {
46288 error = -ENOMEM;
46289 goto out_close;
46290 }
46291@@ -532,6 +557,194 @@ out:
46292 return error;
46293 }
46294
46295+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
46296+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
46297+{
46298+ unsigned long pax_flags = 0UL;
46299+
46300+#ifdef CONFIG_PAX_PAGEEXEC
46301+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46302+ pax_flags |= MF_PAX_PAGEEXEC;
46303+#endif
46304+
46305+#ifdef CONFIG_PAX_SEGMEXEC
46306+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46307+ pax_flags |= MF_PAX_SEGMEXEC;
46308+#endif
46309+
46310+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46311+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46312+ if (nx_enabled)
46313+ pax_flags &= ~MF_PAX_SEGMEXEC;
46314+ else
46315+ pax_flags &= ~MF_PAX_PAGEEXEC;
46316+ }
46317+#endif
46318+
46319+#ifdef CONFIG_PAX_EMUTRAMP
46320+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46321+ pax_flags |= MF_PAX_EMUTRAMP;
46322+#endif
46323+
46324+#ifdef CONFIG_PAX_MPROTECT
46325+ if (elf_phdata->p_flags & PF_MPROTECT)
46326+ pax_flags |= MF_PAX_MPROTECT;
46327+#endif
46328+
46329+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46330+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46331+ pax_flags |= MF_PAX_RANDMMAP;
46332+#endif
46333+
46334+ return pax_flags;
46335+}
46336+#endif
46337+
46338+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46339+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
46340+{
46341+ unsigned long pax_flags = 0UL;
46342+
46343+#ifdef CONFIG_PAX_PAGEEXEC
46344+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46345+ pax_flags |= MF_PAX_PAGEEXEC;
46346+#endif
46347+
46348+#ifdef CONFIG_PAX_SEGMEXEC
46349+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46350+ pax_flags |= MF_PAX_SEGMEXEC;
46351+#endif
46352+
46353+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46354+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46355+ if (nx_enabled)
46356+ pax_flags &= ~MF_PAX_SEGMEXEC;
46357+ else
46358+ pax_flags &= ~MF_PAX_PAGEEXEC;
46359+ }
46360+#endif
46361+
46362+#ifdef CONFIG_PAX_EMUTRAMP
46363+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46364+ pax_flags |= MF_PAX_EMUTRAMP;
46365+#endif
46366+
46367+#ifdef CONFIG_PAX_MPROTECT
46368+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46369+ pax_flags |= MF_PAX_MPROTECT;
46370+#endif
46371+
46372+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46373+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46374+ pax_flags |= MF_PAX_RANDMMAP;
46375+#endif
46376+
46377+ return pax_flags;
46378+}
46379+#endif
46380+
46381+#ifdef CONFIG_PAX_EI_PAX
46382+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46383+{
46384+ unsigned long pax_flags = 0UL;
46385+
46386+#ifdef CONFIG_PAX_PAGEEXEC
46387+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46388+ pax_flags |= MF_PAX_PAGEEXEC;
46389+#endif
46390+
46391+#ifdef CONFIG_PAX_SEGMEXEC
46392+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46393+ pax_flags |= MF_PAX_SEGMEXEC;
46394+#endif
46395+
46396+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46397+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46398+ if (nx_enabled)
46399+ pax_flags &= ~MF_PAX_SEGMEXEC;
46400+ else
46401+ pax_flags &= ~MF_PAX_PAGEEXEC;
46402+ }
46403+#endif
46404+
46405+#ifdef CONFIG_PAX_EMUTRAMP
46406+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46407+ pax_flags |= MF_PAX_EMUTRAMP;
46408+#endif
46409+
46410+#ifdef CONFIG_PAX_MPROTECT
46411+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46412+ pax_flags |= MF_PAX_MPROTECT;
46413+#endif
46414+
46415+#ifdef CONFIG_PAX_ASLR
46416+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46417+ pax_flags |= MF_PAX_RANDMMAP;
46418+#endif
46419+
46420+ return pax_flags;
46421+}
46422+#endif
46423+
46424+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46425+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46426+{
46427+ unsigned long pax_flags = 0UL;
46428+
46429+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46430+ unsigned long i;
46431+ int found_flags = 0;
46432+#endif
46433+
46434+#ifdef CONFIG_PAX_EI_PAX
46435+ pax_flags = pax_parse_ei_pax(elf_ex);
46436+#endif
46437+
46438+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46439+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46440+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46441+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46442+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46443+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46444+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46445+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46446+ return -EINVAL;
46447+
46448+#ifdef CONFIG_PAX_SOFTMODE
46449+ if (pax_softmode)
46450+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
46451+ else
46452+#endif
46453+
46454+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
46455+ found_flags = 1;
46456+ break;
46457+ }
46458+#endif
46459+
46460+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
46461+ if (found_flags == 0) {
46462+ struct elf_phdr phdr;
46463+ memset(&phdr, 0, sizeof(phdr));
46464+ phdr.p_flags = PF_NOEMUTRAMP;
46465+#ifdef CONFIG_PAX_SOFTMODE
46466+ if (pax_softmode)
46467+ pax_flags = pax_parse_softmode(&phdr);
46468+ else
46469+#endif
46470+ pax_flags = pax_parse_hardmode(&phdr);
46471+ }
46472+#endif
46473+
46474+
46475+ if (0 > pax_check_flags(&pax_flags))
46476+ return -EINVAL;
46477+
46478+ current->mm->pax_flags = pax_flags;
46479+ return 0;
46480+}
46481+#endif
46482+
46483 /*
46484 * These are the functions used to load ELF style executables and shared
46485 * libraries. There is no binary dependent code anywhere else.
46486@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46487 {
46488 unsigned int random_variable = 0;
46489
46490+#ifdef CONFIG_PAX_RANDUSTACK
46491+ if (randomize_va_space)
46492+ return stack_top - current->mm->delta_stack;
46493+#endif
46494+
46495 if ((current->flags & PF_RANDOMIZE) &&
46496 !(current->personality & ADDR_NO_RANDOMIZE)) {
46497 random_variable = get_random_int() & STACK_RND_MASK;
46498@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46499 unsigned long load_addr = 0, load_bias = 0;
46500 int load_addr_set = 0;
46501 char * elf_interpreter = NULL;
46502- unsigned long error;
46503+ unsigned long error = 0;
46504 struct elf_phdr *elf_ppnt, *elf_phdata;
46505 unsigned long elf_bss, elf_brk;
46506 int retval, i;
46507@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46508 unsigned long start_code, end_code, start_data, end_data;
46509 unsigned long reloc_func_desc = 0;
46510 int executable_stack = EXSTACK_DEFAULT;
46511- unsigned long def_flags = 0;
46512 struct {
46513 struct elfhdr elf_ex;
46514 struct elfhdr interp_elf_ex;
46515 } *loc;
46516+ unsigned long pax_task_size = TASK_SIZE;
46517
46518 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46519 if (!loc) {
46520@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46521
46522 /* OK, This is the point of no return */
46523 current->flags &= ~PF_FORKNOEXEC;
46524- current->mm->def_flags = def_flags;
46525+
46526+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46527+ current->mm->pax_flags = 0UL;
46528+#endif
46529+
46530+#ifdef CONFIG_PAX_DLRESOLVE
46531+ current->mm->call_dl_resolve = 0UL;
46532+#endif
46533+
46534+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46535+ current->mm->call_syscall = 0UL;
46536+#endif
46537+
46538+#ifdef CONFIG_PAX_ASLR
46539+ current->mm->delta_mmap = 0UL;
46540+ current->mm->delta_stack = 0UL;
46541+#endif
46542+
46543+ current->mm->def_flags = 0;
46544+
46545+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46546+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
46547+ send_sig(SIGKILL, current, 0);
46548+ goto out_free_dentry;
46549+ }
46550+#endif
46551+
46552+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46553+ pax_set_initial_flags(bprm);
46554+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46555+ if (pax_set_initial_flags_func)
46556+ (pax_set_initial_flags_func)(bprm);
46557+#endif
46558+
46559+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46560+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46561+ current->mm->context.user_cs_limit = PAGE_SIZE;
46562+ current->mm->def_flags |= VM_PAGEEXEC;
46563+ }
46564+#endif
46565+
46566+#ifdef CONFIG_PAX_SEGMEXEC
46567+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46568+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46569+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46570+ pax_task_size = SEGMEXEC_TASK_SIZE;
46571+ }
46572+#endif
46573+
46574+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46575+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46576+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46577+ put_cpu();
46578+ }
46579+#endif
46580
46581 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46582 may depend on the personality. */
46583 SET_PERSONALITY(loc->elf_ex);
46584+
46585+#ifdef CONFIG_PAX_ASLR
46586+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46587+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46588+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46589+ }
46590+#endif
46591+
46592+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46593+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46594+ executable_stack = EXSTACK_DISABLE_X;
46595+ current->personality &= ~READ_IMPLIES_EXEC;
46596+ } else
46597+#endif
46598+
46599 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46600 current->personality |= READ_IMPLIES_EXEC;
46601
46602@@ -800,10 +1087,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46603 * might try to exec. This is because the brk will
46604 * follow the loader, and is not movable. */
46605 #ifdef CONFIG_X86
46606- load_bias = 0;
46607+ if (current->flags & PF_RANDOMIZE)
46608+ load_bias = 0;
46609+ else
46610+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46611 #else
46612 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46613 #endif
46614+
46615+#ifdef CONFIG_PAX_RANDMMAP
46616+ /* PaX: randomize base address at the default exe base if requested */
46617+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46618+#ifdef CONFIG_SPARC64
46619+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46620+#else
46621+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46622+#endif
46623+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46624+ elf_flags |= MAP_FIXED;
46625+ }
46626+#endif
46627+
46628 }
46629
46630 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46631@@ -836,9 +1140,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46632 * allowed task size. Note that p_filesz must always be
46633 * <= p_memsz so it is only necessary to check p_memsz.
46634 */
46635- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46636- elf_ppnt->p_memsz > TASK_SIZE ||
46637- TASK_SIZE - elf_ppnt->p_memsz < k) {
46638+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46639+ elf_ppnt->p_memsz > pax_task_size ||
46640+ pax_task_size - elf_ppnt->p_memsz < k) {
46641 /* set_brk can never work. Avoid overflows. */
46642 send_sig(SIGKILL, current, 0);
46643 retval = -EINVAL;
46644@@ -866,6 +1170,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46645 start_data += load_bias;
46646 end_data += load_bias;
46647
46648+#ifdef CONFIG_PAX_RANDMMAP
46649+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46650+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46651+#endif
46652+
46653 /* Calling set_brk effectively mmaps the pages that we need
46654 * for the bss and break sections. We must do this before
46655 * mapping in the interpreter, to make sure it doesn't wind
46656@@ -877,9 +1186,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46657 goto out_free_dentry;
46658 }
46659 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46660- send_sig(SIGSEGV, current, 0);
46661- retval = -EFAULT; /* Nobody gets to see this, but.. */
46662- goto out_free_dentry;
46663+ /*
46664+ * This bss-zeroing can fail if the ELF
46665+ * file specifies odd protections. So
46666+ * we don't check the return value
46667+ */
46668 }
46669
46670 if (elf_interpreter) {
46671@@ -1112,8 +1423,10 @@ static int dump_seek(struct file *file, loff_t off)
46672 unsigned long n = off;
46673 if (n > PAGE_SIZE)
46674 n = PAGE_SIZE;
46675- if (!dump_write(file, buf, n))
46676+ if (!dump_write(file, buf, n)) {
46677+ free_page((unsigned long)buf);
46678 return 0;
46679+ }
46680 off -= n;
46681 }
46682 free_page((unsigned long)buf);
46683@@ -1125,7 +1438,7 @@ static int dump_seek(struct file *file, loff_t off)
46684 * Decide what to dump of a segment, part, all or none.
46685 */
46686 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46687- unsigned long mm_flags)
46688+ unsigned long mm_flags, long signr)
46689 {
46690 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46691
46692@@ -1159,7 +1472,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46693 if (vma->vm_file == NULL)
46694 return 0;
46695
46696- if (FILTER(MAPPED_PRIVATE))
46697+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46698 goto whole;
46699
46700 /*
46701@@ -1255,8 +1568,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46702 #undef DUMP_WRITE
46703
46704 #define DUMP_WRITE(addr, nr) \
46705+ do { \
46706+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46707 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46708- goto end_coredump;
46709+ goto end_coredump; \
46710+ } while (0);
46711
46712 static void fill_elf_header(struct elfhdr *elf, int segs,
46713 u16 machine, u32 flags, u8 osabi)
46714@@ -1385,9 +1701,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46715 {
46716 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
46717 int i = 0;
46718- do
46719+ do {
46720 i += 2;
46721- while (auxv[i - 2] != AT_NULL);
46722+ } while (auxv[i - 2] != AT_NULL);
46723 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
46724 }
46725
46726@@ -1973,7 +2289,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46727 phdr.p_offset = offset;
46728 phdr.p_vaddr = vma->vm_start;
46729 phdr.p_paddr = 0;
46730- phdr.p_filesz = vma_dump_size(vma, mm_flags);
46731+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
46732 phdr.p_memsz = vma->vm_end - vma->vm_start;
46733 offset += phdr.p_filesz;
46734 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
46735@@ -2006,7 +2322,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46736 unsigned long addr;
46737 unsigned long end;
46738
46739- end = vma->vm_start + vma_dump_size(vma, mm_flags);
46740+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
46741
46742 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
46743 struct page *page;
46744@@ -2015,6 +2331,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46745 page = get_dump_page(addr);
46746 if (page) {
46747 void *kaddr = kmap(page);
46748+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
46749 stop = ((size += PAGE_SIZE) > limit) ||
46750 !dump_write(file, kaddr, PAGE_SIZE);
46751 kunmap(page);
46752@@ -2042,6 +2359,97 @@ out:
46753
46754 #endif /* USE_ELF_CORE_DUMP */
46755
46756+#ifdef CONFIG_PAX_MPROTECT
46757+/* PaX: non-PIC ELF libraries need relocations on their executable segments
46758+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
46759+ * we'll remove VM_MAYWRITE for good on RELRO segments.
46760+ *
46761+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
46762+ * basis because we want to allow the common case and not the special ones.
46763+ */
46764+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
46765+{
46766+ struct elfhdr elf_h;
46767+ struct elf_phdr elf_p;
46768+ unsigned long i;
46769+ unsigned long oldflags;
46770+ bool is_textrel_rw, is_textrel_rx, is_relro;
46771+
46772+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
46773+ return;
46774+
46775+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
46776+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
46777+
46778+#ifdef CONFIG_PAX_ELFRELOCS
46779+ /* possible TEXTREL */
46780+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
46781+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
46782+#else
46783+ is_textrel_rw = false;
46784+ is_textrel_rx = false;
46785+#endif
46786+
46787+ /* possible RELRO */
46788+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
46789+
46790+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
46791+ return;
46792+
46793+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
46794+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
46795+
46796+#ifdef CONFIG_PAX_ETEXECRELOCS
46797+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46798+#else
46799+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
46800+#endif
46801+
46802+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46803+ !elf_check_arch(&elf_h) ||
46804+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
46805+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
46806+ return;
46807+
46808+ for (i = 0UL; i < elf_h.e_phnum; i++) {
46809+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
46810+ return;
46811+ switch (elf_p.p_type) {
46812+ case PT_DYNAMIC:
46813+ if (!is_textrel_rw && !is_textrel_rx)
46814+ continue;
46815+ i = 0UL;
46816+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
46817+ elf_dyn dyn;
46818+
46819+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
46820+ return;
46821+ if (dyn.d_tag == DT_NULL)
46822+ return;
46823+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
46824+ gr_log_textrel(vma);
46825+ if (is_textrel_rw)
46826+ vma->vm_flags |= VM_MAYWRITE;
46827+ else
46828+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
46829+ vma->vm_flags &= ~VM_MAYWRITE;
46830+ return;
46831+ }
46832+ i++;
46833+ }
46834+ return;
46835+
46836+ case PT_GNU_RELRO:
46837+ if (!is_relro)
46838+ continue;
46839+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
46840+ vma->vm_flags &= ~VM_MAYWRITE;
46841+ return;
46842+ }
46843+ }
46844+}
46845+#endif
46846+
46847 static int __init init_elf_binfmt(void)
46848 {
46849 return register_binfmt(&elf_format);
46850diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
46851index ca88c46..f155a60 100644
46852--- a/fs/binfmt_flat.c
46853+++ b/fs/binfmt_flat.c
46854@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
46855 realdatastart = (unsigned long) -ENOMEM;
46856 printk("Unable to allocate RAM for process data, errno %d\n",
46857 (int)-realdatastart);
46858+ down_write(&current->mm->mmap_sem);
46859 do_munmap(current->mm, textpos, text_len);
46860+ up_write(&current->mm->mmap_sem);
46861 ret = realdatastart;
46862 goto err;
46863 }
46864@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46865 }
46866 if (IS_ERR_VALUE(result)) {
46867 printk("Unable to read data+bss, errno %d\n", (int)-result);
46868+ down_write(&current->mm->mmap_sem);
46869 do_munmap(current->mm, textpos, text_len);
46870 do_munmap(current->mm, realdatastart, data_len + extra);
46871+ up_write(&current->mm->mmap_sem);
46872 ret = result;
46873 goto err;
46874 }
46875@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46876 }
46877 if (IS_ERR_VALUE(result)) {
46878 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
46879+ down_write(&current->mm->mmap_sem);
46880 do_munmap(current->mm, textpos, text_len + data_len + extra +
46881 MAX_SHARED_LIBS * sizeof(unsigned long));
46882+ up_write(&current->mm->mmap_sem);
46883 ret = result;
46884 goto err;
46885 }
46886diff --git a/fs/bio.c b/fs/bio.c
46887index e696713..83de133 100644
46888--- a/fs/bio.c
46889+++ b/fs/bio.c
46890@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
46891
46892 i = 0;
46893 while (i < bio_slab_nr) {
46894- struct bio_slab *bslab = &bio_slabs[i];
46895+ bslab = &bio_slabs[i];
46896
46897 if (!bslab->slab && entry == -1)
46898 entry = i;
46899@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
46900 const int read = bio_data_dir(bio) == READ;
46901 struct bio_map_data *bmd = bio->bi_private;
46902 int i;
46903- char *p = bmd->sgvecs[0].iov_base;
46904+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
46905
46906 __bio_for_each_segment(bvec, bio, i, 0) {
46907 char *addr = page_address(bvec->bv_page);
46908diff --git a/fs/block_dev.c b/fs/block_dev.c
46909index e65efa2..04fae57 100644
46910--- a/fs/block_dev.c
46911+++ b/fs/block_dev.c
46912@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
46913 else if (bdev->bd_contains == bdev)
46914 res = 0; /* is a whole device which isn't held */
46915
46916- else if (bdev->bd_contains->bd_holder == bd_claim)
46917+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
46918 res = 0; /* is a partition of a device that is being partitioned */
46919 else if (bdev->bd_contains->bd_holder != NULL)
46920 res = -EBUSY; /* is a partition of a held device */
46921diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
46922index c4bc570..42acd8d 100644
46923--- a/fs/btrfs/ctree.c
46924+++ b/fs/btrfs/ctree.c
46925@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
46926 free_extent_buffer(buf);
46927 add_root_to_dirty_list(root);
46928 } else {
46929- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
46930- parent_start = parent->start;
46931- else
46932+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
46933+ if (parent)
46934+ parent_start = parent->start;
46935+ else
46936+ parent_start = 0;
46937+ } else
46938 parent_start = 0;
46939
46940 WARN_ON(trans->transid != btrfs_header_generation(parent));
46941@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
46942
46943 ret = 0;
46944 if (slot == 0) {
46945- struct btrfs_disk_key disk_key;
46946 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
46947 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
46948 }
46949diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
46950index f447188..59c17c5 100644
46951--- a/fs/btrfs/disk-io.c
46952+++ b/fs/btrfs/disk-io.c
46953@@ -39,7 +39,7 @@
46954 #include "tree-log.h"
46955 #include "free-space-cache.h"
46956
46957-static struct extent_io_ops btree_extent_io_ops;
46958+static const struct extent_io_ops btree_extent_io_ops;
46959 static void end_workqueue_fn(struct btrfs_work *work);
46960 static void free_fs_root(struct btrfs_root *root);
46961
46962@@ -2607,7 +2607,7 @@ out:
46963 return 0;
46964 }
46965
46966-static struct extent_io_ops btree_extent_io_ops = {
46967+static const struct extent_io_ops btree_extent_io_ops = {
46968 .write_cache_pages_lock_hook = btree_lock_page_hook,
46969 .readpage_end_io_hook = btree_readpage_end_io_hook,
46970 .submit_bio_hook = btree_submit_bio_hook,
46971diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
46972index 559f724..a026171 100644
46973--- a/fs/btrfs/extent-tree.c
46974+++ b/fs/btrfs/extent-tree.c
46975@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
46976 u64 group_start = group->key.objectid;
46977 new_extents = kmalloc(sizeof(*new_extents),
46978 GFP_NOFS);
46979+ if (!new_extents) {
46980+ ret = -ENOMEM;
46981+ goto out;
46982+ }
46983 nr_extents = 1;
46984 ret = get_new_locations(reloc_inode,
46985 extent_key,
46986diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
46987index 36de250..7ec75c7 100644
46988--- a/fs/btrfs/extent_io.h
46989+++ b/fs/btrfs/extent_io.h
46990@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
46991 struct bio *bio, int mirror_num,
46992 unsigned long bio_flags);
46993 struct extent_io_ops {
46994- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
46995+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
46996 u64 start, u64 end, int *page_started,
46997 unsigned long *nr_written);
46998- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
46999- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47000+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47001+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47002 extent_submit_bio_hook_t *submit_bio_hook;
47003- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47004+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47005 size_t size, struct bio *bio,
47006 unsigned long bio_flags);
47007- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47008- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47009+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47010+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47011 u64 start, u64 end,
47012 struct extent_state *state);
47013- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47014+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47015 u64 start, u64 end,
47016 struct extent_state *state);
47017- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47018+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47019 struct extent_state *state);
47020- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47021+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47022 struct extent_state *state, int uptodate);
47023- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47024+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47025 unsigned long old, unsigned long bits);
47026- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47027+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47028 unsigned long bits);
47029- int (*merge_extent_hook)(struct inode *inode,
47030+ int (* const merge_extent_hook)(struct inode *inode,
47031 struct extent_state *new,
47032 struct extent_state *other);
47033- int (*split_extent_hook)(struct inode *inode,
47034+ int (* const split_extent_hook)(struct inode *inode,
47035 struct extent_state *orig, u64 split);
47036- int (*write_cache_pages_lock_hook)(struct page *page);
47037+ int (* const write_cache_pages_lock_hook)(struct page *page);
47038 };
47039
47040 struct extent_io_tree {
47041@@ -88,7 +88,7 @@ struct extent_io_tree {
47042 u64 dirty_bytes;
47043 spinlock_t lock;
47044 spinlock_t buffer_lock;
47045- struct extent_io_ops *ops;
47046+ const struct extent_io_ops *ops;
47047 };
47048
47049 struct extent_state {
47050diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47051index cb2849f..3718fb4 100644
47052--- a/fs/btrfs/free-space-cache.c
47053+++ b/fs/btrfs/free-space-cache.c
47054@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47055
47056 while(1) {
47057 if (entry->bytes < bytes || entry->offset < min_start) {
47058- struct rb_node *node;
47059-
47060 node = rb_next(&entry->offset_index);
47061 if (!node)
47062 break;
47063@@ -1226,7 +1224,7 @@ again:
47064 */
47065 while (entry->bitmap || found_bitmap ||
47066 (!entry->bitmap && entry->bytes < min_bytes)) {
47067- struct rb_node *node = rb_next(&entry->offset_index);
47068+ node = rb_next(&entry->offset_index);
47069
47070 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47071 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47072diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47073index e03a836..323837e 100644
47074--- a/fs/btrfs/inode.c
47075+++ b/fs/btrfs/inode.c
47076@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47077 static const struct address_space_operations btrfs_aops;
47078 static const struct address_space_operations btrfs_symlink_aops;
47079 static const struct file_operations btrfs_dir_file_operations;
47080-static struct extent_io_ops btrfs_extent_io_ops;
47081+static const struct extent_io_ops btrfs_extent_io_ops;
47082
47083 static struct kmem_cache *btrfs_inode_cachep;
47084 struct kmem_cache *btrfs_trans_handle_cachep;
47085@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47086 1, 0, NULL, GFP_NOFS);
47087 while (start < end) {
47088 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47089+ BUG_ON(!async_cow);
47090 async_cow->inode = inode;
47091 async_cow->root = root;
47092 async_cow->locked_page = locked_page;
47093@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47094 inline_size = btrfs_file_extent_inline_item_len(leaf,
47095 btrfs_item_nr(leaf, path->slots[0]));
47096 tmp = kmalloc(inline_size, GFP_NOFS);
47097+ if (!tmp)
47098+ return -ENOMEM;
47099 ptr = btrfs_file_extent_inline_start(item);
47100
47101 read_extent_buffer(leaf, tmp, ptr, inline_size);
47102@@ -5410,7 +5413,7 @@ fail:
47103 return -ENOMEM;
47104 }
47105
47106-static int btrfs_getattr(struct vfsmount *mnt,
47107+int btrfs_getattr(struct vfsmount *mnt,
47108 struct dentry *dentry, struct kstat *stat)
47109 {
47110 struct inode *inode = dentry->d_inode;
47111@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47112 return 0;
47113 }
47114
47115+EXPORT_SYMBOL(btrfs_getattr);
47116+
47117+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47118+{
47119+ return BTRFS_I(inode)->root->anon_super.s_dev;
47120+}
47121+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47122+
47123 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47124 struct inode *new_dir, struct dentry *new_dentry)
47125 {
47126@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47127 .fsync = btrfs_sync_file,
47128 };
47129
47130-static struct extent_io_ops btrfs_extent_io_ops = {
47131+static const struct extent_io_ops btrfs_extent_io_ops = {
47132 .fill_delalloc = run_delalloc_range,
47133 .submit_bio_hook = btrfs_submit_bio_hook,
47134 .merge_bio_hook = btrfs_merge_bio_hook,
47135diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47136index ab7ab53..94e0781 100644
47137--- a/fs/btrfs/relocation.c
47138+++ b/fs/btrfs/relocation.c
47139@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47140 }
47141 spin_unlock(&rc->reloc_root_tree.lock);
47142
47143- BUG_ON((struct btrfs_root *)node->data != root);
47144+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47145
47146 if (!del) {
47147 spin_lock(&rc->reloc_root_tree.lock);
47148diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47149index a240b6f..4ce16ef 100644
47150--- a/fs/btrfs/sysfs.c
47151+++ b/fs/btrfs/sysfs.c
47152@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47153 complete(&root->kobj_unregister);
47154 }
47155
47156-static struct sysfs_ops btrfs_super_attr_ops = {
47157+static const struct sysfs_ops btrfs_super_attr_ops = {
47158 .show = btrfs_super_attr_show,
47159 .store = btrfs_super_attr_store,
47160 };
47161
47162-static struct sysfs_ops btrfs_root_attr_ops = {
47163+static const struct sysfs_ops btrfs_root_attr_ops = {
47164 .show = btrfs_root_attr_show,
47165 .store = btrfs_root_attr_store,
47166 };
47167diff --git a/fs/buffer.c b/fs/buffer.c
47168index 6fa5302..395d9f6 100644
47169--- a/fs/buffer.c
47170+++ b/fs/buffer.c
47171@@ -25,6 +25,7 @@
47172 #include <linux/percpu.h>
47173 #include <linux/slab.h>
47174 #include <linux/capability.h>
47175+#include <linux/security.h>
47176 #include <linux/blkdev.h>
47177 #include <linux/file.h>
47178 #include <linux/quotaops.h>
47179diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47180index 3797e00..ce776f6 100644
47181--- a/fs/cachefiles/bind.c
47182+++ b/fs/cachefiles/bind.c
47183@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47184 args);
47185
47186 /* start by checking things over */
47187- ASSERT(cache->fstop_percent >= 0 &&
47188- cache->fstop_percent < cache->fcull_percent &&
47189+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47190 cache->fcull_percent < cache->frun_percent &&
47191 cache->frun_percent < 100);
47192
47193- ASSERT(cache->bstop_percent >= 0 &&
47194- cache->bstop_percent < cache->bcull_percent &&
47195+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47196 cache->bcull_percent < cache->brun_percent &&
47197 cache->brun_percent < 100);
47198
47199diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47200index 4618516..bb30d01 100644
47201--- a/fs/cachefiles/daemon.c
47202+++ b/fs/cachefiles/daemon.c
47203@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47204 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47205 return -EIO;
47206
47207- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47208+ if (datalen > PAGE_SIZE - 1)
47209 return -EOPNOTSUPP;
47210
47211 /* drag the command string into the kernel so we can parse it */
47212@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47213 if (args[0] != '%' || args[1] != '\0')
47214 return -EINVAL;
47215
47216- if (fstop < 0 || fstop >= cache->fcull_percent)
47217+ if (fstop >= cache->fcull_percent)
47218 return cachefiles_daemon_range_error(cache, args);
47219
47220 cache->fstop_percent = fstop;
47221@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47222 if (args[0] != '%' || args[1] != '\0')
47223 return -EINVAL;
47224
47225- if (bstop < 0 || bstop >= cache->bcull_percent)
47226+ if (bstop >= cache->bcull_percent)
47227 return cachefiles_daemon_range_error(cache, args);
47228
47229 cache->bstop_percent = bstop;
47230diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47231index f7c255f..fcd61de 100644
47232--- a/fs/cachefiles/internal.h
47233+++ b/fs/cachefiles/internal.h
47234@@ -56,7 +56,7 @@ struct cachefiles_cache {
47235 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47236 struct rb_root active_nodes; /* active nodes (can't be culled) */
47237 rwlock_t active_lock; /* lock for active_nodes */
47238- atomic_t gravecounter; /* graveyard uniquifier */
47239+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47240 unsigned frun_percent; /* when to stop culling (% files) */
47241 unsigned fcull_percent; /* when to start culling (% files) */
47242 unsigned fstop_percent; /* when to stop allocating (% files) */
47243@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47244 * proc.c
47245 */
47246 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47247-extern atomic_t cachefiles_lookup_histogram[HZ];
47248-extern atomic_t cachefiles_mkdir_histogram[HZ];
47249-extern atomic_t cachefiles_create_histogram[HZ];
47250+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47251+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47252+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47253
47254 extern int __init cachefiles_proc_init(void);
47255 extern void cachefiles_proc_cleanup(void);
47256 static inline
47257-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47258+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47259 {
47260 unsigned long jif = jiffies - start_jif;
47261 if (jif >= HZ)
47262 jif = HZ - 1;
47263- atomic_inc(&histogram[jif]);
47264+ atomic_inc_unchecked(&histogram[jif]);
47265 }
47266
47267 #else
47268diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47269index 14ac480..a62766c 100644
47270--- a/fs/cachefiles/namei.c
47271+++ b/fs/cachefiles/namei.c
47272@@ -250,7 +250,7 @@ try_again:
47273 /* first step is to make up a grave dentry in the graveyard */
47274 sprintf(nbuffer, "%08x%08x",
47275 (uint32_t) get_seconds(),
47276- (uint32_t) atomic_inc_return(&cache->gravecounter));
47277+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47278
47279 /* do the multiway lock magic */
47280 trap = lock_rename(cache->graveyard, dir);
47281diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47282index eccd339..4c1d995 100644
47283--- a/fs/cachefiles/proc.c
47284+++ b/fs/cachefiles/proc.c
47285@@ -14,9 +14,9 @@
47286 #include <linux/seq_file.h>
47287 #include "internal.h"
47288
47289-atomic_t cachefiles_lookup_histogram[HZ];
47290-atomic_t cachefiles_mkdir_histogram[HZ];
47291-atomic_t cachefiles_create_histogram[HZ];
47292+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47293+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47294+atomic_unchecked_t cachefiles_create_histogram[HZ];
47295
47296 /*
47297 * display the latency histogram
47298@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47299 return 0;
47300 default:
47301 index = (unsigned long) v - 3;
47302- x = atomic_read(&cachefiles_lookup_histogram[index]);
47303- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47304- z = atomic_read(&cachefiles_create_histogram[index]);
47305+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47306+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47307+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47308 if (x == 0 && y == 0 && z == 0)
47309 return 0;
47310
47311diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47312index a6c8c6f..5cf8517 100644
47313--- a/fs/cachefiles/rdwr.c
47314+++ b/fs/cachefiles/rdwr.c
47315@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47316 old_fs = get_fs();
47317 set_fs(KERNEL_DS);
47318 ret = file->f_op->write(
47319- file, (const void __user *) data, len, &pos);
47320+ file, (const void __force_user *) data, len, &pos);
47321 set_fs(old_fs);
47322 kunmap(page);
47323 if (ret != len)
47324diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47325index 42cec2a..2aba466 100644
47326--- a/fs/cifs/cifs_debug.c
47327+++ b/fs/cifs/cifs_debug.c
47328@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47329 tcon = list_entry(tmp3,
47330 struct cifsTconInfo,
47331 tcon_list);
47332- atomic_set(&tcon->num_smbs_sent, 0);
47333- atomic_set(&tcon->num_writes, 0);
47334- atomic_set(&tcon->num_reads, 0);
47335- atomic_set(&tcon->num_oplock_brks, 0);
47336- atomic_set(&tcon->num_opens, 0);
47337- atomic_set(&tcon->num_posixopens, 0);
47338- atomic_set(&tcon->num_posixmkdirs, 0);
47339- atomic_set(&tcon->num_closes, 0);
47340- atomic_set(&tcon->num_deletes, 0);
47341- atomic_set(&tcon->num_mkdirs, 0);
47342- atomic_set(&tcon->num_rmdirs, 0);
47343- atomic_set(&tcon->num_renames, 0);
47344- atomic_set(&tcon->num_t2renames, 0);
47345- atomic_set(&tcon->num_ffirst, 0);
47346- atomic_set(&tcon->num_fnext, 0);
47347- atomic_set(&tcon->num_fclose, 0);
47348- atomic_set(&tcon->num_hardlinks, 0);
47349- atomic_set(&tcon->num_symlinks, 0);
47350- atomic_set(&tcon->num_locks, 0);
47351+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47352+ atomic_set_unchecked(&tcon->num_writes, 0);
47353+ atomic_set_unchecked(&tcon->num_reads, 0);
47354+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47355+ atomic_set_unchecked(&tcon->num_opens, 0);
47356+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47357+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47358+ atomic_set_unchecked(&tcon->num_closes, 0);
47359+ atomic_set_unchecked(&tcon->num_deletes, 0);
47360+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47361+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47362+ atomic_set_unchecked(&tcon->num_renames, 0);
47363+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47364+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47365+ atomic_set_unchecked(&tcon->num_fnext, 0);
47366+ atomic_set_unchecked(&tcon->num_fclose, 0);
47367+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47368+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47369+ atomic_set_unchecked(&tcon->num_locks, 0);
47370 }
47371 }
47372 }
47373@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47374 if (tcon->need_reconnect)
47375 seq_puts(m, "\tDISCONNECTED ");
47376 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47377- atomic_read(&tcon->num_smbs_sent),
47378- atomic_read(&tcon->num_oplock_brks));
47379+ atomic_read_unchecked(&tcon->num_smbs_sent),
47380+ atomic_read_unchecked(&tcon->num_oplock_brks));
47381 seq_printf(m, "\nReads: %d Bytes: %lld",
47382- atomic_read(&tcon->num_reads),
47383+ atomic_read_unchecked(&tcon->num_reads),
47384 (long long)(tcon->bytes_read));
47385 seq_printf(m, "\nWrites: %d Bytes: %lld",
47386- atomic_read(&tcon->num_writes),
47387+ atomic_read_unchecked(&tcon->num_writes),
47388 (long long)(tcon->bytes_written));
47389 seq_printf(m, "\nFlushes: %d",
47390- atomic_read(&tcon->num_flushes));
47391+ atomic_read_unchecked(&tcon->num_flushes));
47392 seq_printf(m, "\nLocks: %d HardLinks: %d "
47393 "Symlinks: %d",
47394- atomic_read(&tcon->num_locks),
47395- atomic_read(&tcon->num_hardlinks),
47396- atomic_read(&tcon->num_symlinks));
47397+ atomic_read_unchecked(&tcon->num_locks),
47398+ atomic_read_unchecked(&tcon->num_hardlinks),
47399+ atomic_read_unchecked(&tcon->num_symlinks));
47400 seq_printf(m, "\nOpens: %d Closes: %d "
47401 "Deletes: %d",
47402- atomic_read(&tcon->num_opens),
47403- atomic_read(&tcon->num_closes),
47404- atomic_read(&tcon->num_deletes));
47405+ atomic_read_unchecked(&tcon->num_opens),
47406+ atomic_read_unchecked(&tcon->num_closes),
47407+ atomic_read_unchecked(&tcon->num_deletes));
47408 seq_printf(m, "\nPosix Opens: %d "
47409 "Posix Mkdirs: %d",
47410- atomic_read(&tcon->num_posixopens),
47411- atomic_read(&tcon->num_posixmkdirs));
47412+ atomic_read_unchecked(&tcon->num_posixopens),
47413+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47414 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47415- atomic_read(&tcon->num_mkdirs),
47416- atomic_read(&tcon->num_rmdirs));
47417+ atomic_read_unchecked(&tcon->num_mkdirs),
47418+ atomic_read_unchecked(&tcon->num_rmdirs));
47419 seq_printf(m, "\nRenames: %d T2 Renames %d",
47420- atomic_read(&tcon->num_renames),
47421- atomic_read(&tcon->num_t2renames));
47422+ atomic_read_unchecked(&tcon->num_renames),
47423+ atomic_read_unchecked(&tcon->num_t2renames));
47424 seq_printf(m, "\nFindFirst: %d FNext %d "
47425 "FClose %d",
47426- atomic_read(&tcon->num_ffirst),
47427- atomic_read(&tcon->num_fnext),
47428- atomic_read(&tcon->num_fclose));
47429+ atomic_read_unchecked(&tcon->num_ffirst),
47430+ atomic_read_unchecked(&tcon->num_fnext),
47431+ atomic_read_unchecked(&tcon->num_fclose));
47432 }
47433 }
47434 }
47435diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47436index 1445407..68cb0dc 100644
47437--- a/fs/cifs/cifsfs.c
47438+++ b/fs/cifs/cifsfs.c
47439@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47440 cifs_req_cachep = kmem_cache_create("cifs_request",
47441 CIFSMaxBufSize +
47442 MAX_CIFS_HDR_SIZE, 0,
47443- SLAB_HWCACHE_ALIGN, NULL);
47444+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47445 if (cifs_req_cachep == NULL)
47446 return -ENOMEM;
47447
47448@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47449 efficient to alloc 1 per page off the slab compared to 17K (5page)
47450 alloc of large cifs buffers even when page debugging is on */
47451 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47452- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47453+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47454 NULL);
47455 if (cifs_sm_req_cachep == NULL) {
47456 mempool_destroy(cifs_req_poolp);
47457@@ -991,8 +991,8 @@ init_cifs(void)
47458 atomic_set(&bufAllocCount, 0);
47459 atomic_set(&smBufAllocCount, 0);
47460 #ifdef CONFIG_CIFS_STATS2
47461- atomic_set(&totBufAllocCount, 0);
47462- atomic_set(&totSmBufAllocCount, 0);
47463+ atomic_set_unchecked(&totBufAllocCount, 0);
47464+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47465 #endif /* CONFIG_CIFS_STATS2 */
47466
47467 atomic_set(&midCount, 0);
47468diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47469index e29581e..1c22bab 100644
47470--- a/fs/cifs/cifsglob.h
47471+++ b/fs/cifs/cifsglob.h
47472@@ -252,28 +252,28 @@ struct cifsTconInfo {
47473 __u16 Flags; /* optional support bits */
47474 enum statusEnum tidStatus;
47475 #ifdef CONFIG_CIFS_STATS
47476- atomic_t num_smbs_sent;
47477- atomic_t num_writes;
47478- atomic_t num_reads;
47479- atomic_t num_flushes;
47480- atomic_t num_oplock_brks;
47481- atomic_t num_opens;
47482- atomic_t num_closes;
47483- atomic_t num_deletes;
47484- atomic_t num_mkdirs;
47485- atomic_t num_posixopens;
47486- atomic_t num_posixmkdirs;
47487- atomic_t num_rmdirs;
47488- atomic_t num_renames;
47489- atomic_t num_t2renames;
47490- atomic_t num_ffirst;
47491- atomic_t num_fnext;
47492- atomic_t num_fclose;
47493- atomic_t num_hardlinks;
47494- atomic_t num_symlinks;
47495- atomic_t num_locks;
47496- atomic_t num_acl_get;
47497- atomic_t num_acl_set;
47498+ atomic_unchecked_t num_smbs_sent;
47499+ atomic_unchecked_t num_writes;
47500+ atomic_unchecked_t num_reads;
47501+ atomic_unchecked_t num_flushes;
47502+ atomic_unchecked_t num_oplock_brks;
47503+ atomic_unchecked_t num_opens;
47504+ atomic_unchecked_t num_closes;
47505+ atomic_unchecked_t num_deletes;
47506+ atomic_unchecked_t num_mkdirs;
47507+ atomic_unchecked_t num_posixopens;
47508+ atomic_unchecked_t num_posixmkdirs;
47509+ atomic_unchecked_t num_rmdirs;
47510+ atomic_unchecked_t num_renames;
47511+ atomic_unchecked_t num_t2renames;
47512+ atomic_unchecked_t num_ffirst;
47513+ atomic_unchecked_t num_fnext;
47514+ atomic_unchecked_t num_fclose;
47515+ atomic_unchecked_t num_hardlinks;
47516+ atomic_unchecked_t num_symlinks;
47517+ atomic_unchecked_t num_locks;
47518+ atomic_unchecked_t num_acl_get;
47519+ atomic_unchecked_t num_acl_set;
47520 #ifdef CONFIG_CIFS_STATS2
47521 unsigned long long time_writes;
47522 unsigned long long time_reads;
47523@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47524 }
47525
47526 #ifdef CONFIG_CIFS_STATS
47527-#define cifs_stats_inc atomic_inc
47528+#define cifs_stats_inc atomic_inc_unchecked
47529
47530 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47531 unsigned int bytes)
47532@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47533 /* Various Debug counters */
47534 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47535 #ifdef CONFIG_CIFS_STATS2
47536-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47537-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47538+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47539+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47540 #endif
47541 GLOBAL_EXTERN atomic_t smBufAllocCount;
47542 GLOBAL_EXTERN atomic_t midCount;
47543diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47544index fc1e048..28b3441 100644
47545--- a/fs/cifs/link.c
47546+++ b/fs/cifs/link.c
47547@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47548
47549 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47550 {
47551- char *p = nd_get_link(nd);
47552+ const char *p = nd_get_link(nd);
47553 if (!IS_ERR(p))
47554 kfree(p);
47555 }
47556diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47557index 95b82e8..12a538d 100644
47558--- a/fs/cifs/misc.c
47559+++ b/fs/cifs/misc.c
47560@@ -155,7 +155,7 @@ cifs_buf_get(void)
47561 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47562 atomic_inc(&bufAllocCount);
47563 #ifdef CONFIG_CIFS_STATS2
47564- atomic_inc(&totBufAllocCount);
47565+ atomic_inc_unchecked(&totBufAllocCount);
47566 #endif /* CONFIG_CIFS_STATS2 */
47567 }
47568
47569@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47570 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47571 atomic_inc(&smBufAllocCount);
47572 #ifdef CONFIG_CIFS_STATS2
47573- atomic_inc(&totSmBufAllocCount);
47574+ atomic_inc_unchecked(&totSmBufAllocCount);
47575 #endif /* CONFIG_CIFS_STATS2 */
47576
47577 }
47578diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47579index a5bf577..6d19845 100644
47580--- a/fs/coda/cache.c
47581+++ b/fs/coda/cache.c
47582@@ -24,14 +24,14 @@
47583 #include <linux/coda_fs_i.h>
47584 #include <linux/coda_cache.h>
47585
47586-static atomic_t permission_epoch = ATOMIC_INIT(0);
47587+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47588
47589 /* replace or extend an acl cache hit */
47590 void coda_cache_enter(struct inode *inode, int mask)
47591 {
47592 struct coda_inode_info *cii = ITOC(inode);
47593
47594- cii->c_cached_epoch = atomic_read(&permission_epoch);
47595+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47596 if (cii->c_uid != current_fsuid()) {
47597 cii->c_uid = current_fsuid();
47598 cii->c_cached_perm = mask;
47599@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47600 void coda_cache_clear_inode(struct inode *inode)
47601 {
47602 struct coda_inode_info *cii = ITOC(inode);
47603- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47604+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47605 }
47606
47607 /* remove all acl caches */
47608 void coda_cache_clear_all(struct super_block *sb)
47609 {
47610- atomic_inc(&permission_epoch);
47611+ atomic_inc_unchecked(&permission_epoch);
47612 }
47613
47614
47615@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47616
47617 hit = (mask & cii->c_cached_perm) == mask &&
47618 cii->c_uid == current_fsuid() &&
47619- cii->c_cached_epoch == atomic_read(&permission_epoch);
47620+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47621
47622 return hit;
47623 }
47624diff --git a/fs/compat.c b/fs/compat.c
47625index d1e2411..27064e4 100644
47626--- a/fs/compat.c
47627+++ b/fs/compat.c
47628@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47629 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47630 {
47631 compat_ino_t ino = stat->ino;
47632- typeof(ubuf->st_uid) uid = 0;
47633- typeof(ubuf->st_gid) gid = 0;
47634+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47635+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47636 int err;
47637
47638 SET_UID(uid, stat->uid);
47639@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47640
47641 set_fs(KERNEL_DS);
47642 /* The __user pointer cast is valid because of the set_fs() */
47643- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47644+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47645 set_fs(oldfs);
47646 /* truncating is ok because it's a user address */
47647 if (!ret)
47648@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47649
47650 struct compat_readdir_callback {
47651 struct compat_old_linux_dirent __user *dirent;
47652+ struct file * file;
47653 int result;
47654 };
47655
47656@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47657 buf->result = -EOVERFLOW;
47658 return -EOVERFLOW;
47659 }
47660+
47661+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47662+ return 0;
47663+
47664 buf->result++;
47665 dirent = buf->dirent;
47666 if (!access_ok(VERIFY_WRITE, dirent,
47667@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47668
47669 buf.result = 0;
47670 buf.dirent = dirent;
47671+ buf.file = file;
47672
47673 error = vfs_readdir(file, compat_fillonedir, &buf);
47674 if (buf.result)
47675@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47676 struct compat_getdents_callback {
47677 struct compat_linux_dirent __user *current_dir;
47678 struct compat_linux_dirent __user *previous;
47679+ struct file * file;
47680 int count;
47681 int error;
47682 };
47683@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47684 buf->error = -EOVERFLOW;
47685 return -EOVERFLOW;
47686 }
47687+
47688+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47689+ return 0;
47690+
47691 dirent = buf->previous;
47692 if (dirent) {
47693 if (__put_user(offset, &dirent->d_off))
47694@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47695 buf.previous = NULL;
47696 buf.count = count;
47697 buf.error = 0;
47698+ buf.file = file;
47699
47700 error = vfs_readdir(file, compat_filldir, &buf);
47701 if (error >= 0)
47702@@ -987,6 +999,7 @@ out:
47703 struct compat_getdents_callback64 {
47704 struct linux_dirent64 __user *current_dir;
47705 struct linux_dirent64 __user *previous;
47706+ struct file * file;
47707 int count;
47708 int error;
47709 };
47710@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47711 buf->error = -EINVAL; /* only used if we fail.. */
47712 if (reclen > buf->count)
47713 return -EINVAL;
47714+
47715+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47716+ return 0;
47717+
47718 dirent = buf->previous;
47719
47720 if (dirent) {
47721@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
47722 buf.previous = NULL;
47723 buf.count = count;
47724 buf.error = 0;
47725+ buf.file = file;
47726
47727 error = vfs_readdir(file, compat_filldir64, &buf);
47728 if (error >= 0)
47729 error = buf.error;
47730 lastdirent = buf.previous;
47731 if (lastdirent) {
47732- typeof(lastdirent->d_off) d_off = file->f_pos;
47733+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47734 if (__put_user_unaligned(d_off, &lastdirent->d_off))
47735 error = -EFAULT;
47736 else
47737@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
47738 * verify all the pointers
47739 */
47740 ret = -EINVAL;
47741- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
47742+ if (nr_segs > UIO_MAXIOV)
47743 goto out;
47744 if (!file->f_op)
47745 goto out;
47746@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
47747 compat_uptr_t __user *envp,
47748 struct pt_regs * regs)
47749 {
47750+#ifdef CONFIG_GRKERNSEC
47751+ struct file *old_exec_file;
47752+ struct acl_subject_label *old_acl;
47753+ struct rlimit old_rlim[RLIM_NLIMITS];
47754+#endif
47755 struct linux_binprm *bprm;
47756 struct file *file;
47757 struct files_struct *displaced;
47758 bool clear_in_exec;
47759 int retval;
47760+ const struct cred *cred = current_cred();
47761+
47762+ /*
47763+ * We move the actual failure in case of RLIMIT_NPROC excess from
47764+ * set*uid() to execve() because too many poorly written programs
47765+ * don't check setuid() return code. Here we additionally recheck
47766+ * whether NPROC limit is still exceeded.
47767+ */
47768+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
47769+
47770+ if ((current->flags & PF_NPROC_EXCEEDED) &&
47771+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
47772+ retval = -EAGAIN;
47773+ goto out_ret;
47774+ }
47775+
47776+ /* We're below the limit (still or again), so we don't want to make
47777+ * further execve() calls fail. */
47778+ current->flags &= ~PF_NPROC_EXCEEDED;
47779
47780 retval = unshare_files(&displaced);
47781 if (retval)
47782@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
47783 bprm->filename = filename;
47784 bprm->interp = filename;
47785
47786+ if (gr_process_user_ban()) {
47787+ retval = -EPERM;
47788+ goto out_file;
47789+ }
47790+
47791+ retval = -EACCES;
47792+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
47793+ goto out_file;
47794+
47795 retval = bprm_mm_init(bprm);
47796 if (retval)
47797 goto out_file;
47798@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
47799 if (retval < 0)
47800 goto out;
47801
47802+ if (!gr_tpe_allow(file)) {
47803+ retval = -EACCES;
47804+ goto out;
47805+ }
47806+
47807+ if (gr_check_crash_exec(file)) {
47808+ retval = -EACCES;
47809+ goto out;
47810+ }
47811+
47812+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
47813+
47814+ gr_handle_exec_args_compat(bprm, argv);
47815+
47816+#ifdef CONFIG_GRKERNSEC
47817+ old_acl = current->acl;
47818+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
47819+ old_exec_file = current->exec_file;
47820+ get_file(file);
47821+ current->exec_file = file;
47822+#endif
47823+
47824+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
47825+ bprm->unsafe & LSM_UNSAFE_SHARE);
47826+ if (retval < 0)
47827+ goto out_fail;
47828+
47829 retval = search_binary_handler(bprm, regs);
47830 if (retval < 0)
47831- goto out;
47832+ goto out_fail;
47833+#ifdef CONFIG_GRKERNSEC
47834+ if (old_exec_file)
47835+ fput(old_exec_file);
47836+#endif
47837
47838 /* execve succeeded */
47839 current->fs->in_exec = 0;
47840@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
47841 put_files_struct(displaced);
47842 return retval;
47843
47844+out_fail:
47845+#ifdef CONFIG_GRKERNSEC
47846+ current->acl = old_acl;
47847+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
47848+ fput(current->exec_file);
47849+ current->exec_file = old_exec_file;
47850+#endif
47851+
47852 out:
47853 if (bprm->mm) {
47854 acct_arg_size(bprm, 0);
47855@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
47856 struct fdtable *fdt;
47857 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
47858
47859+ pax_track_stack();
47860+
47861 if (n < 0)
47862 goto out_nofds;
47863
47864@@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
47865 oldfs = get_fs();
47866 set_fs(KERNEL_DS);
47867 /* The __user pointer casts are valid because of the set_fs() */
47868- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
47869+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
47870 set_fs(oldfs);
47871
47872 if (err)
47873diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
47874index 0adced2..bbb1b0d 100644
47875--- a/fs/compat_binfmt_elf.c
47876+++ b/fs/compat_binfmt_elf.c
47877@@ -29,10 +29,12 @@
47878 #undef elfhdr
47879 #undef elf_phdr
47880 #undef elf_note
47881+#undef elf_dyn
47882 #undef elf_addr_t
47883 #define elfhdr elf32_hdr
47884 #define elf_phdr elf32_phdr
47885 #define elf_note elf32_note
47886+#define elf_dyn Elf32_Dyn
47887 #define elf_addr_t Elf32_Addr
47888
47889 /*
47890diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
47891index d84e705..d8c364c 100644
47892--- a/fs/compat_ioctl.c
47893+++ b/fs/compat_ioctl.c
47894@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
47895 up = (struct compat_video_spu_palette __user *) arg;
47896 err = get_user(palp, &up->palette);
47897 err |= get_user(length, &up->length);
47898+ if (err)
47899+ return -EFAULT;
47900
47901 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
47902 err = put_user(compat_ptr(palp), &up_native->palette);
47903@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
47904 return -EFAULT;
47905 if (__get_user(udata, &ss32->iomem_base))
47906 return -EFAULT;
47907- ss.iomem_base = compat_ptr(udata);
47908+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
47909 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
47910 __get_user(ss.port_high, &ss32->port_high))
47911 return -EFAULT;
47912@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
47913 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
47914 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
47915 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
47916- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47917+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47918 return -EFAULT;
47919
47920 return ioctl_preallocate(file, p);
47921diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
47922index 8e48b52..f01ed91 100644
47923--- a/fs/configfs/dir.c
47924+++ b/fs/configfs/dir.c
47925@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47926 }
47927 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
47928 struct configfs_dirent *next;
47929- const char * name;
47930+ const unsigned char * name;
47931+ char d_name[sizeof(next->s_dentry->d_iname)];
47932 int len;
47933
47934 next = list_entry(p, struct configfs_dirent,
47935@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47936 continue;
47937
47938 name = configfs_get_name(next);
47939- len = strlen(name);
47940+ if (next->s_dentry && name == next->s_dentry->d_iname) {
47941+ len = next->s_dentry->d_name.len;
47942+ memcpy(d_name, name, len);
47943+ name = d_name;
47944+ } else
47945+ len = strlen(name);
47946 if (next->s_dentry)
47947 ino = next->s_dentry->d_inode->i_ino;
47948 else
47949diff --git a/fs/dcache.c b/fs/dcache.c
47950index 44c0aea..2529092 100644
47951--- a/fs/dcache.c
47952+++ b/fs/dcache.c
47953@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
47954
47955 static struct kmem_cache *dentry_cache __read_mostly;
47956
47957-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
47958-
47959 /*
47960 * This is the single most critical data structure when it comes
47961 * to the dcache: the hashtable for lookups. Somebody should try
47962@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
47963 mempages -= reserve;
47964
47965 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
47966- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
47967+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
47968
47969 dcache_init();
47970 inode_init();
47971diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
47972index c010ecf..a8d8c59 100644
47973--- a/fs/dlm/lockspace.c
47974+++ b/fs/dlm/lockspace.c
47975@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
47976 kfree(ls);
47977 }
47978
47979-static struct sysfs_ops dlm_attr_ops = {
47980+static const struct sysfs_ops dlm_attr_ops = {
47981 .show = dlm_attr_show,
47982 .store = dlm_attr_store,
47983 };
47984diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
47985index 88ba4d4..073f003 100644
47986--- a/fs/ecryptfs/inode.c
47987+++ b/fs/ecryptfs/inode.c
47988@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
47989 old_fs = get_fs();
47990 set_fs(get_ds());
47991 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
47992- (char __user *)lower_buf,
47993+ (char __force_user *)lower_buf,
47994 lower_bufsiz);
47995 set_fs(old_fs);
47996 if (rc < 0)
47997@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47998 }
47999 old_fs = get_fs();
48000 set_fs(get_ds());
48001- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48002+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48003 set_fs(old_fs);
48004 if (rc < 0)
48005 goto out_free;
48006diff --git a/fs/exec.c b/fs/exec.c
48007index 86fafc6..b307bfa 100644
48008--- a/fs/exec.c
48009+++ b/fs/exec.c
48010@@ -56,12 +56,24 @@
48011 #include <linux/fsnotify.h>
48012 #include <linux/fs_struct.h>
48013 #include <linux/pipe_fs_i.h>
48014+#include <linux/random.h>
48015+#include <linux/seq_file.h>
48016+
48017+#ifdef CONFIG_PAX_REFCOUNT
48018+#include <linux/kallsyms.h>
48019+#include <linux/kdebug.h>
48020+#endif
48021
48022 #include <asm/uaccess.h>
48023 #include <asm/mmu_context.h>
48024 #include <asm/tlb.h>
48025 #include "internal.h"
48026
48027+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48028+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48029+EXPORT_SYMBOL(pax_set_initial_flags_func);
48030+#endif
48031+
48032 int core_uses_pid;
48033 char core_pattern[CORENAME_MAX_SIZE] = "core";
48034 unsigned int core_pipe_limit;
48035@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48036 int write)
48037 {
48038 struct page *page;
48039- int ret;
48040
48041-#ifdef CONFIG_STACK_GROWSUP
48042- if (write) {
48043- ret = expand_stack_downwards(bprm->vma, pos);
48044- if (ret < 0)
48045- return NULL;
48046- }
48047-#endif
48048- ret = get_user_pages(current, bprm->mm, pos,
48049- 1, write, 1, &page, NULL);
48050- if (ret <= 0)
48051+ if (0 > expand_stack_downwards(bprm->vma, pos))
48052+ return NULL;
48053+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48054 return NULL;
48055
48056 if (write) {
48057@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48058 vma->vm_end = STACK_TOP_MAX;
48059 vma->vm_start = vma->vm_end - PAGE_SIZE;
48060 vma->vm_flags = VM_STACK_FLAGS;
48061+
48062+#ifdef CONFIG_PAX_SEGMEXEC
48063+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48064+#endif
48065+
48066 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48067
48068 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48069@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48070 mm->stack_vm = mm->total_vm = 1;
48071 up_write(&mm->mmap_sem);
48072 bprm->p = vma->vm_end - sizeof(void *);
48073+
48074+#ifdef CONFIG_PAX_RANDUSTACK
48075+ if (randomize_va_space)
48076+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48077+#endif
48078+
48079 return 0;
48080 err:
48081 up_write(&mm->mmap_sem);
48082@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48083 int r;
48084 mm_segment_t oldfs = get_fs();
48085 set_fs(KERNEL_DS);
48086- r = copy_strings(argc, (char __user * __user *)argv, bprm);
48087+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48088 set_fs(oldfs);
48089 return r;
48090 }
48091@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48092 unsigned long new_end = old_end - shift;
48093 struct mmu_gather *tlb;
48094
48095- BUG_ON(new_start > new_end);
48096+ if (new_start >= new_end || new_start < mmap_min_addr)
48097+ return -ENOMEM;
48098
48099 /*
48100 * ensure there are no vmas between where we want to go
48101@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48102 if (vma != find_vma(mm, new_start))
48103 return -EFAULT;
48104
48105+#ifdef CONFIG_PAX_SEGMEXEC
48106+ BUG_ON(pax_find_mirror_vma(vma));
48107+#endif
48108+
48109 /*
48110 * cover the whole range: [new_start, old_end)
48111 */
48112@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48113 stack_top = arch_align_stack(stack_top);
48114 stack_top = PAGE_ALIGN(stack_top);
48115
48116- if (unlikely(stack_top < mmap_min_addr) ||
48117- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48118- return -ENOMEM;
48119-
48120 stack_shift = vma->vm_end - stack_top;
48121
48122 bprm->p -= stack_shift;
48123@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48124 bprm->exec -= stack_shift;
48125
48126 down_write(&mm->mmap_sem);
48127+
48128+ /* Move stack pages down in memory. */
48129+ if (stack_shift) {
48130+ ret = shift_arg_pages(vma, stack_shift);
48131+ if (ret)
48132+ goto out_unlock;
48133+ }
48134+
48135 vm_flags = VM_STACK_FLAGS;
48136
48137 /*
48138@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48139 vm_flags &= ~VM_EXEC;
48140 vm_flags |= mm->def_flags;
48141
48142+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48143+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48144+ vm_flags &= ~VM_EXEC;
48145+
48146+#ifdef CONFIG_PAX_MPROTECT
48147+ if (mm->pax_flags & MF_PAX_MPROTECT)
48148+ vm_flags &= ~VM_MAYEXEC;
48149+#endif
48150+
48151+ }
48152+#endif
48153+
48154 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48155 vm_flags);
48156 if (ret)
48157 goto out_unlock;
48158 BUG_ON(prev != vma);
48159
48160- /* Move stack pages down in memory. */
48161- if (stack_shift) {
48162- ret = shift_arg_pages(vma, stack_shift);
48163- if (ret)
48164- goto out_unlock;
48165- }
48166-
48167 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48168 stack_size = vma->vm_end - vma->vm_start;
48169 /*
48170@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_t offset,
48171 old_fs = get_fs();
48172 set_fs(get_ds());
48173 /* The cast to a user pointer is valid due to the set_fs() */
48174- result = vfs_read(file, (void __user *)addr, count, &pos);
48175+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48176 set_fs(old_fs);
48177 return result;
48178 }
48179@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48180 }
48181 rcu_read_unlock();
48182
48183- if (p->fs->users > n_fs) {
48184+ if (atomic_read(&p->fs->users) > n_fs) {
48185 bprm->unsafe |= LSM_UNSAFE_SHARE;
48186 } else {
48187 res = -EAGAIN;
48188@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
48189 char __user *__user *envp,
48190 struct pt_regs * regs)
48191 {
48192+#ifdef CONFIG_GRKERNSEC
48193+ struct file *old_exec_file;
48194+ struct acl_subject_label *old_acl;
48195+ struct rlimit old_rlim[RLIM_NLIMITS];
48196+#endif
48197 struct linux_binprm *bprm;
48198 struct file *file;
48199 struct files_struct *displaced;
48200 bool clear_in_exec;
48201 int retval;
48202+ const struct cred *cred = current_cred();
48203+
48204+ /*
48205+ * We move the actual failure in case of RLIMIT_NPROC excess from
48206+ * set*uid() to execve() because too many poorly written programs
48207+ * don't check setuid() return code. Here we additionally recheck
48208+ * whether NPROC limit is still exceeded.
48209+ */
48210+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48211+
48212+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48213+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48214+ retval = -EAGAIN;
48215+ goto out_ret;
48216+ }
48217+
48218+ /* We're below the limit (still or again), so we don't want to make
48219+ * further execve() calls fail. */
48220+ current->flags &= ~PF_NPROC_EXCEEDED;
48221
48222 retval = unshare_files(&displaced);
48223 if (retval)
48224@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
48225 bprm->filename = filename;
48226 bprm->interp = filename;
48227
48228+ if (gr_process_user_ban()) {
48229+ retval = -EPERM;
48230+ goto out_file;
48231+ }
48232+
48233+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48234+ retval = -EACCES;
48235+ goto out_file;
48236+ }
48237+
48238 retval = bprm_mm_init(bprm);
48239 if (retval)
48240 goto out_file;
48241@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
48242 if (retval < 0)
48243 goto out;
48244
48245+ if (!gr_tpe_allow(file)) {
48246+ retval = -EACCES;
48247+ goto out;
48248+ }
48249+
48250+ if (gr_check_crash_exec(file)) {
48251+ retval = -EACCES;
48252+ goto out;
48253+ }
48254+
48255+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48256+
48257+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48258+
48259+#ifdef CONFIG_GRKERNSEC
48260+ old_acl = current->acl;
48261+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48262+ old_exec_file = current->exec_file;
48263+ get_file(file);
48264+ current->exec_file = file;
48265+#endif
48266+
48267+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48268+ bprm->unsafe & LSM_UNSAFE_SHARE);
48269+ if (retval < 0)
48270+ goto out_fail;
48271+
48272 current->flags &= ~PF_KTHREAD;
48273 retval = search_binary_handler(bprm,regs);
48274 if (retval < 0)
48275- goto out;
48276+ goto out_fail;
48277+#ifdef CONFIG_GRKERNSEC
48278+ if (old_exec_file)
48279+ fput(old_exec_file);
48280+#endif
48281
48282 /* execve succeeded */
48283 current->fs->in_exec = 0;
48284@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
48285 put_files_struct(displaced);
48286 return retval;
48287
48288+out_fail:
48289+#ifdef CONFIG_GRKERNSEC
48290+ current->acl = old_acl;
48291+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48292+ fput(current->exec_file);
48293+ current->exec_file = old_exec_file;
48294+#endif
48295+
48296 out:
48297 if (bprm->mm) {
48298 acct_arg_size(bprm, 0);
48299@@ -1591,6 +1693,220 @@ out:
48300 return ispipe;
48301 }
48302
48303+int pax_check_flags(unsigned long *flags)
48304+{
48305+ int retval = 0;
48306+
48307+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48308+ if (*flags & MF_PAX_SEGMEXEC)
48309+ {
48310+ *flags &= ~MF_PAX_SEGMEXEC;
48311+ retval = -EINVAL;
48312+ }
48313+#endif
48314+
48315+ if ((*flags & MF_PAX_PAGEEXEC)
48316+
48317+#ifdef CONFIG_PAX_PAGEEXEC
48318+ && (*flags & MF_PAX_SEGMEXEC)
48319+#endif
48320+
48321+ )
48322+ {
48323+ *flags &= ~MF_PAX_PAGEEXEC;
48324+ retval = -EINVAL;
48325+ }
48326+
48327+ if ((*flags & MF_PAX_MPROTECT)
48328+
48329+#ifdef CONFIG_PAX_MPROTECT
48330+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48331+#endif
48332+
48333+ )
48334+ {
48335+ *flags &= ~MF_PAX_MPROTECT;
48336+ retval = -EINVAL;
48337+ }
48338+
48339+ if ((*flags & MF_PAX_EMUTRAMP)
48340+
48341+#ifdef CONFIG_PAX_EMUTRAMP
48342+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48343+#endif
48344+
48345+ )
48346+ {
48347+ *flags &= ~MF_PAX_EMUTRAMP;
48348+ retval = -EINVAL;
48349+ }
48350+
48351+ return retval;
48352+}
48353+
48354+EXPORT_SYMBOL(pax_check_flags);
48355+
48356+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48357+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48358+{
48359+ struct task_struct *tsk = current;
48360+ struct mm_struct *mm = current->mm;
48361+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48362+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48363+ char *path_exec = NULL;
48364+ char *path_fault = NULL;
48365+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48366+
48367+ if (buffer_exec && buffer_fault) {
48368+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48369+
48370+ down_read(&mm->mmap_sem);
48371+ vma = mm->mmap;
48372+ while (vma && (!vma_exec || !vma_fault)) {
48373+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48374+ vma_exec = vma;
48375+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48376+ vma_fault = vma;
48377+ vma = vma->vm_next;
48378+ }
48379+ if (vma_exec) {
48380+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48381+ if (IS_ERR(path_exec))
48382+ path_exec = "<path too long>";
48383+ else {
48384+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48385+ if (path_exec) {
48386+ *path_exec = 0;
48387+ path_exec = buffer_exec;
48388+ } else
48389+ path_exec = "<path too long>";
48390+ }
48391+ }
48392+ if (vma_fault) {
48393+ start = vma_fault->vm_start;
48394+ end = vma_fault->vm_end;
48395+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48396+ if (vma_fault->vm_file) {
48397+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48398+ if (IS_ERR(path_fault))
48399+ path_fault = "<path too long>";
48400+ else {
48401+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48402+ if (path_fault) {
48403+ *path_fault = 0;
48404+ path_fault = buffer_fault;
48405+ } else
48406+ path_fault = "<path too long>";
48407+ }
48408+ } else
48409+ path_fault = "<anonymous mapping>";
48410+ }
48411+ up_read(&mm->mmap_sem);
48412+ }
48413+ if (tsk->signal->curr_ip)
48414+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48415+ else
48416+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48417+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48418+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48419+ task_uid(tsk), task_euid(tsk), pc, sp);
48420+ free_page((unsigned long)buffer_exec);
48421+ free_page((unsigned long)buffer_fault);
48422+ pax_report_insns(regs, pc, sp);
48423+ do_coredump(SIGKILL, SIGKILL, regs);
48424+}
48425+#endif
48426+
48427+#ifdef CONFIG_PAX_REFCOUNT
48428+void pax_report_refcount_overflow(struct pt_regs *regs)
48429+{
48430+ if (current->signal->curr_ip)
48431+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48432+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48433+ else
48434+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48435+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48436+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48437+ show_regs(regs);
48438+ force_sig_specific(SIGKILL, current);
48439+}
48440+#endif
48441+
48442+#ifdef CONFIG_PAX_USERCOPY
48443+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48444+int object_is_on_stack(const void *obj, unsigned long len)
48445+{
48446+ const void * const stack = task_stack_page(current);
48447+ const void * const stackend = stack + THREAD_SIZE;
48448+
48449+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48450+ const void *frame = NULL;
48451+ const void *oldframe;
48452+#endif
48453+
48454+ if (obj + len < obj)
48455+ return -1;
48456+
48457+ if (obj + len <= stack || stackend <= obj)
48458+ return 0;
48459+
48460+ if (obj < stack || stackend < obj + len)
48461+ return -1;
48462+
48463+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48464+ oldframe = __builtin_frame_address(1);
48465+ if (oldframe)
48466+ frame = __builtin_frame_address(2);
48467+ /*
48468+ low ----------------------------------------------> high
48469+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48470+ ^----------------^
48471+ allow copies only within here
48472+ */
48473+ while (stack <= frame && frame < stackend) {
48474+ /* if obj + len extends past the last frame, this
48475+ check won't pass and the next frame will be 0,
48476+ causing us to bail out and correctly report
48477+ the copy as invalid
48478+ */
48479+ if (obj + len <= frame)
48480+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48481+ oldframe = frame;
48482+ frame = *(const void * const *)frame;
48483+ }
48484+ return -1;
48485+#else
48486+ return 1;
48487+#endif
48488+}
48489+
48490+
48491+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48492+{
48493+ if (current->signal->curr_ip)
48494+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48495+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48496+ else
48497+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48498+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48499+
48500+ dump_stack();
48501+ gr_handle_kernel_exploit();
48502+ do_group_exit(SIGKILL);
48503+}
48504+#endif
48505+
48506+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48507+void pax_track_stack(void)
48508+{
48509+ unsigned long sp = (unsigned long)&sp;
48510+ if (sp < current_thread_info()->lowest_stack &&
48511+ sp > (unsigned long)task_stack_page(current))
48512+ current_thread_info()->lowest_stack = sp;
48513+}
48514+EXPORT_SYMBOL(pax_track_stack);
48515+#endif
48516+
48517 static int zap_process(struct task_struct *start)
48518 {
48519 struct task_struct *t;
48520@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct file *file)
48521 pipe = file->f_path.dentry->d_inode->i_pipe;
48522
48523 pipe_lock(pipe);
48524- pipe->readers++;
48525- pipe->writers--;
48526+ atomic_inc(&pipe->readers);
48527+ atomic_dec(&pipe->writers);
48528
48529- while ((pipe->readers > 1) && (!signal_pending(current))) {
48530+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48531 wake_up_interruptible_sync(&pipe->wait);
48532 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48533 pipe_wait(pipe);
48534 }
48535
48536- pipe->readers--;
48537- pipe->writers++;
48538+ atomic_dec(&pipe->readers);
48539+ atomic_inc(&pipe->writers);
48540 pipe_unlock(pipe);
48541
48542 }
48543@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48544 char **helper_argv = NULL;
48545 int helper_argc = 0;
48546 int dump_count = 0;
48547- static atomic_t core_dump_count = ATOMIC_INIT(0);
48548+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48549
48550 audit_core_dumps(signr);
48551
48552+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48553+ gr_handle_brute_attach(current, mm->flags);
48554+
48555 binfmt = mm->binfmt;
48556 if (!binfmt || !binfmt->core_dump)
48557 goto fail;
48558@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48559 */
48560 clear_thread_flag(TIF_SIGPENDING);
48561
48562+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48563+
48564 /*
48565 * lock_kernel() because format_corename() is controlled by sysctl, which
48566 * uses lock_kernel()
48567@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48568 goto fail_unlock;
48569 }
48570
48571- dump_count = atomic_inc_return(&core_dump_count);
48572+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
48573 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
48574 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
48575 task_tgid_vnr(current), current->comm);
48576@@ -1972,7 +2293,7 @@ close_fail:
48577 filp_close(file, NULL);
48578 fail_dropcount:
48579 if (dump_count)
48580- atomic_dec(&core_dump_count);
48581+ atomic_dec_unchecked(&core_dump_count);
48582 fail_unlock:
48583 if (helper_argv)
48584 argv_free(helper_argv);
48585diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
48586index 7f8d2e5..a1abdbb 100644
48587--- a/fs/ext2/balloc.c
48588+++ b/fs/ext2/balloc.c
48589@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
48590
48591 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48592 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48593- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48594+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48595 sbi->s_resuid != current_fsuid() &&
48596 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48597 return 0;
48598diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
48599index 27967f9..9f2a5fb 100644
48600--- a/fs/ext3/balloc.c
48601+++ b/fs/ext3/balloc.c
48602@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
48603
48604 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48605 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48606- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48607+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48608 sbi->s_resuid != current_fsuid() &&
48609 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48610 return 0;
48611diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
48612index e85b63c..80398e6 100644
48613--- a/fs/ext4/balloc.c
48614+++ b/fs/ext4/balloc.c
48615@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
48616 /* Hm, nope. Are (enough) root reserved blocks available? */
48617 if (sbi->s_resuid == current_fsuid() ||
48618 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
48619- capable(CAP_SYS_RESOURCE)) {
48620+ capable_nolog(CAP_SYS_RESOURCE)) {
48621 if (free_blocks >= (nblocks + dirty_blocks))
48622 return 1;
48623 }
48624diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
48625index 67c46ed..1f237e5 100644
48626--- a/fs/ext4/ext4.h
48627+++ b/fs/ext4/ext4.h
48628@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
48629
48630 /* stats for buddy allocator */
48631 spinlock_t s_mb_pa_lock;
48632- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
48633- atomic_t s_bal_success; /* we found long enough chunks */
48634- atomic_t s_bal_allocated; /* in blocks */
48635- atomic_t s_bal_ex_scanned; /* total extents scanned */
48636- atomic_t s_bal_goals; /* goal hits */
48637- atomic_t s_bal_breaks; /* too long searches */
48638- atomic_t s_bal_2orders; /* 2^order hits */
48639+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
48640+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
48641+ atomic_unchecked_t s_bal_allocated; /* in blocks */
48642+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
48643+ atomic_unchecked_t s_bal_goals; /* goal hits */
48644+ atomic_unchecked_t s_bal_breaks; /* too long searches */
48645+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
48646 spinlock_t s_bal_lock;
48647 unsigned long s_mb_buddies_generated;
48648 unsigned long long s_mb_generation_time;
48649- atomic_t s_mb_lost_chunks;
48650- atomic_t s_mb_preallocated;
48651- atomic_t s_mb_discarded;
48652+ atomic_unchecked_t s_mb_lost_chunks;
48653+ atomic_unchecked_t s_mb_preallocated;
48654+ atomic_unchecked_t s_mb_discarded;
48655 atomic_t s_lock_busy;
48656
48657 /* locality groups */
48658diff --git a/fs/ext4/file.c b/fs/ext4/file.c
48659index 2a60541..7439d61 100644
48660--- a/fs/ext4/file.c
48661+++ b/fs/ext4/file.c
48662@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
48663 cp = d_path(&path, buf, sizeof(buf));
48664 path_put(&path);
48665 if (!IS_ERR(cp)) {
48666- memcpy(sbi->s_es->s_last_mounted, cp,
48667- sizeof(sbi->s_es->s_last_mounted));
48668+ strlcpy(sbi->s_es->s_last_mounted, cp,
48669+ sizeof(sbi->s_es->s_last_mounted));
48670 sb->s_dirt = 1;
48671 }
48672 }
48673diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
48674index 42bac1b..0aab9d8 100644
48675--- a/fs/ext4/mballoc.c
48676+++ b/fs/ext4/mballoc.c
48677@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
48678 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
48679
48680 if (EXT4_SB(sb)->s_mb_stats)
48681- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
48682+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
48683
48684 break;
48685 }
48686@@ -2131,7 +2131,7 @@ repeat:
48687 ac->ac_status = AC_STATUS_CONTINUE;
48688 ac->ac_flags |= EXT4_MB_HINT_FIRST;
48689 cr = 3;
48690- atomic_inc(&sbi->s_mb_lost_chunks);
48691+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
48692 goto repeat;
48693 }
48694 }
48695@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
48696 ext4_grpblk_t counters[16];
48697 } sg;
48698
48699+ pax_track_stack();
48700+
48701 group--;
48702 if (group == 0)
48703 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
48704@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
48705 if (sbi->s_mb_stats) {
48706 printk(KERN_INFO
48707 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
48708- atomic_read(&sbi->s_bal_allocated),
48709- atomic_read(&sbi->s_bal_reqs),
48710- atomic_read(&sbi->s_bal_success));
48711+ atomic_read_unchecked(&sbi->s_bal_allocated),
48712+ atomic_read_unchecked(&sbi->s_bal_reqs),
48713+ atomic_read_unchecked(&sbi->s_bal_success));
48714 printk(KERN_INFO
48715 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
48716 "%u 2^N hits, %u breaks, %u lost\n",
48717- atomic_read(&sbi->s_bal_ex_scanned),
48718- atomic_read(&sbi->s_bal_goals),
48719- atomic_read(&sbi->s_bal_2orders),
48720- atomic_read(&sbi->s_bal_breaks),
48721- atomic_read(&sbi->s_mb_lost_chunks));
48722+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
48723+ atomic_read_unchecked(&sbi->s_bal_goals),
48724+ atomic_read_unchecked(&sbi->s_bal_2orders),
48725+ atomic_read_unchecked(&sbi->s_bal_breaks),
48726+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
48727 printk(KERN_INFO
48728 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
48729 sbi->s_mb_buddies_generated++,
48730 sbi->s_mb_generation_time);
48731 printk(KERN_INFO
48732 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
48733- atomic_read(&sbi->s_mb_preallocated),
48734- atomic_read(&sbi->s_mb_discarded));
48735+ atomic_read_unchecked(&sbi->s_mb_preallocated),
48736+ atomic_read_unchecked(&sbi->s_mb_discarded));
48737 }
48738
48739 free_percpu(sbi->s_locality_groups);
48740@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
48741 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
48742
48743 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
48744- atomic_inc(&sbi->s_bal_reqs);
48745- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48746+ atomic_inc_unchecked(&sbi->s_bal_reqs);
48747+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48748 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
48749- atomic_inc(&sbi->s_bal_success);
48750- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
48751+ atomic_inc_unchecked(&sbi->s_bal_success);
48752+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
48753 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
48754 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
48755- atomic_inc(&sbi->s_bal_goals);
48756+ atomic_inc_unchecked(&sbi->s_bal_goals);
48757 if (ac->ac_found > sbi->s_mb_max_to_scan)
48758- atomic_inc(&sbi->s_bal_breaks);
48759+ atomic_inc_unchecked(&sbi->s_bal_breaks);
48760 }
48761
48762 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
48763@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
48764 trace_ext4_mb_new_inode_pa(ac, pa);
48765
48766 ext4_mb_use_inode_pa(ac, pa);
48767- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48768+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48769
48770 ei = EXT4_I(ac->ac_inode);
48771 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48772@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
48773 trace_ext4_mb_new_group_pa(ac, pa);
48774
48775 ext4_mb_use_group_pa(ac, pa);
48776- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48777+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48778
48779 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48780 lg = ac->ac_lg;
48781@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
48782 * from the bitmap and continue.
48783 */
48784 }
48785- atomic_add(free, &sbi->s_mb_discarded);
48786+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
48787
48788 return err;
48789 }
48790@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
48791 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
48792 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
48793 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
48794- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48795+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48796
48797 if (ac) {
48798 ac->ac_sb = sb;
48799diff --git a/fs/ext4/super.c b/fs/ext4/super.c
48800index f27e045..be5a1c3 100644
48801--- a/fs/ext4/super.c
48802+++ b/fs/ext4/super.c
48803@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobject *kobj)
48804 }
48805
48806
48807-static struct sysfs_ops ext4_attr_ops = {
48808+static const struct sysfs_ops ext4_attr_ops = {
48809 .show = ext4_attr_show,
48810 .store = ext4_attr_store,
48811 };
48812diff --git a/fs/fcntl.c b/fs/fcntl.c
48813index 97e01dc..e9aab2d 100644
48814--- a/fs/fcntl.c
48815+++ b/fs/fcntl.c
48816@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
48817 if (err)
48818 return err;
48819
48820+ if (gr_handle_chroot_fowner(pid, type))
48821+ return -ENOENT;
48822+ if (gr_check_protected_task_fowner(pid, type))
48823+ return -EACCES;
48824+
48825 f_modown(filp, pid, type, force);
48826 return 0;
48827 }
48828@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
48829
48830 static int f_setown_ex(struct file *filp, unsigned long arg)
48831 {
48832- struct f_owner_ex * __user owner_p = (void * __user)arg;
48833+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48834 struct f_owner_ex owner;
48835 struct pid *pid;
48836 int type;
48837@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
48838
48839 static int f_getown_ex(struct file *filp, unsigned long arg)
48840 {
48841- struct f_owner_ex * __user owner_p = (void * __user)arg;
48842+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48843 struct f_owner_ex owner;
48844 int ret = 0;
48845
48846@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
48847 switch (cmd) {
48848 case F_DUPFD:
48849 case F_DUPFD_CLOEXEC:
48850+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
48851 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48852 break;
48853 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
48854diff --git a/fs/fifo.c b/fs/fifo.c
48855index f8f97b8..b1f2259 100644
48856--- a/fs/fifo.c
48857+++ b/fs/fifo.c
48858@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
48859 */
48860 filp->f_op = &read_pipefifo_fops;
48861 pipe->r_counter++;
48862- if (pipe->readers++ == 0)
48863+ if (atomic_inc_return(&pipe->readers) == 1)
48864 wake_up_partner(inode);
48865
48866- if (!pipe->writers) {
48867+ if (!atomic_read(&pipe->writers)) {
48868 if ((filp->f_flags & O_NONBLOCK)) {
48869 /* suppress POLLHUP until we have
48870 * seen a writer */
48871@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
48872 * errno=ENXIO when there is no process reading the FIFO.
48873 */
48874 ret = -ENXIO;
48875- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
48876+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
48877 goto err;
48878
48879 filp->f_op = &write_pipefifo_fops;
48880 pipe->w_counter++;
48881- if (!pipe->writers++)
48882+ if (atomic_inc_return(&pipe->writers) == 1)
48883 wake_up_partner(inode);
48884
48885- if (!pipe->readers) {
48886+ if (!atomic_read(&pipe->readers)) {
48887 wait_for_partner(inode, &pipe->r_counter);
48888 if (signal_pending(current))
48889 goto err_wr;
48890@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
48891 */
48892 filp->f_op = &rdwr_pipefifo_fops;
48893
48894- pipe->readers++;
48895- pipe->writers++;
48896+ atomic_inc(&pipe->readers);
48897+ atomic_inc(&pipe->writers);
48898 pipe->r_counter++;
48899 pipe->w_counter++;
48900- if (pipe->readers == 1 || pipe->writers == 1)
48901+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
48902 wake_up_partner(inode);
48903 break;
48904
48905@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
48906 return 0;
48907
48908 err_rd:
48909- if (!--pipe->readers)
48910+ if (atomic_dec_and_test(&pipe->readers))
48911 wake_up_interruptible(&pipe->wait);
48912 ret = -ERESTARTSYS;
48913 goto err;
48914
48915 err_wr:
48916- if (!--pipe->writers)
48917+ if (atomic_dec_and_test(&pipe->writers))
48918 wake_up_interruptible(&pipe->wait);
48919 ret = -ERESTARTSYS;
48920 goto err;
48921
48922 err:
48923- if (!pipe->readers && !pipe->writers)
48924+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
48925 free_pipe_info(inode);
48926
48927 err_nocleanup:
48928diff --git a/fs/file.c b/fs/file.c
48929index 87e1290..a930cc4 100644
48930--- a/fs/file.c
48931+++ b/fs/file.c
48932@@ -14,6 +14,7 @@
48933 #include <linux/slab.h>
48934 #include <linux/vmalloc.h>
48935 #include <linux/file.h>
48936+#include <linux/security.h>
48937 #include <linux/fdtable.h>
48938 #include <linux/bitops.h>
48939 #include <linux/interrupt.h>
48940@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
48941 * N.B. For clone tasks sharing a files structure, this test
48942 * will limit the total number of files that can be opened.
48943 */
48944+
48945+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
48946 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48947 return -EMFILE;
48948
48949diff --git a/fs/filesystems.c b/fs/filesystems.c
48950index a24c58e..53f91ee 100644
48951--- a/fs/filesystems.c
48952+++ b/fs/filesystems.c
48953@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
48954 int len = dot ? dot - name : strlen(name);
48955
48956 fs = __get_fs_type(name, len);
48957+
48958+#ifdef CONFIG_GRKERNSEC_MODHARDEN
48959+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
48960+#else
48961 if (!fs && (request_module("%.*s", len, name) == 0))
48962+#endif
48963 fs = __get_fs_type(name, len);
48964
48965 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
48966diff --git a/fs/fs_struct.c b/fs/fs_struct.c
48967index eee0590..ef5bc0e 100644
48968--- a/fs/fs_struct.c
48969+++ b/fs/fs_struct.c
48970@@ -4,6 +4,7 @@
48971 #include <linux/path.h>
48972 #include <linux/slab.h>
48973 #include <linux/fs_struct.h>
48974+#include <linux/grsecurity.h>
48975
48976 /*
48977 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
48978@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
48979 old_root = fs->root;
48980 fs->root = *path;
48981 path_get(path);
48982+ gr_set_chroot_entries(current, path);
48983 write_unlock(&fs->lock);
48984 if (old_root.dentry)
48985 path_put(&old_root);
48986@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
48987 && fs->root.mnt == old_root->mnt) {
48988 path_get(new_root);
48989 fs->root = *new_root;
48990+ gr_set_chroot_entries(p, new_root);
48991 count++;
48992 }
48993 if (fs->pwd.dentry == old_root->dentry
48994@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
48995 task_lock(tsk);
48996 write_lock(&fs->lock);
48997 tsk->fs = NULL;
48998- kill = !--fs->users;
48999+ gr_clear_chroot_entries(tsk);
49000+ kill = !atomic_dec_return(&fs->users);
49001 write_unlock(&fs->lock);
49002 task_unlock(tsk);
49003 if (kill)
49004@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49005 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49006 /* We don't need to lock fs - think why ;-) */
49007 if (fs) {
49008- fs->users = 1;
49009+ atomic_set(&fs->users, 1);
49010 fs->in_exec = 0;
49011 rwlock_init(&fs->lock);
49012 fs->umask = old->umask;
49013@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49014
49015 task_lock(current);
49016 write_lock(&fs->lock);
49017- kill = !--fs->users;
49018+ kill = !atomic_dec_return(&fs->users);
49019 current->fs = new_fs;
49020+ gr_set_chroot_entries(current, &new_fs->root);
49021 write_unlock(&fs->lock);
49022 task_unlock(current);
49023
49024@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49025
49026 /* to be mentioned only in INIT_TASK */
49027 struct fs_struct init_fs = {
49028- .users = 1,
49029+ .users = ATOMIC_INIT(1),
49030 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49031 .umask = 0022,
49032 };
49033@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49034 task_lock(current);
49035
49036 write_lock(&init_fs.lock);
49037- init_fs.users++;
49038+ atomic_inc(&init_fs.users);
49039 write_unlock(&init_fs.lock);
49040
49041 write_lock(&fs->lock);
49042 current->fs = &init_fs;
49043- kill = !--fs->users;
49044+ gr_set_chroot_entries(current, &current->fs->root);
49045+ kill = !atomic_dec_return(&fs->users);
49046 write_unlock(&fs->lock);
49047
49048 task_unlock(current);
49049diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49050index 9905350..02eaec4 100644
49051--- a/fs/fscache/cookie.c
49052+++ b/fs/fscache/cookie.c
49053@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49054 parent ? (char *) parent->def->name : "<no-parent>",
49055 def->name, netfs_data);
49056
49057- fscache_stat(&fscache_n_acquires);
49058+ fscache_stat_unchecked(&fscache_n_acquires);
49059
49060 /* if there's no parent cookie, then we don't create one here either */
49061 if (!parent) {
49062- fscache_stat(&fscache_n_acquires_null);
49063+ fscache_stat_unchecked(&fscache_n_acquires_null);
49064 _leave(" [no parent]");
49065 return NULL;
49066 }
49067@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49068 /* allocate and initialise a cookie */
49069 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49070 if (!cookie) {
49071- fscache_stat(&fscache_n_acquires_oom);
49072+ fscache_stat_unchecked(&fscache_n_acquires_oom);
49073 _leave(" [ENOMEM]");
49074 return NULL;
49075 }
49076@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49077
49078 switch (cookie->def->type) {
49079 case FSCACHE_COOKIE_TYPE_INDEX:
49080- fscache_stat(&fscache_n_cookie_index);
49081+ fscache_stat_unchecked(&fscache_n_cookie_index);
49082 break;
49083 case FSCACHE_COOKIE_TYPE_DATAFILE:
49084- fscache_stat(&fscache_n_cookie_data);
49085+ fscache_stat_unchecked(&fscache_n_cookie_data);
49086 break;
49087 default:
49088- fscache_stat(&fscache_n_cookie_special);
49089+ fscache_stat_unchecked(&fscache_n_cookie_special);
49090 break;
49091 }
49092
49093@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49094 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49095 atomic_dec(&parent->n_children);
49096 __fscache_cookie_put(cookie);
49097- fscache_stat(&fscache_n_acquires_nobufs);
49098+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49099 _leave(" = NULL");
49100 return NULL;
49101 }
49102 }
49103
49104- fscache_stat(&fscache_n_acquires_ok);
49105+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49106 _leave(" = %p", cookie);
49107 return cookie;
49108 }
49109@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49110 cache = fscache_select_cache_for_object(cookie->parent);
49111 if (!cache) {
49112 up_read(&fscache_addremove_sem);
49113- fscache_stat(&fscache_n_acquires_no_cache);
49114+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49115 _leave(" = -ENOMEDIUM [no cache]");
49116 return -ENOMEDIUM;
49117 }
49118@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49119 object = cache->ops->alloc_object(cache, cookie);
49120 fscache_stat_d(&fscache_n_cop_alloc_object);
49121 if (IS_ERR(object)) {
49122- fscache_stat(&fscache_n_object_no_alloc);
49123+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49124 ret = PTR_ERR(object);
49125 goto error;
49126 }
49127
49128- fscache_stat(&fscache_n_object_alloc);
49129+ fscache_stat_unchecked(&fscache_n_object_alloc);
49130
49131 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49132
49133@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49134 struct fscache_object *object;
49135 struct hlist_node *_p;
49136
49137- fscache_stat(&fscache_n_updates);
49138+ fscache_stat_unchecked(&fscache_n_updates);
49139
49140 if (!cookie) {
49141- fscache_stat(&fscache_n_updates_null);
49142+ fscache_stat_unchecked(&fscache_n_updates_null);
49143 _leave(" [no cookie]");
49144 return;
49145 }
49146@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49147 struct fscache_object *object;
49148 unsigned long event;
49149
49150- fscache_stat(&fscache_n_relinquishes);
49151+ fscache_stat_unchecked(&fscache_n_relinquishes);
49152 if (retire)
49153- fscache_stat(&fscache_n_relinquishes_retire);
49154+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49155
49156 if (!cookie) {
49157- fscache_stat(&fscache_n_relinquishes_null);
49158+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49159 _leave(" [no cookie]");
49160 return;
49161 }
49162@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49163
49164 /* wait for the cookie to finish being instantiated (or to fail) */
49165 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49166- fscache_stat(&fscache_n_relinquishes_waitcrt);
49167+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49168 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49169 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49170 }
49171diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49172index edd7434..0725e66 100644
49173--- a/fs/fscache/internal.h
49174+++ b/fs/fscache/internal.h
49175@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49176 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49177 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49178
49179-extern atomic_t fscache_n_op_pend;
49180-extern atomic_t fscache_n_op_run;
49181-extern atomic_t fscache_n_op_enqueue;
49182-extern atomic_t fscache_n_op_deferred_release;
49183-extern atomic_t fscache_n_op_release;
49184-extern atomic_t fscache_n_op_gc;
49185-extern atomic_t fscache_n_op_cancelled;
49186-extern atomic_t fscache_n_op_rejected;
49187+extern atomic_unchecked_t fscache_n_op_pend;
49188+extern atomic_unchecked_t fscache_n_op_run;
49189+extern atomic_unchecked_t fscache_n_op_enqueue;
49190+extern atomic_unchecked_t fscache_n_op_deferred_release;
49191+extern atomic_unchecked_t fscache_n_op_release;
49192+extern atomic_unchecked_t fscache_n_op_gc;
49193+extern atomic_unchecked_t fscache_n_op_cancelled;
49194+extern atomic_unchecked_t fscache_n_op_rejected;
49195
49196-extern atomic_t fscache_n_attr_changed;
49197-extern atomic_t fscache_n_attr_changed_ok;
49198-extern atomic_t fscache_n_attr_changed_nobufs;
49199-extern atomic_t fscache_n_attr_changed_nomem;
49200-extern atomic_t fscache_n_attr_changed_calls;
49201+extern atomic_unchecked_t fscache_n_attr_changed;
49202+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49203+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49204+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49205+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49206
49207-extern atomic_t fscache_n_allocs;
49208-extern atomic_t fscache_n_allocs_ok;
49209-extern atomic_t fscache_n_allocs_wait;
49210-extern atomic_t fscache_n_allocs_nobufs;
49211-extern atomic_t fscache_n_allocs_intr;
49212-extern atomic_t fscache_n_allocs_object_dead;
49213-extern atomic_t fscache_n_alloc_ops;
49214-extern atomic_t fscache_n_alloc_op_waits;
49215+extern atomic_unchecked_t fscache_n_allocs;
49216+extern atomic_unchecked_t fscache_n_allocs_ok;
49217+extern atomic_unchecked_t fscache_n_allocs_wait;
49218+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49219+extern atomic_unchecked_t fscache_n_allocs_intr;
49220+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49221+extern atomic_unchecked_t fscache_n_alloc_ops;
49222+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49223
49224-extern atomic_t fscache_n_retrievals;
49225-extern atomic_t fscache_n_retrievals_ok;
49226-extern atomic_t fscache_n_retrievals_wait;
49227-extern atomic_t fscache_n_retrievals_nodata;
49228-extern atomic_t fscache_n_retrievals_nobufs;
49229-extern atomic_t fscache_n_retrievals_intr;
49230-extern atomic_t fscache_n_retrievals_nomem;
49231-extern atomic_t fscache_n_retrievals_object_dead;
49232-extern atomic_t fscache_n_retrieval_ops;
49233-extern atomic_t fscache_n_retrieval_op_waits;
49234+extern atomic_unchecked_t fscache_n_retrievals;
49235+extern atomic_unchecked_t fscache_n_retrievals_ok;
49236+extern atomic_unchecked_t fscache_n_retrievals_wait;
49237+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49238+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49239+extern atomic_unchecked_t fscache_n_retrievals_intr;
49240+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49241+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49242+extern atomic_unchecked_t fscache_n_retrieval_ops;
49243+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49244
49245-extern atomic_t fscache_n_stores;
49246-extern atomic_t fscache_n_stores_ok;
49247-extern atomic_t fscache_n_stores_again;
49248-extern atomic_t fscache_n_stores_nobufs;
49249-extern atomic_t fscache_n_stores_oom;
49250-extern atomic_t fscache_n_store_ops;
49251-extern atomic_t fscache_n_store_calls;
49252-extern atomic_t fscache_n_store_pages;
49253-extern atomic_t fscache_n_store_radix_deletes;
49254-extern atomic_t fscache_n_store_pages_over_limit;
49255+extern atomic_unchecked_t fscache_n_stores;
49256+extern atomic_unchecked_t fscache_n_stores_ok;
49257+extern atomic_unchecked_t fscache_n_stores_again;
49258+extern atomic_unchecked_t fscache_n_stores_nobufs;
49259+extern atomic_unchecked_t fscache_n_stores_oom;
49260+extern atomic_unchecked_t fscache_n_store_ops;
49261+extern atomic_unchecked_t fscache_n_store_calls;
49262+extern atomic_unchecked_t fscache_n_store_pages;
49263+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49264+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49265
49266-extern atomic_t fscache_n_store_vmscan_not_storing;
49267-extern atomic_t fscache_n_store_vmscan_gone;
49268-extern atomic_t fscache_n_store_vmscan_busy;
49269-extern atomic_t fscache_n_store_vmscan_cancelled;
49270+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49271+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49272+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49273+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49274
49275-extern atomic_t fscache_n_marks;
49276-extern atomic_t fscache_n_uncaches;
49277+extern atomic_unchecked_t fscache_n_marks;
49278+extern atomic_unchecked_t fscache_n_uncaches;
49279
49280-extern atomic_t fscache_n_acquires;
49281-extern atomic_t fscache_n_acquires_null;
49282-extern atomic_t fscache_n_acquires_no_cache;
49283-extern atomic_t fscache_n_acquires_ok;
49284-extern atomic_t fscache_n_acquires_nobufs;
49285-extern atomic_t fscache_n_acquires_oom;
49286+extern atomic_unchecked_t fscache_n_acquires;
49287+extern atomic_unchecked_t fscache_n_acquires_null;
49288+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49289+extern atomic_unchecked_t fscache_n_acquires_ok;
49290+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49291+extern atomic_unchecked_t fscache_n_acquires_oom;
49292
49293-extern atomic_t fscache_n_updates;
49294-extern atomic_t fscache_n_updates_null;
49295-extern atomic_t fscache_n_updates_run;
49296+extern atomic_unchecked_t fscache_n_updates;
49297+extern atomic_unchecked_t fscache_n_updates_null;
49298+extern atomic_unchecked_t fscache_n_updates_run;
49299
49300-extern atomic_t fscache_n_relinquishes;
49301-extern atomic_t fscache_n_relinquishes_null;
49302-extern atomic_t fscache_n_relinquishes_waitcrt;
49303-extern atomic_t fscache_n_relinquishes_retire;
49304+extern atomic_unchecked_t fscache_n_relinquishes;
49305+extern atomic_unchecked_t fscache_n_relinquishes_null;
49306+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49307+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49308
49309-extern atomic_t fscache_n_cookie_index;
49310-extern atomic_t fscache_n_cookie_data;
49311-extern atomic_t fscache_n_cookie_special;
49312+extern atomic_unchecked_t fscache_n_cookie_index;
49313+extern atomic_unchecked_t fscache_n_cookie_data;
49314+extern atomic_unchecked_t fscache_n_cookie_special;
49315
49316-extern atomic_t fscache_n_object_alloc;
49317-extern atomic_t fscache_n_object_no_alloc;
49318-extern atomic_t fscache_n_object_lookups;
49319-extern atomic_t fscache_n_object_lookups_negative;
49320-extern atomic_t fscache_n_object_lookups_positive;
49321-extern atomic_t fscache_n_object_lookups_timed_out;
49322-extern atomic_t fscache_n_object_created;
49323-extern atomic_t fscache_n_object_avail;
49324-extern atomic_t fscache_n_object_dead;
49325+extern atomic_unchecked_t fscache_n_object_alloc;
49326+extern atomic_unchecked_t fscache_n_object_no_alloc;
49327+extern atomic_unchecked_t fscache_n_object_lookups;
49328+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49329+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49330+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49331+extern atomic_unchecked_t fscache_n_object_created;
49332+extern atomic_unchecked_t fscache_n_object_avail;
49333+extern atomic_unchecked_t fscache_n_object_dead;
49334
49335-extern atomic_t fscache_n_checkaux_none;
49336-extern atomic_t fscache_n_checkaux_okay;
49337-extern atomic_t fscache_n_checkaux_update;
49338-extern atomic_t fscache_n_checkaux_obsolete;
49339+extern atomic_unchecked_t fscache_n_checkaux_none;
49340+extern atomic_unchecked_t fscache_n_checkaux_okay;
49341+extern atomic_unchecked_t fscache_n_checkaux_update;
49342+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49343
49344 extern atomic_t fscache_n_cop_alloc_object;
49345 extern atomic_t fscache_n_cop_lookup_object;
49346@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49347 atomic_inc(stat);
49348 }
49349
49350+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49351+{
49352+ atomic_inc_unchecked(stat);
49353+}
49354+
49355 static inline void fscache_stat_d(atomic_t *stat)
49356 {
49357 atomic_dec(stat);
49358@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49359
49360 #define __fscache_stat(stat) (NULL)
49361 #define fscache_stat(stat) do {} while (0)
49362+#define fscache_stat_unchecked(stat) do {} while (0)
49363 #define fscache_stat_d(stat) do {} while (0)
49364 #endif
49365
49366diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49367index e513ac5..e888d34 100644
49368--- a/fs/fscache/object.c
49369+++ b/fs/fscache/object.c
49370@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49371 /* update the object metadata on disk */
49372 case FSCACHE_OBJECT_UPDATING:
49373 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49374- fscache_stat(&fscache_n_updates_run);
49375+ fscache_stat_unchecked(&fscache_n_updates_run);
49376 fscache_stat(&fscache_n_cop_update_object);
49377 object->cache->ops->update_object(object);
49378 fscache_stat_d(&fscache_n_cop_update_object);
49379@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49380 spin_lock(&object->lock);
49381 object->state = FSCACHE_OBJECT_DEAD;
49382 spin_unlock(&object->lock);
49383- fscache_stat(&fscache_n_object_dead);
49384+ fscache_stat_unchecked(&fscache_n_object_dead);
49385 goto terminal_transit;
49386
49387 /* handle the parent cache of this object being withdrawn from
49388@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49389 spin_lock(&object->lock);
49390 object->state = FSCACHE_OBJECT_DEAD;
49391 spin_unlock(&object->lock);
49392- fscache_stat(&fscache_n_object_dead);
49393+ fscache_stat_unchecked(&fscache_n_object_dead);
49394 goto terminal_transit;
49395
49396 /* complain about the object being woken up once it is
49397@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49398 parent->cookie->def->name, cookie->def->name,
49399 object->cache->tag->name);
49400
49401- fscache_stat(&fscache_n_object_lookups);
49402+ fscache_stat_unchecked(&fscache_n_object_lookups);
49403 fscache_stat(&fscache_n_cop_lookup_object);
49404 ret = object->cache->ops->lookup_object(object);
49405 fscache_stat_d(&fscache_n_cop_lookup_object);
49406@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49407 if (ret == -ETIMEDOUT) {
49408 /* probably stuck behind another object, so move this one to
49409 * the back of the queue */
49410- fscache_stat(&fscache_n_object_lookups_timed_out);
49411+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49412 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49413 }
49414
49415@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49416
49417 spin_lock(&object->lock);
49418 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49419- fscache_stat(&fscache_n_object_lookups_negative);
49420+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49421
49422 /* transit here to allow write requests to begin stacking up
49423 * and read requests to begin returning ENODATA */
49424@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49425 * result, in which case there may be data available */
49426 spin_lock(&object->lock);
49427 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49428- fscache_stat(&fscache_n_object_lookups_positive);
49429+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49430
49431 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49432
49433@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49434 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49435 } else {
49436 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49437- fscache_stat(&fscache_n_object_created);
49438+ fscache_stat_unchecked(&fscache_n_object_created);
49439
49440 object->state = FSCACHE_OBJECT_AVAILABLE;
49441 spin_unlock(&object->lock);
49442@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49443 fscache_enqueue_dependents(object);
49444
49445 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49446- fscache_stat(&fscache_n_object_avail);
49447+ fscache_stat_unchecked(&fscache_n_object_avail);
49448
49449 _leave("");
49450 }
49451@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49452 enum fscache_checkaux result;
49453
49454 if (!object->cookie->def->check_aux) {
49455- fscache_stat(&fscache_n_checkaux_none);
49456+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49457 return FSCACHE_CHECKAUX_OKAY;
49458 }
49459
49460@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49461 switch (result) {
49462 /* entry okay as is */
49463 case FSCACHE_CHECKAUX_OKAY:
49464- fscache_stat(&fscache_n_checkaux_okay);
49465+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49466 break;
49467
49468 /* entry requires update */
49469 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49470- fscache_stat(&fscache_n_checkaux_update);
49471+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49472 break;
49473
49474 /* entry requires deletion */
49475 case FSCACHE_CHECKAUX_OBSOLETE:
49476- fscache_stat(&fscache_n_checkaux_obsolete);
49477+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49478 break;
49479
49480 default:
49481diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49482index 313e79a..775240f 100644
49483--- a/fs/fscache/operation.c
49484+++ b/fs/fscache/operation.c
49485@@ -16,7 +16,7 @@
49486 #include <linux/seq_file.h>
49487 #include "internal.h"
49488
49489-atomic_t fscache_op_debug_id;
49490+atomic_unchecked_t fscache_op_debug_id;
49491 EXPORT_SYMBOL(fscache_op_debug_id);
49492
49493 /**
49494@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49495 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49496 ASSERTCMP(atomic_read(&op->usage), >, 0);
49497
49498- fscache_stat(&fscache_n_op_enqueue);
49499+ fscache_stat_unchecked(&fscache_n_op_enqueue);
49500 switch (op->flags & FSCACHE_OP_TYPE) {
49501 case FSCACHE_OP_FAST:
49502 _debug("queue fast");
49503@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49504 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49505 if (op->processor)
49506 fscache_enqueue_operation(op);
49507- fscache_stat(&fscache_n_op_run);
49508+ fscache_stat_unchecked(&fscache_n_op_run);
49509 }
49510
49511 /*
49512@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49513 if (object->n_ops > 0) {
49514 atomic_inc(&op->usage);
49515 list_add_tail(&op->pend_link, &object->pending_ops);
49516- fscache_stat(&fscache_n_op_pend);
49517+ fscache_stat_unchecked(&fscache_n_op_pend);
49518 } else if (!list_empty(&object->pending_ops)) {
49519 atomic_inc(&op->usage);
49520 list_add_tail(&op->pend_link, &object->pending_ops);
49521- fscache_stat(&fscache_n_op_pend);
49522+ fscache_stat_unchecked(&fscache_n_op_pend);
49523 fscache_start_operations(object);
49524 } else {
49525 ASSERTCMP(object->n_in_progress, ==, 0);
49526@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49527 object->n_exclusive++; /* reads and writes must wait */
49528 atomic_inc(&op->usage);
49529 list_add_tail(&op->pend_link, &object->pending_ops);
49530- fscache_stat(&fscache_n_op_pend);
49531+ fscache_stat_unchecked(&fscache_n_op_pend);
49532 ret = 0;
49533 } else {
49534 /* not allowed to submit ops in any other state */
49535@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49536 if (object->n_exclusive > 0) {
49537 atomic_inc(&op->usage);
49538 list_add_tail(&op->pend_link, &object->pending_ops);
49539- fscache_stat(&fscache_n_op_pend);
49540+ fscache_stat_unchecked(&fscache_n_op_pend);
49541 } else if (!list_empty(&object->pending_ops)) {
49542 atomic_inc(&op->usage);
49543 list_add_tail(&op->pend_link, &object->pending_ops);
49544- fscache_stat(&fscache_n_op_pend);
49545+ fscache_stat_unchecked(&fscache_n_op_pend);
49546 fscache_start_operations(object);
49547 } else {
49548 ASSERTCMP(object->n_exclusive, ==, 0);
49549@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49550 object->n_ops++;
49551 atomic_inc(&op->usage);
49552 list_add_tail(&op->pend_link, &object->pending_ops);
49553- fscache_stat(&fscache_n_op_pend);
49554+ fscache_stat_unchecked(&fscache_n_op_pend);
49555 ret = 0;
49556 } else if (object->state == FSCACHE_OBJECT_DYING ||
49557 object->state == FSCACHE_OBJECT_LC_DYING ||
49558 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49559- fscache_stat(&fscache_n_op_rejected);
49560+ fscache_stat_unchecked(&fscache_n_op_rejected);
49561 ret = -ENOBUFS;
49562 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49563 fscache_report_unexpected_submission(object, op, ostate);
49564@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
49565
49566 ret = -EBUSY;
49567 if (!list_empty(&op->pend_link)) {
49568- fscache_stat(&fscache_n_op_cancelled);
49569+ fscache_stat_unchecked(&fscache_n_op_cancelled);
49570 list_del_init(&op->pend_link);
49571 object->n_ops--;
49572 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
49573@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
49574 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
49575 BUG();
49576
49577- fscache_stat(&fscache_n_op_release);
49578+ fscache_stat_unchecked(&fscache_n_op_release);
49579
49580 if (op->release) {
49581 op->release(op);
49582@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
49583 * lock, and defer it otherwise */
49584 if (!spin_trylock(&object->lock)) {
49585 _debug("defer put");
49586- fscache_stat(&fscache_n_op_deferred_release);
49587+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
49588
49589 cache = object->cache;
49590 spin_lock(&cache->op_gc_list_lock);
49591@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
49592
49593 _debug("GC DEFERRED REL OBJ%x OP%x",
49594 object->debug_id, op->debug_id);
49595- fscache_stat(&fscache_n_op_gc);
49596+ fscache_stat_unchecked(&fscache_n_op_gc);
49597
49598 ASSERTCMP(atomic_read(&op->usage), ==, 0);
49599
49600diff --git a/fs/fscache/page.c b/fs/fscache/page.c
49601index c598ea4..6aac13e 100644
49602--- a/fs/fscache/page.c
49603+++ b/fs/fscache/page.c
49604@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49605 val = radix_tree_lookup(&cookie->stores, page->index);
49606 if (!val) {
49607 rcu_read_unlock();
49608- fscache_stat(&fscache_n_store_vmscan_not_storing);
49609+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
49610 __fscache_uncache_page(cookie, page);
49611 return true;
49612 }
49613@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49614 spin_unlock(&cookie->stores_lock);
49615
49616 if (xpage) {
49617- fscache_stat(&fscache_n_store_vmscan_cancelled);
49618- fscache_stat(&fscache_n_store_radix_deletes);
49619+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
49620+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49621 ASSERTCMP(xpage, ==, page);
49622 } else {
49623- fscache_stat(&fscache_n_store_vmscan_gone);
49624+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
49625 }
49626
49627 wake_up_bit(&cookie->flags, 0);
49628@@ -106,7 +106,7 @@ page_busy:
49629 /* we might want to wait here, but that could deadlock the allocator as
49630 * the slow-work threads writing to the cache may all end up sleeping
49631 * on memory allocation */
49632- fscache_stat(&fscache_n_store_vmscan_busy);
49633+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
49634 return false;
49635 }
49636 EXPORT_SYMBOL(__fscache_maybe_release_page);
49637@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
49638 FSCACHE_COOKIE_STORING_TAG);
49639 if (!radix_tree_tag_get(&cookie->stores, page->index,
49640 FSCACHE_COOKIE_PENDING_TAG)) {
49641- fscache_stat(&fscache_n_store_radix_deletes);
49642+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49643 xpage = radix_tree_delete(&cookie->stores, page->index);
49644 }
49645 spin_unlock(&cookie->stores_lock);
49646@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
49647
49648 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
49649
49650- fscache_stat(&fscache_n_attr_changed_calls);
49651+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
49652
49653 if (fscache_object_is_active(object)) {
49654 fscache_set_op_state(op, "CallFS");
49655@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49656
49657 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49658
49659- fscache_stat(&fscache_n_attr_changed);
49660+ fscache_stat_unchecked(&fscache_n_attr_changed);
49661
49662 op = kzalloc(sizeof(*op), GFP_KERNEL);
49663 if (!op) {
49664- fscache_stat(&fscache_n_attr_changed_nomem);
49665+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
49666 _leave(" = -ENOMEM");
49667 return -ENOMEM;
49668 }
49669@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49670 if (fscache_submit_exclusive_op(object, op) < 0)
49671 goto nobufs;
49672 spin_unlock(&cookie->lock);
49673- fscache_stat(&fscache_n_attr_changed_ok);
49674+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
49675 fscache_put_operation(op);
49676 _leave(" = 0");
49677 return 0;
49678@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49679 nobufs:
49680 spin_unlock(&cookie->lock);
49681 kfree(op);
49682- fscache_stat(&fscache_n_attr_changed_nobufs);
49683+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
49684 _leave(" = %d", -ENOBUFS);
49685 return -ENOBUFS;
49686 }
49687@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
49688 /* allocate a retrieval operation and attempt to submit it */
49689 op = kzalloc(sizeof(*op), GFP_NOIO);
49690 if (!op) {
49691- fscache_stat(&fscache_n_retrievals_nomem);
49692+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49693 return NULL;
49694 }
49695
49696@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49697 return 0;
49698 }
49699
49700- fscache_stat(&fscache_n_retrievals_wait);
49701+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
49702
49703 jif = jiffies;
49704 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
49705 fscache_wait_bit_interruptible,
49706 TASK_INTERRUPTIBLE) != 0) {
49707- fscache_stat(&fscache_n_retrievals_intr);
49708+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49709 _leave(" = -ERESTARTSYS");
49710 return -ERESTARTSYS;
49711 }
49712@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49713 */
49714 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49715 struct fscache_retrieval *op,
49716- atomic_t *stat_op_waits,
49717- atomic_t *stat_object_dead)
49718+ atomic_unchecked_t *stat_op_waits,
49719+ atomic_unchecked_t *stat_object_dead)
49720 {
49721 int ret;
49722
49723@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49724 goto check_if_dead;
49725
49726 _debug(">>> WT");
49727- fscache_stat(stat_op_waits);
49728+ fscache_stat_unchecked(stat_op_waits);
49729 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
49730 fscache_wait_bit_interruptible,
49731 TASK_INTERRUPTIBLE) < 0) {
49732@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49733
49734 check_if_dead:
49735 if (unlikely(fscache_object_is_dead(object))) {
49736- fscache_stat(stat_object_dead);
49737+ fscache_stat_unchecked(stat_object_dead);
49738 return -ENOBUFS;
49739 }
49740 return 0;
49741@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49742
49743 _enter("%p,%p,,,", cookie, page);
49744
49745- fscache_stat(&fscache_n_retrievals);
49746+ fscache_stat_unchecked(&fscache_n_retrievals);
49747
49748 if (hlist_empty(&cookie->backing_objects))
49749 goto nobufs;
49750@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49751 goto nobufs_unlock;
49752 spin_unlock(&cookie->lock);
49753
49754- fscache_stat(&fscache_n_retrieval_ops);
49755+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49756
49757 /* pin the netfs read context in case we need to do the actual netfs
49758 * read because we've encountered a cache read failure */
49759@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49760
49761 error:
49762 if (ret == -ENOMEM)
49763- fscache_stat(&fscache_n_retrievals_nomem);
49764+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49765 else if (ret == -ERESTARTSYS)
49766- fscache_stat(&fscache_n_retrievals_intr);
49767+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49768 else if (ret == -ENODATA)
49769- fscache_stat(&fscache_n_retrievals_nodata);
49770+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49771 else if (ret < 0)
49772- fscache_stat(&fscache_n_retrievals_nobufs);
49773+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49774 else
49775- fscache_stat(&fscache_n_retrievals_ok);
49776+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49777
49778 fscache_put_retrieval(op);
49779 _leave(" = %d", ret);
49780@@ -453,7 +453,7 @@ nobufs_unlock:
49781 spin_unlock(&cookie->lock);
49782 kfree(op);
49783 nobufs:
49784- fscache_stat(&fscache_n_retrievals_nobufs);
49785+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49786 _leave(" = -ENOBUFS");
49787 return -ENOBUFS;
49788 }
49789@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49790
49791 _enter("%p,,%d,,,", cookie, *nr_pages);
49792
49793- fscache_stat(&fscache_n_retrievals);
49794+ fscache_stat_unchecked(&fscache_n_retrievals);
49795
49796 if (hlist_empty(&cookie->backing_objects))
49797 goto nobufs;
49798@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49799 goto nobufs_unlock;
49800 spin_unlock(&cookie->lock);
49801
49802- fscache_stat(&fscache_n_retrieval_ops);
49803+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49804
49805 /* pin the netfs read context in case we need to do the actual netfs
49806 * read because we've encountered a cache read failure */
49807@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49808
49809 error:
49810 if (ret == -ENOMEM)
49811- fscache_stat(&fscache_n_retrievals_nomem);
49812+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49813 else if (ret == -ERESTARTSYS)
49814- fscache_stat(&fscache_n_retrievals_intr);
49815+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49816 else if (ret == -ENODATA)
49817- fscache_stat(&fscache_n_retrievals_nodata);
49818+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49819 else if (ret < 0)
49820- fscache_stat(&fscache_n_retrievals_nobufs);
49821+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49822 else
49823- fscache_stat(&fscache_n_retrievals_ok);
49824+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49825
49826 fscache_put_retrieval(op);
49827 _leave(" = %d", ret);
49828@@ -570,7 +570,7 @@ nobufs_unlock:
49829 spin_unlock(&cookie->lock);
49830 kfree(op);
49831 nobufs:
49832- fscache_stat(&fscache_n_retrievals_nobufs);
49833+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49834 _leave(" = -ENOBUFS");
49835 return -ENOBUFS;
49836 }
49837@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49838
49839 _enter("%p,%p,,,", cookie, page);
49840
49841- fscache_stat(&fscache_n_allocs);
49842+ fscache_stat_unchecked(&fscache_n_allocs);
49843
49844 if (hlist_empty(&cookie->backing_objects))
49845 goto nobufs;
49846@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49847 goto nobufs_unlock;
49848 spin_unlock(&cookie->lock);
49849
49850- fscache_stat(&fscache_n_alloc_ops);
49851+ fscache_stat_unchecked(&fscache_n_alloc_ops);
49852
49853 ret = fscache_wait_for_retrieval_activation(
49854 object, op,
49855@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49856
49857 error:
49858 if (ret == -ERESTARTSYS)
49859- fscache_stat(&fscache_n_allocs_intr);
49860+ fscache_stat_unchecked(&fscache_n_allocs_intr);
49861 else if (ret < 0)
49862- fscache_stat(&fscache_n_allocs_nobufs);
49863+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49864 else
49865- fscache_stat(&fscache_n_allocs_ok);
49866+ fscache_stat_unchecked(&fscache_n_allocs_ok);
49867
49868 fscache_put_retrieval(op);
49869 _leave(" = %d", ret);
49870@@ -651,7 +651,7 @@ nobufs_unlock:
49871 spin_unlock(&cookie->lock);
49872 kfree(op);
49873 nobufs:
49874- fscache_stat(&fscache_n_allocs_nobufs);
49875+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49876 _leave(" = -ENOBUFS");
49877 return -ENOBUFS;
49878 }
49879@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49880
49881 spin_lock(&cookie->stores_lock);
49882
49883- fscache_stat(&fscache_n_store_calls);
49884+ fscache_stat_unchecked(&fscache_n_store_calls);
49885
49886 /* find a page to store */
49887 page = NULL;
49888@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49889 page = results[0];
49890 _debug("gang %d [%lx]", n, page->index);
49891 if (page->index > op->store_limit) {
49892- fscache_stat(&fscache_n_store_pages_over_limit);
49893+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
49894 goto superseded;
49895 }
49896
49897@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49898
49899 if (page) {
49900 fscache_set_op_state(&op->op, "Store");
49901- fscache_stat(&fscache_n_store_pages);
49902+ fscache_stat_unchecked(&fscache_n_store_pages);
49903 fscache_stat(&fscache_n_cop_write_page);
49904 ret = object->cache->ops->write_page(op, page);
49905 fscache_stat_d(&fscache_n_cop_write_page);
49906@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49907 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49908 ASSERT(PageFsCache(page));
49909
49910- fscache_stat(&fscache_n_stores);
49911+ fscache_stat_unchecked(&fscache_n_stores);
49912
49913 op = kzalloc(sizeof(*op), GFP_NOIO);
49914 if (!op)
49915@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49916 spin_unlock(&cookie->stores_lock);
49917 spin_unlock(&object->lock);
49918
49919- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
49920+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
49921 op->store_limit = object->store_limit;
49922
49923 if (fscache_submit_op(object, &op->op) < 0)
49924@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49925
49926 spin_unlock(&cookie->lock);
49927 radix_tree_preload_end();
49928- fscache_stat(&fscache_n_store_ops);
49929- fscache_stat(&fscache_n_stores_ok);
49930+ fscache_stat_unchecked(&fscache_n_store_ops);
49931+ fscache_stat_unchecked(&fscache_n_stores_ok);
49932
49933 /* the slow work queue now carries its own ref on the object */
49934 fscache_put_operation(&op->op);
49935@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49936 return 0;
49937
49938 already_queued:
49939- fscache_stat(&fscache_n_stores_again);
49940+ fscache_stat_unchecked(&fscache_n_stores_again);
49941 already_pending:
49942 spin_unlock(&cookie->stores_lock);
49943 spin_unlock(&object->lock);
49944 spin_unlock(&cookie->lock);
49945 radix_tree_preload_end();
49946 kfree(op);
49947- fscache_stat(&fscache_n_stores_ok);
49948+ fscache_stat_unchecked(&fscache_n_stores_ok);
49949 _leave(" = 0");
49950 return 0;
49951
49952@@ -886,14 +886,14 @@ nobufs:
49953 spin_unlock(&cookie->lock);
49954 radix_tree_preload_end();
49955 kfree(op);
49956- fscache_stat(&fscache_n_stores_nobufs);
49957+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
49958 _leave(" = -ENOBUFS");
49959 return -ENOBUFS;
49960
49961 nomem_free:
49962 kfree(op);
49963 nomem:
49964- fscache_stat(&fscache_n_stores_oom);
49965+ fscache_stat_unchecked(&fscache_n_stores_oom);
49966 _leave(" = -ENOMEM");
49967 return -ENOMEM;
49968 }
49969@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
49970 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49971 ASSERTCMP(page, !=, NULL);
49972
49973- fscache_stat(&fscache_n_uncaches);
49974+ fscache_stat_unchecked(&fscache_n_uncaches);
49975
49976 /* cache withdrawal may beat us to it */
49977 if (!PageFsCache(page))
49978@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
49979 unsigned long loop;
49980
49981 #ifdef CONFIG_FSCACHE_STATS
49982- atomic_add(pagevec->nr, &fscache_n_marks);
49983+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
49984 #endif
49985
49986 for (loop = 0; loop < pagevec->nr; loop++) {
49987diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
49988index 46435f3..8cddf18 100644
49989--- a/fs/fscache/stats.c
49990+++ b/fs/fscache/stats.c
49991@@ -18,95 +18,95 @@
49992 /*
49993 * operation counters
49994 */
49995-atomic_t fscache_n_op_pend;
49996-atomic_t fscache_n_op_run;
49997-atomic_t fscache_n_op_enqueue;
49998-atomic_t fscache_n_op_requeue;
49999-atomic_t fscache_n_op_deferred_release;
50000-atomic_t fscache_n_op_release;
50001-atomic_t fscache_n_op_gc;
50002-atomic_t fscache_n_op_cancelled;
50003-atomic_t fscache_n_op_rejected;
50004+atomic_unchecked_t fscache_n_op_pend;
50005+atomic_unchecked_t fscache_n_op_run;
50006+atomic_unchecked_t fscache_n_op_enqueue;
50007+atomic_unchecked_t fscache_n_op_requeue;
50008+atomic_unchecked_t fscache_n_op_deferred_release;
50009+atomic_unchecked_t fscache_n_op_release;
50010+atomic_unchecked_t fscache_n_op_gc;
50011+atomic_unchecked_t fscache_n_op_cancelled;
50012+atomic_unchecked_t fscache_n_op_rejected;
50013
50014-atomic_t fscache_n_attr_changed;
50015-atomic_t fscache_n_attr_changed_ok;
50016-atomic_t fscache_n_attr_changed_nobufs;
50017-atomic_t fscache_n_attr_changed_nomem;
50018-atomic_t fscache_n_attr_changed_calls;
50019+atomic_unchecked_t fscache_n_attr_changed;
50020+atomic_unchecked_t fscache_n_attr_changed_ok;
50021+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50022+atomic_unchecked_t fscache_n_attr_changed_nomem;
50023+atomic_unchecked_t fscache_n_attr_changed_calls;
50024
50025-atomic_t fscache_n_allocs;
50026-atomic_t fscache_n_allocs_ok;
50027-atomic_t fscache_n_allocs_wait;
50028-atomic_t fscache_n_allocs_nobufs;
50029-atomic_t fscache_n_allocs_intr;
50030-atomic_t fscache_n_allocs_object_dead;
50031-atomic_t fscache_n_alloc_ops;
50032-atomic_t fscache_n_alloc_op_waits;
50033+atomic_unchecked_t fscache_n_allocs;
50034+atomic_unchecked_t fscache_n_allocs_ok;
50035+atomic_unchecked_t fscache_n_allocs_wait;
50036+atomic_unchecked_t fscache_n_allocs_nobufs;
50037+atomic_unchecked_t fscache_n_allocs_intr;
50038+atomic_unchecked_t fscache_n_allocs_object_dead;
50039+atomic_unchecked_t fscache_n_alloc_ops;
50040+atomic_unchecked_t fscache_n_alloc_op_waits;
50041
50042-atomic_t fscache_n_retrievals;
50043-atomic_t fscache_n_retrievals_ok;
50044-atomic_t fscache_n_retrievals_wait;
50045-atomic_t fscache_n_retrievals_nodata;
50046-atomic_t fscache_n_retrievals_nobufs;
50047-atomic_t fscache_n_retrievals_intr;
50048-atomic_t fscache_n_retrievals_nomem;
50049-atomic_t fscache_n_retrievals_object_dead;
50050-atomic_t fscache_n_retrieval_ops;
50051-atomic_t fscache_n_retrieval_op_waits;
50052+atomic_unchecked_t fscache_n_retrievals;
50053+atomic_unchecked_t fscache_n_retrievals_ok;
50054+atomic_unchecked_t fscache_n_retrievals_wait;
50055+atomic_unchecked_t fscache_n_retrievals_nodata;
50056+atomic_unchecked_t fscache_n_retrievals_nobufs;
50057+atomic_unchecked_t fscache_n_retrievals_intr;
50058+atomic_unchecked_t fscache_n_retrievals_nomem;
50059+atomic_unchecked_t fscache_n_retrievals_object_dead;
50060+atomic_unchecked_t fscache_n_retrieval_ops;
50061+atomic_unchecked_t fscache_n_retrieval_op_waits;
50062
50063-atomic_t fscache_n_stores;
50064-atomic_t fscache_n_stores_ok;
50065-atomic_t fscache_n_stores_again;
50066-atomic_t fscache_n_stores_nobufs;
50067-atomic_t fscache_n_stores_oom;
50068-atomic_t fscache_n_store_ops;
50069-atomic_t fscache_n_store_calls;
50070-atomic_t fscache_n_store_pages;
50071-atomic_t fscache_n_store_radix_deletes;
50072-atomic_t fscache_n_store_pages_over_limit;
50073+atomic_unchecked_t fscache_n_stores;
50074+atomic_unchecked_t fscache_n_stores_ok;
50075+atomic_unchecked_t fscache_n_stores_again;
50076+atomic_unchecked_t fscache_n_stores_nobufs;
50077+atomic_unchecked_t fscache_n_stores_oom;
50078+atomic_unchecked_t fscache_n_store_ops;
50079+atomic_unchecked_t fscache_n_store_calls;
50080+atomic_unchecked_t fscache_n_store_pages;
50081+atomic_unchecked_t fscache_n_store_radix_deletes;
50082+atomic_unchecked_t fscache_n_store_pages_over_limit;
50083
50084-atomic_t fscache_n_store_vmscan_not_storing;
50085-atomic_t fscache_n_store_vmscan_gone;
50086-atomic_t fscache_n_store_vmscan_busy;
50087-atomic_t fscache_n_store_vmscan_cancelled;
50088+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50089+atomic_unchecked_t fscache_n_store_vmscan_gone;
50090+atomic_unchecked_t fscache_n_store_vmscan_busy;
50091+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50092
50093-atomic_t fscache_n_marks;
50094-atomic_t fscache_n_uncaches;
50095+atomic_unchecked_t fscache_n_marks;
50096+atomic_unchecked_t fscache_n_uncaches;
50097
50098-atomic_t fscache_n_acquires;
50099-atomic_t fscache_n_acquires_null;
50100-atomic_t fscache_n_acquires_no_cache;
50101-atomic_t fscache_n_acquires_ok;
50102-atomic_t fscache_n_acquires_nobufs;
50103-atomic_t fscache_n_acquires_oom;
50104+atomic_unchecked_t fscache_n_acquires;
50105+atomic_unchecked_t fscache_n_acquires_null;
50106+atomic_unchecked_t fscache_n_acquires_no_cache;
50107+atomic_unchecked_t fscache_n_acquires_ok;
50108+atomic_unchecked_t fscache_n_acquires_nobufs;
50109+atomic_unchecked_t fscache_n_acquires_oom;
50110
50111-atomic_t fscache_n_updates;
50112-atomic_t fscache_n_updates_null;
50113-atomic_t fscache_n_updates_run;
50114+atomic_unchecked_t fscache_n_updates;
50115+atomic_unchecked_t fscache_n_updates_null;
50116+atomic_unchecked_t fscache_n_updates_run;
50117
50118-atomic_t fscache_n_relinquishes;
50119-atomic_t fscache_n_relinquishes_null;
50120-atomic_t fscache_n_relinquishes_waitcrt;
50121-atomic_t fscache_n_relinquishes_retire;
50122+atomic_unchecked_t fscache_n_relinquishes;
50123+atomic_unchecked_t fscache_n_relinquishes_null;
50124+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50125+atomic_unchecked_t fscache_n_relinquishes_retire;
50126
50127-atomic_t fscache_n_cookie_index;
50128-atomic_t fscache_n_cookie_data;
50129-atomic_t fscache_n_cookie_special;
50130+atomic_unchecked_t fscache_n_cookie_index;
50131+atomic_unchecked_t fscache_n_cookie_data;
50132+atomic_unchecked_t fscache_n_cookie_special;
50133
50134-atomic_t fscache_n_object_alloc;
50135-atomic_t fscache_n_object_no_alloc;
50136-atomic_t fscache_n_object_lookups;
50137-atomic_t fscache_n_object_lookups_negative;
50138-atomic_t fscache_n_object_lookups_positive;
50139-atomic_t fscache_n_object_lookups_timed_out;
50140-atomic_t fscache_n_object_created;
50141-atomic_t fscache_n_object_avail;
50142-atomic_t fscache_n_object_dead;
50143+atomic_unchecked_t fscache_n_object_alloc;
50144+atomic_unchecked_t fscache_n_object_no_alloc;
50145+atomic_unchecked_t fscache_n_object_lookups;
50146+atomic_unchecked_t fscache_n_object_lookups_negative;
50147+atomic_unchecked_t fscache_n_object_lookups_positive;
50148+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50149+atomic_unchecked_t fscache_n_object_created;
50150+atomic_unchecked_t fscache_n_object_avail;
50151+atomic_unchecked_t fscache_n_object_dead;
50152
50153-atomic_t fscache_n_checkaux_none;
50154-atomic_t fscache_n_checkaux_okay;
50155-atomic_t fscache_n_checkaux_update;
50156-atomic_t fscache_n_checkaux_obsolete;
50157+atomic_unchecked_t fscache_n_checkaux_none;
50158+atomic_unchecked_t fscache_n_checkaux_okay;
50159+atomic_unchecked_t fscache_n_checkaux_update;
50160+atomic_unchecked_t fscache_n_checkaux_obsolete;
50161
50162 atomic_t fscache_n_cop_alloc_object;
50163 atomic_t fscache_n_cop_lookup_object;
50164@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50165 seq_puts(m, "FS-Cache statistics\n");
50166
50167 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50168- atomic_read(&fscache_n_cookie_index),
50169- atomic_read(&fscache_n_cookie_data),
50170- atomic_read(&fscache_n_cookie_special));
50171+ atomic_read_unchecked(&fscache_n_cookie_index),
50172+ atomic_read_unchecked(&fscache_n_cookie_data),
50173+ atomic_read_unchecked(&fscache_n_cookie_special));
50174
50175 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50176- atomic_read(&fscache_n_object_alloc),
50177- atomic_read(&fscache_n_object_no_alloc),
50178- atomic_read(&fscache_n_object_avail),
50179- atomic_read(&fscache_n_object_dead));
50180+ atomic_read_unchecked(&fscache_n_object_alloc),
50181+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50182+ atomic_read_unchecked(&fscache_n_object_avail),
50183+ atomic_read_unchecked(&fscache_n_object_dead));
50184 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50185- atomic_read(&fscache_n_checkaux_none),
50186- atomic_read(&fscache_n_checkaux_okay),
50187- atomic_read(&fscache_n_checkaux_update),
50188- atomic_read(&fscache_n_checkaux_obsolete));
50189+ atomic_read_unchecked(&fscache_n_checkaux_none),
50190+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50191+ atomic_read_unchecked(&fscache_n_checkaux_update),
50192+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50193
50194 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50195- atomic_read(&fscache_n_marks),
50196- atomic_read(&fscache_n_uncaches));
50197+ atomic_read_unchecked(&fscache_n_marks),
50198+ atomic_read_unchecked(&fscache_n_uncaches));
50199
50200 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50201 " oom=%u\n",
50202- atomic_read(&fscache_n_acquires),
50203- atomic_read(&fscache_n_acquires_null),
50204- atomic_read(&fscache_n_acquires_no_cache),
50205- atomic_read(&fscache_n_acquires_ok),
50206- atomic_read(&fscache_n_acquires_nobufs),
50207- atomic_read(&fscache_n_acquires_oom));
50208+ atomic_read_unchecked(&fscache_n_acquires),
50209+ atomic_read_unchecked(&fscache_n_acquires_null),
50210+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50211+ atomic_read_unchecked(&fscache_n_acquires_ok),
50212+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50213+ atomic_read_unchecked(&fscache_n_acquires_oom));
50214
50215 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50216- atomic_read(&fscache_n_object_lookups),
50217- atomic_read(&fscache_n_object_lookups_negative),
50218- atomic_read(&fscache_n_object_lookups_positive),
50219- atomic_read(&fscache_n_object_lookups_timed_out),
50220- atomic_read(&fscache_n_object_created));
50221+ atomic_read_unchecked(&fscache_n_object_lookups),
50222+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50223+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50224+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50225+ atomic_read_unchecked(&fscache_n_object_created));
50226
50227 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50228- atomic_read(&fscache_n_updates),
50229- atomic_read(&fscache_n_updates_null),
50230- atomic_read(&fscache_n_updates_run));
50231+ atomic_read_unchecked(&fscache_n_updates),
50232+ atomic_read_unchecked(&fscache_n_updates_null),
50233+ atomic_read_unchecked(&fscache_n_updates_run));
50234
50235 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50236- atomic_read(&fscache_n_relinquishes),
50237- atomic_read(&fscache_n_relinquishes_null),
50238- atomic_read(&fscache_n_relinquishes_waitcrt),
50239- atomic_read(&fscache_n_relinquishes_retire));
50240+ atomic_read_unchecked(&fscache_n_relinquishes),
50241+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50242+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50243+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50244
50245 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50246- atomic_read(&fscache_n_attr_changed),
50247- atomic_read(&fscache_n_attr_changed_ok),
50248- atomic_read(&fscache_n_attr_changed_nobufs),
50249- atomic_read(&fscache_n_attr_changed_nomem),
50250- atomic_read(&fscache_n_attr_changed_calls));
50251+ atomic_read_unchecked(&fscache_n_attr_changed),
50252+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50253+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50254+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50255+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50256
50257 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50258- atomic_read(&fscache_n_allocs),
50259- atomic_read(&fscache_n_allocs_ok),
50260- atomic_read(&fscache_n_allocs_wait),
50261- atomic_read(&fscache_n_allocs_nobufs),
50262- atomic_read(&fscache_n_allocs_intr));
50263+ atomic_read_unchecked(&fscache_n_allocs),
50264+ atomic_read_unchecked(&fscache_n_allocs_ok),
50265+ atomic_read_unchecked(&fscache_n_allocs_wait),
50266+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50267+ atomic_read_unchecked(&fscache_n_allocs_intr));
50268 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50269- atomic_read(&fscache_n_alloc_ops),
50270- atomic_read(&fscache_n_alloc_op_waits),
50271- atomic_read(&fscache_n_allocs_object_dead));
50272+ atomic_read_unchecked(&fscache_n_alloc_ops),
50273+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50274+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50275
50276 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50277 " int=%u oom=%u\n",
50278- atomic_read(&fscache_n_retrievals),
50279- atomic_read(&fscache_n_retrievals_ok),
50280- atomic_read(&fscache_n_retrievals_wait),
50281- atomic_read(&fscache_n_retrievals_nodata),
50282- atomic_read(&fscache_n_retrievals_nobufs),
50283- atomic_read(&fscache_n_retrievals_intr),
50284- atomic_read(&fscache_n_retrievals_nomem));
50285+ atomic_read_unchecked(&fscache_n_retrievals),
50286+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50287+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50288+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50289+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50290+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50291+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50292 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50293- atomic_read(&fscache_n_retrieval_ops),
50294- atomic_read(&fscache_n_retrieval_op_waits),
50295- atomic_read(&fscache_n_retrievals_object_dead));
50296+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50297+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50298+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50299
50300 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50301- atomic_read(&fscache_n_stores),
50302- atomic_read(&fscache_n_stores_ok),
50303- atomic_read(&fscache_n_stores_again),
50304- atomic_read(&fscache_n_stores_nobufs),
50305- atomic_read(&fscache_n_stores_oom));
50306+ atomic_read_unchecked(&fscache_n_stores),
50307+ atomic_read_unchecked(&fscache_n_stores_ok),
50308+ atomic_read_unchecked(&fscache_n_stores_again),
50309+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50310+ atomic_read_unchecked(&fscache_n_stores_oom));
50311 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50312- atomic_read(&fscache_n_store_ops),
50313- atomic_read(&fscache_n_store_calls),
50314- atomic_read(&fscache_n_store_pages),
50315- atomic_read(&fscache_n_store_radix_deletes),
50316- atomic_read(&fscache_n_store_pages_over_limit));
50317+ atomic_read_unchecked(&fscache_n_store_ops),
50318+ atomic_read_unchecked(&fscache_n_store_calls),
50319+ atomic_read_unchecked(&fscache_n_store_pages),
50320+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50321+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50322
50323 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50324- atomic_read(&fscache_n_store_vmscan_not_storing),
50325- atomic_read(&fscache_n_store_vmscan_gone),
50326- atomic_read(&fscache_n_store_vmscan_busy),
50327- atomic_read(&fscache_n_store_vmscan_cancelled));
50328+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50329+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50330+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50331+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50332
50333 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50334- atomic_read(&fscache_n_op_pend),
50335- atomic_read(&fscache_n_op_run),
50336- atomic_read(&fscache_n_op_enqueue),
50337- atomic_read(&fscache_n_op_cancelled),
50338- atomic_read(&fscache_n_op_rejected));
50339+ atomic_read_unchecked(&fscache_n_op_pend),
50340+ atomic_read_unchecked(&fscache_n_op_run),
50341+ atomic_read_unchecked(&fscache_n_op_enqueue),
50342+ atomic_read_unchecked(&fscache_n_op_cancelled),
50343+ atomic_read_unchecked(&fscache_n_op_rejected));
50344 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50345- atomic_read(&fscache_n_op_deferred_release),
50346- atomic_read(&fscache_n_op_release),
50347- atomic_read(&fscache_n_op_gc));
50348+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50349+ atomic_read_unchecked(&fscache_n_op_release),
50350+ atomic_read_unchecked(&fscache_n_op_gc));
50351
50352 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50353 atomic_read(&fscache_n_cop_alloc_object),
50354diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50355index de792dc..448b532 100644
50356--- a/fs/fuse/cuse.c
50357+++ b/fs/fuse/cuse.c
50358@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50359 INIT_LIST_HEAD(&cuse_conntbl[i]);
50360
50361 /* inherit and extend fuse_dev_operations */
50362- cuse_channel_fops = fuse_dev_operations;
50363- cuse_channel_fops.owner = THIS_MODULE;
50364- cuse_channel_fops.open = cuse_channel_open;
50365- cuse_channel_fops.release = cuse_channel_release;
50366+ pax_open_kernel();
50367+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50368+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50369+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50370+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50371+ pax_close_kernel();
50372
50373 cuse_class = class_create(THIS_MODULE, "cuse");
50374 if (IS_ERR(cuse_class))
50375diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50376index 1facb39..7f48557 100644
50377--- a/fs/fuse/dev.c
50378+++ b/fs/fuse/dev.c
50379@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50380 {
50381 struct fuse_notify_inval_entry_out outarg;
50382 int err = -EINVAL;
50383- char buf[FUSE_NAME_MAX+1];
50384+ char *buf = NULL;
50385 struct qstr name;
50386
50387 if (size < sizeof(outarg))
50388@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50389 if (outarg.namelen > FUSE_NAME_MAX)
50390 goto err;
50391
50392+ err = -ENOMEM;
50393+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50394+ if (!buf)
50395+ goto err;
50396+
50397 err = -EINVAL;
50398 if (size != sizeof(outarg) + outarg.namelen + 1)
50399 goto err;
50400@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50401
50402 down_read(&fc->killsb);
50403 err = -ENOENT;
50404- if (!fc->sb)
50405- goto err_unlock;
50406-
50407- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50408-
50409-err_unlock:
50410+ if (fc->sb)
50411+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50412 up_read(&fc->killsb);
50413+ kfree(buf);
50414 return err;
50415
50416 err:
50417 fuse_copy_finish(cs);
50418+ kfree(buf);
50419 return err;
50420 }
50421
50422diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50423index 4787ae6..73efff7 100644
50424--- a/fs/fuse/dir.c
50425+++ b/fs/fuse/dir.c
50426@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50427 return link;
50428 }
50429
50430-static void free_link(char *link)
50431+static void free_link(const char *link)
50432 {
50433 if (!IS_ERR(link))
50434 free_page((unsigned long) link);
50435diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50436index 247436c..e650ccb 100644
50437--- a/fs/gfs2/ops_inode.c
50438+++ b/fs/gfs2/ops_inode.c
50439@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50440 unsigned int x;
50441 int error;
50442
50443+ pax_track_stack();
50444+
50445 if (ndentry->d_inode) {
50446 nip = GFS2_I(ndentry->d_inode);
50447 if (ip == nip)
50448diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50449index 4463297..4fed53b 100644
50450--- a/fs/gfs2/sys.c
50451+++ b/fs/gfs2/sys.c
50452@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50453 return a->store ? a->store(sdp, buf, len) : len;
50454 }
50455
50456-static struct sysfs_ops gfs2_attr_ops = {
50457+static const struct sysfs_ops gfs2_attr_ops = {
50458 .show = gfs2_attr_show,
50459 .store = gfs2_attr_store,
50460 };
50461@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50462 return 0;
50463 }
50464
50465-static struct kset_uevent_ops gfs2_uevent_ops = {
50466+static const struct kset_uevent_ops gfs2_uevent_ops = {
50467 .uevent = gfs2_uevent,
50468 };
50469
50470diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
50471index 052f214..2462c5b 100644
50472--- a/fs/hfs/btree.c
50473+++ b/fs/hfs/btree.c
50474@@ -45,11 +45,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50475 case HFS_EXT_CNID:
50476 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
50477 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
50478+
50479+ if (HFS_I(tree->inode)->alloc_blocks >
50480+ HFS_I(tree->inode)->first_blocks) {
50481+ printk(KERN_ERR "hfs: invalid btree extent records\n");
50482+ unlock_new_inode(tree->inode);
50483+ goto free_inode;
50484+ }
50485+
50486 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50487 break;
50488 case HFS_CAT_CNID:
50489 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
50490 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
50491+
50492+ if (!HFS_I(tree->inode)->first_blocks) {
50493+ printk(KERN_ERR "hfs: invalid btree extent records "
50494+ "(0 size).\n");
50495+ unlock_new_inode(tree->inode);
50496+ goto free_inode;
50497+ }
50498+
50499 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50500 break;
50501 default:
50502@@ -58,11 +74,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50503 }
50504 unlock_new_inode(tree->inode);
50505
50506- if (!HFS_I(tree->inode)->first_blocks) {
50507- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
50508- goto free_inode;
50509- }
50510-
50511 mapping = tree->inode->i_mapping;
50512 page = read_mapping_page(mapping, 0, NULL);
50513 if (IS_ERR(page))
50514diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50515index f6874ac..7cd98a8 100644
50516--- a/fs/hfsplus/catalog.c
50517+++ b/fs/hfsplus/catalog.c
50518@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50519 int err;
50520 u16 type;
50521
50522+ pax_track_stack();
50523+
50524 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50525 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50526 if (err)
50527@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50528 int entry_size;
50529 int err;
50530
50531+ pax_track_stack();
50532+
50533 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50534 sb = dir->i_sb;
50535 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50536@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50537 int entry_size, type;
50538 int err = 0;
50539
50540+ pax_track_stack();
50541+
50542 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50543 dst_dir->i_ino, dst_name->name);
50544 sb = src_dir->i_sb;
50545diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50546index 5f40236..dac3421 100644
50547--- a/fs/hfsplus/dir.c
50548+++ b/fs/hfsplus/dir.c
50549@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50550 struct hfsplus_readdir_data *rd;
50551 u16 type;
50552
50553+ pax_track_stack();
50554+
50555 if (filp->f_pos >= inode->i_size)
50556 return 0;
50557
50558diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50559index 1bcf597..905a251 100644
50560--- a/fs/hfsplus/inode.c
50561+++ b/fs/hfsplus/inode.c
50562@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50563 int res = 0;
50564 u16 type;
50565
50566+ pax_track_stack();
50567+
50568 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50569
50570 HFSPLUS_I(inode).dev = 0;
50571@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50572 struct hfs_find_data fd;
50573 hfsplus_cat_entry entry;
50574
50575+ pax_track_stack();
50576+
50577 if (HFSPLUS_IS_RSRC(inode))
50578 main_inode = HFSPLUS_I(inode).rsrc_inode;
50579
50580diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50581index f457d2c..7ef4ad5 100644
50582--- a/fs/hfsplus/ioctl.c
50583+++ b/fs/hfsplus/ioctl.c
50584@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50585 struct hfsplus_cat_file *file;
50586 int res;
50587
50588+ pax_track_stack();
50589+
50590 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50591 return -EOPNOTSUPP;
50592
50593@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50594 struct hfsplus_cat_file *file;
50595 ssize_t res = 0;
50596
50597+ pax_track_stack();
50598+
50599 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50600 return -EOPNOTSUPP;
50601
50602diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50603index 43022f3..7298079 100644
50604--- a/fs/hfsplus/super.c
50605+++ b/fs/hfsplus/super.c
50606@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50607 struct nls_table *nls = NULL;
50608 int err = -EINVAL;
50609
50610+ pax_track_stack();
50611+
50612 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
50613 if (!sbi)
50614 return -ENOMEM;
50615diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50616index 87a1258..5694d91 100644
50617--- a/fs/hugetlbfs/inode.c
50618+++ b/fs/hugetlbfs/inode.c
50619@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
50620 .kill_sb = kill_litter_super,
50621 };
50622
50623-static struct vfsmount *hugetlbfs_vfsmount;
50624+struct vfsmount *hugetlbfs_vfsmount;
50625
50626 static int can_do_hugetlb_shm(void)
50627 {
50628diff --git a/fs/ioctl.c b/fs/ioctl.c
50629index 6c75110..19d2c3c 100644
50630--- a/fs/ioctl.c
50631+++ b/fs/ioctl.c
50632@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
50633 u64 phys, u64 len, u32 flags)
50634 {
50635 struct fiemap_extent extent;
50636- struct fiemap_extent *dest = fieinfo->fi_extents_start;
50637+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
50638
50639 /* only count the extents */
50640 if (fieinfo->fi_extents_max == 0) {
50641@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50642
50643 fieinfo.fi_flags = fiemap.fm_flags;
50644 fieinfo.fi_extents_max = fiemap.fm_extent_count;
50645- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
50646+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
50647
50648 if (fiemap.fm_extent_count != 0 &&
50649 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
50650@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50651 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
50652 fiemap.fm_flags = fieinfo.fi_flags;
50653 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
50654- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
50655+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
50656 error = -EFAULT;
50657
50658 return error;
50659diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
50660index b0435dd..81ee0be 100644
50661--- a/fs/jbd/checkpoint.c
50662+++ b/fs/jbd/checkpoint.c
50663@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
50664 tid_t this_tid;
50665 int result;
50666
50667+ pax_track_stack();
50668+
50669 jbd_debug(1, "Start checkpoint\n");
50670
50671 /*
50672diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
50673index 546d153..736896c 100644
50674--- a/fs/jffs2/compr_rtime.c
50675+++ b/fs/jffs2/compr_rtime.c
50676@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
50677 int outpos = 0;
50678 int pos=0;
50679
50680+ pax_track_stack();
50681+
50682 memset(positions,0,sizeof(positions));
50683
50684 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
50685@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
50686 int outpos = 0;
50687 int pos=0;
50688
50689+ pax_track_stack();
50690+
50691 memset(positions,0,sizeof(positions));
50692
50693 while (outpos<destlen) {
50694diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
50695index 170d289..3254b98 100644
50696--- a/fs/jffs2/compr_rubin.c
50697+++ b/fs/jffs2/compr_rubin.c
50698@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
50699 int ret;
50700 uint32_t mysrclen, mydstlen;
50701
50702+ pax_track_stack();
50703+
50704 mysrclen = *sourcelen;
50705 mydstlen = *dstlen - 8;
50706
50707diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
50708index b47679b..00d65d3 100644
50709--- a/fs/jffs2/erase.c
50710+++ b/fs/jffs2/erase.c
50711@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
50712 struct jffs2_unknown_node marker = {
50713 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
50714 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50715- .totlen = cpu_to_je32(c->cleanmarker_size)
50716+ .totlen = cpu_to_je32(c->cleanmarker_size),
50717+ .hdr_crc = cpu_to_je32(0)
50718 };
50719
50720 jffs2_prealloc_raw_node_refs(c, jeb, 1);
50721diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
50722index 5ef7bac..4fd1e3c 100644
50723--- a/fs/jffs2/wbuf.c
50724+++ b/fs/jffs2/wbuf.c
50725@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
50726 {
50727 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
50728 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50729- .totlen = constant_cpu_to_je32(8)
50730+ .totlen = constant_cpu_to_je32(8),
50731+ .hdr_crc = constant_cpu_to_je32(0)
50732 };
50733
50734 /*
50735diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
50736index 082e844..52012a1 100644
50737--- a/fs/jffs2/xattr.c
50738+++ b/fs/jffs2/xattr.c
50739@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
50740
50741 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
50742
50743+ pax_track_stack();
50744+
50745 /* Phase.1 : Merge same xref */
50746 for (i=0; i < XREF_TMPHASH_SIZE; i++)
50747 xref_tmphash[i] = NULL;
50748diff --git a/fs/jfs/super.c b/fs/jfs/super.c
50749index 2234c73..f6e6e6b 100644
50750--- a/fs/jfs/super.c
50751+++ b/fs/jfs/super.c
50752@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
50753
50754 jfs_inode_cachep =
50755 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
50756- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
50757+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
50758 init_once);
50759 if (jfs_inode_cachep == NULL)
50760 return -ENOMEM;
50761diff --git a/fs/libfs.c b/fs/libfs.c
50762index ba36e93..3153fce 100644
50763--- a/fs/libfs.c
50764+++ b/fs/libfs.c
50765@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50766
50767 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
50768 struct dentry *next;
50769+ char d_name[sizeof(next->d_iname)];
50770+ const unsigned char *name;
50771+
50772 next = list_entry(p, struct dentry, d_u.d_child);
50773 if (d_unhashed(next) || !next->d_inode)
50774 continue;
50775
50776 spin_unlock(&dcache_lock);
50777- if (filldir(dirent, next->d_name.name,
50778+ name = next->d_name.name;
50779+ if (name == next->d_iname) {
50780+ memcpy(d_name, name, next->d_name.len);
50781+ name = d_name;
50782+ }
50783+ if (filldir(dirent, name,
50784 next->d_name.len, filp->f_pos,
50785 next->d_inode->i_ino,
50786 dt_type(next->d_inode)) < 0)
50787diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
50788index c325a83..d15b07b 100644
50789--- a/fs/lockd/clntproc.c
50790+++ b/fs/lockd/clntproc.c
50791@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
50792 /*
50793 * Cookie counter for NLM requests
50794 */
50795-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
50796+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
50797
50798 void nlmclnt_next_cookie(struct nlm_cookie *c)
50799 {
50800- u32 cookie = atomic_inc_return(&nlm_cookie);
50801+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
50802
50803 memcpy(c->data, &cookie, 4);
50804 c->len=4;
50805@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
50806 struct nlm_rqst reqst, *req;
50807 int status;
50808
50809+ pax_track_stack();
50810+
50811 req = &reqst;
50812 memset(req, 0, sizeof(*req));
50813 locks_init_lock(&req->a_args.lock.fl);
50814diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
50815index 1a54ae1..6a16c27 100644
50816--- a/fs/lockd/svc.c
50817+++ b/fs/lockd/svc.c
50818@@ -43,7 +43,7 @@
50819
50820 static struct svc_program nlmsvc_program;
50821
50822-struct nlmsvc_binding * nlmsvc_ops;
50823+const struct nlmsvc_binding * nlmsvc_ops;
50824 EXPORT_SYMBOL_GPL(nlmsvc_ops);
50825
50826 static DEFINE_MUTEX(nlmsvc_mutex);
50827diff --git a/fs/locks.c b/fs/locks.c
50828index a8794f2..4041e55 100644
50829--- a/fs/locks.c
50830+++ b/fs/locks.c
50831@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
50832
50833 static struct kmem_cache *filelock_cache __read_mostly;
50834
50835+static void locks_init_lock_always(struct file_lock *fl)
50836+{
50837+ fl->fl_next = NULL;
50838+ fl->fl_fasync = NULL;
50839+ fl->fl_owner = NULL;
50840+ fl->fl_pid = 0;
50841+ fl->fl_nspid = NULL;
50842+ fl->fl_file = NULL;
50843+ fl->fl_flags = 0;
50844+ fl->fl_type = 0;
50845+ fl->fl_start = fl->fl_end = 0;
50846+}
50847+
50848 /* Allocate an empty lock structure. */
50849 static struct file_lock *locks_alloc_lock(void)
50850 {
50851- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50852+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50853+
50854+ if (fl)
50855+ locks_init_lock_always(fl);
50856+
50857+ return fl;
50858 }
50859
50860 void locks_release_private(struct file_lock *fl)
50861@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
50862 INIT_LIST_HEAD(&fl->fl_link);
50863 INIT_LIST_HEAD(&fl->fl_block);
50864 init_waitqueue_head(&fl->fl_wait);
50865- fl->fl_next = NULL;
50866- fl->fl_fasync = NULL;
50867- fl->fl_owner = NULL;
50868- fl->fl_pid = 0;
50869- fl->fl_nspid = NULL;
50870- fl->fl_file = NULL;
50871- fl->fl_flags = 0;
50872- fl->fl_type = 0;
50873- fl->fl_start = fl->fl_end = 0;
50874 fl->fl_ops = NULL;
50875 fl->fl_lmops = NULL;
50876+ locks_init_lock_always(fl);
50877 }
50878
50879 EXPORT_SYMBOL(locks_init_lock);
50880@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
50881 return;
50882
50883 if (filp->f_op && filp->f_op->flock) {
50884- struct file_lock fl = {
50885+ struct file_lock flock = {
50886 .fl_pid = current->tgid,
50887 .fl_file = filp,
50888 .fl_flags = FL_FLOCK,
50889 .fl_type = F_UNLCK,
50890 .fl_end = OFFSET_MAX,
50891 };
50892- filp->f_op->flock(filp, F_SETLKW, &fl);
50893- if (fl.fl_ops && fl.fl_ops->fl_release_private)
50894- fl.fl_ops->fl_release_private(&fl);
50895+ filp->f_op->flock(filp, F_SETLKW, &flock);
50896+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
50897+ flock.fl_ops->fl_release_private(&flock);
50898 }
50899
50900 lock_kernel();
50901diff --git a/fs/mbcache.c b/fs/mbcache.c
50902index ec88ff3..b843a82 100644
50903--- a/fs/mbcache.c
50904+++ b/fs/mbcache.c
50905@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
50906 if (!cache)
50907 goto fail;
50908 cache->c_name = name;
50909- cache->c_op.free = NULL;
50910+ *(void **)&cache->c_op.free = NULL;
50911 if (cache_op)
50912- cache->c_op.free = cache_op->free;
50913+ *(void **)&cache->c_op.free = cache_op->free;
50914 atomic_set(&cache->c_entry_count, 0);
50915 cache->c_bucket_bits = bucket_bits;
50916 #ifdef MB_CACHE_INDEXES_COUNT
50917diff --git a/fs/namei.c b/fs/namei.c
50918index b0afbd4..8d065a1 100644
50919--- a/fs/namei.c
50920+++ b/fs/namei.c
50921@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
50922 return ret;
50923
50924 /*
50925+ * Searching includes executable on directories, else just read.
50926+ */
50927+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50928+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
50929+ if (capable(CAP_DAC_READ_SEARCH))
50930+ return 0;
50931+
50932+ /*
50933 * Read/write DACs are always overridable.
50934 * Executable DACs are overridable if at least one exec bit is set.
50935 */
50936@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
50937 if (capable(CAP_DAC_OVERRIDE))
50938 return 0;
50939
50940- /*
50941- * Searching includes executable on directories, else just read.
50942- */
50943- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50944- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
50945- if (capable(CAP_DAC_READ_SEARCH))
50946- return 0;
50947-
50948 return -EACCES;
50949 }
50950
50951@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
50952 if (!ret)
50953 goto ok;
50954
50955- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
50956+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
50957+ capable(CAP_DAC_OVERRIDE))
50958 goto ok;
50959
50960 return ret;
50961@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
50962 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
50963 error = PTR_ERR(cookie);
50964 if (!IS_ERR(cookie)) {
50965- char *s = nd_get_link(nd);
50966+ const char *s = nd_get_link(nd);
50967 error = 0;
50968 if (s)
50969 error = __vfs_follow_link(nd, s);
50970@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
50971 err = security_inode_follow_link(path->dentry, nd);
50972 if (err)
50973 goto loop;
50974+
50975+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
50976+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
50977+ err = -EACCES;
50978+ goto loop;
50979+ }
50980+
50981 current->link_count++;
50982 current->total_link_count++;
50983 nd->depth++;
50984@@ -1016,11 +1024,19 @@ return_reval:
50985 break;
50986 }
50987 return_base:
50988+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
50989+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50990+ path_put(&nd->path);
50991+ return -ENOENT;
50992+ }
50993 return 0;
50994 out_dput:
50995 path_put_conditional(&next, nd);
50996 break;
50997 }
50998+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
50999+ err = -ENOENT;
51000+
51001 path_put(&nd->path);
51002 return_err:
51003 return err;
51004@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51005 int retval = path_init(dfd, name, flags, nd);
51006 if (!retval)
51007 retval = path_walk(name, nd);
51008- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51009- nd->path.dentry->d_inode))
51010- audit_inode(name, nd->path.dentry);
51011+
51012+ if (likely(!retval)) {
51013+ if (nd->path.dentry && nd->path.dentry->d_inode) {
51014+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51015+ retval = -ENOENT;
51016+ if (!audit_dummy_context())
51017+ audit_inode(name, nd->path.dentry);
51018+ }
51019+ }
51020 if (nd->root.mnt) {
51021 path_put(&nd->root);
51022 nd->root.mnt = NULL;
51023 }
51024+
51025 return retval;
51026 }
51027
51028@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51029 if (error)
51030 goto err_out;
51031
51032+
51033+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51034+ error = -EPERM;
51035+ goto err_out;
51036+ }
51037+ if (gr_handle_rawio(inode)) {
51038+ error = -EPERM;
51039+ goto err_out;
51040+ }
51041+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51042+ error = -EACCES;
51043+ goto err_out;
51044+ }
51045+
51046 if (flag & O_TRUNC) {
51047 error = get_write_access(inode);
51048 if (error)
51049@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51050 {
51051 int error;
51052 struct dentry *dir = nd->path.dentry;
51053+ int acc_mode = ACC_MODE(flag);
51054+
51055+ if (flag & O_TRUNC)
51056+ acc_mode |= MAY_WRITE;
51057+ if (flag & O_APPEND)
51058+ acc_mode |= MAY_APPEND;
51059+
51060+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51061+ error = -EACCES;
51062+ goto out_unlock;
51063+ }
51064
51065 if (!IS_POSIXACL(dir->d_inode))
51066 mode &= ~current_umask();
51067@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51068 if (error)
51069 goto out_unlock;
51070 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51071+ if (!error)
51072+ gr_handle_create(path->dentry, nd->path.mnt);
51073 out_unlock:
51074 mutex_unlock(&dir->d_inode->i_mutex);
51075 dput(nd->path.dentry);
51076@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51077 &nd, flag);
51078 if (error)
51079 return ERR_PTR(error);
51080+
51081+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51082+ error = -EPERM;
51083+ goto exit;
51084+ }
51085+
51086+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51087+ error = -EPERM;
51088+ goto exit;
51089+ }
51090+
51091+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51092+ error = -EACCES;
51093+ goto exit;
51094+ }
51095+
51096 goto ok;
51097 }
51098
51099@@ -1795,6 +1861,19 @@ do_last:
51100 /*
51101 * It already exists.
51102 */
51103+
51104+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51105+ error = -ENOENT;
51106+ goto exit_mutex_unlock;
51107+ }
51108+
51109+ /* only check if O_CREAT is specified, all other checks need
51110+ to go into may_open */
51111+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51112+ error = -EACCES;
51113+ goto exit_mutex_unlock;
51114+ }
51115+
51116 mutex_unlock(&dir->d_inode->i_mutex);
51117 audit_inode(pathname, path.dentry);
51118
51119@@ -1887,6 +1966,13 @@ do_link:
51120 error = security_inode_follow_link(path.dentry, &nd);
51121 if (error)
51122 goto exit_dput;
51123+
51124+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51125+ path.dentry, nd.path.mnt)) {
51126+ error = -EACCES;
51127+ goto exit_dput;
51128+ }
51129+
51130 error = __do_follow_link(&path, &nd);
51131 if (error) {
51132 /* Does someone understand code flow here? Or it is only
51133@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51134 }
51135 return dentry;
51136 eexist:
51137+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51138+ dput(dentry);
51139+ return ERR_PTR(-ENOENT);
51140+ }
51141 dput(dentry);
51142 dentry = ERR_PTR(-EEXIST);
51143 fail:
51144@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51145 error = may_mknod(mode);
51146 if (error)
51147 goto out_dput;
51148+
51149+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51150+ error = -EPERM;
51151+ goto out_dput;
51152+ }
51153+
51154+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51155+ error = -EACCES;
51156+ goto out_dput;
51157+ }
51158+
51159 error = mnt_want_write(nd.path.mnt);
51160 if (error)
51161 goto out_dput;
51162@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51163 }
51164 out_drop_write:
51165 mnt_drop_write(nd.path.mnt);
51166+
51167+ if (!error)
51168+ gr_handle_create(dentry, nd.path.mnt);
51169 out_dput:
51170 dput(dentry);
51171 out_unlock:
51172@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51173 if (IS_ERR(dentry))
51174 goto out_unlock;
51175
51176+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51177+ error = -EACCES;
51178+ goto out_dput;
51179+ }
51180+
51181 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51182 mode &= ~current_umask();
51183 error = mnt_want_write(nd.path.mnt);
51184@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51185 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51186 out_drop_write:
51187 mnt_drop_write(nd.path.mnt);
51188+
51189+ if (!error)
51190+ gr_handle_create(dentry, nd.path.mnt);
51191+
51192 out_dput:
51193 dput(dentry);
51194 out_unlock:
51195@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51196 char * name;
51197 struct dentry *dentry;
51198 struct nameidata nd;
51199+ ino_t saved_ino = 0;
51200+ dev_t saved_dev = 0;
51201
51202 error = user_path_parent(dfd, pathname, &nd, &name);
51203 if (error)
51204@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51205 error = PTR_ERR(dentry);
51206 if (IS_ERR(dentry))
51207 goto exit2;
51208+
51209+ if (dentry->d_inode != NULL) {
51210+ saved_ino = dentry->d_inode->i_ino;
51211+ saved_dev = gr_get_dev_from_dentry(dentry);
51212+
51213+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51214+ error = -EACCES;
51215+ goto exit3;
51216+ }
51217+ }
51218+
51219 error = mnt_want_write(nd.path.mnt);
51220 if (error)
51221 goto exit3;
51222@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51223 if (error)
51224 goto exit4;
51225 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51226+ if (!error && (saved_dev || saved_ino))
51227+ gr_handle_delete(saved_ino, saved_dev);
51228 exit4:
51229 mnt_drop_write(nd.path.mnt);
51230 exit3:
51231@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51232 struct dentry *dentry;
51233 struct nameidata nd;
51234 struct inode *inode = NULL;
51235+ ino_t saved_ino = 0;
51236+ dev_t saved_dev = 0;
51237
51238 error = user_path_parent(dfd, pathname, &nd, &name);
51239 if (error)
51240@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51241 if (nd.last.name[nd.last.len])
51242 goto slashes;
51243 inode = dentry->d_inode;
51244- if (inode)
51245+ if (inode) {
51246+ if (inode->i_nlink <= 1) {
51247+ saved_ino = inode->i_ino;
51248+ saved_dev = gr_get_dev_from_dentry(dentry);
51249+ }
51250+
51251 atomic_inc(&inode->i_count);
51252+
51253+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51254+ error = -EACCES;
51255+ goto exit2;
51256+ }
51257+ }
51258 error = mnt_want_write(nd.path.mnt);
51259 if (error)
51260 goto exit2;
51261@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51262 if (error)
51263 goto exit3;
51264 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51265+ if (!error && (saved_ino || saved_dev))
51266+ gr_handle_delete(saved_ino, saved_dev);
51267 exit3:
51268 mnt_drop_write(nd.path.mnt);
51269 exit2:
51270@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51271 if (IS_ERR(dentry))
51272 goto out_unlock;
51273
51274+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51275+ error = -EACCES;
51276+ goto out_dput;
51277+ }
51278+
51279 error = mnt_want_write(nd.path.mnt);
51280 if (error)
51281 goto out_dput;
51282@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51283 if (error)
51284 goto out_drop_write;
51285 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51286+ if (!error)
51287+ gr_handle_create(dentry, nd.path.mnt);
51288 out_drop_write:
51289 mnt_drop_write(nd.path.mnt);
51290 out_dput:
51291@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51292 error = PTR_ERR(new_dentry);
51293 if (IS_ERR(new_dentry))
51294 goto out_unlock;
51295+
51296+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51297+ old_path.dentry->d_inode,
51298+ old_path.dentry->d_inode->i_mode, to)) {
51299+ error = -EACCES;
51300+ goto out_dput;
51301+ }
51302+
51303+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51304+ old_path.dentry, old_path.mnt, to)) {
51305+ error = -EACCES;
51306+ goto out_dput;
51307+ }
51308+
51309 error = mnt_want_write(nd.path.mnt);
51310 if (error)
51311 goto out_dput;
51312@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51313 if (error)
51314 goto out_drop_write;
51315 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51316+ if (!error)
51317+ gr_handle_create(new_dentry, nd.path.mnt);
51318 out_drop_write:
51319 mnt_drop_write(nd.path.mnt);
51320 out_dput:
51321@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51322 char *to;
51323 int error;
51324
51325+ pax_track_stack();
51326+
51327 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51328 if (error)
51329 goto exit;
51330@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51331 if (new_dentry == trap)
51332 goto exit5;
51333
51334+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51335+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51336+ to);
51337+ if (error)
51338+ goto exit5;
51339+
51340 error = mnt_want_write(oldnd.path.mnt);
51341 if (error)
51342 goto exit5;
51343@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51344 goto exit6;
51345 error = vfs_rename(old_dir->d_inode, old_dentry,
51346 new_dir->d_inode, new_dentry);
51347+ if (!error)
51348+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51349+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51350 exit6:
51351 mnt_drop_write(oldnd.path.mnt);
51352 exit5:
51353@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51354
51355 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51356 {
51357+ char tmpbuf[64];
51358+ const char *newlink;
51359 int len;
51360
51361 len = PTR_ERR(link);
51362@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51363 len = strlen(link);
51364 if (len > (unsigned) buflen)
51365 len = buflen;
51366- if (copy_to_user(buffer, link, len))
51367+
51368+ if (len < sizeof(tmpbuf)) {
51369+ memcpy(tmpbuf, link, len);
51370+ newlink = tmpbuf;
51371+ } else
51372+ newlink = link;
51373+
51374+ if (copy_to_user(buffer, newlink, len))
51375 len = -EFAULT;
51376 out:
51377 return len;
51378diff --git a/fs/namespace.c b/fs/namespace.c
51379index 2beb0fb..11a95a5 100644
51380--- a/fs/namespace.c
51381+++ b/fs/namespace.c
51382@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51383 if (!(sb->s_flags & MS_RDONLY))
51384 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51385 up_write(&sb->s_umount);
51386+
51387+ gr_log_remount(mnt->mnt_devname, retval);
51388+
51389 return retval;
51390 }
51391
51392@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51393 security_sb_umount_busy(mnt);
51394 up_write(&namespace_sem);
51395 release_mounts(&umount_list);
51396+
51397+ gr_log_unmount(mnt->mnt_devname, retval);
51398+
51399 return retval;
51400 }
51401
51402@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51403 if (retval)
51404 goto dput_out;
51405
51406+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51407+ retval = -EPERM;
51408+ goto dput_out;
51409+ }
51410+
51411+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51412+ retval = -EPERM;
51413+ goto dput_out;
51414+ }
51415+
51416 if (flags & MS_REMOUNT)
51417 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51418 data_page);
51419@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51420 dev_name, data_page);
51421 dput_out:
51422 path_put(&path);
51423+
51424+ gr_log_mount(dev_name, dir_name, retval);
51425+
51426 return retval;
51427 }
51428
51429@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51430 goto out1;
51431 }
51432
51433+ if (gr_handle_chroot_pivot()) {
51434+ error = -EPERM;
51435+ path_put(&old);
51436+ goto out1;
51437+ }
51438+
51439 read_lock(&current->fs->lock);
51440 root = current->fs->root;
51441 path_get(&current->fs->root);
51442diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51443index b8b5b30..2bd9ccb 100644
51444--- a/fs/ncpfs/dir.c
51445+++ b/fs/ncpfs/dir.c
51446@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51447 int res, val = 0, len;
51448 __u8 __name[NCP_MAXPATHLEN + 1];
51449
51450+ pax_track_stack();
51451+
51452 parent = dget_parent(dentry);
51453 dir = parent->d_inode;
51454
51455@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51456 int error, res, len;
51457 __u8 __name[NCP_MAXPATHLEN + 1];
51458
51459+ pax_track_stack();
51460+
51461 lock_kernel();
51462 error = -EIO;
51463 if (!ncp_conn_valid(server))
51464@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51465 int error, result, len;
51466 int opmode;
51467 __u8 __name[NCP_MAXPATHLEN + 1];
51468-
51469+
51470 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51471 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51472
51473+ pax_track_stack();
51474+
51475 error = -EIO;
51476 lock_kernel();
51477 if (!ncp_conn_valid(server))
51478@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51479 int error, len;
51480 __u8 __name[NCP_MAXPATHLEN + 1];
51481
51482+ pax_track_stack();
51483+
51484 DPRINTK("ncp_mkdir: making %s/%s\n",
51485 dentry->d_parent->d_name.name, dentry->d_name.name);
51486
51487@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51488 if (!ncp_conn_valid(server))
51489 goto out;
51490
51491+ pax_track_stack();
51492+
51493 ncp_age_dentry(server, dentry);
51494 len = sizeof(__name);
51495 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51496@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51497 int old_len, new_len;
51498 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51499
51500+ pax_track_stack();
51501+
51502 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51503 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51504 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51505diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51506index cf98da1..da890a9 100644
51507--- a/fs/ncpfs/inode.c
51508+++ b/fs/ncpfs/inode.c
51509@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51510 #endif
51511 struct ncp_entry_info finfo;
51512
51513+ pax_track_stack();
51514+
51515 data.wdog_pid = NULL;
51516 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51517 if (!server)
51518diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51519index bfaef7b..e9d03ca 100644
51520--- a/fs/nfs/inode.c
51521+++ b/fs/nfs/inode.c
51522@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51523 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51524 nfsi->attrtimeo_timestamp = jiffies;
51525
51526- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51527+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51528 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51529 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51530 else
51531@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51532 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51533 }
51534
51535-static atomic_long_t nfs_attr_generation_counter;
51536+static atomic_long_unchecked_t nfs_attr_generation_counter;
51537
51538 static unsigned long nfs_read_attr_generation_counter(void)
51539 {
51540- return atomic_long_read(&nfs_attr_generation_counter);
51541+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51542 }
51543
51544 unsigned long nfs_inc_attr_generation_counter(void)
51545 {
51546- return atomic_long_inc_return(&nfs_attr_generation_counter);
51547+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51548 }
51549
51550 void nfs_fattr_init(struct nfs_fattr *fattr)
51551diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51552index cc2f505..f6a236f 100644
51553--- a/fs/nfsd/lockd.c
51554+++ b/fs/nfsd/lockd.c
51555@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51556 fput(filp);
51557 }
51558
51559-static struct nlmsvc_binding nfsd_nlm_ops = {
51560+static const struct nlmsvc_binding nfsd_nlm_ops = {
51561 .fopen = nlm_fopen, /* open file for locking */
51562 .fclose = nlm_fclose, /* close file */
51563 };
51564diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51565index cfc3391..dcc083a 100644
51566--- a/fs/nfsd/nfs4state.c
51567+++ b/fs/nfsd/nfs4state.c
51568@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51569 unsigned int cmd;
51570 int err;
51571
51572+ pax_track_stack();
51573+
51574 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51575 (long long) lock->lk_offset,
51576 (long long) lock->lk_length);
51577diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51578index 4a82a96..0d5fb49 100644
51579--- a/fs/nfsd/nfs4xdr.c
51580+++ b/fs/nfsd/nfs4xdr.c
51581@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51582 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51583 u32 minorversion = resp->cstate.minorversion;
51584
51585+ pax_track_stack();
51586+
51587 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51588 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51589 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51590diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51591index 2e09588..596421d 100644
51592--- a/fs/nfsd/vfs.c
51593+++ b/fs/nfsd/vfs.c
51594@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51595 } else {
51596 oldfs = get_fs();
51597 set_fs(KERNEL_DS);
51598- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51599+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51600 set_fs(oldfs);
51601 }
51602
51603@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51604
51605 /* Write the data. */
51606 oldfs = get_fs(); set_fs(KERNEL_DS);
51607- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
51608+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
51609 set_fs(oldfs);
51610 if (host_err < 0)
51611 goto out_nfserr;
51612@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
51613 */
51614
51615 oldfs = get_fs(); set_fs(KERNEL_DS);
51616- host_err = inode->i_op->readlink(dentry, buf, *lenp);
51617+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
51618 set_fs(oldfs);
51619
51620 if (host_err < 0)
51621diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
51622index f6af760..d6b2b83 100644
51623--- a/fs/nilfs2/ioctl.c
51624+++ b/fs/nilfs2/ioctl.c
51625@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
51626 unsigned int cmd, void __user *argp)
51627 {
51628 struct nilfs_argv argv[5];
51629- const static size_t argsz[5] = {
51630+ static const size_t argsz[5] = {
51631 sizeof(struct nilfs_vdesc),
51632 sizeof(struct nilfs_period),
51633 sizeof(__u64),
51634diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
51635index 7e54e52..9337248 100644
51636--- a/fs/notify/dnotify/dnotify.c
51637+++ b/fs/notify/dnotify/dnotify.c
51638@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
51639 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
51640 }
51641
51642-static struct fsnotify_ops dnotify_fsnotify_ops = {
51643+static const struct fsnotify_ops dnotify_fsnotify_ops = {
51644 .handle_event = dnotify_handle_event,
51645 .should_send_event = dnotify_should_send_event,
51646 .free_group_priv = NULL,
51647diff --git a/fs/notify/notification.c b/fs/notify/notification.c
51648index b8bf53b..c518688 100644
51649--- a/fs/notify/notification.c
51650+++ b/fs/notify/notification.c
51651@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
51652 * get set to 0 so it will never get 'freed'
51653 */
51654 static struct fsnotify_event q_overflow_event;
51655-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51656+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51657
51658 /**
51659 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
51660@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51661 */
51662 u32 fsnotify_get_cookie(void)
51663 {
51664- return atomic_inc_return(&fsnotify_sync_cookie);
51665+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
51666 }
51667 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
51668
51669diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
51670index 5a9e344..0f8cd28 100644
51671--- a/fs/ntfs/dir.c
51672+++ b/fs/ntfs/dir.c
51673@@ -1328,7 +1328,7 @@ find_next_index_buffer:
51674 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
51675 ~(s64)(ndir->itype.index.block_size - 1)));
51676 /* Bounds checks. */
51677- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51678+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51679 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
51680 "inode 0x%lx or driver bug.", vdir->i_ino);
51681 goto err_out;
51682diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
51683index 663c0e3..b6868e9 100644
51684--- a/fs/ntfs/file.c
51685+++ b/fs/ntfs/file.c
51686@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
51687 #endif /* NTFS_RW */
51688 };
51689
51690-const struct file_operations ntfs_empty_file_ops = {};
51691+const struct file_operations ntfs_empty_file_ops __read_only;
51692
51693-const struct inode_operations ntfs_empty_inode_ops = {};
51694+const struct inode_operations ntfs_empty_inode_ops __read_only;
51695diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
51696index 1cd2934..880b5d2 100644
51697--- a/fs/ocfs2/cluster/masklog.c
51698+++ b/fs/ocfs2/cluster/masklog.c
51699@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
51700 return mlog_mask_store(mlog_attr->mask, buf, count);
51701 }
51702
51703-static struct sysfs_ops mlog_attr_ops = {
51704+static const struct sysfs_ops mlog_attr_ops = {
51705 .show = mlog_show,
51706 .store = mlog_store,
51707 };
51708diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
51709index ac10f83..2cd2607 100644
51710--- a/fs/ocfs2/localalloc.c
51711+++ b/fs/ocfs2/localalloc.c
51712@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
51713 goto bail;
51714 }
51715
51716- atomic_inc(&osb->alloc_stats.moves);
51717+ atomic_inc_unchecked(&osb->alloc_stats.moves);
51718
51719 status = 0;
51720 bail:
51721diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
51722index f010b22..9f9ed34 100644
51723--- a/fs/ocfs2/namei.c
51724+++ b/fs/ocfs2/namei.c
51725@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
51726 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
51727 struct ocfs2_dir_lookup_result target_insert = { NULL, };
51728
51729+ pax_track_stack();
51730+
51731 /* At some point it might be nice to break this function up a
51732 * bit. */
51733
51734diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
51735index d963d86..914cfbd 100644
51736--- a/fs/ocfs2/ocfs2.h
51737+++ b/fs/ocfs2/ocfs2.h
51738@@ -217,11 +217,11 @@ enum ocfs2_vol_state
51739
51740 struct ocfs2_alloc_stats
51741 {
51742- atomic_t moves;
51743- atomic_t local_data;
51744- atomic_t bitmap_data;
51745- atomic_t bg_allocs;
51746- atomic_t bg_extends;
51747+ atomic_unchecked_t moves;
51748+ atomic_unchecked_t local_data;
51749+ atomic_unchecked_t bitmap_data;
51750+ atomic_unchecked_t bg_allocs;
51751+ atomic_unchecked_t bg_extends;
51752 };
51753
51754 enum ocfs2_local_alloc_state
51755diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
51756index 79b5dac..d322952 100644
51757--- a/fs/ocfs2/suballoc.c
51758+++ b/fs/ocfs2/suballoc.c
51759@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
51760 mlog_errno(status);
51761 goto bail;
51762 }
51763- atomic_inc(&osb->alloc_stats.bg_extends);
51764+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
51765
51766 /* You should never ask for this much metadata */
51767 BUG_ON(bits_wanted >
51768@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
51769 mlog_errno(status);
51770 goto bail;
51771 }
51772- atomic_inc(&osb->alloc_stats.bg_allocs);
51773+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51774
51775 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
51776 ac->ac_bits_given += (*num_bits);
51777@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
51778 mlog_errno(status);
51779 goto bail;
51780 }
51781- atomic_inc(&osb->alloc_stats.bg_allocs);
51782+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51783
51784 BUG_ON(num_bits != 1);
51785
51786@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51787 cluster_start,
51788 num_clusters);
51789 if (!status)
51790- atomic_inc(&osb->alloc_stats.local_data);
51791+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
51792 } else {
51793 if (min_clusters > (osb->bitmap_cpg - 1)) {
51794 /* The only paths asking for contiguousness
51795@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51796 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
51797 bg_blkno,
51798 bg_bit_off);
51799- atomic_inc(&osb->alloc_stats.bitmap_data);
51800+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
51801 }
51802 }
51803 if (status < 0) {
51804diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
51805index 9f55be4..a3f8048 100644
51806--- a/fs/ocfs2/super.c
51807+++ b/fs/ocfs2/super.c
51808@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
51809 "%10s => GlobalAllocs: %d LocalAllocs: %d "
51810 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
51811 "Stats",
51812- atomic_read(&osb->alloc_stats.bitmap_data),
51813- atomic_read(&osb->alloc_stats.local_data),
51814- atomic_read(&osb->alloc_stats.bg_allocs),
51815- atomic_read(&osb->alloc_stats.moves),
51816- atomic_read(&osb->alloc_stats.bg_extends));
51817+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
51818+ atomic_read_unchecked(&osb->alloc_stats.local_data),
51819+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
51820+ atomic_read_unchecked(&osb->alloc_stats.moves),
51821+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
51822
51823 out += snprintf(buf + out, len - out,
51824 "%10s => State: %u Descriptor: %llu Size: %u bits "
51825@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
51826 spin_lock_init(&osb->osb_xattr_lock);
51827 ocfs2_init_inode_steal_slot(osb);
51828
51829- atomic_set(&osb->alloc_stats.moves, 0);
51830- atomic_set(&osb->alloc_stats.local_data, 0);
51831- atomic_set(&osb->alloc_stats.bitmap_data, 0);
51832- atomic_set(&osb->alloc_stats.bg_allocs, 0);
51833- atomic_set(&osb->alloc_stats.bg_extends, 0);
51834+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
51835+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
51836+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
51837+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
51838+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
51839
51840 /* Copy the blockcheck stats from the superblock probe */
51841 osb->osb_ecc_stats = *stats;
51842diff --git a/fs/open.c b/fs/open.c
51843index 4f01e06..091f6c3 100644
51844--- a/fs/open.c
51845+++ b/fs/open.c
51846@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
51847 error = locks_verify_truncate(inode, NULL, length);
51848 if (!error)
51849 error = security_path_truncate(&path, length, 0);
51850+
51851+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
51852+ error = -EACCES;
51853+
51854 if (!error) {
51855 vfs_dq_init(inode);
51856 error = do_truncate(path.dentry, length, 0, NULL);
51857@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
51858 if (__mnt_is_readonly(path.mnt))
51859 res = -EROFS;
51860
51861+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
51862+ res = -EACCES;
51863+
51864 out_path_release:
51865 path_put(&path);
51866 out:
51867@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
51868 if (error)
51869 goto dput_and_out;
51870
51871+ gr_log_chdir(path.dentry, path.mnt);
51872+
51873 set_fs_pwd(current->fs, &path);
51874
51875 dput_and_out:
51876@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
51877 goto out_putf;
51878
51879 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
51880+
51881+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
51882+ error = -EPERM;
51883+
51884+ if (!error)
51885+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
51886+
51887 if (!error)
51888 set_fs_pwd(current->fs, &file->f_path);
51889 out_putf:
51890@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
51891 if (!capable(CAP_SYS_CHROOT))
51892 goto dput_and_out;
51893
51894+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
51895+ goto dput_and_out;
51896+
51897 set_fs_root(current->fs, &path);
51898+
51899+ gr_handle_chroot_chdir(&path);
51900+
51901 error = 0;
51902 dput_and_out:
51903 path_put(&path);
51904@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
51905 err = mnt_want_write_file(file);
51906 if (err)
51907 goto out_putf;
51908+
51909 mutex_lock(&inode->i_mutex);
51910+
51911+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
51912+ err = -EACCES;
51913+ goto out_unlock;
51914+ }
51915+
51916 if (mode == (mode_t) -1)
51917 mode = inode->i_mode;
51918+
51919+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
51920+ err = -EPERM;
51921+ goto out_unlock;
51922+ }
51923+
51924 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51925 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51926 err = notify_change(dentry, &newattrs);
51927+
51928+out_unlock:
51929 mutex_unlock(&inode->i_mutex);
51930 mnt_drop_write(file->f_path.mnt);
51931 out_putf:
51932@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
51933 error = mnt_want_write(path.mnt);
51934 if (error)
51935 goto dput_and_out;
51936+
51937 mutex_lock(&inode->i_mutex);
51938+
51939+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
51940+ error = -EACCES;
51941+ goto out_unlock;
51942+ }
51943+
51944 if (mode == (mode_t) -1)
51945 mode = inode->i_mode;
51946+
51947+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
51948+ error = -EACCES;
51949+ goto out_unlock;
51950+ }
51951+
51952 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51953 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51954 error = notify_change(path.dentry, &newattrs);
51955+
51956+out_unlock:
51957 mutex_unlock(&inode->i_mutex);
51958 mnt_drop_write(path.mnt);
51959 dput_and_out:
51960@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
51961 return sys_fchmodat(AT_FDCWD, filename, mode);
51962 }
51963
51964-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
51965+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
51966 {
51967 struct inode *inode = dentry->d_inode;
51968 int error;
51969 struct iattr newattrs;
51970
51971+ if (!gr_acl_handle_chown(dentry, mnt))
51972+ return -EACCES;
51973+
51974 newattrs.ia_valid = ATTR_CTIME;
51975 if (user != (uid_t) -1) {
51976 newattrs.ia_valid |= ATTR_UID;
51977@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
51978 error = mnt_want_write(path.mnt);
51979 if (error)
51980 goto out_release;
51981- error = chown_common(path.dentry, user, group);
51982+ error = chown_common(path.dentry, user, group, path.mnt);
51983 mnt_drop_write(path.mnt);
51984 out_release:
51985 path_put(&path);
51986@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
51987 error = mnt_want_write(path.mnt);
51988 if (error)
51989 goto out_release;
51990- error = chown_common(path.dentry, user, group);
51991+ error = chown_common(path.dentry, user, group, path.mnt);
51992 mnt_drop_write(path.mnt);
51993 out_release:
51994 path_put(&path);
51995@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
51996 error = mnt_want_write(path.mnt);
51997 if (error)
51998 goto out_release;
51999- error = chown_common(path.dentry, user, group);
52000+ error = chown_common(path.dentry, user, group, path.mnt);
52001 mnt_drop_write(path.mnt);
52002 out_release:
52003 path_put(&path);
52004@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52005 goto out_fput;
52006 dentry = file->f_path.dentry;
52007 audit_inode(NULL, dentry);
52008- error = chown_common(dentry, user, group);
52009+ error = chown_common(dentry, user, group, file->f_path.mnt);
52010 mnt_drop_write(file->f_path.mnt);
52011 out_fput:
52012 fput(file);
52013@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52014 if (!IS_ERR(tmp)) {
52015 fd = get_unused_fd_flags(flags);
52016 if (fd >= 0) {
52017- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52018+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52019 if (IS_ERR(f)) {
52020 put_unused_fd(fd);
52021 fd = PTR_ERR(f);
52022diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52023index 6ab70f4..f4103d1 100644
52024--- a/fs/partitions/efi.c
52025+++ b/fs/partitions/efi.c
52026@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52027 if (!bdev || !gpt)
52028 return NULL;
52029
52030+ if (!le32_to_cpu(gpt->num_partition_entries))
52031+ return NULL;
52032+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52033+ if (!pte)
52034+ return NULL;
52035+
52036 count = le32_to_cpu(gpt->num_partition_entries) *
52037 le32_to_cpu(gpt->sizeof_partition_entry);
52038- if (!count)
52039- return NULL;
52040- pte = kzalloc(count, GFP_KERNEL);
52041- if (!pte)
52042- return NULL;
52043-
52044 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52045 (u8 *) pte,
52046 count) < count) {
52047diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52048index dd6efdb..3babc6c 100644
52049--- a/fs/partitions/ldm.c
52050+++ b/fs/partitions/ldm.c
52051@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52052 ldm_error ("A VBLK claims to have %d parts.", num);
52053 return false;
52054 }
52055+
52056 if (rec >= num) {
52057 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52058 return false;
52059@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52060 goto found;
52061 }
52062
52063- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52064+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52065 if (!f) {
52066 ldm_crit ("Out of memory.");
52067 return false;
52068diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52069index 5765198..7f8e9e0 100644
52070--- a/fs/partitions/mac.c
52071+++ b/fs/partitions/mac.c
52072@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52073 return 0; /* not a MacOS disk */
52074 }
52075 blocks_in_map = be32_to_cpu(part->map_count);
52076- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52077- put_dev_sector(sect);
52078- return 0;
52079- }
52080 printk(" [mac]");
52081+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52082+ put_dev_sector(sect);
52083+ return 0;
52084+ }
52085 for (slot = 1; slot <= blocks_in_map; ++slot) {
52086 int pos = slot * secsize;
52087 put_dev_sector(sect);
52088diff --git a/fs/pipe.c b/fs/pipe.c
52089index d0cc080..8a6f211 100644
52090--- a/fs/pipe.c
52091+++ b/fs/pipe.c
52092@@ -401,9 +401,9 @@ redo:
52093 }
52094 if (bufs) /* More to do? */
52095 continue;
52096- if (!pipe->writers)
52097+ if (!atomic_read(&pipe->writers))
52098 break;
52099- if (!pipe->waiting_writers) {
52100+ if (!atomic_read(&pipe->waiting_writers)) {
52101 /* syscall merging: Usually we must not sleep
52102 * if O_NONBLOCK is set, or if we got some data.
52103 * But if a writer sleeps in kernel space, then
52104@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52105 mutex_lock(&inode->i_mutex);
52106 pipe = inode->i_pipe;
52107
52108- if (!pipe->readers) {
52109+ if (!atomic_read(&pipe->readers)) {
52110 send_sig(SIGPIPE, current, 0);
52111 ret = -EPIPE;
52112 goto out;
52113@@ -511,7 +511,7 @@ redo1:
52114 for (;;) {
52115 int bufs;
52116
52117- if (!pipe->readers) {
52118+ if (!atomic_read(&pipe->readers)) {
52119 send_sig(SIGPIPE, current, 0);
52120 if (!ret)
52121 ret = -EPIPE;
52122@@ -597,9 +597,9 @@ redo2:
52123 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52124 do_wakeup = 0;
52125 }
52126- pipe->waiting_writers++;
52127+ atomic_inc(&pipe->waiting_writers);
52128 pipe_wait(pipe);
52129- pipe->waiting_writers--;
52130+ atomic_dec(&pipe->waiting_writers);
52131 }
52132 out:
52133 mutex_unlock(&inode->i_mutex);
52134@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52135 mask = 0;
52136 if (filp->f_mode & FMODE_READ) {
52137 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52138- if (!pipe->writers && filp->f_version != pipe->w_counter)
52139+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52140 mask |= POLLHUP;
52141 }
52142
52143@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52144 * Most Unices do not set POLLERR for FIFOs but on Linux they
52145 * behave exactly like pipes for poll().
52146 */
52147- if (!pipe->readers)
52148+ if (!atomic_read(&pipe->readers))
52149 mask |= POLLERR;
52150 }
52151
52152@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52153
52154 mutex_lock(&inode->i_mutex);
52155 pipe = inode->i_pipe;
52156- pipe->readers -= decr;
52157- pipe->writers -= decw;
52158+ atomic_sub(decr, &pipe->readers);
52159+ atomic_sub(decw, &pipe->writers);
52160
52161- if (!pipe->readers && !pipe->writers) {
52162+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52163 free_pipe_info(inode);
52164 } else {
52165 wake_up_interruptible_sync(&pipe->wait);
52166@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52167
52168 if (inode->i_pipe) {
52169 ret = 0;
52170- inode->i_pipe->readers++;
52171+ atomic_inc(&inode->i_pipe->readers);
52172 }
52173
52174 mutex_unlock(&inode->i_mutex);
52175@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52176
52177 if (inode->i_pipe) {
52178 ret = 0;
52179- inode->i_pipe->writers++;
52180+ atomic_inc(&inode->i_pipe->writers);
52181 }
52182
52183 mutex_unlock(&inode->i_mutex);
52184@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52185 if (inode->i_pipe) {
52186 ret = 0;
52187 if (filp->f_mode & FMODE_READ)
52188- inode->i_pipe->readers++;
52189+ atomic_inc(&inode->i_pipe->readers);
52190 if (filp->f_mode & FMODE_WRITE)
52191- inode->i_pipe->writers++;
52192+ atomic_inc(&inode->i_pipe->writers);
52193 }
52194
52195 mutex_unlock(&inode->i_mutex);
52196@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52197 inode->i_pipe = NULL;
52198 }
52199
52200-static struct vfsmount *pipe_mnt __read_mostly;
52201+struct vfsmount *pipe_mnt __read_mostly;
52202 static int pipefs_delete_dentry(struct dentry *dentry)
52203 {
52204 /*
52205@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52206 goto fail_iput;
52207 inode->i_pipe = pipe;
52208
52209- pipe->readers = pipe->writers = 1;
52210+ atomic_set(&pipe->readers, 1);
52211+ atomic_set(&pipe->writers, 1);
52212 inode->i_fop = &rdwr_pipefifo_fops;
52213
52214 /*
52215diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52216index 50f8f06..c5755df 100644
52217--- a/fs/proc/Kconfig
52218+++ b/fs/proc/Kconfig
52219@@ -30,12 +30,12 @@ config PROC_FS
52220
52221 config PROC_KCORE
52222 bool "/proc/kcore support" if !ARM
52223- depends on PROC_FS && MMU
52224+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52225
52226 config PROC_VMCORE
52227 bool "/proc/vmcore support (EXPERIMENTAL)"
52228- depends on PROC_FS && CRASH_DUMP
52229- default y
52230+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52231+ default n
52232 help
52233 Exports the dump image of crashed kernel in ELF format.
52234
52235@@ -59,8 +59,8 @@ config PROC_SYSCTL
52236 limited in memory.
52237
52238 config PROC_PAGE_MONITOR
52239- default y
52240- depends on PROC_FS && MMU
52241+ default n
52242+ depends on PROC_FS && MMU && !GRKERNSEC
52243 bool "Enable /proc page monitoring" if EMBEDDED
52244 help
52245 Various /proc files exist to monitor process memory utilization:
52246diff --git a/fs/proc/array.c b/fs/proc/array.c
52247index c5ef152..1363194 100644
52248--- a/fs/proc/array.c
52249+++ b/fs/proc/array.c
52250@@ -60,6 +60,7 @@
52251 #include <linux/tty.h>
52252 #include <linux/string.h>
52253 #include <linux/mman.h>
52254+#include <linux/grsecurity.h>
52255 #include <linux/proc_fs.h>
52256 #include <linux/ioport.h>
52257 #include <linux/uaccess.h>
52258@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52259 p->nivcsw);
52260 }
52261
52262+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52263+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52264+{
52265+ if (p->mm)
52266+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52267+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52268+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52269+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52270+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52271+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52272+ else
52273+ seq_printf(m, "PaX:\t-----\n");
52274+}
52275+#endif
52276+
52277 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52278 struct pid *pid, struct task_struct *task)
52279 {
52280@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52281 task_cap(m, task);
52282 cpuset_task_status_allowed(m, task);
52283 task_context_switch_counts(m, task);
52284+
52285+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52286+ task_pax(m, task);
52287+#endif
52288+
52289+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52290+ task_grsec_rbac(m, task);
52291+#endif
52292+
52293 return 0;
52294 }
52295
52296+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52297+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52298+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52299+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52300+#endif
52301+
52302 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52303 struct pid *pid, struct task_struct *task, int whole)
52304 {
52305@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52306 cputime_t cutime, cstime, utime, stime;
52307 cputime_t cgtime, gtime;
52308 unsigned long rsslim = 0;
52309- char tcomm[sizeof(task->comm)];
52310+ char tcomm[sizeof(task->comm)] = { 0 };
52311 unsigned long flags;
52312
52313+ pax_track_stack();
52314+
52315 state = *get_task_state(task);
52316 vsize = eip = esp = 0;
52317 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52318@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52319 gtime = task_gtime(task);
52320 }
52321
52322+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52323+ if (PAX_RAND_FLAGS(mm)) {
52324+ eip = 0;
52325+ esp = 0;
52326+ wchan = 0;
52327+ }
52328+#endif
52329+#ifdef CONFIG_GRKERNSEC_HIDESYM
52330+ wchan = 0;
52331+ eip =0;
52332+ esp =0;
52333+#endif
52334+
52335 /* scale priority and nice values from timeslices to -20..20 */
52336 /* to make it look like a "normal" Unix priority/nice value */
52337 priority = task_prio(task);
52338@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52339 vsize,
52340 mm ? get_mm_rss(mm) : 0,
52341 rsslim,
52342+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52343+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52344+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52345+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52346+#else
52347 mm ? (permitted ? mm->start_code : 1) : 0,
52348 mm ? (permitted ? mm->end_code : 1) : 0,
52349 (permitted && mm) ? mm->start_stack : 0,
52350+#endif
52351 esp,
52352 eip,
52353 /* The signal information here is obsolete.
52354@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52355
52356 return 0;
52357 }
52358+
52359+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52360+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52361+{
52362+ u32 curr_ip = 0;
52363+ unsigned long flags;
52364+
52365+ if (lock_task_sighand(task, &flags)) {
52366+ curr_ip = task->signal->curr_ip;
52367+ unlock_task_sighand(task, &flags);
52368+ }
52369+
52370+ return sprintf(buffer, "%pI4\n", &curr_ip);
52371+}
52372+#endif
52373diff --git a/fs/proc/base.c b/fs/proc/base.c
52374index 67f7dc0..e95ea4f 100644
52375--- a/fs/proc/base.c
52376+++ b/fs/proc/base.c
52377@@ -102,6 +102,22 @@ struct pid_entry {
52378 union proc_op op;
52379 };
52380
52381+struct getdents_callback {
52382+ struct linux_dirent __user * current_dir;
52383+ struct linux_dirent __user * previous;
52384+ struct file * file;
52385+ int count;
52386+ int error;
52387+};
52388+
52389+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52390+ loff_t offset, u64 ino, unsigned int d_type)
52391+{
52392+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52393+ buf->error = -EINVAL;
52394+ return 0;
52395+}
52396+
52397 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52398 .name = (NAME), \
52399 .len = sizeof(NAME) - 1, \
52400@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52401 if (task == current)
52402 return 0;
52403
52404+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52405+ return -EPERM;
52406+
52407 /*
52408 * If current is actively ptrace'ing, and would also be
52409 * permitted to freshly attach with ptrace now, permit it.
52410@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52411 if (!mm->arg_end)
52412 goto out_mm; /* Shh! No looking before we're done */
52413
52414+ if (gr_acl_handle_procpidmem(task))
52415+ goto out_mm;
52416+
52417 len = mm->arg_end - mm->arg_start;
52418
52419 if (len > PAGE_SIZE)
52420@@ -287,12 +309,28 @@ out:
52421 return res;
52422 }
52423
52424+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52425+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52426+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52427+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52428+#endif
52429+
52430 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52431 {
52432 int res = 0;
52433 struct mm_struct *mm = get_task_mm(task);
52434 if (mm) {
52435 unsigned int nwords = 0;
52436+
52437+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52438+ /* allow if we're currently ptracing this task */
52439+ if (PAX_RAND_FLAGS(mm) &&
52440+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52441+ mmput(mm);
52442+ return 0;
52443+ }
52444+#endif
52445+
52446 do {
52447 nwords += 2;
52448 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52449@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52450 }
52451
52452
52453-#ifdef CONFIG_KALLSYMS
52454+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52455 /*
52456 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52457 * Returns the resolved symbol. If that fails, simply return the address.
52458@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52459 mutex_unlock(&task->cred_guard_mutex);
52460 }
52461
52462-#ifdef CONFIG_STACKTRACE
52463+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52464
52465 #define MAX_STACK_TRACE_DEPTH 64
52466
52467@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52468 return count;
52469 }
52470
52471-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52472+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52473 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52474 {
52475 long nr;
52476@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52477 /************************************************************************/
52478
52479 /* permission checks */
52480-static int proc_fd_access_allowed(struct inode *inode)
52481+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52482 {
52483 struct task_struct *task;
52484 int allowed = 0;
52485@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52486 */
52487 task = get_proc_task(inode);
52488 if (task) {
52489- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52490+ if (log)
52491+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52492+ else
52493+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52494 put_task_struct(task);
52495 }
52496 return allowed;
52497@@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52498 if (!task)
52499 goto out_no_task;
52500
52501+ if (gr_acl_handle_procpidmem(task))
52502+ goto out;
52503+
52504 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52505 goto out;
52506
52507@@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52508 path_put(&nd->path);
52509
52510 /* Are we allowed to snoop on the tasks file descriptors? */
52511- if (!proc_fd_access_allowed(inode))
52512+ if (!proc_fd_access_allowed(inode,0))
52513 goto out;
52514
52515 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52516@@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52517 struct path path;
52518
52519 /* Are we allowed to snoop on the tasks file descriptors? */
52520- if (!proc_fd_access_allowed(inode))
52521- goto out;
52522+ /* logging this is needed for learning on chromium to work properly,
52523+ but we don't want to flood the logs from 'ps' which does a readlink
52524+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52525+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
52526+ */
52527+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52528+ if (!proc_fd_access_allowed(inode,0))
52529+ goto out;
52530+ } else {
52531+ if (!proc_fd_access_allowed(inode,1))
52532+ goto out;
52533+ }
52534
52535 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52536 if (error)
52537@@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52538 rcu_read_lock();
52539 cred = __task_cred(task);
52540 inode->i_uid = cred->euid;
52541+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52542+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52543+#else
52544 inode->i_gid = cred->egid;
52545+#endif
52546 rcu_read_unlock();
52547 }
52548 security_task_to_inode(task, inode);
52549@@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52550 struct inode *inode = dentry->d_inode;
52551 struct task_struct *task;
52552 const struct cred *cred;
52553+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52554+ const struct cred *tmpcred = current_cred();
52555+#endif
52556
52557 generic_fillattr(inode, stat);
52558
52559@@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52560 stat->uid = 0;
52561 stat->gid = 0;
52562 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52563+
52564+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52565+ rcu_read_unlock();
52566+ return -ENOENT;
52567+ }
52568+
52569 if (task) {
52570+ cred = __task_cred(task);
52571+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52572+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
52573+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52574+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52575+#endif
52576+ ) {
52577+#endif
52578 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52579+#ifdef CONFIG_GRKERNSEC_PROC_USER
52580+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52581+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52582+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52583+#endif
52584 task_dumpable(task)) {
52585- cred = __task_cred(task);
52586 stat->uid = cred->euid;
52587+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52588+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
52589+#else
52590 stat->gid = cred->egid;
52591+#endif
52592 }
52593+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52594+ } else {
52595+ rcu_read_unlock();
52596+ return -ENOENT;
52597+ }
52598+#endif
52599 }
52600 rcu_read_unlock();
52601 return 0;
52602@@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
52603
52604 if (task) {
52605 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52606+#ifdef CONFIG_GRKERNSEC_PROC_USER
52607+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52608+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52609+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52610+#endif
52611 task_dumpable(task)) {
52612 rcu_read_lock();
52613 cred = __task_cred(task);
52614 inode->i_uid = cred->euid;
52615+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52616+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52617+#else
52618 inode->i_gid = cred->egid;
52619+#endif
52620 rcu_read_unlock();
52621 } else {
52622 inode->i_uid = 0;
52623@@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
52624 int fd = proc_fd(inode);
52625
52626 if (task) {
52627- files = get_files_struct(task);
52628+ if (!gr_acl_handle_procpidmem(task))
52629+ files = get_files_struct(task);
52630 put_task_struct(task);
52631 }
52632 if (files) {
52633@@ -1922,12 +2021,22 @@ static const struct file_operations proc_fd_operations = {
52634 static int proc_fd_permission(struct inode *inode, int mask)
52635 {
52636 int rv;
52637+ struct task_struct *task;
52638
52639 rv = generic_permission(inode, mask, NULL);
52640- if (rv == 0)
52641- return 0;
52642+
52643 if (task_pid(current) == proc_pid(inode))
52644 rv = 0;
52645+
52646+ task = get_proc_task(inode);
52647+ if (task == NULL)
52648+ return rv;
52649+
52650+ if (gr_acl_handle_procpidmem(task))
52651+ rv = -EACCES;
52652+
52653+ put_task_struct(task);
52654+
52655 return rv;
52656 }
52657
52658@@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
52659 if (!task)
52660 goto out_no_task;
52661
52662+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52663+ goto out;
52664+
52665 /*
52666 * Yes, it does not scale. And it should not. Don't add
52667 * new entries into /proc/<tgid>/ without very good reasons.
52668@@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct file *filp,
52669 if (!task)
52670 goto out_no_task;
52671
52672+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52673+ goto out;
52674+
52675 ret = 0;
52676 i = filp->f_pos;
52677 switch (i) {
52678@@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
52679 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
52680 void *cookie)
52681 {
52682- char *s = nd_get_link(nd);
52683+ const char *s = nd_get_link(nd);
52684 if (!IS_ERR(s))
52685 __putname(s);
52686 }
52687@@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_stuff[] = {
52688 #ifdef CONFIG_SCHED_DEBUG
52689 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52690 #endif
52691-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52692+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52693 INF("syscall", S_IRUGO, proc_pid_syscall),
52694 #endif
52695 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52696@@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_stuff[] = {
52697 #ifdef CONFIG_SECURITY
52698 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52699 #endif
52700-#ifdef CONFIG_KALLSYMS
52701+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52702 INF("wchan", S_IRUGO, proc_pid_wchan),
52703 #endif
52704-#ifdef CONFIG_STACKTRACE
52705+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52706 ONE("stack", S_IRUGO, proc_pid_stack),
52707 #endif
52708 #ifdef CONFIG_SCHEDSTATS
52709@@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_stuff[] = {
52710 #ifdef CONFIG_TASK_IO_ACCOUNTING
52711 INF("io", S_IRUSR, proc_tgid_io_accounting),
52712 #endif
52713+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52714+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
52715+#endif
52716 };
52717
52718 static int proc_tgid_base_readdir(struct file * filp,
52719@@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
52720 if (!inode)
52721 goto out;
52722
52723+#ifdef CONFIG_GRKERNSEC_PROC_USER
52724+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
52725+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52726+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52727+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
52728+#else
52729 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
52730+#endif
52731 inode->i_op = &proc_tgid_base_inode_operations;
52732 inode->i_fop = &proc_tgid_base_operations;
52733 inode->i_flags|=S_IMMUTABLE;
52734@@ -2777,7 +2902,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
52735 if (!task)
52736 goto out;
52737
52738+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52739+ goto out_put_task;
52740+
52741 result = proc_pid_instantiate(dir, dentry, task, NULL);
52742+out_put_task:
52743 put_task_struct(task);
52744 out:
52745 return result;
52746@@ -2842,6 +2971,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52747 {
52748 unsigned int nr;
52749 struct task_struct *reaper;
52750+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52751+ const struct cred *tmpcred = current_cred();
52752+ const struct cred *itercred;
52753+#endif
52754+ filldir_t __filldir = filldir;
52755 struct tgid_iter iter;
52756 struct pid_namespace *ns;
52757
52758@@ -2865,8 +2999,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52759 for (iter = next_tgid(ns, iter);
52760 iter.task;
52761 iter.tgid += 1, iter = next_tgid(ns, iter)) {
52762+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52763+ rcu_read_lock();
52764+ itercred = __task_cred(iter.task);
52765+#endif
52766+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
52767+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52768+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
52769+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52770+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52771+#endif
52772+ )
52773+#endif
52774+ )
52775+ __filldir = &gr_fake_filldir;
52776+ else
52777+ __filldir = filldir;
52778+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52779+ rcu_read_unlock();
52780+#endif
52781 filp->f_pos = iter.tgid + TGID_OFFSET;
52782- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
52783+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
52784 put_task_struct(iter.task);
52785 goto out;
52786 }
52787@@ -2892,7 +3045,7 @@ static const struct pid_entry tid_base_stuff[] = {
52788 #ifdef CONFIG_SCHED_DEBUG
52789 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52790 #endif
52791-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52792+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52793 INF("syscall", S_IRUGO, proc_pid_syscall),
52794 #endif
52795 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52796@@ -2916,10 +3069,10 @@ static const struct pid_entry tid_base_stuff[] = {
52797 #ifdef CONFIG_SECURITY
52798 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52799 #endif
52800-#ifdef CONFIG_KALLSYMS
52801+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52802 INF("wchan", S_IRUGO, proc_pid_wchan),
52803 #endif
52804-#ifdef CONFIG_STACKTRACE
52805+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52806 ONE("stack", S_IRUGO, proc_pid_stack),
52807 #endif
52808 #ifdef CONFIG_SCHEDSTATS
52809diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
52810index 82676e3..5f8518a 100644
52811--- a/fs/proc/cmdline.c
52812+++ b/fs/proc/cmdline.c
52813@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
52814
52815 static int __init proc_cmdline_init(void)
52816 {
52817+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52818+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
52819+#else
52820 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
52821+#endif
52822 return 0;
52823 }
52824 module_init(proc_cmdline_init);
52825diff --git a/fs/proc/devices.c b/fs/proc/devices.c
52826index 59ee7da..469b4b6 100644
52827--- a/fs/proc/devices.c
52828+++ b/fs/proc/devices.c
52829@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
52830
52831 static int __init proc_devices_init(void)
52832 {
52833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52834+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
52835+#else
52836 proc_create("devices", 0, NULL, &proc_devinfo_operations);
52837+#endif
52838 return 0;
52839 }
52840 module_init(proc_devices_init);
52841diff --git a/fs/proc/inode.c b/fs/proc/inode.c
52842index d78ade3..81767f9 100644
52843--- a/fs/proc/inode.c
52844+++ b/fs/proc/inode.c
52845@@ -18,12 +18,19 @@
52846 #include <linux/module.h>
52847 #include <linux/smp_lock.h>
52848 #include <linux/sysctl.h>
52849+#include <linux/grsecurity.h>
52850
52851 #include <asm/system.h>
52852 #include <asm/uaccess.h>
52853
52854 #include "internal.h"
52855
52856+#ifdef CONFIG_PROC_SYSCTL
52857+extern const struct inode_operations proc_sys_inode_operations;
52858+extern const struct inode_operations proc_sys_dir_operations;
52859+#endif
52860+
52861+
52862 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
52863 {
52864 atomic_inc(&de->count);
52865@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
52866 de_put(de);
52867 if (PROC_I(inode)->sysctl)
52868 sysctl_head_put(PROC_I(inode)->sysctl);
52869+
52870+#ifdef CONFIG_PROC_SYSCTL
52871+ if (inode->i_op == &proc_sys_inode_operations ||
52872+ inode->i_op == &proc_sys_dir_operations)
52873+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
52874+#endif
52875+
52876 clear_inode(inode);
52877 }
52878
52879@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
52880 if (de->mode) {
52881 inode->i_mode = de->mode;
52882 inode->i_uid = de->uid;
52883+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52884+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52885+#else
52886 inode->i_gid = de->gid;
52887+#endif
52888 }
52889 if (de->size)
52890 inode->i_size = de->size;
52891diff --git a/fs/proc/internal.h b/fs/proc/internal.h
52892index 753ca37..26bcf3b 100644
52893--- a/fs/proc/internal.h
52894+++ b/fs/proc/internal.h
52895@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52896 struct pid *pid, struct task_struct *task);
52897 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52898 struct pid *pid, struct task_struct *task);
52899+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52900+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
52901+#endif
52902 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
52903
52904 extern const struct file_operations proc_maps_operations;
52905diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
52906index b442dac..aab29cb 100644
52907--- a/fs/proc/kcore.c
52908+++ b/fs/proc/kcore.c
52909@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
52910 off_t offset = 0;
52911 struct kcore_list *m;
52912
52913+ pax_track_stack();
52914+
52915 /* setup ELF header */
52916 elf = (struct elfhdr *) bufp;
52917 bufp += sizeof(struct elfhdr);
52918@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52919 * the addresses in the elf_phdr on our list.
52920 */
52921 start = kc_offset_to_vaddr(*fpos - elf_buflen);
52922- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
52923+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
52924+ if (tsz > buflen)
52925 tsz = buflen;
52926-
52927+
52928 while (buflen) {
52929 struct kcore_list *m;
52930
52931@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52932 kfree(elf_buf);
52933 } else {
52934 if (kern_addr_valid(start)) {
52935- unsigned long n;
52936+ char *elf_buf;
52937+ mm_segment_t oldfs;
52938
52939- n = copy_to_user(buffer, (char *)start, tsz);
52940- /*
52941- * We cannot distingush between fault on source
52942- * and fault on destination. When this happens
52943- * we clear too and hope it will trigger the
52944- * EFAULT again.
52945- */
52946- if (n) {
52947- if (clear_user(buffer + tsz - n,
52948- n))
52949+ elf_buf = kmalloc(tsz, GFP_KERNEL);
52950+ if (!elf_buf)
52951+ return -ENOMEM;
52952+ oldfs = get_fs();
52953+ set_fs(KERNEL_DS);
52954+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
52955+ set_fs(oldfs);
52956+ if (copy_to_user(buffer, elf_buf, tsz)) {
52957+ kfree(elf_buf);
52958 return -EFAULT;
52959+ }
52960 }
52961+ set_fs(oldfs);
52962+ kfree(elf_buf);
52963 } else {
52964 if (clear_user(buffer, tsz))
52965 return -EFAULT;
52966@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52967
52968 static int open_kcore(struct inode *inode, struct file *filp)
52969 {
52970+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
52971+ return -EPERM;
52972+#endif
52973 if (!capable(CAP_SYS_RAWIO))
52974 return -EPERM;
52975 if (kcore_need_update)
52976diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
52977index a65239c..ad1182a 100644
52978--- a/fs/proc/meminfo.c
52979+++ b/fs/proc/meminfo.c
52980@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52981 unsigned long pages[NR_LRU_LISTS];
52982 int lru;
52983
52984+ pax_track_stack();
52985+
52986 /*
52987 * display in kilobytes.
52988 */
52989@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52990 vmi.used >> 10,
52991 vmi.largest_chunk >> 10
52992 #ifdef CONFIG_MEMORY_FAILURE
52993- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
52994+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
52995 #endif
52996 );
52997
52998diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
52999index 9fe7d7e..cdb62c9 100644
53000--- a/fs/proc/nommu.c
53001+++ b/fs/proc/nommu.c
53002@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53003 if (len < 1)
53004 len = 1;
53005 seq_printf(m, "%*c", len, ' ');
53006- seq_path(m, &file->f_path, "");
53007+ seq_path(m, &file->f_path, "\n\\");
53008 }
53009
53010 seq_putc(m, '\n');
53011diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53012index 04d1270..25e1173 100644
53013--- a/fs/proc/proc_net.c
53014+++ b/fs/proc/proc_net.c
53015@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53016 struct task_struct *task;
53017 struct nsproxy *ns;
53018 struct net *net = NULL;
53019+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53020+ const struct cred *cred = current_cred();
53021+#endif
53022+
53023+#ifdef CONFIG_GRKERNSEC_PROC_USER
53024+ if (cred->fsuid)
53025+ return net;
53026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53027+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53028+ return net;
53029+#endif
53030
53031 rcu_read_lock();
53032 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53033diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53034index f667e8a..55f4d96 100644
53035--- a/fs/proc/proc_sysctl.c
53036+++ b/fs/proc/proc_sysctl.c
53037@@ -7,11 +7,13 @@
53038 #include <linux/security.h>
53039 #include "internal.h"
53040
53041+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53042+
53043 static const struct dentry_operations proc_sys_dentry_operations;
53044 static const struct file_operations proc_sys_file_operations;
53045-static const struct inode_operations proc_sys_inode_operations;
53046+const struct inode_operations proc_sys_inode_operations;
53047 static const struct file_operations proc_sys_dir_file_operations;
53048-static const struct inode_operations proc_sys_dir_operations;
53049+const struct inode_operations proc_sys_dir_operations;
53050
53051 static struct inode *proc_sys_make_inode(struct super_block *sb,
53052 struct ctl_table_header *head, struct ctl_table *table)
53053@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53054 if (!p)
53055 goto out;
53056
53057+ if (gr_handle_sysctl(p, MAY_EXEC))
53058+ goto out;
53059+
53060 err = ERR_PTR(-ENOMEM);
53061 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53062 if (h)
53063@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53064
53065 err = NULL;
53066 dentry->d_op = &proc_sys_dentry_operations;
53067+
53068+ gr_handle_proc_create(dentry, inode);
53069+
53070 d_add(dentry, inode);
53071
53072 out:
53073@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53074 return -ENOMEM;
53075 } else {
53076 child->d_op = &proc_sys_dentry_operations;
53077+
53078+ gr_handle_proc_create(child, inode);
53079+
53080 d_add(child, inode);
53081 }
53082 } else {
53083@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53084 if (*pos < file->f_pos)
53085 continue;
53086
53087+ if (gr_handle_sysctl(table, 0))
53088+ continue;
53089+
53090 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53091 if (res)
53092 return res;
53093@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53094 if (IS_ERR(head))
53095 return PTR_ERR(head);
53096
53097+ if (table && gr_handle_sysctl(table, MAY_EXEC))
53098+ return -ENOENT;
53099+
53100 generic_fillattr(inode, stat);
53101 if (table)
53102 stat->mode = (stat->mode & S_IFMT) | table->mode;
53103@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53104 };
53105
53106 static const struct file_operations proc_sys_dir_file_operations = {
53107+ .read = generic_read_dir,
53108 .readdir = proc_sys_readdir,
53109 .llseek = generic_file_llseek,
53110 };
53111
53112-static const struct inode_operations proc_sys_inode_operations = {
53113+const struct inode_operations proc_sys_inode_operations = {
53114 .permission = proc_sys_permission,
53115 .setattr = proc_sys_setattr,
53116 .getattr = proc_sys_getattr,
53117 };
53118
53119-static const struct inode_operations proc_sys_dir_operations = {
53120+const struct inode_operations proc_sys_dir_operations = {
53121 .lookup = proc_sys_lookup,
53122 .permission = proc_sys_permission,
53123 .setattr = proc_sys_setattr,
53124diff --git a/fs/proc/root.c b/fs/proc/root.c
53125index b080b79..d957e63 100644
53126--- a/fs/proc/root.c
53127+++ b/fs/proc/root.c
53128@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53129 #ifdef CONFIG_PROC_DEVICETREE
53130 proc_device_tree_init();
53131 #endif
53132+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53133+#ifdef CONFIG_GRKERNSEC_PROC_USER
53134+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53135+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53136+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53137+#endif
53138+#else
53139 proc_mkdir("bus", NULL);
53140+#endif
53141 proc_sys_init();
53142 }
53143
53144diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53145index 3b7b82a..7dbb571 100644
53146--- a/fs/proc/task_mmu.c
53147+++ b/fs/proc/task_mmu.c
53148@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53149 "VmStk:\t%8lu kB\n"
53150 "VmExe:\t%8lu kB\n"
53151 "VmLib:\t%8lu kB\n"
53152- "VmPTE:\t%8lu kB\n",
53153- hiwater_vm << (PAGE_SHIFT-10),
53154+ "VmPTE:\t%8lu kB\n"
53155+
53156+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53157+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53158+#endif
53159+
53160+ ,hiwater_vm << (PAGE_SHIFT-10),
53161 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53162 mm->locked_vm << (PAGE_SHIFT-10),
53163 hiwater_rss << (PAGE_SHIFT-10),
53164 total_rss << (PAGE_SHIFT-10),
53165 data << (PAGE_SHIFT-10),
53166 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53167- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53168+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53169+
53170+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53171+ , mm->context.user_cs_base, mm->context.user_cs_limit
53172+#endif
53173+
53174+ );
53175 }
53176
53177 unsigned long task_vsize(struct mm_struct *mm)
53178@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53179 struct proc_maps_private *priv = m->private;
53180 struct vm_area_struct *vma = v;
53181
53182- vma_stop(priv, vma);
53183+ if (!IS_ERR(vma))
53184+ vma_stop(priv, vma);
53185 if (priv->task)
53186 put_task_struct(priv->task);
53187 }
53188@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53189 return ret;
53190 }
53191
53192+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53193+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53194+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53195+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53196+#endif
53197+
53198 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53199 {
53200 struct mm_struct *mm = vma->vm_mm;
53201@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53202 int flags = vma->vm_flags;
53203 unsigned long ino = 0;
53204 unsigned long long pgoff = 0;
53205- unsigned long start;
53206 dev_t dev = 0;
53207 int len;
53208
53209@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53210 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53211 }
53212
53213- /* We don't show the stack guard page in /proc/maps */
53214- start = vma->vm_start;
53215- if (vma->vm_flags & VM_GROWSDOWN)
53216- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53217- start += PAGE_SIZE;
53218-
53219 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53220- start,
53221+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53222+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53223+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53224+#else
53225+ vma->vm_start,
53226 vma->vm_end,
53227+#endif
53228 flags & VM_READ ? 'r' : '-',
53229 flags & VM_WRITE ? 'w' : '-',
53230 flags & VM_EXEC ? 'x' : '-',
53231 flags & VM_MAYSHARE ? 's' : 'p',
53232+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53233+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53234+#else
53235 pgoff,
53236+#endif
53237 MAJOR(dev), MINOR(dev), ino, &len);
53238
53239 /*
53240@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53241 */
53242 if (file) {
53243 pad_len_spaces(m, len);
53244- seq_path(m, &file->f_path, "\n");
53245+ seq_path(m, &file->f_path, "\n\\");
53246 } else {
53247 const char *name = arch_vma_name(vma);
53248 if (!name) {
53249@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53250 if (vma->vm_start <= mm->brk &&
53251 vma->vm_end >= mm->start_brk) {
53252 name = "[heap]";
53253- } else if (vma->vm_start <= mm->start_stack &&
53254- vma->vm_end >= mm->start_stack) {
53255+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53256+ (vma->vm_start <= mm->start_stack &&
53257+ vma->vm_end >= mm->start_stack)) {
53258 name = "[stack]";
53259 }
53260 } else {
53261@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53262 };
53263
53264 memset(&mss, 0, sizeof mss);
53265- mss.vma = vma;
53266- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53267- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53268+
53269+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53270+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53271+#endif
53272+ mss.vma = vma;
53273+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53274+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53275+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53276+ }
53277+#endif
53278
53279 show_map_vma(m, vma);
53280
53281@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53282 "Swap: %8lu kB\n"
53283 "KernelPageSize: %8lu kB\n"
53284 "MMUPageSize: %8lu kB\n",
53285+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53286+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53287+#else
53288 (vma->vm_end - vma->vm_start) >> 10,
53289+#endif
53290 mss.resident >> 10,
53291 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53292 mss.shared_clean >> 10,
53293diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53294index 8f5c05d..c99c76d 100644
53295--- a/fs/proc/task_nommu.c
53296+++ b/fs/proc/task_nommu.c
53297@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53298 else
53299 bytes += kobjsize(mm);
53300
53301- if (current->fs && current->fs->users > 1)
53302+ if (current->fs && atomic_read(&current->fs->users) > 1)
53303 sbytes += kobjsize(current->fs);
53304 else
53305 bytes += kobjsize(current->fs);
53306@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53307 if (len < 1)
53308 len = 1;
53309 seq_printf(m, "%*c", len, ' ');
53310- seq_path(m, &file->f_path, "");
53311+ seq_path(m, &file->f_path, "\n\\");
53312 }
53313
53314 seq_putc(m, '\n');
53315diff --git a/fs/readdir.c b/fs/readdir.c
53316index 7723401..30059a6 100644
53317--- a/fs/readdir.c
53318+++ b/fs/readdir.c
53319@@ -16,6 +16,7 @@
53320 #include <linux/security.h>
53321 #include <linux/syscalls.h>
53322 #include <linux/unistd.h>
53323+#include <linux/namei.h>
53324
53325 #include <asm/uaccess.h>
53326
53327@@ -67,6 +68,7 @@ struct old_linux_dirent {
53328
53329 struct readdir_callback {
53330 struct old_linux_dirent __user * dirent;
53331+ struct file * file;
53332 int result;
53333 };
53334
53335@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53336 buf->result = -EOVERFLOW;
53337 return -EOVERFLOW;
53338 }
53339+
53340+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53341+ return 0;
53342+
53343 buf->result++;
53344 dirent = buf->dirent;
53345 if (!access_ok(VERIFY_WRITE, dirent,
53346@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53347
53348 buf.result = 0;
53349 buf.dirent = dirent;
53350+ buf.file = file;
53351
53352 error = vfs_readdir(file, fillonedir, &buf);
53353 if (buf.result)
53354@@ -142,6 +149,7 @@ struct linux_dirent {
53355 struct getdents_callback {
53356 struct linux_dirent __user * current_dir;
53357 struct linux_dirent __user * previous;
53358+ struct file * file;
53359 int count;
53360 int error;
53361 };
53362@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53363 buf->error = -EOVERFLOW;
53364 return -EOVERFLOW;
53365 }
53366+
53367+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53368+ return 0;
53369+
53370 dirent = buf->previous;
53371 if (dirent) {
53372 if (__put_user(offset, &dirent->d_off))
53373@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53374 buf.previous = NULL;
53375 buf.count = count;
53376 buf.error = 0;
53377+ buf.file = file;
53378
53379 error = vfs_readdir(file, filldir, &buf);
53380 if (error >= 0)
53381@@ -228,6 +241,7 @@ out:
53382 struct getdents_callback64 {
53383 struct linux_dirent64 __user * current_dir;
53384 struct linux_dirent64 __user * previous;
53385+ struct file *file;
53386 int count;
53387 int error;
53388 };
53389@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53390 buf->error = -EINVAL; /* only used if we fail.. */
53391 if (reclen > buf->count)
53392 return -EINVAL;
53393+
53394+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53395+ return 0;
53396+
53397 dirent = buf->previous;
53398 if (dirent) {
53399 if (__put_user(offset, &dirent->d_off))
53400@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53401
53402 buf.current_dir = dirent;
53403 buf.previous = NULL;
53404+ buf.file = file;
53405 buf.count = count;
53406 buf.error = 0;
53407
53408@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53409 error = buf.error;
53410 lastdirent = buf.previous;
53411 if (lastdirent) {
53412- typeof(lastdirent->d_off) d_off = file->f_pos;
53413+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53414 if (__put_user(d_off, &lastdirent->d_off))
53415 error = -EFAULT;
53416 else
53417diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53418index d42c30c..4fd8718 100644
53419--- a/fs/reiserfs/dir.c
53420+++ b/fs/reiserfs/dir.c
53421@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53422 struct reiserfs_dir_entry de;
53423 int ret = 0;
53424
53425+ pax_track_stack();
53426+
53427 reiserfs_write_lock(inode->i_sb);
53428
53429 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53430diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53431index 128d3f7..8840d44 100644
53432--- a/fs/reiserfs/do_balan.c
53433+++ b/fs/reiserfs/do_balan.c
53434@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53435 return;
53436 }
53437
53438- atomic_inc(&(fs_generation(tb->tb_sb)));
53439+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53440 do_balance_starts(tb);
53441
53442 /* balance leaf returns 0 except if combining L R and S into
53443diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53444index 72cb1cc..d0e3181 100644
53445--- a/fs/reiserfs/item_ops.c
53446+++ b/fs/reiserfs/item_ops.c
53447@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53448 vi->vi_index, vi->vi_type, vi->vi_ih);
53449 }
53450
53451-static struct item_operations stat_data_ops = {
53452+static const struct item_operations stat_data_ops = {
53453 .bytes_number = sd_bytes_number,
53454 .decrement_key = sd_decrement_key,
53455 .is_left_mergeable = sd_is_left_mergeable,
53456@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53457 vi->vi_index, vi->vi_type, vi->vi_ih);
53458 }
53459
53460-static struct item_operations direct_ops = {
53461+static const struct item_operations direct_ops = {
53462 .bytes_number = direct_bytes_number,
53463 .decrement_key = direct_decrement_key,
53464 .is_left_mergeable = direct_is_left_mergeable,
53465@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53466 vi->vi_index, vi->vi_type, vi->vi_ih);
53467 }
53468
53469-static struct item_operations indirect_ops = {
53470+static const struct item_operations indirect_ops = {
53471 .bytes_number = indirect_bytes_number,
53472 .decrement_key = indirect_decrement_key,
53473 .is_left_mergeable = indirect_is_left_mergeable,
53474@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53475 printk("\n");
53476 }
53477
53478-static struct item_operations direntry_ops = {
53479+static const struct item_operations direntry_ops = {
53480 .bytes_number = direntry_bytes_number,
53481 .decrement_key = direntry_decrement_key,
53482 .is_left_mergeable = direntry_is_left_mergeable,
53483@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53484 "Invalid item type observed, run fsck ASAP");
53485 }
53486
53487-static struct item_operations errcatch_ops = {
53488+static const struct item_operations errcatch_ops = {
53489 errcatch_bytes_number,
53490 errcatch_decrement_key,
53491 errcatch_is_left_mergeable,
53492@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53493 #error Item types must use disk-format assigned values.
53494 #endif
53495
53496-struct item_operations *item_ops[TYPE_ANY + 1] = {
53497+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53498 &stat_data_ops,
53499 &indirect_ops,
53500 &direct_ops,
53501diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53502index b5fe0aa..e0e25c4 100644
53503--- a/fs/reiserfs/journal.c
53504+++ b/fs/reiserfs/journal.c
53505@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53506 struct buffer_head *bh;
53507 int i, j;
53508
53509+ pax_track_stack();
53510+
53511 bh = __getblk(dev, block, bufsize);
53512 if (buffer_uptodate(bh))
53513 return (bh);
53514diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53515index 2715791..b8996db 100644
53516--- a/fs/reiserfs/namei.c
53517+++ b/fs/reiserfs/namei.c
53518@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53519 unsigned long savelink = 1;
53520 struct timespec ctime;
53521
53522+ pax_track_stack();
53523+
53524 /* three balancings: (1) old name removal, (2) new name insertion
53525 and (3) maybe "save" link insertion
53526 stat data updates: (1) old directory,
53527diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
53528index 9229e55..3d2e3b7 100644
53529--- a/fs/reiserfs/procfs.c
53530+++ b/fs/reiserfs/procfs.c
53531@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
53532 "SMALL_TAILS " : "NO_TAILS ",
53533 replay_only(sb) ? "REPLAY_ONLY " : "",
53534 convert_reiserfs(sb) ? "CONV " : "",
53535- atomic_read(&r->s_generation_counter),
53536+ atomic_read_unchecked(&r->s_generation_counter),
53537 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
53538 SF(s_do_balance), SF(s_unneeded_left_neighbor),
53539 SF(s_good_search_by_key_reada), SF(s_bmaps),
53540@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
53541 struct journal_params *jp = &rs->s_v1.s_journal;
53542 char b[BDEVNAME_SIZE];
53543
53544+ pax_track_stack();
53545+
53546 seq_printf(m, /* on-disk fields */
53547 "jp_journal_1st_block: \t%i\n"
53548 "jp_journal_dev: \t%s[%x]\n"
53549diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
53550index d036ee5..4c7dca1 100644
53551--- a/fs/reiserfs/stree.c
53552+++ b/fs/reiserfs/stree.c
53553@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
53554 int iter = 0;
53555 #endif
53556
53557+ pax_track_stack();
53558+
53559 BUG_ON(!th->t_trans_id);
53560
53561 init_tb_struct(th, &s_del_balance, sb, path,
53562@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
53563 int retval;
53564 int quota_cut_bytes = 0;
53565
53566+ pax_track_stack();
53567+
53568 BUG_ON(!th->t_trans_id);
53569
53570 le_key2cpu_key(&cpu_key, key);
53571@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
53572 int quota_cut_bytes;
53573 loff_t tail_pos = 0;
53574
53575+ pax_track_stack();
53576+
53577 BUG_ON(!th->t_trans_id);
53578
53579 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
53580@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
53581 int retval;
53582 int fs_gen;
53583
53584+ pax_track_stack();
53585+
53586 BUG_ON(!th->t_trans_id);
53587
53588 fs_gen = get_generation(inode->i_sb);
53589@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
53590 int fs_gen = 0;
53591 int quota_bytes = 0;
53592
53593+ pax_track_stack();
53594+
53595 BUG_ON(!th->t_trans_id);
53596
53597 if (inode) { /* Do we count quotas for item? */
53598diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
53599index f0ad05f..af3306f 100644
53600--- a/fs/reiserfs/super.c
53601+++ b/fs/reiserfs/super.c
53602@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
53603 {.option_name = NULL}
53604 };
53605
53606+ pax_track_stack();
53607+
53608 *blocks = 0;
53609 if (!options || !*options)
53610 /* use default configuration: create tails, journaling on, no
53611diff --git a/fs/select.c b/fs/select.c
53612index fd38ce2..f5381b8 100644
53613--- a/fs/select.c
53614+++ b/fs/select.c
53615@@ -20,6 +20,7 @@
53616 #include <linux/module.h>
53617 #include <linux/slab.h>
53618 #include <linux/poll.h>
53619+#include <linux/security.h>
53620 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
53621 #include <linux/file.h>
53622 #include <linux/fdtable.h>
53623@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
53624 int retval, i, timed_out = 0;
53625 unsigned long slack = 0;
53626
53627+ pax_track_stack();
53628+
53629 rcu_read_lock();
53630 retval = max_select_fd(n, fds);
53631 rcu_read_unlock();
53632@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
53633 /* Allocate small arguments on the stack to save memory and be faster */
53634 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
53635
53636+ pax_track_stack();
53637+
53638 ret = -EINVAL;
53639 if (n < 0)
53640 goto out_nofds;
53641@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
53642 struct poll_list *walk = head;
53643 unsigned long todo = nfds;
53644
53645+ pax_track_stack();
53646+
53647+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
53648 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
53649 return -EINVAL;
53650
53651diff --git a/fs/seq_file.c b/fs/seq_file.c
53652index eae7d9d..679f099 100644
53653--- a/fs/seq_file.c
53654+++ b/fs/seq_file.c
53655@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53656 return 0;
53657 }
53658 if (!m->buf) {
53659- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53660+ m->size = PAGE_SIZE;
53661+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53662 if (!m->buf)
53663 return -ENOMEM;
53664 }
53665@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53666 Eoverflow:
53667 m->op->stop(m, p);
53668 kfree(m->buf);
53669- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53670+ m->size <<= 1;
53671+ m->buf = kmalloc(m->size, GFP_KERNEL);
53672 return !m->buf ? -ENOMEM : -EAGAIN;
53673 }
53674
53675@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53676 m->version = file->f_version;
53677 /* grab buffer if we didn't have one */
53678 if (!m->buf) {
53679- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53680+ m->size = PAGE_SIZE;
53681+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53682 if (!m->buf)
53683 goto Enomem;
53684 }
53685@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53686 goto Fill;
53687 m->op->stop(m, p);
53688 kfree(m->buf);
53689- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53690+ m->size <<= 1;
53691+ m->buf = kmalloc(m->size, GFP_KERNEL);
53692 if (!m->buf)
53693 goto Enomem;
53694 m->count = 0;
53695@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
53696 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
53697 void *data)
53698 {
53699- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
53700+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
53701 int res = -ENOMEM;
53702
53703 if (op) {
53704diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
53705index 71c29b6..54694dd 100644
53706--- a/fs/smbfs/proc.c
53707+++ b/fs/smbfs/proc.c
53708@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
53709
53710 out:
53711 if (server->local_nls != NULL && server->remote_nls != NULL)
53712- server->ops->convert = convert_cp;
53713+ *(void **)&server->ops->convert = convert_cp;
53714 else
53715- server->ops->convert = convert_memcpy;
53716+ *(void **)&server->ops->convert = convert_memcpy;
53717
53718 smb_unlock_server(server);
53719 return n;
53720@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
53721
53722 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
53723 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
53724- server->ops->getattr = smb_proc_getattr_core;
53725+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
53726 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
53727- server->ops->getattr = smb_proc_getattr_ff;
53728+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
53729 }
53730
53731 /* Decode server capabilities */
53732@@ -3439,7 +3439,7 @@ out:
53733 static void
53734 install_ops(struct smb_ops *dst, struct smb_ops *src)
53735 {
53736- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53737+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53738 }
53739
53740 /* < LANMAN2 */
53741diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
53742index 00b2909..2ace383 100644
53743--- a/fs/smbfs/symlink.c
53744+++ b/fs/smbfs/symlink.c
53745@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
53746
53747 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53748 {
53749- char *s = nd_get_link(nd);
53750+ const char *s = nd_get_link(nd);
53751 if (!IS_ERR(s))
53752 __putname(s);
53753 }
53754diff --git a/fs/splice.c b/fs/splice.c
53755index bb92b7c..5aa72b0 100644
53756--- a/fs/splice.c
53757+++ b/fs/splice.c
53758@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53759 pipe_lock(pipe);
53760
53761 for (;;) {
53762- if (!pipe->readers) {
53763+ if (!atomic_read(&pipe->readers)) {
53764 send_sig(SIGPIPE, current, 0);
53765 if (!ret)
53766 ret = -EPIPE;
53767@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53768 do_wakeup = 0;
53769 }
53770
53771- pipe->waiting_writers++;
53772+ atomic_inc(&pipe->waiting_writers);
53773 pipe_wait(pipe);
53774- pipe->waiting_writers--;
53775+ atomic_dec(&pipe->waiting_writers);
53776 }
53777
53778 pipe_unlock(pipe);
53779@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
53780 .spd_release = spd_release_page,
53781 };
53782
53783+ pax_track_stack();
53784+
53785 index = *ppos >> PAGE_CACHE_SHIFT;
53786 loff = *ppos & ~PAGE_CACHE_MASK;
53787 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53788@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
53789 old_fs = get_fs();
53790 set_fs(get_ds());
53791 /* The cast to a user pointer is valid due to the set_fs() */
53792- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
53793+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
53794 set_fs(old_fs);
53795
53796 return res;
53797@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
53798 old_fs = get_fs();
53799 set_fs(get_ds());
53800 /* The cast to a user pointer is valid due to the set_fs() */
53801- res = vfs_write(file, (const char __user *)buf, count, &pos);
53802+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
53803 set_fs(old_fs);
53804
53805 return res;
53806@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53807 .spd_release = spd_release_page,
53808 };
53809
53810+ pax_track_stack();
53811+
53812 index = *ppos >> PAGE_CACHE_SHIFT;
53813 offset = *ppos & ~PAGE_CACHE_MASK;
53814 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53815@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53816 goto err;
53817
53818 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
53819- vec[i].iov_base = (void __user *) page_address(page);
53820+ vec[i].iov_base = (__force void __user *) page_address(page);
53821 vec[i].iov_len = this_len;
53822 pages[i] = page;
53823 spd.nr_pages++;
53824@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
53825 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
53826 {
53827 while (!pipe->nrbufs) {
53828- if (!pipe->writers)
53829+ if (!atomic_read(&pipe->writers))
53830 return 0;
53831
53832- if (!pipe->waiting_writers && sd->num_spliced)
53833+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
53834 return 0;
53835
53836 if (sd->flags & SPLICE_F_NONBLOCK)
53837@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
53838 * out of the pipe right after the splice_to_pipe(). So set
53839 * PIPE_READERS appropriately.
53840 */
53841- pipe->readers = 1;
53842+ atomic_set(&pipe->readers, 1);
53843
53844 current->splice_pipe = pipe;
53845 }
53846@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
53847 .spd_release = spd_release_page,
53848 };
53849
53850+ pax_track_stack();
53851+
53852 pipe = pipe_info(file->f_path.dentry->d_inode);
53853 if (!pipe)
53854 return -EBADF;
53855@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53856 ret = -ERESTARTSYS;
53857 break;
53858 }
53859- if (!pipe->writers)
53860+ if (!atomic_read(&pipe->writers))
53861 break;
53862- if (!pipe->waiting_writers) {
53863+ if (!atomic_read(&pipe->waiting_writers)) {
53864 if (flags & SPLICE_F_NONBLOCK) {
53865 ret = -EAGAIN;
53866 break;
53867@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53868 pipe_lock(pipe);
53869
53870 while (pipe->nrbufs >= PIPE_BUFFERS) {
53871- if (!pipe->readers) {
53872+ if (!atomic_read(&pipe->readers)) {
53873 send_sig(SIGPIPE, current, 0);
53874 ret = -EPIPE;
53875 break;
53876@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53877 ret = -ERESTARTSYS;
53878 break;
53879 }
53880- pipe->waiting_writers++;
53881+ atomic_inc(&pipe->waiting_writers);
53882 pipe_wait(pipe);
53883- pipe->waiting_writers--;
53884+ atomic_dec(&pipe->waiting_writers);
53885 }
53886
53887 pipe_unlock(pipe);
53888@@ -1786,14 +1792,14 @@ retry:
53889 pipe_double_lock(ipipe, opipe);
53890
53891 do {
53892- if (!opipe->readers) {
53893+ if (!atomic_read(&opipe->readers)) {
53894 send_sig(SIGPIPE, current, 0);
53895 if (!ret)
53896 ret = -EPIPE;
53897 break;
53898 }
53899
53900- if (!ipipe->nrbufs && !ipipe->writers)
53901+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
53902 break;
53903
53904 /*
53905@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53906 pipe_double_lock(ipipe, opipe);
53907
53908 do {
53909- if (!opipe->readers) {
53910+ if (!atomic_read(&opipe->readers)) {
53911 send_sig(SIGPIPE, current, 0);
53912 if (!ret)
53913 ret = -EPIPE;
53914@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53915 * return EAGAIN if we have the potential of some data in the
53916 * future, otherwise just return 0
53917 */
53918- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
53919+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
53920 ret = -EAGAIN;
53921
53922 pipe_unlock(ipipe);
53923diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
53924index 7118a38..70af853 100644
53925--- a/fs/sysfs/file.c
53926+++ b/fs/sysfs/file.c
53927@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
53928
53929 struct sysfs_open_dirent {
53930 atomic_t refcnt;
53931- atomic_t event;
53932+ atomic_unchecked_t event;
53933 wait_queue_head_t poll;
53934 struct list_head buffers; /* goes through sysfs_buffer.list */
53935 };
53936@@ -53,7 +53,7 @@ struct sysfs_buffer {
53937 size_t count;
53938 loff_t pos;
53939 char * page;
53940- struct sysfs_ops * ops;
53941+ const struct sysfs_ops * ops;
53942 struct mutex mutex;
53943 int needs_read_fill;
53944 int event;
53945@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53946 {
53947 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53948 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53949- struct sysfs_ops * ops = buffer->ops;
53950+ const struct sysfs_ops * ops = buffer->ops;
53951 int ret = 0;
53952 ssize_t count;
53953
53954@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53955 if (!sysfs_get_active_two(attr_sd))
53956 return -ENODEV;
53957
53958- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
53959+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
53960 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
53961
53962 sysfs_put_active_two(attr_sd);
53963@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
53964 {
53965 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53966 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53967- struct sysfs_ops * ops = buffer->ops;
53968+ const struct sysfs_ops * ops = buffer->ops;
53969 int rc;
53970
53971 /* need attr_sd for attr and ops, its parent for kobj */
53972@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
53973 return -ENOMEM;
53974
53975 atomic_set(&new_od->refcnt, 0);
53976- atomic_set(&new_od->event, 1);
53977+ atomic_set_unchecked(&new_od->event, 1);
53978 init_waitqueue_head(&new_od->poll);
53979 INIT_LIST_HEAD(&new_od->buffers);
53980 goto retry;
53981@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
53982 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
53983 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53984 struct sysfs_buffer *buffer;
53985- struct sysfs_ops *ops;
53986+ const struct sysfs_ops *ops;
53987 int error = -EACCES;
53988 char *p;
53989
53990@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
53991
53992 sysfs_put_active_two(attr_sd);
53993
53994- if (buffer->event != atomic_read(&od->event))
53995+ if (buffer->event != atomic_read_unchecked(&od->event))
53996 goto trigger;
53997
53998 return DEFAULT_POLLMASK;
53999@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54000
54001 od = sd->s_attr.open;
54002 if (od) {
54003- atomic_inc(&od->event);
54004+ atomic_inc_unchecked(&od->event);
54005 wake_up_interruptible(&od->poll);
54006 }
54007
54008diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
54009index 4974995..c26609c 100644
54010--- a/fs/sysfs/mount.c
54011+++ b/fs/sysfs/mount.c
54012@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
54013 .s_name = "",
54014 .s_count = ATOMIC_INIT(1),
54015 .s_flags = SYSFS_DIR,
54016+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54017+ .s_mode = S_IFDIR | S_IRWXU,
54018+#else
54019 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54020+#endif
54021 .s_ino = 1,
54022 };
54023
54024diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54025index c5081ad..342ea86 100644
54026--- a/fs/sysfs/symlink.c
54027+++ b/fs/sysfs/symlink.c
54028@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54029
54030 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54031 {
54032- char *page = nd_get_link(nd);
54033+ const char *page = nd_get_link(nd);
54034 if (!IS_ERR(page))
54035 free_page((unsigned long)page);
54036 }
54037diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54038index 1e06853..b06d325 100644
54039--- a/fs/udf/balloc.c
54040+++ b/fs/udf/balloc.c
54041@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54042
54043 mutex_lock(&sbi->s_alloc_mutex);
54044 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54045- if (bloc->logicalBlockNum < 0 ||
54046- (bloc->logicalBlockNum + count) >
54047- partmap->s_partition_len) {
54048+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54049 udf_debug("%d < %d || %d + %d > %d\n",
54050 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54051 count, partmap->s_partition_len);
54052@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54053
54054 mutex_lock(&sbi->s_alloc_mutex);
54055 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54056- if (bloc->logicalBlockNum < 0 ||
54057- (bloc->logicalBlockNum + count) >
54058- partmap->s_partition_len) {
54059+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54060 udf_debug("%d < %d || %d + %d > %d\n",
54061 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54062 partmap->s_partition_len);
54063diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54064index 6d24c2c..fff470f 100644
54065--- a/fs/udf/inode.c
54066+++ b/fs/udf/inode.c
54067@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54068 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54069 int lastblock = 0;
54070
54071+ pax_track_stack();
54072+
54073 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54074 prev_epos.block = iinfo->i_location;
54075 prev_epos.bh = NULL;
54076diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54077index 9215700..bf1f68e 100644
54078--- a/fs/udf/misc.c
54079+++ b/fs/udf/misc.c
54080@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54081
54082 u8 udf_tag_checksum(const struct tag *t)
54083 {
54084- u8 *data = (u8 *)t;
54085+ const u8 *data = (const u8 *)t;
54086 u8 checksum = 0;
54087 int i;
54088 for (i = 0; i < sizeof(struct tag); ++i)
54089diff --git a/fs/utimes.c b/fs/utimes.c
54090index e4c75db..b4df0e0 100644
54091--- a/fs/utimes.c
54092+++ b/fs/utimes.c
54093@@ -1,6 +1,7 @@
54094 #include <linux/compiler.h>
54095 #include <linux/file.h>
54096 #include <linux/fs.h>
54097+#include <linux/security.h>
54098 #include <linux/linkage.h>
54099 #include <linux/mount.h>
54100 #include <linux/namei.h>
54101@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54102 goto mnt_drop_write_and_out;
54103 }
54104 }
54105+
54106+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54107+ error = -EACCES;
54108+ goto mnt_drop_write_and_out;
54109+ }
54110+
54111 mutex_lock(&inode->i_mutex);
54112 error = notify_change(path->dentry, &newattrs);
54113 mutex_unlock(&inode->i_mutex);
54114diff --git a/fs/xattr.c b/fs/xattr.c
54115index 6d4f6d3..cda3958 100644
54116--- a/fs/xattr.c
54117+++ b/fs/xattr.c
54118@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54119 * Extended attribute SET operations
54120 */
54121 static long
54122-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54123+setxattr(struct path *path, const char __user *name, const void __user *value,
54124 size_t size, int flags)
54125 {
54126 int error;
54127@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54128 return PTR_ERR(kvalue);
54129 }
54130
54131- error = vfs_setxattr(d, kname, kvalue, size, flags);
54132+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54133+ error = -EACCES;
54134+ goto out;
54135+ }
54136+
54137+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54138+out:
54139 kfree(kvalue);
54140 return error;
54141 }
54142@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54143 return error;
54144 error = mnt_want_write(path.mnt);
54145 if (!error) {
54146- error = setxattr(path.dentry, name, value, size, flags);
54147+ error = setxattr(&path, name, value, size, flags);
54148 mnt_drop_write(path.mnt);
54149 }
54150 path_put(&path);
54151@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54152 return error;
54153 error = mnt_want_write(path.mnt);
54154 if (!error) {
54155- error = setxattr(path.dentry, name, value, size, flags);
54156+ error = setxattr(&path, name, value, size, flags);
54157 mnt_drop_write(path.mnt);
54158 }
54159 path_put(&path);
54160@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54161 const void __user *,value, size_t, size, int, flags)
54162 {
54163 struct file *f;
54164- struct dentry *dentry;
54165 int error = -EBADF;
54166
54167 f = fget(fd);
54168 if (!f)
54169 return error;
54170- dentry = f->f_path.dentry;
54171- audit_inode(NULL, dentry);
54172+ audit_inode(NULL, f->f_path.dentry);
54173 error = mnt_want_write_file(f);
54174 if (!error) {
54175- error = setxattr(dentry, name, value, size, flags);
54176+ error = setxattr(&f->f_path, name, value, size, flags);
54177 mnt_drop_write(f->f_path.mnt);
54178 }
54179 fput(f);
54180diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54181index c6ad7c7..f2847a7 100644
54182--- a/fs/xattr_acl.c
54183+++ b/fs/xattr_acl.c
54184@@ -17,8 +17,8 @@
54185 struct posix_acl *
54186 posix_acl_from_xattr(const void *value, size_t size)
54187 {
54188- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54189- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54190+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54191+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54192 int count;
54193 struct posix_acl *acl;
54194 struct posix_acl_entry *acl_e;
54195diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54196index 942362f..88f96f5 100644
54197--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54198+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54199@@ -134,7 +134,7 @@ xfs_find_handle(
54200 }
54201
54202 error = -EFAULT;
54203- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54204+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54205 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54206 goto out_put;
54207
54208@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54209 if (IS_ERR(dentry))
54210 return PTR_ERR(dentry);
54211
54212- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54213+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54214 if (!kbuf)
54215 goto out_dput;
54216
54217@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54218 xfs_mount_t *mp,
54219 void __user *arg)
54220 {
54221- xfs_fsop_geom_t fsgeo;
54222+ xfs_fsop_geom_t fsgeo;
54223 int error;
54224
54225 error = xfs_fs_geometry(mp, &fsgeo, 3);
54226diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54227index bad485a..479bd32 100644
54228--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54229+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54230@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54231 xfs_fsop_geom_t fsgeo;
54232 int error;
54233
54234+ memset(&fsgeo, 0, sizeof(fsgeo));
54235 error = xfs_fs_geometry(mp, &fsgeo, 3);
54236 if (error)
54237 return -error;
54238diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54239index 1f3b4b8..6102f6d 100644
54240--- a/fs/xfs/linux-2.6/xfs_iops.c
54241+++ b/fs/xfs/linux-2.6/xfs_iops.c
54242@@ -468,7 +468,7 @@ xfs_vn_put_link(
54243 struct nameidata *nd,
54244 void *p)
54245 {
54246- char *s = nd_get_link(nd);
54247+ const char *s = nd_get_link(nd);
54248
54249 if (!IS_ERR(s))
54250 kfree(s);
54251diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54252index 8971fb0..5fc1eb2 100644
54253--- a/fs/xfs/xfs_bmap.c
54254+++ b/fs/xfs/xfs_bmap.c
54255@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54256 int nmap,
54257 int ret_nmap);
54258 #else
54259-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54260+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54261 #endif /* DEBUG */
54262
54263 #if defined(XFS_RW_TRACE)
54264diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54265index e89734e..5e84d8d 100644
54266--- a/fs/xfs/xfs_dir2_sf.c
54267+++ b/fs/xfs/xfs_dir2_sf.c
54268@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54269 }
54270
54271 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54272- if (filldir(dirent, sfep->name, sfep->namelen,
54273+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54274+ char name[sfep->namelen];
54275+ memcpy(name, sfep->name, sfep->namelen);
54276+ if (filldir(dirent, name, sfep->namelen,
54277+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54278+ *offset = off & 0x7fffffff;
54279+ return 0;
54280+ }
54281+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54282 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54283 *offset = off & 0x7fffffff;
54284 return 0;
54285diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54286index 8f32f50..859e8a3 100644
54287--- a/fs/xfs/xfs_vnodeops.c
54288+++ b/fs/xfs/xfs_vnodeops.c
54289@@ -564,13 +564,17 @@ xfs_readlink(
54290
54291 xfs_ilock(ip, XFS_ILOCK_SHARED);
54292
54293- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54294- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54295-
54296 pathlen = ip->i_d.di_size;
54297 if (!pathlen)
54298 goto out;
54299
54300+ if (pathlen > MAXPATHLEN) {
54301+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54302+ __func__, (unsigned long long)ip->i_ino, pathlen);
54303+ ASSERT(0);
54304+ return XFS_ERROR(EFSCORRUPTED);
54305+ }
54306+
54307 if (ip->i_df.if_flags & XFS_IFINLINE) {
54308 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54309 link[pathlen] = '\0';
54310diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54311new file mode 100644
54312index 0000000..f27a8e8
54313--- /dev/null
54314+++ b/grsecurity/Kconfig
54315@@ -0,0 +1,1036 @@
54316+#
54317+# grecurity configuration
54318+#
54319+
54320+menu "Grsecurity"
54321+
54322+config GRKERNSEC
54323+ bool "Grsecurity"
54324+ select CRYPTO
54325+ select CRYPTO_SHA256
54326+ help
54327+ If you say Y here, you will be able to configure many features
54328+ that will enhance the security of your system. It is highly
54329+ recommended that you say Y here and read through the help
54330+ for each option so that you fully understand the features and
54331+ can evaluate their usefulness for your machine.
54332+
54333+choice
54334+ prompt "Security Level"
54335+ depends on GRKERNSEC
54336+ default GRKERNSEC_CUSTOM
54337+
54338+config GRKERNSEC_LOW
54339+ bool "Low"
54340+ select GRKERNSEC_LINK
54341+ select GRKERNSEC_FIFO
54342+ select GRKERNSEC_RANDNET
54343+ select GRKERNSEC_DMESG
54344+ select GRKERNSEC_CHROOT
54345+ select GRKERNSEC_CHROOT_CHDIR
54346+
54347+ help
54348+ If you choose this option, several of the grsecurity options will
54349+ be enabled that will give you greater protection against a number
54350+ of attacks, while assuring that none of your software will have any
54351+ conflicts with the additional security measures. If you run a lot
54352+ of unusual software, or you are having problems with the higher
54353+ security levels, you should say Y here. With this option, the
54354+ following features are enabled:
54355+
54356+ - Linking restrictions
54357+ - FIFO restrictions
54358+ - Restricted dmesg
54359+ - Enforced chdir("/") on chroot
54360+ - Runtime module disabling
54361+
54362+config GRKERNSEC_MEDIUM
54363+ bool "Medium"
54364+ select PAX
54365+ select PAX_EI_PAX
54366+ select PAX_PT_PAX_FLAGS
54367+ select PAX_HAVE_ACL_FLAGS
54368+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54369+ select GRKERNSEC_CHROOT
54370+ select GRKERNSEC_CHROOT_SYSCTL
54371+ select GRKERNSEC_LINK
54372+ select GRKERNSEC_FIFO
54373+ select GRKERNSEC_DMESG
54374+ select GRKERNSEC_RANDNET
54375+ select GRKERNSEC_FORKFAIL
54376+ select GRKERNSEC_TIME
54377+ select GRKERNSEC_SIGNAL
54378+ select GRKERNSEC_CHROOT
54379+ select GRKERNSEC_CHROOT_UNIX
54380+ select GRKERNSEC_CHROOT_MOUNT
54381+ select GRKERNSEC_CHROOT_PIVOT
54382+ select GRKERNSEC_CHROOT_DOUBLE
54383+ select GRKERNSEC_CHROOT_CHDIR
54384+ select GRKERNSEC_CHROOT_MKNOD
54385+ select GRKERNSEC_PROC
54386+ select GRKERNSEC_PROC_USERGROUP
54387+ select PAX_RANDUSTACK
54388+ select PAX_ASLR
54389+ select PAX_RANDMMAP
54390+ select PAX_REFCOUNT if (X86 || SPARC64)
54391+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54392+
54393+ help
54394+ If you say Y here, several features in addition to those included
54395+ in the low additional security level will be enabled. These
54396+ features provide even more security to your system, though in rare
54397+ cases they may be incompatible with very old or poorly written
54398+ software. If you enable this option, make sure that your auth
54399+ service (identd) is running as gid 1001. With this option,
54400+ the following features (in addition to those provided in the
54401+ low additional security level) will be enabled:
54402+
54403+ - Failed fork logging
54404+ - Time change logging
54405+ - Signal logging
54406+ - Deny mounts in chroot
54407+ - Deny double chrooting
54408+ - Deny sysctl writes in chroot
54409+ - Deny mknod in chroot
54410+ - Deny access to abstract AF_UNIX sockets out of chroot
54411+ - Deny pivot_root in chroot
54412+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54413+ - /proc restrictions with special GID set to 10 (usually wheel)
54414+ - Address Space Layout Randomization (ASLR)
54415+ - Prevent exploitation of most refcount overflows
54416+ - Bounds checking of copying between the kernel and userland
54417+
54418+config GRKERNSEC_HIGH
54419+ bool "High"
54420+ select GRKERNSEC_LINK
54421+ select GRKERNSEC_FIFO
54422+ select GRKERNSEC_DMESG
54423+ select GRKERNSEC_FORKFAIL
54424+ select GRKERNSEC_TIME
54425+ select GRKERNSEC_SIGNAL
54426+ select GRKERNSEC_CHROOT
54427+ select GRKERNSEC_CHROOT_SHMAT
54428+ select GRKERNSEC_CHROOT_UNIX
54429+ select GRKERNSEC_CHROOT_MOUNT
54430+ select GRKERNSEC_CHROOT_FCHDIR
54431+ select GRKERNSEC_CHROOT_PIVOT
54432+ select GRKERNSEC_CHROOT_DOUBLE
54433+ select GRKERNSEC_CHROOT_CHDIR
54434+ select GRKERNSEC_CHROOT_MKNOD
54435+ select GRKERNSEC_CHROOT_CAPS
54436+ select GRKERNSEC_CHROOT_SYSCTL
54437+ select GRKERNSEC_CHROOT_FINDTASK
54438+ select GRKERNSEC_SYSFS_RESTRICT
54439+ select GRKERNSEC_PROC
54440+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54441+ select GRKERNSEC_HIDESYM
54442+ select GRKERNSEC_BRUTE
54443+ select GRKERNSEC_PROC_USERGROUP
54444+ select GRKERNSEC_KMEM
54445+ select GRKERNSEC_RESLOG
54446+ select GRKERNSEC_RANDNET
54447+ select GRKERNSEC_PROC_ADD
54448+ select GRKERNSEC_CHROOT_CHMOD
54449+ select GRKERNSEC_CHROOT_NICE
54450+ select GRKERNSEC_AUDIT_MOUNT
54451+ select GRKERNSEC_MODHARDEN if (MODULES)
54452+ select GRKERNSEC_HARDEN_PTRACE
54453+ select GRKERNSEC_VM86 if (X86_32)
54454+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54455+ select PAX
54456+ select PAX_RANDUSTACK
54457+ select PAX_ASLR
54458+ select PAX_RANDMMAP
54459+ select PAX_NOEXEC
54460+ select PAX_MPROTECT
54461+ select PAX_EI_PAX
54462+ select PAX_PT_PAX_FLAGS
54463+ select PAX_HAVE_ACL_FLAGS
54464+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54465+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54466+ select PAX_RANDKSTACK if (X86_TSC && X86)
54467+ select PAX_SEGMEXEC if (X86_32)
54468+ select PAX_PAGEEXEC
54469+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54470+ select PAX_EMUTRAMP if (PARISC)
54471+ select PAX_EMUSIGRT if (PARISC)
54472+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54473+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54474+ select PAX_REFCOUNT if (X86 || SPARC64)
54475+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54476+ help
54477+ If you say Y here, many of the features of grsecurity will be
54478+ enabled, which will protect you against many kinds of attacks
54479+ against your system. The heightened security comes at a cost
54480+ of an increased chance of incompatibilities with rare software
54481+ on your machine. Since this security level enables PaX, you should
54482+ view <http://pax.grsecurity.net> and read about the PaX
54483+ project. While you are there, download chpax and run it on
54484+ binaries that cause problems with PaX. Also remember that
54485+ since the /proc restrictions are enabled, you must run your
54486+ identd as gid 1001. This security level enables the following
54487+ features in addition to those listed in the low and medium
54488+ security levels:
54489+
54490+ - Additional /proc restrictions
54491+ - Chmod restrictions in chroot
54492+ - No signals, ptrace, or viewing of processes outside of chroot
54493+ - Capability restrictions in chroot
54494+ - Deny fchdir out of chroot
54495+ - Priority restrictions in chroot
54496+ - Segmentation-based implementation of PaX
54497+ - Mprotect restrictions
54498+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54499+ - Kernel stack randomization
54500+ - Mount/unmount/remount logging
54501+ - Kernel symbol hiding
54502+ - Hardening of module auto-loading
54503+ - Ptrace restrictions
54504+ - Restricted vm86 mode
54505+ - Restricted sysfs/debugfs
54506+ - Active kernel exploit response
54507+
54508+config GRKERNSEC_CUSTOM
54509+ bool "Custom"
54510+ help
54511+ If you say Y here, you will be able to configure every grsecurity
54512+ option, which allows you to enable many more features that aren't
54513+ covered in the basic security levels. These additional features
54514+ include TPE, socket restrictions, and the sysctl system for
54515+ grsecurity. It is advised that you read through the help for
54516+ each option to determine its usefulness in your situation.
54517+
54518+endchoice
54519+
54520+menu "Address Space Protection"
54521+depends on GRKERNSEC
54522+
54523+config GRKERNSEC_KMEM
54524+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
54525+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54526+ help
54527+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54528+ be written to or read from to modify or leak the contents of the running
54529+ kernel. /dev/port will also not be allowed to be opened. If you have module
54530+ support disabled, enabling this will close up four ways that are
54531+ currently used to insert malicious code into the running kernel.
54532+ Even with all these features enabled, we still highly recommend that
54533+ you use the RBAC system, as it is still possible for an attacker to
54534+ modify the running kernel through privileged I/O granted by ioperm/iopl.
54535+ If you are not using XFree86, you may be able to stop this additional
54536+ case by enabling the 'Disable privileged I/O' option. Though nothing
54537+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54538+ but only to video memory, which is the only writing we allow in this
54539+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54540+ not be allowed to mprotect it with PROT_WRITE later.
54541+ It is highly recommended that you say Y here if you meet all the
54542+ conditions above.
54543+
54544+config GRKERNSEC_VM86
54545+ bool "Restrict VM86 mode"
54546+ depends on X86_32
54547+
54548+ help
54549+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54550+ make use of a special execution mode on 32bit x86 processors called
54551+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54552+ video cards and will still work with this option enabled. The purpose
54553+ of the option is to prevent exploitation of emulation errors in
54554+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
54555+ Nearly all users should be able to enable this option.
54556+
54557+config GRKERNSEC_IO
54558+ bool "Disable privileged I/O"
54559+ depends on X86
54560+ select RTC_CLASS
54561+ select RTC_INTF_DEV
54562+ select RTC_DRV_CMOS
54563+
54564+ help
54565+ If you say Y here, all ioperm and iopl calls will return an error.
54566+ Ioperm and iopl can be used to modify the running kernel.
54567+ Unfortunately, some programs need this access to operate properly,
54568+ the most notable of which are XFree86 and hwclock. hwclock can be
54569+ remedied by having RTC support in the kernel, so real-time
54570+ clock support is enabled if this option is enabled, to ensure
54571+ that hwclock operates correctly. XFree86 still will not
54572+ operate correctly with this option enabled, so DO NOT CHOOSE Y
54573+ IF YOU USE XFree86. If you use XFree86 and you still want to
54574+ protect your kernel against modification, use the RBAC system.
54575+
54576+config GRKERNSEC_PROC_MEMMAP
54577+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54578+ default y if (PAX_NOEXEC || PAX_ASLR)
54579+ depends on PAX_NOEXEC || PAX_ASLR
54580+ help
54581+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54582+ give no information about the addresses of its mappings if
54583+ PaX features that rely on random addresses are enabled on the task.
54584+ If you use PaX it is greatly recommended that you say Y here as it
54585+ closes up a hole that makes the full ASLR useless for suid
54586+ binaries.
54587+
54588+config GRKERNSEC_BRUTE
54589+ bool "Deter exploit bruteforcing"
54590+ help
54591+ If you say Y here, attempts to bruteforce exploits against forking
54592+ daemons such as apache or sshd, as well as against suid/sgid binaries
54593+ will be deterred. When a child of a forking daemon is killed by PaX
54594+ or crashes due to an illegal instruction or other suspicious signal,
54595+ the parent process will be delayed 30 seconds upon every subsequent
54596+ fork until the administrator is able to assess the situation and
54597+ restart the daemon.
54598+ In the suid/sgid case, the attempt is logged, the user has all their
54599+ processes terminated, and they are prevented from executing any further
54600+ processes for 15 minutes.
54601+ It is recommended that you also enable signal logging in the auditing
54602+ section so that logs are generated when a process triggers a suspicious
54603+ signal.
54604+ If the sysctl option is enabled, a sysctl option with name
54605+ "deter_bruteforce" is created.
54606+
54607+config GRKERNSEC_MODHARDEN
54608+ bool "Harden module auto-loading"
54609+ depends on MODULES
54610+ help
54611+ If you say Y here, module auto-loading in response to use of some
54612+ feature implemented by an unloaded module will be restricted to
54613+ root users. Enabling this option helps defend against attacks
54614+ by unprivileged users who abuse the auto-loading behavior to
54615+ cause a vulnerable module to load that is then exploited.
54616+
54617+ If this option prevents a legitimate use of auto-loading for a
54618+ non-root user, the administrator can execute modprobe manually
54619+ with the exact name of the module mentioned in the alert log.
54620+ Alternatively, the administrator can add the module to the list
54621+ of modules loaded at boot by modifying init scripts.
54622+
54623+ Modification of init scripts will most likely be needed on
54624+ Ubuntu servers with encrypted home directory support enabled,
54625+ as the first non-root user logging in will cause the ecb(aes),
54626+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54627+
54628+config GRKERNSEC_HIDESYM
54629+ bool "Hide kernel symbols"
54630+ help
54631+ If you say Y here, getting information on loaded modules, and
54632+ displaying all kernel symbols through a syscall will be restricted
54633+ to users with CAP_SYS_MODULE. For software compatibility reasons,
54634+ /proc/kallsyms will be restricted to the root user. The RBAC
54635+ system can hide that entry even from root.
54636+
54637+ This option also prevents leaking of kernel addresses through
54638+ several /proc entries.
54639+
54640+ Note that this option is only effective provided the following
54641+ conditions are met:
54642+ 1) The kernel using grsecurity is not precompiled by some distribution
54643+ 2) You have also enabled GRKERNSEC_DMESG
54644+ 3) You are using the RBAC system and hiding other files such as your
54645+ kernel image and System.map. Alternatively, enabling this option
54646+ causes the permissions on /boot, /lib/modules, and the kernel
54647+ source directory to change at compile time to prevent
54648+ reading by non-root users.
54649+ If the above conditions are met, this option will aid in providing a
54650+ useful protection against local kernel exploitation of overflows
54651+ and arbitrary read/write vulnerabilities.
54652+
54653+config GRKERNSEC_KERN_LOCKOUT
54654+ bool "Active kernel exploit response"
54655+ depends on X86 || ARM || PPC || SPARC
54656+ help
54657+ If you say Y here, when a PaX alert is triggered due to suspicious
54658+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54659+ or an OOPs occurs due to bad memory accesses, instead of just
54660+ terminating the offending process (and potentially allowing
54661+ a subsequent exploit from the same user), we will take one of two
54662+ actions:
54663+ If the user was root, we will panic the system
54664+ If the user was non-root, we will log the attempt, terminate
54665+ all processes owned by the user, then prevent them from creating
54666+ any new processes until the system is restarted
54667+ This deters repeated kernel exploitation/bruteforcing attempts
54668+ and is useful for later forensics.
54669+
54670+endmenu
54671+menu "Role Based Access Control Options"
54672+depends on GRKERNSEC
54673+
54674+config GRKERNSEC_RBAC_DEBUG
54675+ bool
54676+
54677+config GRKERNSEC_NO_RBAC
54678+ bool "Disable RBAC system"
54679+ help
54680+ If you say Y here, the /dev/grsec device will be removed from the kernel,
54681+ preventing the RBAC system from being enabled. You should only say Y
54682+ here if you have no intention of using the RBAC system, so as to prevent
54683+ an attacker with root access from misusing the RBAC system to hide files
54684+ and processes when loadable module support and /dev/[k]mem have been
54685+ locked down.
54686+
54687+config GRKERNSEC_ACL_HIDEKERN
54688+ bool "Hide kernel processes"
54689+ help
54690+ If you say Y here, all kernel threads will be hidden to all
54691+ processes but those whose subject has the "view hidden processes"
54692+ flag.
54693+
54694+config GRKERNSEC_ACL_MAXTRIES
54695+ int "Maximum tries before password lockout"
54696+ default 3
54697+ help
54698+ This option enforces the maximum number of times a user can attempt
54699+ to authorize themselves with the grsecurity RBAC system before being
54700+ denied the ability to attempt authorization again for a specified time.
54701+ The lower the number, the harder it will be to brute-force a password.
54702+
54703+config GRKERNSEC_ACL_TIMEOUT
54704+ int "Time to wait after max password tries, in seconds"
54705+ default 30
54706+ help
54707+ This option specifies the time the user must wait after attempting to
54708+ authorize to the RBAC system with the maximum number of invalid
54709+ passwords. The higher the number, the harder it will be to brute-force
54710+ a password.
54711+
54712+endmenu
54713+menu "Filesystem Protections"
54714+depends on GRKERNSEC
54715+
54716+config GRKERNSEC_PROC
54717+ bool "Proc restrictions"
54718+ help
54719+ If you say Y here, the permissions of the /proc filesystem
54720+ will be altered to enhance system security and privacy. You MUST
54721+ choose either a user only restriction or a user and group restriction.
54722+ Depending upon the option you choose, you can either restrict users to
54723+ see only the processes they themselves run, or choose a group that can
54724+ view all processes and files normally restricted to root if you choose
54725+ the "restrict to user only" option. NOTE: If you're running identd as
54726+ a non-root user, you will have to run it as the group you specify here.
54727+
54728+config GRKERNSEC_PROC_USER
54729+ bool "Restrict /proc to user only"
54730+ depends on GRKERNSEC_PROC
54731+ help
54732+ If you say Y here, non-root users will only be able to view their own
54733+ processes, and restricts them from viewing network-related information,
54734+ and viewing kernel symbol and module information.
54735+
54736+config GRKERNSEC_PROC_USERGROUP
54737+ bool "Allow special group"
54738+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54739+ help
54740+ If you say Y here, you will be able to select a group that will be
54741+ able to view all processes and network-related information. If you've
54742+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54743+ remain hidden. This option is useful if you want to run identd as
54744+ a non-root user.
54745+
54746+config GRKERNSEC_PROC_GID
54747+ int "GID for special group"
54748+ depends on GRKERNSEC_PROC_USERGROUP
54749+ default 1001
54750+
54751+config GRKERNSEC_PROC_ADD
54752+ bool "Additional restrictions"
54753+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54754+ help
54755+ If you say Y here, additional restrictions will be placed on
54756+ /proc that keep normal users from viewing device information and
54757+ slabinfo information that could be useful for exploits.
54758+
54759+config GRKERNSEC_LINK
54760+ bool "Linking restrictions"
54761+ help
54762+ If you say Y here, /tmp race exploits will be prevented, since users
54763+ will no longer be able to follow symlinks owned by other users in
54764+ world-writable +t directories (e.g. /tmp), unless the owner of the
54765+	  symlink is the owner of the directory. Users will also not be
54766+ able to hardlink to files they do not own. If the sysctl option is
54767+ enabled, a sysctl option with name "linking_restrictions" is created.
54768+
54769+config GRKERNSEC_FIFO
54770+ bool "FIFO restrictions"
54771+ help
54772+ If you say Y here, users will not be able to write to FIFOs they don't
54773+ own in world-writable +t directories (e.g. /tmp), unless the owner of
54774+ the FIFO is the same owner of the directory it's held in. If the sysctl
54775+ option is enabled, a sysctl option with name "fifo_restrictions" is
54776+ created.
54777+
54778+config GRKERNSEC_SYSFS_RESTRICT
54779+ bool "Sysfs/debugfs restriction"
54780+ depends on SYSFS
54781+ help
54782+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54783+ any filesystem normally mounted under it (e.g. debugfs) will only
54784+ be accessible by root. These filesystems generally provide access
54785+ to hardware and debug information that isn't appropriate for unprivileged
54786+ users of the system. Sysfs and debugfs have also become a large source
54787+ of new vulnerabilities, ranging from infoleaks to local compromise.
54788+ There has been very little oversight with an eye toward security involved
54789+ in adding new exporters of information to these filesystems, so their
54790+ use is discouraged.
54791+ This option is equivalent to a chmod 0700 of the mount paths.
54792+
54793+config GRKERNSEC_ROFS
54794+ bool "Runtime read-only mount protection"
54795+ help
54796+ If you say Y here, a sysctl option with name "romount_protect" will
54797+ be created. By setting this option to 1 at runtime, filesystems
54798+ will be protected in the following ways:
54799+ * No new writable mounts will be allowed
54800+ * Existing read-only mounts won't be able to be remounted read/write
54801+ * Write operations will be denied on all block devices
54802+ This option acts independently of grsec_lock: once it is set to 1,
54803+ it cannot be turned off. Therefore, please be mindful of the resulting
54804+ behavior if this option is enabled in an init script on a read-only
54805+ filesystem. This feature is mainly intended for secure embedded systems.
54806+
54807+config GRKERNSEC_CHROOT
54808+ bool "Chroot jail restrictions"
54809+ help
54810+ If you say Y here, you will be able to choose several options that will
54811+ make breaking out of a chrooted jail much more difficult. If you
54812+ encounter no software incompatibilities with the following options, it
54813+ is recommended that you enable each one.
54814+
54815+config GRKERNSEC_CHROOT_MOUNT
54816+ bool "Deny mounts"
54817+ depends on GRKERNSEC_CHROOT
54818+ help
54819+ If you say Y here, processes inside a chroot will not be able to
54820+ mount or remount filesystems. If the sysctl option is enabled, a
54821+ sysctl option with name "chroot_deny_mount" is created.
54822+
54823+config GRKERNSEC_CHROOT_DOUBLE
54824+ bool "Deny double-chroots"
54825+ depends on GRKERNSEC_CHROOT
54826+ help
54827+ If you say Y here, processes inside a chroot will not be able to chroot
54828+ again outside the chroot. This is a widely used method of breaking
54829+ out of a chroot jail and should not be allowed. If the sysctl
54830+ option is enabled, a sysctl option with name
54831+ "chroot_deny_chroot" is created.
54832+
54833+config GRKERNSEC_CHROOT_PIVOT
54834+ bool "Deny pivot_root in chroot"
54835+ depends on GRKERNSEC_CHROOT
54836+ help
54837+ If you say Y here, processes inside a chroot will not be able to use
54838+ a function called pivot_root() that was introduced in Linux 2.3.41. It
54839+ works similar to chroot in that it changes the root filesystem. This
54840+ function could be misused in a chrooted process to attempt to break out
54841+ of the chroot, and therefore should not be allowed. If the sysctl
54842+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
54843+ created.
54844+
54845+config GRKERNSEC_CHROOT_CHDIR
54846+ bool "Enforce chdir(\"/\") on all chroots"
54847+ depends on GRKERNSEC_CHROOT
54848+ help
54849+ If you say Y here, the current working directory of all newly-chrooted
54850+	  applications will be set to the root directory of the chroot.
54851+ The man page on chroot(2) states:
54852+ Note that this call does not change the current working
54853+ directory, so that `.' can be outside the tree rooted at
54854+ `/'. In particular, the super-user can escape from a
54855+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
54856+
54857+ It is recommended that you say Y here, since it's not known to break
54858+ any software. If the sysctl option is enabled, a sysctl option with
54859+ name "chroot_enforce_chdir" is created.
54860+
54861+config GRKERNSEC_CHROOT_CHMOD
54862+ bool "Deny (f)chmod +s"
54863+ depends on GRKERNSEC_CHROOT
54864+ help
54865+ If you say Y here, processes inside a chroot will not be able to chmod
54866+ or fchmod files to make them have suid or sgid bits. This protects
54867+ against another published method of breaking a chroot. If the sysctl
54868+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
54869+ created.
54870+
54871+config GRKERNSEC_CHROOT_FCHDIR
54872+ bool "Deny fchdir out of chroot"
54873+ depends on GRKERNSEC_CHROOT
54874+ help
54875+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
54876+ to a file descriptor of the chrooting process that points to a directory
54877+ outside the filesystem will be stopped. If the sysctl option
54878+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
54879+
54880+config GRKERNSEC_CHROOT_MKNOD
54881+ bool "Deny mknod"
54882+ depends on GRKERNSEC_CHROOT
54883+ help
54884+ If you say Y here, processes inside a chroot will not be allowed to
54885+ mknod. The problem with using mknod inside a chroot is that it
54886+ would allow an attacker to create a device entry that is the same
54887+ as one on the physical root of your system, which could range from
54888+ anything from the console device to a device for your harddrive (which
54889+ they could then use to wipe the drive or steal data). It is recommended
54890+ that you say Y here, unless you run into software incompatibilities.
54891+ If the sysctl option is enabled, a sysctl option with name
54892+ "chroot_deny_mknod" is created.
54893+
54894+config GRKERNSEC_CHROOT_SHMAT
54895+ bool "Deny shmat() out of chroot"
54896+ depends on GRKERNSEC_CHROOT
54897+ help
54898+ If you say Y here, processes inside a chroot will not be able to attach
54899+ to shared memory segments that were created outside of the chroot jail.
54900+ It is recommended that you say Y here. If the sysctl option is enabled,
54901+ a sysctl option with name "chroot_deny_shmat" is created.
54902+
54903+config GRKERNSEC_CHROOT_UNIX
54904+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
54905+ depends on GRKERNSEC_CHROOT
54906+ help
54907+ If you say Y here, processes inside a chroot will not be able to
54908+ connect to abstract (meaning not belonging to a filesystem) Unix
54909+ domain sockets that were bound outside of a chroot. It is recommended
54910+ that you say Y here. If the sysctl option is enabled, a sysctl option
54911+ with name "chroot_deny_unix" is created.
54912+
54913+config GRKERNSEC_CHROOT_FINDTASK
54914+ bool "Protect outside processes"
54915+ depends on GRKERNSEC_CHROOT
54916+ help
54917+ If you say Y here, processes inside a chroot will not be able to
54918+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
54919+ getsid, or view any process outside of the chroot. If the sysctl
54920+ option is enabled, a sysctl option with name "chroot_findtask" is
54921+ created.
54922+
54923+config GRKERNSEC_CHROOT_NICE
54924+ bool "Restrict priority changes"
54925+ depends on GRKERNSEC_CHROOT
54926+ help
54927+ If you say Y here, processes inside a chroot will not be able to raise
54928+ the priority of processes in the chroot, or alter the priority of
54929+ processes outside the chroot. This provides more security than simply
54930+ removing CAP_SYS_NICE from the process' capability set. If the
54931+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
54932+ is created.
54933+
54934+config GRKERNSEC_CHROOT_SYSCTL
54935+ bool "Deny sysctl writes"
54936+ depends on GRKERNSEC_CHROOT
54937+ help
54938+ If you say Y here, an attacker in a chroot will not be able to
54939+ write to sysctl entries, either by sysctl(2) or through a /proc
54940+ interface. It is strongly recommended that you say Y here. If the
54941+ sysctl option is enabled, a sysctl option with name
54942+ "chroot_deny_sysctl" is created.
54943+
54944+config GRKERNSEC_CHROOT_CAPS
54945+ bool "Capability restrictions"
54946+ depends on GRKERNSEC_CHROOT
54947+ help
54948+ If you say Y here, the capabilities on all processes within a
54949+ chroot jail will be lowered to stop module insertion, raw i/o,
54950+ system and net admin tasks, rebooting the system, modifying immutable
54951+ files, modifying IPC owned by another, and changing the system time.
54952+ This is left an option because it can break some apps. Disable this
54953+ if your chrooted apps are having problems performing those kinds of
54954+ tasks. If the sysctl option is enabled, a sysctl option with
54955+ name "chroot_caps" is created.
54956+
54957+endmenu
54958+menu "Kernel Auditing"
54959+depends on GRKERNSEC
54960+
54961+config GRKERNSEC_AUDIT_GROUP
54962+ bool "Single group for auditing"
54963+ help
54964+ If you say Y here, the exec, chdir, and (un)mount logging features
54965+ will only operate on a group you specify. This option is recommended
54966+ if you only want to watch certain users instead of having a large
54967+ amount of logs from the entire system. If the sysctl option is enabled,
54968+ a sysctl option with name "audit_group" is created.
54969+
54970+config GRKERNSEC_AUDIT_GID
54971+ int "GID for auditing"
54972+ depends on GRKERNSEC_AUDIT_GROUP
54973+ default 1007
54974+
54975+config GRKERNSEC_EXECLOG
54976+ bool "Exec logging"
54977+ help
54978+ If you say Y here, all execve() calls will be logged (since the
54979+ other exec*() calls are frontends to execve(), all execution
54980+ will be logged). Useful for shell-servers that like to keep track
54981+ of their users. If the sysctl option is enabled, a sysctl option with
54982+ name "exec_logging" is created.
54983+ WARNING: This option when enabled will produce a LOT of logs, especially
54984+ on an active system.
54985+
54986+config GRKERNSEC_RESLOG
54987+ bool "Resource logging"
54988+ help
54989+ If you say Y here, all attempts to overstep resource limits will
54990+ be logged with the resource name, the requested size, and the current
54991+ limit. It is highly recommended that you say Y here. If the sysctl
54992+ option is enabled, a sysctl option with name "resource_logging" is
54993+ created. If the RBAC system is enabled, the sysctl value is ignored.
54994+
54995+config GRKERNSEC_CHROOT_EXECLOG
54996+ bool "Log execs within chroot"
54997+ help
54998+ If you say Y here, all executions inside a chroot jail will be logged
54999+ to syslog. This can cause a large amount of logs if certain
55000+ applications (eg. djb's daemontools) are installed on the system, and
55001+ is therefore left as an option. If the sysctl option is enabled, a
55002+ sysctl option with name "chroot_execlog" is created.
55003+
55004+config GRKERNSEC_AUDIT_PTRACE
55005+ bool "Ptrace logging"
55006+ help
55007+ If you say Y here, all attempts to attach to a process via ptrace
55008+ will be logged. If the sysctl option is enabled, a sysctl option
55009+ with name "audit_ptrace" is created.
55010+
55011+config GRKERNSEC_AUDIT_CHDIR
55012+ bool "Chdir logging"
55013+ help
55014+ If you say Y here, all chdir() calls will be logged. If the sysctl
55015+ option is enabled, a sysctl option with name "audit_chdir" is created.
55016+
55017+config GRKERNSEC_AUDIT_MOUNT
55018+ bool "(Un)Mount logging"
55019+ help
55020+ If you say Y here, all mounts and unmounts will be logged. If the
55021+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55022+ created.
55023+
55024+config GRKERNSEC_SIGNAL
55025+ bool "Signal logging"
55026+ help
55027+ If you say Y here, certain important signals will be logged, such as
55028+	  SIGSEGV, which will as a result inform you of when an error in a program
55029+ occurred, which in some cases could mean a possible exploit attempt.
55030+ If the sysctl option is enabled, a sysctl option with name
55031+ "signal_logging" is created.
55032+
55033+config GRKERNSEC_FORKFAIL
55034+ bool "Fork failure logging"
55035+ help
55036+ If you say Y here, all failed fork() attempts will be logged.
55037+ This could suggest a fork bomb, or someone attempting to overstep
55038+ their process limit. If the sysctl option is enabled, a sysctl option
55039+ with name "forkfail_logging" is created.
55040+
55041+config GRKERNSEC_TIME
55042+ bool "Time change logging"
55043+ help
55044+ If you say Y here, any changes of the system clock will be logged.
55045+ If the sysctl option is enabled, a sysctl option with name
55046+ "timechange_logging" is created.
55047+
55048+config GRKERNSEC_PROC_IPADDR
55049+ bool "/proc/<pid>/ipaddr support"
55050+ help
55051+ If you say Y here, a new entry will be added to each /proc/<pid>
55052+ directory that contains the IP address of the person using the task.
55053+ The IP is carried across local TCP and AF_UNIX stream sockets.
55054+ This information can be useful for IDS/IPSes to perform remote response
55055+ to a local attack. The entry is readable by only the owner of the
55056+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55057+ the RBAC system), and thus does not create privacy concerns.
55058+
55059+config GRKERNSEC_RWXMAP_LOG
55060+ bool 'Denied RWX mmap/mprotect logging'
55061+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55062+ help
55063+ If you say Y here, calls to mmap() and mprotect() with explicit
55064+ usage of PROT_WRITE and PROT_EXEC together will be logged when
55065+ denied by the PAX_MPROTECT feature. If the sysctl option is
55066+ enabled, a sysctl option with name "rwxmap_logging" is created.
55067+
55068+config GRKERNSEC_AUDIT_TEXTREL
55069+ bool 'ELF text relocations logging (READ HELP)'
55070+ depends on PAX_MPROTECT
55071+ help
55072+ If you say Y here, text relocations will be logged with the filename
55073+ of the offending library or binary. The purpose of the feature is
55074+ to help Linux distribution developers get rid of libraries and
55075+ binaries that need text relocations which hinder the future progress
55076+ of PaX. Only Linux distribution developers should say Y here, and
55077+ never on a production machine, as this option creates an information
55078+ leak that could aid an attacker in defeating the randomization of
55079+ a single memory region. If the sysctl option is enabled, a sysctl
55080+ option with name "audit_textrel" is created.
55081+
55082+endmenu
55083+
55084+menu "Executable Protections"
55085+depends on GRKERNSEC
55086+
55087+config GRKERNSEC_DMESG
55088+ bool "Dmesg(8) restriction"
55089+ help
55090+ If you say Y here, non-root users will not be able to use dmesg(8)
55091+ to view up to the last 4kb of messages in the kernel's log buffer.
55092+ The kernel's log buffer often contains kernel addresses and other
55093+ identifying information useful to an attacker in fingerprinting a
55094+ system for a targeted exploit.
55095+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
55096+ created.
55097+
55098+config GRKERNSEC_HARDEN_PTRACE
55099+ bool "Deter ptrace-based process snooping"
55100+ help
55101+ If you say Y here, TTY sniffers and other malicious monitoring
55102+ programs implemented through ptrace will be defeated. If you
55103+ have been using the RBAC system, this option has already been
55104+ enabled for several years for all users, with the ability to make
55105+ fine-grained exceptions.
55106+
55107+ This option only affects the ability of non-root users to ptrace
55108+ processes that are not a descendent of the ptracing process.
55109+ This means that strace ./binary and gdb ./binary will still work,
55110+ but attaching to arbitrary processes will not. If the sysctl
55111+ option is enabled, a sysctl option with name "harden_ptrace" is
55112+ created.
55113+
55114+config GRKERNSEC_TPE
55115+ bool "Trusted Path Execution (TPE)"
55116+ help
55117+ If you say Y here, you will be able to choose a gid to add to the
55118+ supplementary groups of users you want to mark as "untrusted."
55119+ These users will not be able to execute any files that are not in
55120+ root-owned directories writable only by root. If the sysctl option
55121+ is enabled, a sysctl option with name "tpe" is created.
55122+
55123+config GRKERNSEC_TPE_ALL
55124+ bool "Partially restrict all non-root users"
55125+ depends on GRKERNSEC_TPE
55126+ help
55127+ If you say Y here, all non-root users will be covered under
55128+ a weaker TPE restriction. This is separate from, and in addition to,
55129+ the main TPE options that you have selected elsewhere. Thus, if a
55130+ "trusted" GID is chosen, this restriction applies to even that GID.
55131+ Under this restriction, all non-root users will only be allowed to
55132+ execute files in directories they own that are not group or
55133+ world-writable, or in directories owned by root and writable only by
55134+ root. If the sysctl option is enabled, a sysctl option with name
55135+ "tpe_restrict_all" is created.
55136+
55137+config GRKERNSEC_TPE_INVERT
55138+ bool "Invert GID option"
55139+ depends on GRKERNSEC_TPE
55140+ help
55141+ If you say Y here, the group you specify in the TPE configuration will
55142+ decide what group TPE restrictions will be *disabled* for. This
55143+ option is useful if you want TPE restrictions to be applied to most
55144+ users on the system. If the sysctl option is enabled, a sysctl option
55145+ with name "tpe_invert" is created. Unlike other sysctl options, this
55146+ entry will default to on for backward-compatibility.
55147+
55148+config GRKERNSEC_TPE_GID
55149+ int "GID for untrusted users"
55150+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55151+ default 1005
55152+ help
55153+ Setting this GID determines what group TPE restrictions will be
55154+ *enabled* for. If the sysctl option is enabled, a sysctl option
55155+ with name "tpe_gid" is created.
55156+
55157+config GRKERNSEC_TPE_GID
55158+ int "GID for trusted users"
55159+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55160+ default 1005
55161+ help
55162+ Setting this GID determines what group TPE restrictions will be
55163+ *disabled* for. If the sysctl option is enabled, a sysctl option
55164+ with name "tpe_gid" is created.
55165+
55166+endmenu
55167+menu "Network Protections"
55168+depends on GRKERNSEC
55169+
55170+config GRKERNSEC_RANDNET
55171+ bool "Larger entropy pools"
55172+ help
55173+ If you say Y here, the entropy pools used for many features of Linux
55174+ and grsecurity will be doubled in size. Since several grsecurity
55175+ features use additional randomness, it is recommended that you say Y
55176+ here. Saying Y here has a similar effect as modifying
55177+ /proc/sys/kernel/random/poolsize.
55178+
55179+config GRKERNSEC_BLACKHOLE
55180+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55181+ depends on NET
55182+ help
55183+ If you say Y here, neither TCP resets nor ICMP
55184+ destination-unreachable packets will be sent in response to packets
55185+ sent to ports for which no associated listening process exists.
55186+ This feature supports both IPV4 and IPV6 and exempts the
55187+ loopback interface from blackholing. Enabling this feature
55188+ makes a host more resilient to DoS attacks and reduces network
55189+ visibility against scanners.
55190+
55191+ The blackhole feature as-implemented is equivalent to the FreeBSD
55192+ blackhole feature, as it prevents RST responses to all packets, not
55193+ just SYNs. Under most application behavior this causes no
55194+ problems, but applications (like haproxy) may not close certain
55195+ connections in a way that cleanly terminates them on the remote
55196+ end, leaving the remote host in LAST_ACK state. Because of this
55197+ side-effect and to prevent intentional LAST_ACK DoSes, this
55198+ feature also adds automatic mitigation against such attacks.
55199+ The mitigation drastically reduces the amount of time a socket
55200+ can spend in LAST_ACK state. If you're using haproxy and not
55201+ all servers it connects to have this option enabled, consider
55202+ disabling this feature on the haproxy host.
55203+
55204+ If the sysctl option is enabled, two sysctl options with names
55205+ "ip_blackhole" and "lastack_retries" will be created.
55206+ While "ip_blackhole" takes the standard zero/non-zero on/off
55207+ toggle, "lastack_retries" uses the same kinds of values as
55208+ "tcp_retries1" and "tcp_retries2". The default value of 4
55209+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55210+ state.
55211+
55212+config GRKERNSEC_SOCKET
55213+ bool "Socket restrictions"
55214+ depends on NET
55215+ help
55216+ If you say Y here, you will be able to choose from several options.
55217+ If you assign a GID on your system and add it to the supplementary
55218+ groups of users you want to restrict socket access to, this patch
55219+ will perform up to three things, based on the option(s) you choose.
55220+
55221+config GRKERNSEC_SOCKET_ALL
55222+ bool "Deny any sockets to group"
55223+ depends on GRKERNSEC_SOCKET
55224+ help
55225+	  If you say Y here, you will be able to choose a GID whose users will
55226+ be unable to connect to other hosts from your machine or run server
55227+ applications from your machine. If the sysctl option is enabled, a
55228+ sysctl option with name "socket_all" is created.
55229+
55230+config GRKERNSEC_SOCKET_ALL_GID
55231+ int "GID to deny all sockets for"
55232+ depends on GRKERNSEC_SOCKET_ALL
55233+ default 1004
55234+ help
55235+ Here you can choose the GID to disable socket access for. Remember to
55236+ add the users you want socket access disabled for to the GID
55237+ specified here. If the sysctl option is enabled, a sysctl option
55238+ with name "socket_all_gid" is created.
55239+
55240+config GRKERNSEC_SOCKET_CLIENT
55241+ bool "Deny client sockets to group"
55242+ depends on GRKERNSEC_SOCKET
55243+ help
55244+	  If you say Y here, you will be able to choose a GID whose users will
55245+ be unable to connect to other hosts from your machine, but will be
55246+ able to run servers. If this option is enabled, all users in the group
55247+ you specify will have to use passive mode when initiating ftp transfers
55248+ from the shell on your machine. If the sysctl option is enabled, a
55249+ sysctl option with name "socket_client" is created.
55250+
55251+config GRKERNSEC_SOCKET_CLIENT_GID
55252+ int "GID to deny client sockets for"
55253+ depends on GRKERNSEC_SOCKET_CLIENT
55254+ default 1003
55255+ help
55256+ Here you can choose the GID to disable client socket access for.
55257+ Remember to add the users you want client socket access disabled for to
55258+ the GID specified here. If the sysctl option is enabled, a sysctl
55259+ option with name "socket_client_gid" is created.
55260+
55261+config GRKERNSEC_SOCKET_SERVER
55262+ bool "Deny server sockets to group"
55263+ depends on GRKERNSEC_SOCKET
55264+ help
55265+	  If you say Y here, you will be able to choose a GID whose users will
55266+ be unable to run server applications from your machine. If the sysctl
55267+ option is enabled, a sysctl option with name "socket_server" is created.
55268+
55269+config GRKERNSEC_SOCKET_SERVER_GID
55270+ int "GID to deny server sockets for"
55271+ depends on GRKERNSEC_SOCKET_SERVER
55272+ default 1002
55273+ help
55274+ Here you can choose the GID to disable server socket access for.
55275+ Remember to add the users you want server socket access disabled for to
55276+ the GID specified here. If the sysctl option is enabled, a sysctl
55277+ option with name "socket_server_gid" is created.
55278+
55279+endmenu
55280+menu "Sysctl support"
55281+depends on GRKERNSEC && SYSCTL
55282+
55283+config GRKERNSEC_SYSCTL
55284+ bool "Sysctl support"
55285+ help
55286+ If you say Y here, you will be able to change the options that
55287+ grsecurity runs with at bootup, without having to recompile your
55288+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55289+ to enable (1) or disable (0) various features. All the sysctl entries
55290+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55291+ All features enabled in the kernel configuration are disabled at boot
55292+ if you do not say Y to the "Turn on features by default" option.
55293+ All options should be set at startup, and the grsec_lock entry should
55294+ be set to a non-zero value after all the options are set.
55295+ *THIS IS EXTREMELY IMPORTANT*
55296+
55297+config GRKERNSEC_SYSCTL_DISTRO
55298+ bool "Extra sysctl support for distro makers (READ HELP)"
55299+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55300+ help
55301+ If you say Y here, additional sysctl options will be created
55302+ for features that affect processes running as root. Therefore,
55303+ it is critical when using this option that the grsec_lock entry be
55304+ enabled after boot. Only distros with prebuilt kernel packages
55305+ with this option enabled that can ensure grsec_lock is enabled
55306+ after boot should use this option.
55307+ *Failure to set grsec_lock after boot makes all grsec features
55308+ this option covers useless*
55309+
55310+ Currently this option creates the following sysctl entries:
55311+ "Disable Privileged I/O": "disable_priv_io"
55312+
55313+config GRKERNSEC_SYSCTL_ON
55314+ bool "Turn on features by default"
55315+ depends on GRKERNSEC_SYSCTL
55316+ help
55317+ If you say Y here, instead of having all features enabled in the
55318+ kernel configuration disabled at boot time, the features will be
55319+ enabled at boot time. It is recommended you say Y here unless
55320+ there is some reason you would want all sysctl-tunable features to
55321+ be disabled by default. As mentioned elsewhere, it is important
55322+ to enable the grsec_lock entry once you have finished modifying
55323+ the sysctl entries.
55324+
55325+endmenu
55326+menu "Logging Options"
55327+depends on GRKERNSEC
55328+
55329+config GRKERNSEC_FLOODTIME
55330+ int "Seconds in between log messages (minimum)"
55331+ default 10
55332+ help
55333+ This option allows you to enforce the number of seconds between
55334+ grsecurity log messages. The default should be suitable for most
55335+ people, however, if you choose to change it, choose a value small enough
55336+ to allow informative logs to be produced, but large enough to
55337+ prevent flooding.
55338+
55339+config GRKERNSEC_FLOODBURST
55340+ int "Number of messages in a burst (maximum)"
55341+ default 6
55342+ help
55343+ This option allows you to choose the maximum number of messages allowed
55344+ within the flood time interval you chose in a separate option. The
55345+ default should be suitable for most people, however if you find that
55346+ many of your logs are being interpreted as flooding, you may want to
55347+ raise this value.
55348+
55349+endmenu
55350+
55351+endmenu
55352diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55353new file mode 100644
55354index 0000000..be9ae3a
55355--- /dev/null
55356+++ b/grsecurity/Makefile
55357@@ -0,0 +1,36 @@
55358+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55359+# during 2001-2009 it has been completely redesigned by Brad Spengler
55360+# into an RBAC system
55361+#
55362+# All code in this directory and various hooks inserted throughout the kernel
55363+# are copyright Brad Spengler - Open Source Security, Inc., and released
55364+# under the GPL v2 or higher
55365+
55366+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55367+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55368+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55369+
55370+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55371+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55372+ gracl_learn.o grsec_log.o
55373+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55374+
55375+ifdef CONFIG_NET
55376+obj-y += grsec_sock.o
55377+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55378+endif
55379+
55380+ifndef CONFIG_GRKERNSEC
55381+obj-y += grsec_disabled.o
55382+endif
55383+
55384+ifdef CONFIG_GRKERNSEC_HIDESYM
55385+extra-y := grsec_hidesym.o
55386+$(obj)/grsec_hidesym.o:
55387+ @-chmod -f 500 /boot
55388+ @-chmod -f 500 /lib/modules
55389+ @-chmod -f 500 /lib64/modules
55390+ @-chmod -f 500 /lib32/modules
55391+ @-chmod -f 700 .
55392+ @echo ' grsec: protected kernel image paths'
55393+endif
55394diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55395new file mode 100644
55396index 0000000..6bd68d6
55397--- /dev/null
55398+++ b/grsecurity/gracl.c
55399@@ -0,0 +1,4141 @@
55400+#include <linux/kernel.h>
55401+#include <linux/module.h>
55402+#include <linux/sched.h>
55403+#include <linux/mm.h>
55404+#include <linux/file.h>
55405+#include <linux/fs.h>
55406+#include <linux/namei.h>
55407+#include <linux/mount.h>
55408+#include <linux/tty.h>
55409+#include <linux/proc_fs.h>
55410+#include <linux/smp_lock.h>
55411+#include <linux/slab.h>
55412+#include <linux/vmalloc.h>
55413+#include <linux/types.h>
55414+#include <linux/sysctl.h>
55415+#include <linux/netdevice.h>
55416+#include <linux/ptrace.h>
55417+#include <linux/gracl.h>
55418+#include <linux/gralloc.h>
55419+#include <linux/grsecurity.h>
55420+#include <linux/grinternal.h>
55421+#include <linux/pid_namespace.h>
55422+#include <linux/fdtable.h>
55423+#include <linux/percpu.h>
55424+
55425+#include <asm/uaccess.h>
55426+#include <asm/errno.h>
55427+#include <asm/mman.h>
55428+
55429+static struct acl_role_db acl_role_set;
55430+static struct name_db name_set;
55431+static struct inodev_db inodev_set;
55432+
55433+/* for keeping track of userspace pointers used for subjects, so we
55434+ can share references in the kernel as well
55435+*/
55436+
55437+static struct dentry *real_root;
55438+static struct vfsmount *real_root_mnt;
55439+
55440+static struct acl_subj_map_db subj_map_set;
55441+
55442+static struct acl_role_label *default_role;
55443+
55444+static struct acl_role_label *role_list;
55445+
55446+static u16 acl_sp_role_value;
55447+
55448+extern char *gr_shared_page[4];
55449+static DEFINE_MUTEX(gr_dev_mutex);
55450+DEFINE_RWLOCK(gr_inode_lock);
55451+
55452+struct gr_arg *gr_usermode;
55453+
55454+static unsigned int gr_status __read_only = GR_STATUS_INIT;
55455+
55456+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55457+extern void gr_clear_learn_entries(void);
55458+
55459+#ifdef CONFIG_GRKERNSEC_RESLOG
55460+extern void gr_log_resource(const struct task_struct *task,
55461+ const int res, const unsigned long wanted, const int gt);
55462+#endif
55463+
55464+unsigned char *gr_system_salt;
55465+unsigned char *gr_system_sum;
55466+
55467+static struct sprole_pw **acl_special_roles = NULL;
55468+static __u16 num_sprole_pws = 0;
55469+
55470+static struct acl_role_label *kernel_role = NULL;
55471+
55472+static unsigned int gr_auth_attempts = 0;
55473+static unsigned long gr_auth_expires = 0UL;
55474+
55475+#ifdef CONFIG_NET
55476+extern struct vfsmount *sock_mnt;
55477+#endif
55478+extern struct vfsmount *pipe_mnt;
55479+extern struct vfsmount *shm_mnt;
55480+#ifdef CONFIG_HUGETLBFS
55481+extern struct vfsmount *hugetlbfs_vfsmount;
55482+#endif
55483+
55484+static struct acl_object_label *fakefs_obj_rw;
55485+static struct acl_object_label *fakefs_obj_rwx;
55486+
55487+extern int gr_init_uidset(void);
55488+extern void gr_free_uidset(void);
55489+extern void gr_remove_uid(uid_t uid);
55490+extern int gr_find_uid(uid_t uid);
55491+
55492+__inline__ int
55493+gr_acl_is_enabled(void)
55494+{
55495+ return (gr_status & GR_READY);
55496+}
55497+
55498+#ifdef CONFIG_BTRFS_FS
55499+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55500+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55501+#endif
55502+
55503+static inline dev_t __get_dev(const struct dentry *dentry)
55504+{
55505+#ifdef CONFIG_BTRFS_FS
55506+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55507+ return get_btrfs_dev_from_inode(dentry->d_inode);
55508+ else
55509+#endif
55510+ return dentry->d_inode->i_sb->s_dev;
55511+}
55512+
55513+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55514+{
55515+ return __get_dev(dentry);
55516+}
55517+
55518+static char gr_task_roletype_to_char(struct task_struct *task)
55519+{
55520+ switch (task->role->roletype &
55521+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
55522+ GR_ROLE_SPECIAL)) {
55523+ case GR_ROLE_DEFAULT:
55524+ return 'D';
55525+ case GR_ROLE_USER:
55526+ return 'U';
55527+ case GR_ROLE_GROUP:
55528+ return 'G';
55529+ case GR_ROLE_SPECIAL:
55530+ return 'S';
55531+ }
55532+
55533+ return 'X';
55534+}
55535+
55536+char gr_roletype_to_char(void)
55537+{
55538+ return gr_task_roletype_to_char(current);
55539+}
55540+
55541+__inline__ int
55542+gr_acl_tpe_check(void)
55543+{
55544+ if (unlikely(!(gr_status & GR_READY)))
55545+ return 0;
55546+ if (current->role->roletype & GR_ROLE_TPE)
55547+ return 1;
55548+ else
55549+ return 0;
55550+}
55551+
55552+int
55553+gr_handle_rawio(const struct inode *inode)
55554+{
55555+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55556+ if (inode && S_ISBLK(inode->i_mode) &&
55557+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55558+ !capable(CAP_SYS_RAWIO))
55559+ return 1;
55560+#endif
55561+ return 0;
55562+}
55563+
55564+static int
55565+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
55566+{
55567+ if (likely(lena != lenb))
55568+ return 0;
55569+
55570+ return !memcmp(a, b, lena);
55571+}
55572+
55573+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
55574+{
55575+ *buflen -= namelen;
55576+ if (*buflen < 0)
55577+ return -ENAMETOOLONG;
55578+ *buffer -= namelen;
55579+ memcpy(*buffer, str, namelen);
55580+ return 0;
55581+}
55582+
55583+/* this must be called with vfsmount_lock and dcache_lock held */
55584+
55585+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55586+ struct dentry *root, struct vfsmount *rootmnt,
55587+ char *buffer, int buflen)
55588+{
55589+ char * end = buffer+buflen;
55590+ char * retval;
55591+ int namelen;
55592+
55593+ *--end = '\0';
55594+ buflen--;
55595+
55596+ if (buflen < 1)
55597+ goto Elong;
55598+ /* Get '/' right */
55599+ retval = end-1;
55600+ *retval = '/';
55601+
55602+ for (;;) {
55603+ struct dentry * parent;
55604+
55605+ if (dentry == root && vfsmnt == rootmnt)
55606+ break;
55607+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
55608+ /* Global root? */
55609+ if (vfsmnt->mnt_parent == vfsmnt)
55610+ goto global_root;
55611+ dentry = vfsmnt->mnt_mountpoint;
55612+ vfsmnt = vfsmnt->mnt_parent;
55613+ continue;
55614+ }
55615+ parent = dentry->d_parent;
55616+ prefetch(parent);
55617+ namelen = dentry->d_name.len;
55618+ buflen -= namelen + 1;
55619+ if (buflen < 0)
55620+ goto Elong;
55621+ end -= namelen;
55622+ memcpy(end, dentry->d_name.name, namelen);
55623+ *--end = '/';
55624+ retval = end;
55625+ dentry = parent;
55626+ }
55627+
55628+out:
55629+ return retval;
55630+
55631+global_root:
55632+ namelen = dentry->d_name.len;
55633+ buflen -= namelen;
55634+ if (buflen < 0)
55635+ goto Elong;
55636+ retval -= namelen-1; /* hit the slash */
55637+ memcpy(retval, dentry->d_name.name, namelen);
55638+ goto out;
55639+Elong:
55640+ retval = ERR_PTR(-ENAMETOOLONG);
55641+ goto out;
55642+}
55643+
55644+static char *
55645+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55646+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
55647+{
55648+ char *retval;
55649+
55650+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
55651+ if (unlikely(IS_ERR(retval)))
55652+ retval = strcpy(buf, "<path too long>");
55653+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
55654+ retval[1] = '\0';
55655+
55656+ return retval;
55657+}
55658+
55659+static char *
55660+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55661+ char *buf, int buflen)
55662+{
55663+ char *res;
55664+
55665+ /* we can use real_root, real_root_mnt, because this is only called
55666+ by the RBAC system */
55667+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
55668+
55669+ return res;
55670+}
55671+
55672+static char *
55673+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55674+ char *buf, int buflen)
55675+{
55676+ char *res;
55677+ struct dentry *root;
55678+ struct vfsmount *rootmnt;
55679+ struct task_struct *reaper = &init_task;
55680+
55681+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
55682+ read_lock(&reaper->fs->lock);
55683+ root = dget(reaper->fs->root.dentry);
55684+ rootmnt = mntget(reaper->fs->root.mnt);
55685+ read_unlock(&reaper->fs->lock);
55686+
55687+ spin_lock(&dcache_lock);
55688+ spin_lock(&vfsmount_lock);
55689+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
55690+ spin_unlock(&vfsmount_lock);
55691+ spin_unlock(&dcache_lock);
55692+
55693+ dput(root);
55694+ mntput(rootmnt);
55695+ return res;
55696+}
55697+
55698+static char *
55699+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55700+{
55701+ char *ret;
55702+ spin_lock(&dcache_lock);
55703+ spin_lock(&vfsmount_lock);
55704+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55705+ PAGE_SIZE);
55706+ spin_unlock(&vfsmount_lock);
55707+ spin_unlock(&dcache_lock);
55708+ return ret;
55709+}
55710+
55711+static char *
55712+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55713+{
55714+ char *ret;
55715+ char *buf;
55716+ int buflen;
55717+
55718+ spin_lock(&dcache_lock);
55719+ spin_lock(&vfsmount_lock);
55720+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55721+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
55722+ buflen = (int)(ret - buf);
55723+ if (buflen >= 5)
55724+ prepend(&ret, &buflen, "/proc", 5);
55725+ else
55726+ ret = strcpy(buf, "<path too long>");
55727+ spin_unlock(&vfsmount_lock);
55728+ spin_unlock(&dcache_lock);
55729+ return ret;
55730+}
55731+
55732+char *
55733+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
55734+{
55735+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55736+ PAGE_SIZE);
55737+}
55738+
55739+char *
55740+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
55741+{
55742+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55743+ PAGE_SIZE);
55744+}
55745+
55746+char *
55747+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
55748+{
55749+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
55750+ PAGE_SIZE);
55751+}
55752+
55753+char *
55754+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
55755+{
55756+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
55757+ PAGE_SIZE);
55758+}
55759+
55760+char *
55761+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
55762+{
55763+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
55764+ PAGE_SIZE);
55765+}
55766+
55767+__inline__ __u32
55768+to_gr_audit(const __u32 reqmode)
55769+{
55770+ /* masks off auditable permission flags, then shifts them to create
55771+ auditing flags, and adds the special case of append auditing if
55772+ we're requesting write */
55773+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
55774+}
55775+
55776+struct acl_subject_label *
55777+lookup_subject_map(const struct acl_subject_label *userp)
55778+{
55779+ unsigned int index = shash(userp, subj_map_set.s_size);
55780+ struct subject_map *match;
55781+
55782+ match = subj_map_set.s_hash[index];
55783+
55784+ while (match && match->user != userp)
55785+ match = match->next;
55786+
55787+ if (match != NULL)
55788+ return match->kernel;
55789+ else
55790+ return NULL;
55791+}
55792+
55793+static void
55794+insert_subj_map_entry(struct subject_map *subjmap)
55795+{
55796+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
55797+ struct subject_map **curr;
55798+
55799+ subjmap->prev = NULL;
55800+
55801+ curr = &subj_map_set.s_hash[index];
55802+ if (*curr != NULL)
55803+ (*curr)->prev = subjmap;
55804+
55805+ subjmap->next = *curr;
55806+ *curr = subjmap;
55807+
55808+ return;
55809+}
55810+
55811+static struct acl_role_label *
55812+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
55813+ const gid_t gid)
55814+{
55815+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
55816+ struct acl_role_label *match;
55817+ struct role_allowed_ip *ipp;
55818+ unsigned int x;
55819+ u32 curr_ip = task->signal->curr_ip;
55820+
55821+ task->signal->saved_ip = curr_ip;
55822+
55823+ match = acl_role_set.r_hash[index];
55824+
55825+ while (match) {
55826+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
55827+ for (x = 0; x < match->domain_child_num; x++) {
55828+ if (match->domain_children[x] == uid)
55829+ goto found;
55830+ }
55831+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
55832+ break;
55833+ match = match->next;
55834+ }
55835+found:
55836+ if (match == NULL) {
55837+ try_group:
55838+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
55839+ match = acl_role_set.r_hash[index];
55840+
55841+ while (match) {
55842+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
55843+ for (x = 0; x < match->domain_child_num; x++) {
55844+ if (match->domain_children[x] == gid)
55845+ goto found2;
55846+ }
55847+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
55848+ break;
55849+ match = match->next;
55850+ }
55851+found2:
55852+ if (match == NULL)
55853+ match = default_role;
55854+ if (match->allowed_ips == NULL)
55855+ return match;
55856+ else {
55857+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55858+ if (likely
55859+ ((ntohl(curr_ip) & ipp->netmask) ==
55860+ (ntohl(ipp->addr) & ipp->netmask)))
55861+ return match;
55862+ }
55863+ match = default_role;
55864+ }
55865+ } else if (match->allowed_ips == NULL) {
55866+ return match;
55867+ } else {
55868+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55869+ if (likely
55870+ ((ntohl(curr_ip) & ipp->netmask) ==
55871+ (ntohl(ipp->addr) & ipp->netmask)))
55872+ return match;
55873+ }
55874+ goto try_group;
55875+ }
55876+
55877+ return match;
55878+}
55879+
55880+struct acl_subject_label *
55881+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
55882+ const struct acl_role_label *role)
55883+{
55884+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55885+ struct acl_subject_label *match;
55886+
55887+ match = role->subj_hash[index];
55888+
55889+ while (match && (match->inode != ino || match->device != dev ||
55890+ (match->mode & GR_DELETED))) {
55891+ match = match->next;
55892+ }
55893+
55894+ if (match && !(match->mode & GR_DELETED))
55895+ return match;
55896+ else
55897+ return NULL;
55898+}
55899+
55900+struct acl_subject_label *
55901+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
55902+ const struct acl_role_label *role)
55903+{
55904+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55905+ struct acl_subject_label *match;
55906+
55907+ match = role->subj_hash[index];
55908+
55909+ while (match && (match->inode != ino || match->device != dev ||
55910+ !(match->mode & GR_DELETED))) {
55911+ match = match->next;
55912+ }
55913+
55914+ if (match && (match->mode & GR_DELETED))
55915+ return match;
55916+ else
55917+ return NULL;
55918+}
55919+
55920+static struct acl_object_label *
55921+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
55922+ const struct acl_subject_label *subj)
55923+{
55924+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55925+ struct acl_object_label *match;
55926+
55927+ match = subj->obj_hash[index];
55928+
55929+ while (match && (match->inode != ino || match->device != dev ||
55930+ (match->mode & GR_DELETED))) {
55931+ match = match->next;
55932+ }
55933+
55934+ if (match && !(match->mode & GR_DELETED))
55935+ return match;
55936+ else
55937+ return NULL;
55938+}
55939+
55940+static struct acl_object_label *
55941+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
55942+ const struct acl_subject_label *subj)
55943+{
55944+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55945+ struct acl_object_label *match;
55946+
55947+ match = subj->obj_hash[index];
55948+
55949+ while (match && (match->inode != ino || match->device != dev ||
55950+ !(match->mode & GR_DELETED))) {
55951+ match = match->next;
55952+ }
55953+
55954+ if (match && (match->mode & GR_DELETED))
55955+ return match;
55956+
55957+ match = subj->obj_hash[index];
55958+
55959+ while (match && (match->inode != ino || match->device != dev ||
55960+ (match->mode & GR_DELETED))) {
55961+ match = match->next;
55962+ }
55963+
55964+ if (match && !(match->mode & GR_DELETED))
55965+ return match;
55966+ else
55967+ return NULL;
55968+}
55969+
55970+static struct name_entry *
55971+lookup_name_entry(const char *name)
55972+{
55973+ unsigned int len = strlen(name);
55974+ unsigned int key = full_name_hash(name, len);
55975+ unsigned int index = key % name_set.n_size;
55976+ struct name_entry *match;
55977+
55978+ match = name_set.n_hash[index];
55979+
55980+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
55981+ match = match->next;
55982+
55983+ return match;
55984+}
55985+
55986+static struct name_entry *
55987+lookup_name_entry_create(const char *name)
55988+{
55989+ unsigned int len = strlen(name);
55990+ unsigned int key = full_name_hash(name, len);
55991+ unsigned int index = key % name_set.n_size;
55992+ struct name_entry *match;
55993+
55994+ match = name_set.n_hash[index];
55995+
55996+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55997+ !match->deleted))
55998+ match = match->next;
55999+
56000+ if (match && match->deleted)
56001+ return match;
56002+
56003+ match = name_set.n_hash[index];
56004+
56005+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56006+ match->deleted))
56007+ match = match->next;
56008+
56009+ if (match && !match->deleted)
56010+ return match;
56011+ else
56012+ return NULL;
56013+}
56014+
56015+static struct inodev_entry *
56016+lookup_inodev_entry(const ino_t ino, const dev_t dev)
56017+{
56018+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
56019+ struct inodev_entry *match;
56020+
56021+ match = inodev_set.i_hash[index];
56022+
56023+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56024+ match = match->next;
56025+
56026+ return match;
56027+}
56028+
56029+static void
56030+insert_inodev_entry(struct inodev_entry *entry)
56031+{
56032+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56033+ inodev_set.i_size);
56034+ struct inodev_entry **curr;
56035+
56036+ entry->prev = NULL;
56037+
56038+ curr = &inodev_set.i_hash[index];
56039+ if (*curr != NULL)
56040+ (*curr)->prev = entry;
56041+
56042+ entry->next = *curr;
56043+ *curr = entry;
56044+
56045+ return;
56046+}
56047+
56048+static void
56049+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56050+{
56051+ unsigned int index =
56052+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56053+ struct acl_role_label **curr;
56054+ struct acl_role_label *tmp;
56055+
56056+ curr = &acl_role_set.r_hash[index];
56057+
56058+ /* if role was already inserted due to domains and already has
56059+ a role in the same bucket as it attached, then we need to
56060+ combine these two buckets
56061+ */
56062+ if (role->next) {
56063+ tmp = role->next;
56064+ while (tmp->next)
56065+ tmp = tmp->next;
56066+ tmp->next = *curr;
56067+ } else
56068+ role->next = *curr;
56069+ *curr = role;
56070+
56071+ return;
56072+}
56073+
56074+static void
56075+insert_acl_role_label(struct acl_role_label *role)
56076+{
56077+ int i;
56078+
56079+ if (role_list == NULL) {
56080+ role_list = role;
56081+ role->prev = NULL;
56082+ } else {
56083+ role->prev = role_list;
56084+ role_list = role;
56085+ }
56086+
56087+ /* used for hash chains */
56088+ role->next = NULL;
56089+
56090+ if (role->roletype & GR_ROLE_DOMAIN) {
56091+ for (i = 0; i < role->domain_child_num; i++)
56092+ __insert_acl_role_label(role, role->domain_children[i]);
56093+ } else
56094+ __insert_acl_role_label(role, role->uidgid);
56095+}
56096+
56097+static int
56098+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56099+{
56100+ struct name_entry **curr, *nentry;
56101+ struct inodev_entry *ientry;
56102+ unsigned int len = strlen(name);
56103+ unsigned int key = full_name_hash(name, len);
56104+ unsigned int index = key % name_set.n_size;
56105+
56106+ curr = &name_set.n_hash[index];
56107+
56108+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56109+ curr = &((*curr)->next);
56110+
56111+ if (*curr != NULL)
56112+ return 1;
56113+
56114+ nentry = acl_alloc(sizeof (struct name_entry));
56115+ if (nentry == NULL)
56116+ return 0;
56117+ ientry = acl_alloc(sizeof (struct inodev_entry));
56118+ if (ientry == NULL)
56119+ return 0;
56120+ ientry->nentry = nentry;
56121+
56122+ nentry->key = key;
56123+ nentry->name = name;
56124+ nentry->inode = inode;
56125+ nentry->device = device;
56126+ nentry->len = len;
56127+ nentry->deleted = deleted;
56128+
56129+ nentry->prev = NULL;
56130+ curr = &name_set.n_hash[index];
56131+ if (*curr != NULL)
56132+ (*curr)->prev = nentry;
56133+ nentry->next = *curr;
56134+ *curr = nentry;
56135+
56136+ /* insert us into the table searchable by inode/dev */
56137+ insert_inodev_entry(ientry);
56138+
56139+ return 1;
56140+}
56141+
56142+static void
56143+insert_acl_obj_label(struct acl_object_label *obj,
56144+ struct acl_subject_label *subj)
56145+{
56146+ unsigned int index =
56147+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56148+ struct acl_object_label **curr;
56149+
56150+
56151+ obj->prev = NULL;
56152+
56153+ curr = &subj->obj_hash[index];
56154+ if (*curr != NULL)
56155+ (*curr)->prev = obj;
56156+
56157+ obj->next = *curr;
56158+ *curr = obj;
56159+
56160+ return;
56161+}
56162+
56163+static void
56164+insert_acl_subj_label(struct acl_subject_label *obj,
56165+ struct acl_role_label *role)
56166+{
56167+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56168+ struct acl_subject_label **curr;
56169+
56170+ obj->prev = NULL;
56171+
56172+ curr = &role->subj_hash[index];
56173+ if (*curr != NULL)
56174+ (*curr)->prev = obj;
56175+
56176+ obj->next = *curr;
56177+ *curr = obj;
56178+
56179+ return;
56180+}
56181+
56182+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56183+
56184+static void *
56185+create_table(__u32 * len, int elementsize)
56186+{
56187+ unsigned int table_sizes[] = {
56188+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56189+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56190+ 4194301, 8388593, 16777213, 33554393, 67108859
56191+ };
56192+ void *newtable = NULL;
56193+ unsigned int pwr = 0;
56194+
56195+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56196+ table_sizes[pwr] <= *len)
56197+ pwr++;
56198+
56199+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56200+ return newtable;
56201+
56202+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56203+ newtable =
56204+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56205+ else
56206+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56207+
56208+ *len = table_sizes[pwr];
56209+
56210+ return newtable;
56211+}
56212+
56213+static int
56214+init_variables(const struct gr_arg *arg)
56215+{
56216+ struct task_struct *reaper = &init_task;
56217+ unsigned int stacksize;
56218+
56219+ subj_map_set.s_size = arg->role_db.num_subjects;
56220+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56221+ name_set.n_size = arg->role_db.num_objects;
56222+ inodev_set.i_size = arg->role_db.num_objects;
56223+
56224+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56225+ !name_set.n_size || !inodev_set.i_size)
56226+ return 1;
56227+
56228+ if (!gr_init_uidset())
56229+ return 1;
56230+
56231+ /* set up the stack that holds allocation info */
56232+
56233+ stacksize = arg->role_db.num_pointers + 5;
56234+
56235+ if (!acl_alloc_stack_init(stacksize))
56236+ return 1;
56237+
56238+ /* grab reference for the real root dentry and vfsmount */
56239+ read_lock(&reaper->fs->lock);
56240+ real_root = dget(reaper->fs->root.dentry);
56241+ real_root_mnt = mntget(reaper->fs->root.mnt);
56242+ read_unlock(&reaper->fs->lock);
56243+
56244+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56245+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56246+#endif
56247+
56248+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56249+ if (fakefs_obj_rw == NULL)
56250+ return 1;
56251+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56252+
56253+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56254+ if (fakefs_obj_rwx == NULL)
56255+ return 1;
56256+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56257+
56258+ subj_map_set.s_hash =
56259+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56260+ acl_role_set.r_hash =
56261+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56262+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56263+ inodev_set.i_hash =
56264+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56265+
56266+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56267+ !name_set.n_hash || !inodev_set.i_hash)
56268+ return 1;
56269+
56270+ memset(subj_map_set.s_hash, 0,
56271+ sizeof(struct subject_map *) * subj_map_set.s_size);
56272+ memset(acl_role_set.r_hash, 0,
56273+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56274+ memset(name_set.n_hash, 0,
56275+ sizeof (struct name_entry *) * name_set.n_size);
56276+ memset(inodev_set.i_hash, 0,
56277+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56278+
56279+ return 0;
56280+}
56281+
56282+/* free information not needed after startup
56283+ currently contains user->kernel pointer mappings for subjects
56284+*/
56285+
56286+static void
56287+free_init_variables(void)
56288+{
56289+ __u32 i;
56290+
56291+ if (subj_map_set.s_hash) {
56292+ for (i = 0; i < subj_map_set.s_size; i++) {
56293+ if (subj_map_set.s_hash[i]) {
56294+ kfree(subj_map_set.s_hash[i]);
56295+ subj_map_set.s_hash[i] = NULL;
56296+ }
56297+ }
56298+
56299+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56300+ PAGE_SIZE)
56301+ kfree(subj_map_set.s_hash);
56302+ else
56303+ vfree(subj_map_set.s_hash);
56304+ }
56305+
56306+ return;
56307+}
56308+
56309+static void
56310+free_variables(void)
56311+{
56312+ struct acl_subject_label *s;
56313+ struct acl_role_label *r;
56314+ struct task_struct *task, *task2;
56315+ unsigned int x;
56316+
56317+ gr_clear_learn_entries();
56318+
56319+ read_lock(&tasklist_lock);
56320+ do_each_thread(task2, task) {
56321+ task->acl_sp_role = 0;
56322+ task->acl_role_id = 0;
56323+ task->acl = NULL;
56324+ task->role = NULL;
56325+ } while_each_thread(task2, task);
56326+ read_unlock(&tasklist_lock);
56327+
56328+ /* release the reference to the real root dentry and vfsmount */
56329+ if (real_root)
56330+ dput(real_root);
56331+ real_root = NULL;
56332+ if (real_root_mnt)
56333+ mntput(real_root_mnt);
56334+ real_root_mnt = NULL;
56335+
56336+ /* free all object hash tables */
56337+
56338+ FOR_EACH_ROLE_START(r)
56339+ if (r->subj_hash == NULL)
56340+ goto next_role;
56341+ FOR_EACH_SUBJECT_START(r, s, x)
56342+ if (s->obj_hash == NULL)
56343+ break;
56344+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56345+ kfree(s->obj_hash);
56346+ else
56347+ vfree(s->obj_hash);
56348+ FOR_EACH_SUBJECT_END(s, x)
56349+ FOR_EACH_NESTED_SUBJECT_START(r, s)
56350+ if (s->obj_hash == NULL)
56351+ break;
56352+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56353+ kfree(s->obj_hash);
56354+ else
56355+ vfree(s->obj_hash);
56356+ FOR_EACH_NESTED_SUBJECT_END(s)
56357+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56358+ kfree(r->subj_hash);
56359+ else
56360+ vfree(r->subj_hash);
56361+ r->subj_hash = NULL;
56362+next_role:
56363+ FOR_EACH_ROLE_END(r)
56364+
56365+ acl_free_all();
56366+
56367+ if (acl_role_set.r_hash) {
56368+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56369+ PAGE_SIZE)
56370+ kfree(acl_role_set.r_hash);
56371+ else
56372+ vfree(acl_role_set.r_hash);
56373+ }
56374+ if (name_set.n_hash) {
56375+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
56376+ PAGE_SIZE)
56377+ kfree(name_set.n_hash);
56378+ else
56379+ vfree(name_set.n_hash);
56380+ }
56381+
56382+ if (inodev_set.i_hash) {
56383+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56384+ PAGE_SIZE)
56385+ kfree(inodev_set.i_hash);
56386+ else
56387+ vfree(inodev_set.i_hash);
56388+ }
56389+
56390+ gr_free_uidset();
56391+
56392+ memset(&name_set, 0, sizeof (struct name_db));
56393+ memset(&inodev_set, 0, sizeof (struct inodev_db));
56394+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56395+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56396+
56397+ default_role = NULL;
56398+ role_list = NULL;
56399+
56400+ return;
56401+}
56402+
56403+static __u32
56404+count_user_objs(struct acl_object_label *userp)
56405+{
56406+ struct acl_object_label o_tmp;
56407+ __u32 num = 0;
56408+
56409+ while (userp) {
56410+ if (copy_from_user(&o_tmp, userp,
56411+ sizeof (struct acl_object_label)))
56412+ break;
56413+
56414+ userp = o_tmp.prev;
56415+ num++;
56416+ }
56417+
56418+ return num;
56419+}
56420+
56421+static struct acl_subject_label *
56422+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56423+
56424+static int
56425+copy_user_glob(struct acl_object_label *obj)
56426+{
56427+ struct acl_object_label *g_tmp, **guser;
56428+ unsigned int len;
56429+ char *tmp;
56430+
56431+ if (obj->globbed == NULL)
56432+ return 0;
56433+
56434+ guser = &obj->globbed;
56435+ while (*guser) {
56436+ g_tmp = (struct acl_object_label *)
56437+ acl_alloc(sizeof (struct acl_object_label));
56438+ if (g_tmp == NULL)
56439+ return -ENOMEM;
56440+
56441+ if (copy_from_user(g_tmp, *guser,
56442+ sizeof (struct acl_object_label)))
56443+ return -EFAULT;
56444+
56445+ len = strnlen_user(g_tmp->filename, PATH_MAX);
56446+
56447+ if (!len || len >= PATH_MAX)
56448+ return -EINVAL;
56449+
56450+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56451+ return -ENOMEM;
56452+
56453+ if (copy_from_user(tmp, g_tmp->filename, len))
56454+ return -EFAULT;
56455+ tmp[len-1] = '\0';
56456+ g_tmp->filename = tmp;
56457+
56458+ *guser = g_tmp;
56459+ guser = &(g_tmp->next);
56460+ }
56461+
56462+ return 0;
56463+}
56464+
56465+static int
56466+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56467+ struct acl_role_label *role)
56468+{
56469+ struct acl_object_label *o_tmp;
56470+ unsigned int len;
56471+ int ret;
56472+ char *tmp;
56473+
56474+ while (userp) {
56475+ if ((o_tmp = (struct acl_object_label *)
56476+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
56477+ return -ENOMEM;
56478+
56479+ if (copy_from_user(o_tmp, userp,
56480+ sizeof (struct acl_object_label)))
56481+ return -EFAULT;
56482+
56483+ userp = o_tmp->prev;
56484+
56485+ len = strnlen_user(o_tmp->filename, PATH_MAX);
56486+
56487+ if (!len || len >= PATH_MAX)
56488+ return -EINVAL;
56489+
56490+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56491+ return -ENOMEM;
56492+
56493+ if (copy_from_user(tmp, o_tmp->filename, len))
56494+ return -EFAULT;
56495+ tmp[len-1] = '\0';
56496+ o_tmp->filename = tmp;
56497+
56498+ insert_acl_obj_label(o_tmp, subj);
56499+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
56500+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
56501+ return -ENOMEM;
56502+
56503+ ret = copy_user_glob(o_tmp);
56504+ if (ret)
56505+ return ret;
56506+
56507+ if (o_tmp->nested) {
56508+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
56509+ if (IS_ERR(o_tmp->nested))
56510+ return PTR_ERR(o_tmp->nested);
56511+
56512+ /* insert into nested subject list */
56513+ o_tmp->nested->next = role->hash->first;
56514+ role->hash->first = o_tmp->nested;
56515+ }
56516+ }
56517+
56518+ return 0;
56519+}
56520+
56521+static __u32
56522+count_user_subjs(struct acl_subject_label *userp)
56523+{
56524+ struct acl_subject_label s_tmp;
56525+ __u32 num = 0;
56526+
56527+ while (userp) {
56528+ if (copy_from_user(&s_tmp, userp,
56529+ sizeof (struct acl_subject_label)))
56530+ break;
56531+
56532+ userp = s_tmp.prev;
56533+ /* do not count nested subjects against this count, since
56534+ they are not included in the hash table, but are
56535+ attached to objects. We have already counted
56536+ the subjects in userspace for the allocation
56537+ stack
56538+ */
56539+ if (!(s_tmp.mode & GR_NESTED))
56540+ num++;
56541+ }
56542+
56543+ return num;
56544+}
56545+
56546+static int
56547+copy_user_allowedips(struct acl_role_label *rolep)
56548+{
56549+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
56550+
56551+ ruserip = rolep->allowed_ips;
56552+
56553+ while (ruserip) {
56554+ rlast = rtmp;
56555+
56556+ if ((rtmp = (struct role_allowed_ip *)
56557+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
56558+ return -ENOMEM;
56559+
56560+ if (copy_from_user(rtmp, ruserip,
56561+ sizeof (struct role_allowed_ip)))
56562+ return -EFAULT;
56563+
56564+ ruserip = rtmp->prev;
56565+
56566+ if (!rlast) {
56567+ rtmp->prev = NULL;
56568+ rolep->allowed_ips = rtmp;
56569+ } else {
56570+ rlast->next = rtmp;
56571+ rtmp->prev = rlast;
56572+ }
56573+
56574+ if (!ruserip)
56575+ rtmp->next = NULL;
56576+ }
56577+
56578+ return 0;
56579+}
56580+
56581+static int
56582+copy_user_transitions(struct acl_role_label *rolep)
56583+{
56584+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
56585+
56586+ unsigned int len;
56587+ char *tmp;
56588+
56589+ rusertp = rolep->transitions;
56590+
56591+ while (rusertp) {
56592+ rlast = rtmp;
56593+
56594+ if ((rtmp = (struct role_transition *)
56595+ acl_alloc(sizeof (struct role_transition))) == NULL)
56596+ return -ENOMEM;
56597+
56598+ if (copy_from_user(rtmp, rusertp,
56599+ sizeof (struct role_transition)))
56600+ return -EFAULT;
56601+
56602+ rusertp = rtmp->prev;
56603+
56604+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
56605+
56606+ if (!len || len >= GR_SPROLE_LEN)
56607+ return -EINVAL;
56608+
56609+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56610+ return -ENOMEM;
56611+
56612+ if (copy_from_user(tmp, rtmp->rolename, len))
56613+ return -EFAULT;
56614+ tmp[len-1] = '\0';
56615+ rtmp->rolename = tmp;
56616+
56617+ if (!rlast) {
56618+ rtmp->prev = NULL;
56619+ rolep->transitions = rtmp;
56620+ } else {
56621+ rlast->next = rtmp;
56622+ rtmp->prev = rlast;
56623+ }
56624+
56625+ if (!rusertp)
56626+ rtmp->next = NULL;
56627+ }
56628+
56629+ return 0;
56630+}
56631+
56632+static struct acl_subject_label *
56633+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
56634+{
56635+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
56636+ unsigned int len;
56637+ char *tmp;
56638+ __u32 num_objs;
56639+ struct acl_ip_label **i_tmp, *i_utmp2;
56640+ struct gr_hash_struct ghash;
56641+ struct subject_map *subjmap;
56642+ unsigned int i_num;
56643+ int err;
56644+
56645+ s_tmp = lookup_subject_map(userp);
56646+
56647+ /* we've already copied this subject into the kernel, just return
56648+ the reference to it, and don't copy it over again
56649+ */
56650+ if (s_tmp)
56651+ return(s_tmp);
56652+
56653+ if ((s_tmp = (struct acl_subject_label *)
56654+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
56655+ return ERR_PTR(-ENOMEM);
56656+
56657+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
56658+ if (subjmap == NULL)
56659+ return ERR_PTR(-ENOMEM);
56660+
56661+ subjmap->user = userp;
56662+ subjmap->kernel = s_tmp;
56663+ insert_subj_map_entry(subjmap);
56664+
56665+ if (copy_from_user(s_tmp, userp,
56666+ sizeof (struct acl_subject_label)))
56667+ return ERR_PTR(-EFAULT);
56668+
56669+ len = strnlen_user(s_tmp->filename, PATH_MAX);
56670+
56671+ if (!len || len >= PATH_MAX)
56672+ return ERR_PTR(-EINVAL);
56673+
56674+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56675+ return ERR_PTR(-ENOMEM);
56676+
56677+ if (copy_from_user(tmp, s_tmp->filename, len))
56678+ return ERR_PTR(-EFAULT);
56679+ tmp[len-1] = '\0';
56680+ s_tmp->filename = tmp;
56681+
56682+ if (!strcmp(s_tmp->filename, "/"))
56683+ role->root_label = s_tmp;
56684+
56685+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
56686+ return ERR_PTR(-EFAULT);
56687+
56688+ /* copy user and group transition tables */
56689+
56690+ if (s_tmp->user_trans_num) {
56691+ uid_t *uidlist;
56692+
56693+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
56694+ if (uidlist == NULL)
56695+ return ERR_PTR(-ENOMEM);
56696+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
56697+ return ERR_PTR(-EFAULT);
56698+
56699+ s_tmp->user_transitions = uidlist;
56700+ }
56701+
56702+ if (s_tmp->group_trans_num) {
56703+ gid_t *gidlist;
56704+
56705+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
56706+ if (gidlist == NULL)
56707+ return ERR_PTR(-ENOMEM);
56708+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
56709+ return ERR_PTR(-EFAULT);
56710+
56711+ s_tmp->group_transitions = gidlist;
56712+ }
56713+
56714+ /* set up object hash table */
56715+ num_objs = count_user_objs(ghash.first);
56716+
56717+ s_tmp->obj_hash_size = num_objs;
56718+ s_tmp->obj_hash =
56719+ (struct acl_object_label **)
56720+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
56721+
56722+ if (!s_tmp->obj_hash)
56723+ return ERR_PTR(-ENOMEM);
56724+
56725+ memset(s_tmp->obj_hash, 0,
56726+ s_tmp->obj_hash_size *
56727+ sizeof (struct acl_object_label *));
56728+
56729+ /* add in objects */
56730+ err = copy_user_objs(ghash.first, s_tmp, role);
56731+
56732+ if (err)
56733+ return ERR_PTR(err);
56734+
56735+ /* set pointer for parent subject */
56736+ if (s_tmp->parent_subject) {
56737+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
56738+
56739+ if (IS_ERR(s_tmp2))
56740+ return s_tmp2;
56741+
56742+ s_tmp->parent_subject = s_tmp2;
56743+ }
56744+
56745+ /* add in ip acls */
56746+
56747+ if (!s_tmp->ip_num) {
56748+ s_tmp->ips = NULL;
56749+ goto insert;
56750+ }
56751+
56752+ i_tmp =
56753+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
56754+ sizeof (struct acl_ip_label *));
56755+
56756+ if (!i_tmp)
56757+ return ERR_PTR(-ENOMEM);
56758+
56759+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
56760+ *(i_tmp + i_num) =
56761+ (struct acl_ip_label *)
56762+ acl_alloc(sizeof (struct acl_ip_label));
56763+ if (!*(i_tmp + i_num))
56764+ return ERR_PTR(-ENOMEM);
56765+
56766+ if (copy_from_user
56767+ (&i_utmp2, s_tmp->ips + i_num,
56768+ sizeof (struct acl_ip_label *)))
56769+ return ERR_PTR(-EFAULT);
56770+
56771+ if (copy_from_user
56772+ (*(i_tmp + i_num), i_utmp2,
56773+ sizeof (struct acl_ip_label)))
56774+ return ERR_PTR(-EFAULT);
56775+
56776+ if ((*(i_tmp + i_num))->iface == NULL)
56777+ continue;
56778+
56779+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
56780+ if (!len || len >= IFNAMSIZ)
56781+ return ERR_PTR(-EINVAL);
56782+ tmp = acl_alloc(len);
56783+ if (tmp == NULL)
56784+ return ERR_PTR(-ENOMEM);
56785+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
56786+ return ERR_PTR(-EFAULT);
56787+ (*(i_tmp + i_num))->iface = tmp;
56788+ }
56789+
56790+ s_tmp->ips = i_tmp;
56791+
56792+insert:
56793+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
56794+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
56795+ return ERR_PTR(-ENOMEM);
56796+
56797+ return s_tmp;
56798+}
56799+
56800+static int
56801+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
56802+{
56803+ struct acl_subject_label s_pre;
56804+ struct acl_subject_label * ret;
56805+ int err;
56806+
56807+ while (userp) {
56808+ if (copy_from_user(&s_pre, userp,
56809+ sizeof (struct acl_subject_label)))
56810+ return -EFAULT;
56811+
56812+ /* do not add nested subjects here, add
56813+ while parsing objects
56814+ */
56815+
56816+ if (s_pre.mode & GR_NESTED) {
56817+ userp = s_pre.prev;
56818+ continue;
56819+ }
56820+
56821+ ret = do_copy_user_subj(userp, role);
56822+
56823+ err = PTR_ERR(ret);
56824+ if (IS_ERR(ret))
56825+ return err;
56826+
56827+ insert_acl_subj_label(ret, role);
56828+
56829+ userp = s_pre.prev;
56830+ }
56831+
56832+ return 0;
56833+}
56834+
56835+static int
56836+copy_user_acl(struct gr_arg *arg)
56837+{
56838+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
56839+ struct sprole_pw *sptmp;
56840+ struct gr_hash_struct *ghash;
56841+ uid_t *domainlist;
56842+ unsigned int r_num;
56843+ unsigned int len;
56844+ char *tmp;
56845+ int err = 0;
56846+ __u16 i;
56847+ __u32 num_subjs;
56848+
56849+ /* we need a default and kernel role */
56850+ if (arg->role_db.num_roles < 2)
56851+ return -EINVAL;
56852+
56853+ /* copy special role authentication info from userspace */
56854+
56855+ num_sprole_pws = arg->num_sprole_pws;
56856+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
56857+
56858+ if (!acl_special_roles) {
56859+ err = -ENOMEM;
56860+ goto cleanup;
56861+ }
56862+
56863+ for (i = 0; i < num_sprole_pws; i++) {
56864+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
56865+ if (!sptmp) {
56866+ err = -ENOMEM;
56867+ goto cleanup;
56868+ }
56869+ if (copy_from_user(sptmp, arg->sprole_pws + i,
56870+ sizeof (struct sprole_pw))) {
56871+ err = -EFAULT;
56872+ goto cleanup;
56873+ }
56874+
56875+ len =
56876+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
56877+
56878+ if (!len || len >= GR_SPROLE_LEN) {
56879+ err = -EINVAL;
56880+ goto cleanup;
56881+ }
56882+
56883+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56884+ err = -ENOMEM;
56885+ goto cleanup;
56886+ }
56887+
56888+ if (copy_from_user(tmp, sptmp->rolename, len)) {
56889+ err = -EFAULT;
56890+ goto cleanup;
56891+ }
56892+ tmp[len-1] = '\0';
56893+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56894+ printk(KERN_ALERT "Copying special role %s\n", tmp);
56895+#endif
56896+ sptmp->rolename = tmp;
56897+ acl_special_roles[i] = sptmp;
56898+ }
56899+
56900+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
56901+
56902+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
56903+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
56904+
56905+ if (!r_tmp) {
56906+ err = -ENOMEM;
56907+ goto cleanup;
56908+ }
56909+
56910+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
56911+ sizeof (struct acl_role_label *))) {
56912+ err = -EFAULT;
56913+ goto cleanup;
56914+ }
56915+
56916+ if (copy_from_user(r_tmp, r_utmp2,
56917+ sizeof (struct acl_role_label))) {
56918+ err = -EFAULT;
56919+ goto cleanup;
56920+ }
56921+
56922+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
56923+
56924+ if (!len || len >= PATH_MAX) {
56925+ err = -EINVAL;
56926+ goto cleanup;
56927+ }
56928+
56929+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56930+ err = -ENOMEM;
56931+ goto cleanup;
56932+ }
56933+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
56934+ err = -EFAULT;
56935+ goto cleanup;
56936+ }
56937+ tmp[len-1] = '\0';
56938+ r_tmp->rolename = tmp;
56939+
56940+ if (!strcmp(r_tmp->rolename, "default")
56941+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
56942+ default_role = r_tmp;
56943+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
56944+ kernel_role = r_tmp;
56945+ }
56946+
56947+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
56948+ err = -ENOMEM;
56949+ goto cleanup;
56950+ }
56951+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
56952+ err = -EFAULT;
56953+ goto cleanup;
56954+ }
56955+
56956+ r_tmp->hash = ghash;
56957+
56958+ num_subjs = count_user_subjs(r_tmp->hash->first);
56959+
56960+ r_tmp->subj_hash_size = num_subjs;
56961+ r_tmp->subj_hash =
56962+ (struct acl_subject_label **)
56963+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
56964+
56965+ if (!r_tmp->subj_hash) {
56966+ err = -ENOMEM;
56967+ goto cleanup;
56968+ }
56969+
56970+ err = copy_user_allowedips(r_tmp);
56971+ if (err)
56972+ goto cleanup;
56973+
56974+ /* copy domain info */
56975+ if (r_tmp->domain_children != NULL) {
56976+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
56977+ if (domainlist == NULL) {
56978+ err = -ENOMEM;
56979+ goto cleanup;
56980+ }
56981+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
56982+ err = -EFAULT;
56983+ goto cleanup;
56984+ }
56985+ r_tmp->domain_children = domainlist;
56986+ }
56987+
56988+ err = copy_user_transitions(r_tmp);
56989+ if (err)
56990+ goto cleanup;
56991+
56992+ memset(r_tmp->subj_hash, 0,
56993+ r_tmp->subj_hash_size *
56994+ sizeof (struct acl_subject_label *));
56995+
56996+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
56997+
56998+ if (err)
56999+ goto cleanup;
57000+
57001+ /* set nested subject list to null */
57002+ r_tmp->hash->first = NULL;
57003+
57004+ insert_acl_role_label(r_tmp);
57005+ }
57006+
57007+ goto return_err;
57008+ cleanup:
57009+ free_variables();
57010+ return_err:
57011+ return err;
57012+
57013+}
57014+
57015+static int
57016+gracl_init(struct gr_arg *args)
57017+{
57018+ int error = 0;
57019+
57020+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57021+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57022+
57023+ if (init_variables(args)) {
57024+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57025+ error = -ENOMEM;
57026+ free_variables();
57027+ goto out;
57028+ }
57029+
57030+ error = copy_user_acl(args);
57031+ free_init_variables();
57032+ if (error) {
57033+ free_variables();
57034+ goto out;
57035+ }
57036+
57037+ if ((error = gr_set_acls(0))) {
57038+ free_variables();
57039+ goto out;
57040+ }
57041+
57042+ pax_open_kernel();
57043+ gr_status |= GR_READY;
57044+ pax_close_kernel();
57045+
57046+ out:
57047+ return error;
57048+}
57049+
57050+/* derived from glibc fnmatch() 0: match, 1: no match*/
57051+
57052+static int
57053+glob_match(const char *p, const char *n)
57054+{
57055+ char c;
57056+
57057+ while ((c = *p++) != '\0') {
57058+ switch (c) {
57059+ case '?':
57060+ if (*n == '\0')
57061+ return 1;
57062+ else if (*n == '/')
57063+ return 1;
57064+ break;
57065+ case '\\':
57066+ if (*n != c)
57067+ return 1;
57068+ break;
57069+ case '*':
57070+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
57071+ if (*n == '/')
57072+ return 1;
57073+ else if (c == '?') {
57074+ if (*n == '\0')
57075+ return 1;
57076+ else
57077+ ++n;
57078+ }
57079+ }
57080+ if (c == '\0') {
57081+ return 0;
57082+ } else {
57083+ const char *endp;
57084+
57085+ if ((endp = strchr(n, '/')) == NULL)
57086+ endp = n + strlen(n);
57087+
57088+ if (c == '[') {
57089+ for (--p; n < endp; ++n)
57090+ if (!glob_match(p, n))
57091+ return 0;
57092+ } else if (c == '/') {
57093+ while (*n != '\0' && *n != '/')
57094+ ++n;
57095+ if (*n == '/' && !glob_match(p, n + 1))
57096+ return 0;
57097+ } else {
57098+ for (--p; n < endp; ++n)
57099+ if (*n == c && !glob_match(p, n))
57100+ return 0;
57101+ }
57102+
57103+ return 1;
57104+ }
57105+ case '[':
57106+ {
57107+ int not;
57108+ char cold;
57109+
57110+ if (*n == '\0' || *n == '/')
57111+ return 1;
57112+
57113+ not = (*p == '!' || *p == '^');
57114+ if (not)
57115+ ++p;
57116+
57117+ c = *p++;
57118+ for (;;) {
57119+ unsigned char fn = (unsigned char)*n;
57120+
57121+ if (c == '\0')
57122+ return 1;
57123+ else {
57124+ if (c == fn)
57125+ goto matched;
57126+ cold = c;
57127+ c = *p++;
57128+
57129+ if (c == '-' && *p != ']') {
57130+ unsigned char cend = *p++;
57131+
57132+ if (cend == '\0')
57133+ return 1;
57134+
57135+ if (cold <= fn && fn <= cend)
57136+ goto matched;
57137+
57138+ c = *p++;
57139+ }
57140+ }
57141+
57142+ if (c == ']')
57143+ break;
57144+ }
57145+ if (!not)
57146+ return 1;
57147+ break;
57148+ matched:
57149+ while (c != ']') {
57150+ if (c == '\0')
57151+ return 1;
57152+
57153+ c = *p++;
57154+ }
57155+ if (not)
57156+ return 1;
57157+ }
57158+ break;
57159+ default:
57160+ if (c != *n)
57161+ return 1;
57162+ }
57163+
57164+ ++n;
57165+ }
57166+
57167+ if (*n == '\0')
57168+ return 0;
57169+
57170+ if (*n == '/')
57171+ return 0;
57172+
57173+ return 1;
57174+}
57175+
57176+static struct acl_object_label *
57177+chk_glob_label(struct acl_object_label *globbed,
57178+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57179+{
57180+ struct acl_object_label *tmp;
57181+
57182+ if (*path == NULL)
57183+ *path = gr_to_filename_nolock(dentry, mnt);
57184+
57185+ tmp = globbed;
57186+
57187+ while (tmp) {
57188+ if (!glob_match(tmp->filename, *path))
57189+ return tmp;
57190+ tmp = tmp->next;
57191+ }
57192+
57193+ return NULL;
57194+}
57195+
57196+static struct acl_object_label *
57197+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57198+ const ino_t curr_ino, const dev_t curr_dev,
57199+ const struct acl_subject_label *subj, char **path, const int checkglob)
57200+{
57201+ struct acl_subject_label *tmpsubj;
57202+ struct acl_object_label *retval;
57203+ struct acl_object_label *retval2;
57204+
57205+ tmpsubj = (struct acl_subject_label *) subj;
57206+ read_lock(&gr_inode_lock);
57207+ do {
57208+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57209+ if (retval) {
57210+ if (checkglob && retval->globbed) {
57211+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57212+ (struct vfsmount *)orig_mnt, path);
57213+ if (retval2)
57214+ retval = retval2;
57215+ }
57216+ break;
57217+ }
57218+ } while ((tmpsubj = tmpsubj->parent_subject));
57219+ read_unlock(&gr_inode_lock);
57220+
57221+ return retval;
57222+}
57223+
57224+static __inline__ struct acl_object_label *
57225+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57226+ const struct dentry *curr_dentry,
57227+ const struct acl_subject_label *subj, char **path, const int checkglob)
57228+{
57229+ int newglob = checkglob;
57230+
57231+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57232+ as we don't want a / * rule to match instead of the / object
57233+ don't do this for create lookups that call this function though, since they're looking up
57234+ on the parent and thus need globbing checks on all paths
57235+ */
57236+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57237+ newglob = GR_NO_GLOB;
57238+
57239+ return __full_lookup(orig_dentry, orig_mnt,
57240+ curr_dentry->d_inode->i_ino,
57241+ __get_dev(curr_dentry), subj, path, newglob);
57242+}
57243+
57244+static struct acl_object_label *
57245+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57246+ const struct acl_subject_label *subj, char *path, const int checkglob)
57247+{
57248+ struct dentry *dentry = (struct dentry *) l_dentry;
57249+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57250+ struct acl_object_label *retval;
57251+
57252+ spin_lock(&dcache_lock);
57253+ spin_lock(&vfsmount_lock);
57254+
57255+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57256+#ifdef CONFIG_NET
57257+ mnt == sock_mnt ||
57258+#endif
57259+#ifdef CONFIG_HUGETLBFS
57260+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57261+#endif
57262+ /* ignore Eric Biederman */
57263+ IS_PRIVATE(l_dentry->d_inode))) {
57264+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57265+ goto out;
57266+ }
57267+
57268+ for (;;) {
57269+ if (dentry == real_root && mnt == real_root_mnt)
57270+ break;
57271+
57272+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57273+ if (mnt->mnt_parent == mnt)
57274+ break;
57275+
57276+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57277+ if (retval != NULL)
57278+ goto out;
57279+
57280+ dentry = mnt->mnt_mountpoint;
57281+ mnt = mnt->mnt_parent;
57282+ continue;
57283+ }
57284+
57285+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57286+ if (retval != NULL)
57287+ goto out;
57288+
57289+ dentry = dentry->d_parent;
57290+ }
57291+
57292+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57293+
57294+ if (retval == NULL)
57295+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57296+out:
57297+ spin_unlock(&vfsmount_lock);
57298+ spin_unlock(&dcache_lock);
57299+
57300+ BUG_ON(retval == NULL);
57301+
57302+ return retval;
57303+}
57304+
57305+static __inline__ struct acl_object_label *
57306+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57307+ const struct acl_subject_label *subj)
57308+{
57309+ char *path = NULL;
57310+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57311+}
57312+
57313+static __inline__ struct acl_object_label *
57314+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57315+ const struct acl_subject_label *subj)
57316+{
57317+ char *path = NULL;
57318+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57319+}
57320+
57321+static __inline__ struct acl_object_label *
57322+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57323+ const struct acl_subject_label *subj, char *path)
57324+{
57325+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57326+}
57327+
57328+static struct acl_subject_label *
57329+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57330+ const struct acl_role_label *role)
57331+{
57332+ struct dentry *dentry = (struct dentry *) l_dentry;
57333+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57334+ struct acl_subject_label *retval;
57335+
57336+ spin_lock(&dcache_lock);
57337+ spin_lock(&vfsmount_lock);
57338+
57339+ for (;;) {
57340+ if (dentry == real_root && mnt == real_root_mnt)
57341+ break;
57342+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57343+ if (mnt->mnt_parent == mnt)
57344+ break;
57345+
57346+ read_lock(&gr_inode_lock);
57347+ retval =
57348+ lookup_acl_subj_label(dentry->d_inode->i_ino,
57349+ __get_dev(dentry), role);
57350+ read_unlock(&gr_inode_lock);
57351+ if (retval != NULL)
57352+ goto out;
57353+
57354+ dentry = mnt->mnt_mountpoint;
57355+ mnt = mnt->mnt_parent;
57356+ continue;
57357+ }
57358+
57359+ read_lock(&gr_inode_lock);
57360+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57361+ __get_dev(dentry), role);
57362+ read_unlock(&gr_inode_lock);
57363+ if (retval != NULL)
57364+ goto out;
57365+
57366+ dentry = dentry->d_parent;
57367+ }
57368+
57369+ read_lock(&gr_inode_lock);
57370+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57371+ __get_dev(dentry), role);
57372+ read_unlock(&gr_inode_lock);
57373+
57374+ if (unlikely(retval == NULL)) {
57375+ read_lock(&gr_inode_lock);
57376+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57377+ __get_dev(real_root), role);
57378+ read_unlock(&gr_inode_lock);
57379+ }
57380+out:
57381+ spin_unlock(&vfsmount_lock);
57382+ spin_unlock(&dcache_lock);
57383+
57384+ BUG_ON(retval == NULL);
57385+
57386+ return retval;
57387+}
57388+
57389+static void
57390+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57391+{
57392+ struct task_struct *task = current;
57393+ const struct cred *cred = current_cred();
57394+
57395+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57396+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57397+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57398+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57399+
57400+ return;
57401+}
57402+
57403+static void
57404+gr_log_learn_sysctl(const char *path, const __u32 mode)
57405+{
57406+ struct task_struct *task = current;
57407+ const struct cred *cred = current_cred();
57408+
57409+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57410+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57411+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57412+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57413+
57414+ return;
57415+}
57416+
57417+static void
57418+gr_log_learn_id_change(const char type, const unsigned int real,
57419+ const unsigned int effective, const unsigned int fs)
57420+{
57421+ struct task_struct *task = current;
57422+ const struct cred *cred = current_cred();
57423+
57424+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57425+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57426+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57427+ type, real, effective, fs, &task->signal->saved_ip);
57428+
57429+ return;
57430+}
57431+
57432+__u32
57433+gr_search_file(const struct dentry * dentry, const __u32 mode,
57434+ const struct vfsmount * mnt)
57435+{
57436+ __u32 retval = mode;
57437+ struct acl_subject_label *curracl;
57438+ struct acl_object_label *currobj;
57439+
57440+ if (unlikely(!(gr_status & GR_READY)))
57441+ return (mode & ~GR_AUDITS);
57442+
57443+ curracl = current->acl;
57444+
57445+ currobj = chk_obj_label(dentry, mnt, curracl);
57446+ retval = currobj->mode & mode;
57447+
57448+ /* if we're opening a specified transfer file for writing
57449+ (e.g. /dev/initctl), then transfer our role to init
57450+ */
57451+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57452+ current->role->roletype & GR_ROLE_PERSIST)) {
57453+ struct task_struct *task = init_pid_ns.child_reaper;
57454+
57455+ if (task->role != current->role) {
57456+ task->acl_sp_role = 0;
57457+ task->acl_role_id = current->acl_role_id;
57458+ task->role = current->role;
57459+ rcu_read_lock();
57460+ read_lock(&grsec_exec_file_lock);
57461+ gr_apply_subject_to_task(task);
57462+ read_unlock(&grsec_exec_file_lock);
57463+ rcu_read_unlock();
57464+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57465+ }
57466+ }
57467+
57468+ if (unlikely
57469+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57470+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57471+ __u32 new_mode = mode;
57472+
57473+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57474+
57475+ retval = new_mode;
57476+
57477+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57478+ new_mode |= GR_INHERIT;
57479+
57480+ if (!(mode & GR_NOLEARN))
57481+ gr_log_learn(dentry, mnt, new_mode);
57482+ }
57483+
57484+ return retval;
57485+}
57486+
57487+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57488+ const struct dentry *parent,
57489+ const struct vfsmount *mnt)
57490+{
57491+ struct name_entry *match;
57492+ struct acl_object_label *matchpo;
57493+ struct acl_subject_label *curracl;
57494+ char *path;
57495+
57496+ if (unlikely(!(gr_status & GR_READY)))
57497+ return NULL;
57498+
57499+ preempt_disable();
57500+ path = gr_to_filename_rbac(new_dentry, mnt);
57501+ match = lookup_name_entry_create(path);
57502+
57503+ curracl = current->acl;
57504+
57505+ if (match) {
57506+ read_lock(&gr_inode_lock);
57507+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
57508+ read_unlock(&gr_inode_lock);
57509+
57510+ if (matchpo) {
57511+ preempt_enable();
57512+ return matchpo;
57513+ }
57514+ }
57515+
57516+ // lookup parent
57517+
57518+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
57519+
57520+ preempt_enable();
57521+ return matchpo;
57522+}
57523+
57524+__u32
57525+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
57526+ const struct vfsmount * mnt, const __u32 mode)
57527+{
57528+ struct acl_object_label *matchpo;
57529+ __u32 retval;
57530+
57531+ if (unlikely(!(gr_status & GR_READY)))
57532+ return (mode & ~GR_AUDITS);
57533+
57534+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
57535+
57536+ retval = matchpo->mode & mode;
57537+
57538+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
57539+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
57540+ __u32 new_mode = mode;
57541+
57542+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57543+
57544+ gr_log_learn(new_dentry, mnt, new_mode);
57545+ return new_mode;
57546+ }
57547+
57548+ return retval;
57549+}
57550+
57551+__u32
57552+gr_check_link(const struct dentry * new_dentry,
57553+ const struct dentry * parent_dentry,
57554+ const struct vfsmount * parent_mnt,
57555+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
57556+{
57557+ struct acl_object_label *obj;
57558+ __u32 oldmode, newmode;
57559+ __u32 needmode;
57560+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
57561+ GR_DELETE | GR_INHERIT;
57562+
57563+ if (unlikely(!(gr_status & GR_READY)))
57564+ return (GR_CREATE | GR_LINK);
57565+
57566+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
57567+ oldmode = obj->mode;
57568+
57569+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
57570+ newmode = obj->mode;
57571+
57572+ needmode = newmode & checkmodes;
57573+
57574+ // old name for hardlink must have at least the permissions of the new name
57575+ if ((oldmode & needmode) != needmode)
57576+ goto bad;
57577+
57578+ // if old name had restrictions/auditing, make sure the new name does as well
57579+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
57580+
57581+ // don't allow hardlinking of suid/sgid files without permission
57582+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57583+ needmode |= GR_SETID;
57584+
57585+ if ((newmode & needmode) != needmode)
57586+ goto bad;
57587+
57588+ // enforce minimum permissions
57589+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
57590+ return newmode;
57591+bad:
57592+ needmode = oldmode;
57593+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57594+ needmode |= GR_SETID;
57595+
57596+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
57597+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
57598+ return (GR_CREATE | GR_LINK);
57599+ } else if (newmode & GR_SUPPRESS)
57600+ return GR_SUPPRESS;
57601+ else
57602+ return 0;
57603+}
57604+
57605+int
57606+gr_check_hidden_task(const struct task_struct *task)
57607+{
57608+ if (unlikely(!(gr_status & GR_READY)))
57609+ return 0;
57610+
57611+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
57612+ return 1;
57613+
57614+ return 0;
57615+}
57616+
57617+int
57618+gr_check_protected_task(const struct task_struct *task)
57619+{
57620+ if (unlikely(!(gr_status & GR_READY) || !task))
57621+ return 0;
57622+
57623+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57624+ task->acl != current->acl)
57625+ return 1;
57626+
57627+ return 0;
57628+}
57629+
57630+int
57631+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57632+{
57633+ struct task_struct *p;
57634+ int ret = 0;
57635+
57636+ if (unlikely(!(gr_status & GR_READY) || !pid))
57637+ return ret;
57638+
57639+ read_lock(&tasklist_lock);
57640+ do_each_pid_task(pid, type, p) {
57641+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57642+ p->acl != current->acl) {
57643+ ret = 1;
57644+ goto out;
57645+ }
57646+ } while_each_pid_task(pid, type, p);
57647+out:
57648+ read_unlock(&tasklist_lock);
57649+
57650+ return ret;
57651+}
57652+
57653+void
57654+gr_copy_label(struct task_struct *tsk)
57655+{
57656+ tsk->signal->used_accept = 0;
57657+ tsk->acl_sp_role = 0;
57658+ tsk->acl_role_id = current->acl_role_id;
57659+ tsk->acl = current->acl;
57660+ tsk->role = current->role;
57661+ tsk->signal->curr_ip = current->signal->curr_ip;
57662+ tsk->signal->saved_ip = current->signal->saved_ip;
57663+ if (current->exec_file)
57664+ get_file(current->exec_file);
57665+ tsk->exec_file = current->exec_file;
57666+ tsk->is_writable = current->is_writable;
57667+ if (unlikely(current->signal->used_accept)) {
57668+ current->signal->curr_ip = 0;
57669+ current->signal->saved_ip = 0;
57670+ }
57671+
57672+ return;
57673+}
57674+
57675+static void
57676+gr_set_proc_res(struct task_struct *task)
57677+{
57678+ struct acl_subject_label *proc;
57679+ unsigned short i;
57680+
57681+ proc = task->acl;
57682+
57683+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
57684+ return;
57685+
57686+ for (i = 0; i < RLIM_NLIMITS; i++) {
57687+ if (!(proc->resmask & (1 << i)))
57688+ continue;
57689+
57690+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
57691+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
57692+ }
57693+
57694+ return;
57695+}
57696+
57697+extern int __gr_process_user_ban(struct user_struct *user);
57698+
57699+int
57700+gr_check_user_change(int real, int effective, int fs)
57701+{
57702+ unsigned int i;
57703+ __u16 num;
57704+ uid_t *uidlist;
57705+ int curuid;
57706+ int realok = 0;
57707+ int effectiveok = 0;
57708+ int fsok = 0;
57709+
57710+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57711+ struct user_struct *user;
57712+
57713+ if (real == -1)
57714+ goto skipit;
57715+
57716+ user = find_user(real);
57717+ if (user == NULL)
57718+ goto skipit;
57719+
57720+ if (__gr_process_user_ban(user)) {
57721+ /* for find_user */
57722+ free_uid(user);
57723+ return 1;
57724+ }
57725+
57726+ /* for find_user */
57727+ free_uid(user);
57728+
57729+skipit:
57730+#endif
57731+
57732+ if (unlikely(!(gr_status & GR_READY)))
57733+ return 0;
57734+
57735+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57736+ gr_log_learn_id_change('u', real, effective, fs);
57737+
57738+ num = current->acl->user_trans_num;
57739+ uidlist = current->acl->user_transitions;
57740+
57741+ if (uidlist == NULL)
57742+ return 0;
57743+
57744+ if (real == -1)
57745+ realok = 1;
57746+ if (effective == -1)
57747+ effectiveok = 1;
57748+ if (fs == -1)
57749+ fsok = 1;
57750+
57751+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
57752+ for (i = 0; i < num; i++) {
57753+ curuid = (int)uidlist[i];
57754+ if (real == curuid)
57755+ realok = 1;
57756+ if (effective == curuid)
57757+ effectiveok = 1;
57758+ if (fs == curuid)
57759+ fsok = 1;
57760+ }
57761+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
57762+ for (i = 0; i < num; i++) {
57763+ curuid = (int)uidlist[i];
57764+ if (real == curuid)
57765+ break;
57766+ if (effective == curuid)
57767+ break;
57768+ if (fs == curuid)
57769+ break;
57770+ }
57771+ /* not in deny list */
57772+ if (i == num) {
57773+ realok = 1;
57774+ effectiveok = 1;
57775+ fsok = 1;
57776+ }
57777+ }
57778+
57779+ if (realok && effectiveok && fsok)
57780+ return 0;
57781+ else {
57782+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57783+ return 1;
57784+ }
57785+}
57786+
57787+int
57788+gr_check_group_change(int real, int effective, int fs)
57789+{
57790+ unsigned int i;
57791+ __u16 num;
57792+ gid_t *gidlist;
57793+ int curgid;
57794+ int realok = 0;
57795+ int effectiveok = 0;
57796+ int fsok = 0;
57797+
57798+ if (unlikely(!(gr_status & GR_READY)))
57799+ return 0;
57800+
57801+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57802+ gr_log_learn_id_change('g', real, effective, fs);
57803+
57804+ num = current->acl->group_trans_num;
57805+ gidlist = current->acl->group_transitions;
57806+
57807+ if (gidlist == NULL)
57808+ return 0;
57809+
57810+ if (real == -1)
57811+ realok = 1;
57812+ if (effective == -1)
57813+ effectiveok = 1;
57814+ if (fs == -1)
57815+ fsok = 1;
57816+
57817+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
57818+ for (i = 0; i < num; i++) {
57819+ curgid = (int)gidlist[i];
57820+ if (real == curgid)
57821+ realok = 1;
57822+ if (effective == curgid)
57823+ effectiveok = 1;
57824+ if (fs == curgid)
57825+ fsok = 1;
57826+ }
57827+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
57828+ for (i = 0; i < num; i++) {
57829+ curgid = (int)gidlist[i];
57830+ if (real == curgid)
57831+ break;
57832+ if (effective == curgid)
57833+ break;
57834+ if (fs == curgid)
57835+ break;
57836+ }
57837+ /* not in deny list */
57838+ if (i == num) {
57839+ realok = 1;
57840+ effectiveok = 1;
57841+ fsok = 1;
57842+ }
57843+ }
57844+
57845+ if (realok && effectiveok && fsok)
57846+ return 0;
57847+ else {
57848+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57849+ return 1;
57850+ }
57851+}
57852+
57853+void
57854+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
57855+{
57856+ struct acl_role_label *role = task->role;
57857+ struct acl_subject_label *subj = NULL;
57858+ struct acl_object_label *obj;
57859+ struct file *filp;
57860+
57861+ if (unlikely(!(gr_status & GR_READY)))
57862+ return;
57863+
57864+ filp = task->exec_file;
57865+
57866+ /* kernel process, we'll give them the kernel role */
57867+ if (unlikely(!filp)) {
57868+ task->role = kernel_role;
57869+ task->acl = kernel_role->root_label;
57870+ return;
57871+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
57872+ role = lookup_acl_role_label(task, uid, gid);
57873+
57874+ /* perform subject lookup in possibly new role
57875+ we can use this result below in the case where role == task->role
57876+ */
57877+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
57878+
57879+ /* if we changed uid/gid, but result in the same role
57880+ and are using inheritance, don't lose the inherited subject
57881+ if current subject is other than what normal lookup
57882+ would result in, we arrived via inheritance, don't
57883+ lose subject
57884+ */
57885+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
57886+ (subj == task->acl)))
57887+ task->acl = subj;
57888+
57889+ task->role = role;
57890+
57891+ task->is_writable = 0;
57892+
57893+ /* ignore additional mmap checks for processes that are writable
57894+ by the default ACL */
57895+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57896+ if (unlikely(obj->mode & GR_WRITE))
57897+ task->is_writable = 1;
57898+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
57899+ if (unlikely(obj->mode & GR_WRITE))
57900+ task->is_writable = 1;
57901+
57902+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57903+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57904+#endif
57905+
57906+ gr_set_proc_res(task);
57907+
57908+ return;
57909+}
57910+
57911+int
57912+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57913+ const int unsafe_share)
57914+{
57915+ struct task_struct *task = current;
57916+ struct acl_subject_label *newacl;
57917+ struct acl_object_label *obj;
57918+ __u32 retmode;
57919+
57920+ if (unlikely(!(gr_status & GR_READY)))
57921+ return 0;
57922+
57923+ newacl = chk_subj_label(dentry, mnt, task->role);
57924+
57925+ task_lock(task);
57926+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
57927+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
57928+ !(task->role->roletype & GR_ROLE_GOD) &&
57929+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
57930+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
57931+ task_unlock(task);
57932+ if (unsafe_share)
57933+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
57934+ else
57935+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
57936+ return -EACCES;
57937+ }
57938+ task_unlock(task);
57939+
57940+ obj = chk_obj_label(dentry, mnt, task->acl);
57941+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
57942+
57943+ if (!(task->acl->mode & GR_INHERITLEARN) &&
57944+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
57945+ if (obj->nested)
57946+ task->acl = obj->nested;
57947+ else
57948+ task->acl = newacl;
57949+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
57950+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
57951+
57952+ task->is_writable = 0;
57953+
57954+ /* ignore additional mmap checks for processes that are writable
57955+ by the default ACL */
57956+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
57957+ if (unlikely(obj->mode & GR_WRITE))
57958+ task->is_writable = 1;
57959+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
57960+ if (unlikely(obj->mode & GR_WRITE))
57961+ task->is_writable = 1;
57962+
57963+ gr_set_proc_res(task);
57964+
57965+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57966+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57967+#endif
57968+ return 0;
57969+}
57970+
57971+/* always called with valid inodev ptr */
57972+static void
57973+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
57974+{
57975+ struct acl_object_label *matchpo;
57976+ struct acl_subject_label *matchps;
57977+ struct acl_subject_label *subj;
57978+ struct acl_role_label *role;
57979+ unsigned int x;
57980+
57981+ FOR_EACH_ROLE_START(role)
57982+ FOR_EACH_SUBJECT_START(role, subj, x)
57983+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
57984+ matchpo->mode |= GR_DELETED;
57985+ FOR_EACH_SUBJECT_END(subj,x)
57986+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
57987+ if (subj->inode == ino && subj->device == dev)
57988+ subj->mode |= GR_DELETED;
57989+ FOR_EACH_NESTED_SUBJECT_END(subj)
57990+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
57991+ matchps->mode |= GR_DELETED;
57992+ FOR_EACH_ROLE_END(role)
57993+
57994+ inodev->nentry->deleted = 1;
57995+
57996+ return;
57997+}
57998+
57999+void
58000+gr_handle_delete(const ino_t ino, const dev_t dev)
58001+{
58002+ struct inodev_entry *inodev;
58003+
58004+ if (unlikely(!(gr_status & GR_READY)))
58005+ return;
58006+
58007+ write_lock(&gr_inode_lock);
58008+ inodev = lookup_inodev_entry(ino, dev);
58009+ if (inodev != NULL)
58010+ do_handle_delete(inodev, ino, dev);
58011+ write_unlock(&gr_inode_lock);
58012+
58013+ return;
58014+}
58015+
58016+static void
58017+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58018+ const ino_t newinode, const dev_t newdevice,
58019+ struct acl_subject_label *subj)
58020+{
58021+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58022+ struct acl_object_label *match;
58023+
58024+ match = subj->obj_hash[index];
58025+
58026+ while (match && (match->inode != oldinode ||
58027+ match->device != olddevice ||
58028+ !(match->mode & GR_DELETED)))
58029+ match = match->next;
58030+
58031+ if (match && (match->inode == oldinode)
58032+ && (match->device == olddevice)
58033+ && (match->mode & GR_DELETED)) {
58034+ if (match->prev == NULL) {
58035+ subj->obj_hash[index] = match->next;
58036+ if (match->next != NULL)
58037+ match->next->prev = NULL;
58038+ } else {
58039+ match->prev->next = match->next;
58040+ if (match->next != NULL)
58041+ match->next->prev = match->prev;
58042+ }
58043+ match->prev = NULL;
58044+ match->next = NULL;
58045+ match->inode = newinode;
58046+ match->device = newdevice;
58047+ match->mode &= ~GR_DELETED;
58048+
58049+ insert_acl_obj_label(match, subj);
58050+ }
58051+
58052+ return;
58053+}
58054+
58055+static void
58056+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58057+ const ino_t newinode, const dev_t newdevice,
58058+ struct acl_role_label *role)
58059+{
58060+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58061+ struct acl_subject_label *match;
58062+
58063+ match = role->subj_hash[index];
58064+
58065+ while (match && (match->inode != oldinode ||
58066+ match->device != olddevice ||
58067+ !(match->mode & GR_DELETED)))
58068+ match = match->next;
58069+
58070+ if (match && (match->inode == oldinode)
58071+ && (match->device == olddevice)
58072+ && (match->mode & GR_DELETED)) {
58073+ if (match->prev == NULL) {
58074+ role->subj_hash[index] = match->next;
58075+ if (match->next != NULL)
58076+ match->next->prev = NULL;
58077+ } else {
58078+ match->prev->next = match->next;
58079+ if (match->next != NULL)
58080+ match->next->prev = match->prev;
58081+ }
58082+ match->prev = NULL;
58083+ match->next = NULL;
58084+ match->inode = newinode;
58085+ match->device = newdevice;
58086+ match->mode &= ~GR_DELETED;
58087+
58088+ insert_acl_subj_label(match, role);
58089+ }
58090+
58091+ return;
58092+}
58093+
58094+static void
58095+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58096+ const ino_t newinode, const dev_t newdevice)
58097+{
58098+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58099+ struct inodev_entry *match;
58100+
58101+ match = inodev_set.i_hash[index];
58102+
58103+ while (match && (match->nentry->inode != oldinode ||
58104+ match->nentry->device != olddevice || !match->nentry->deleted))
58105+ match = match->next;
58106+
58107+ if (match && (match->nentry->inode == oldinode)
58108+ && (match->nentry->device == olddevice) &&
58109+ match->nentry->deleted) {
58110+ if (match->prev == NULL) {
58111+ inodev_set.i_hash[index] = match->next;
58112+ if (match->next != NULL)
58113+ match->next->prev = NULL;
58114+ } else {
58115+ match->prev->next = match->next;
58116+ if (match->next != NULL)
58117+ match->next->prev = match->prev;
58118+ }
58119+ match->prev = NULL;
58120+ match->next = NULL;
58121+ match->nentry->inode = newinode;
58122+ match->nentry->device = newdevice;
58123+ match->nentry->deleted = 0;
58124+
58125+ insert_inodev_entry(match);
58126+ }
58127+
58128+ return;
58129+}
58130+
58131+static void
58132+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58133+{
58134+ struct acl_subject_label *subj;
58135+ struct acl_role_label *role;
58136+ unsigned int x;
58137+
58138+ FOR_EACH_ROLE_START(role)
58139+ update_acl_subj_label(matchn->inode, matchn->device,
58140+ inode, dev, role);
58141+
58142+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58143+ if ((subj->inode == inode) && (subj->device == dev)) {
58144+ subj->inode = inode;
58145+ subj->device = dev;
58146+ }
58147+ FOR_EACH_NESTED_SUBJECT_END(subj)
58148+ FOR_EACH_SUBJECT_START(role, subj, x)
58149+ update_acl_obj_label(matchn->inode, matchn->device,
58150+ inode, dev, subj);
58151+ FOR_EACH_SUBJECT_END(subj,x)
58152+ FOR_EACH_ROLE_END(role)
58153+
58154+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58155+
58156+ return;
58157+}
58158+
58159+static void
58160+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58161+ const struct vfsmount *mnt)
58162+{
58163+ ino_t ino = dentry->d_inode->i_ino;
58164+ dev_t dev = __get_dev(dentry);
58165+
58166+ __do_handle_create(matchn, ino, dev);
58167+
58168+ return;
58169+}
58170+
58171+void
58172+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58173+{
58174+ struct name_entry *matchn;
58175+
58176+ if (unlikely(!(gr_status & GR_READY)))
58177+ return;
58178+
58179+ preempt_disable();
58180+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58181+
58182+ if (unlikely((unsigned long)matchn)) {
58183+ write_lock(&gr_inode_lock);
58184+ do_handle_create(matchn, dentry, mnt);
58185+ write_unlock(&gr_inode_lock);
58186+ }
58187+ preempt_enable();
58188+
58189+ return;
58190+}
58191+
58192+void
58193+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58194+{
58195+ struct name_entry *matchn;
58196+
58197+ if (unlikely(!(gr_status & GR_READY)))
58198+ return;
58199+
58200+ preempt_disable();
58201+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58202+
58203+ if (unlikely((unsigned long)matchn)) {
58204+ write_lock(&gr_inode_lock);
58205+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58206+ write_unlock(&gr_inode_lock);
58207+ }
58208+ preempt_enable();
58209+
58210+ return;
58211+}
58212+
58213+void
58214+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58215+ struct dentry *old_dentry,
58216+ struct dentry *new_dentry,
58217+ struct vfsmount *mnt, const __u8 replace)
58218+{
58219+ struct name_entry *matchn;
58220+ struct inodev_entry *inodev;
58221+ struct inode *inode = new_dentry->d_inode;
58222+ ino_t oldinode = old_dentry->d_inode->i_ino;
58223+ dev_t olddev = __get_dev(old_dentry);
58224+
58225+ /* vfs_rename swaps the name and parent link for old_dentry and
58226+ new_dentry
58227+ at this point, old_dentry has the new name, parent link, and inode
58228+ for the renamed file
58229+ if a file is being replaced by a rename, new_dentry has the inode
58230+ and name for the replaced file
58231+ */
58232+
58233+ if (unlikely(!(gr_status & GR_READY)))
58234+ return;
58235+
58236+ preempt_disable();
58237+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58238+
58239+ /* we wouldn't have to check d_inode if it weren't for
58240+ NFS silly-renaming
58241+ */
58242+
58243+ write_lock(&gr_inode_lock);
58244+ if (unlikely(replace && inode)) {
58245+ ino_t newinode = inode->i_ino;
58246+ dev_t newdev = __get_dev(new_dentry);
58247+ inodev = lookup_inodev_entry(newinode, newdev);
58248+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58249+ do_handle_delete(inodev, newinode, newdev);
58250+ }
58251+
58252+ inodev = lookup_inodev_entry(oldinode, olddev);
58253+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58254+ do_handle_delete(inodev, oldinode, olddev);
58255+
58256+ if (unlikely((unsigned long)matchn))
58257+ do_handle_create(matchn, old_dentry, mnt);
58258+
58259+ write_unlock(&gr_inode_lock);
58260+ preempt_enable();
58261+
58262+ return;
58263+}
58264+
58265+static int
58266+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58267+ unsigned char **sum)
58268+{
58269+ struct acl_role_label *r;
58270+ struct role_allowed_ip *ipp;
58271+ struct role_transition *trans;
58272+ unsigned int i;
58273+ int found = 0;
58274+ u32 curr_ip = current->signal->curr_ip;
58275+
58276+ current->signal->saved_ip = curr_ip;
58277+
58278+ /* check transition table */
58279+
58280+ for (trans = current->role->transitions; trans; trans = trans->next) {
58281+ if (!strcmp(rolename, trans->rolename)) {
58282+ found = 1;
58283+ break;
58284+ }
58285+ }
58286+
58287+ if (!found)
58288+ return 0;
58289+
58290+ /* handle special roles that do not require authentication
58291+ and check ip */
58292+
58293+ FOR_EACH_ROLE_START(r)
58294+ if (!strcmp(rolename, r->rolename) &&
58295+ (r->roletype & GR_ROLE_SPECIAL)) {
58296+ found = 0;
58297+ if (r->allowed_ips != NULL) {
58298+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58299+ if ((ntohl(curr_ip) & ipp->netmask) ==
58300+ (ntohl(ipp->addr) & ipp->netmask))
58301+ found = 1;
58302+ }
58303+ } else
58304+ found = 2;
58305+ if (!found)
58306+ return 0;
58307+
58308+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58309+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58310+ *salt = NULL;
58311+ *sum = NULL;
58312+ return 1;
58313+ }
58314+ }
58315+ FOR_EACH_ROLE_END(r)
58316+
58317+ for (i = 0; i < num_sprole_pws; i++) {
58318+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58319+ *salt = acl_special_roles[i]->salt;
58320+ *sum = acl_special_roles[i]->sum;
58321+ return 1;
58322+ }
58323+ }
58324+
58325+ return 0;
58326+}
58327+
58328+static void
58329+assign_special_role(char *rolename)
58330+{
58331+ struct acl_object_label *obj;
58332+ struct acl_role_label *r;
58333+ struct acl_role_label *assigned = NULL;
58334+ struct task_struct *tsk;
58335+ struct file *filp;
58336+
58337+ FOR_EACH_ROLE_START(r)
58338+ if (!strcmp(rolename, r->rolename) &&
58339+ (r->roletype & GR_ROLE_SPECIAL)) {
58340+ assigned = r;
58341+ break;
58342+ }
58343+ FOR_EACH_ROLE_END(r)
58344+
58345+ if (!assigned)
58346+ return;
58347+
58348+ read_lock(&tasklist_lock);
58349+ read_lock(&grsec_exec_file_lock);
58350+
58351+ tsk = current->real_parent;
58352+ if (tsk == NULL)
58353+ goto out_unlock;
58354+
58355+ filp = tsk->exec_file;
58356+ if (filp == NULL)
58357+ goto out_unlock;
58358+
58359+ tsk->is_writable = 0;
58360+
58361+ tsk->acl_sp_role = 1;
58362+ tsk->acl_role_id = ++acl_sp_role_value;
58363+ tsk->role = assigned;
58364+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58365+
58366+ /* ignore additional mmap checks for processes that are writable
58367+ by the default ACL */
58368+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58369+ if (unlikely(obj->mode & GR_WRITE))
58370+ tsk->is_writable = 1;
58371+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58372+ if (unlikely(obj->mode & GR_WRITE))
58373+ tsk->is_writable = 1;
58374+
58375+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58376+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58377+#endif
58378+
58379+out_unlock:
58380+ read_unlock(&grsec_exec_file_lock);
58381+ read_unlock(&tasklist_lock);
58382+ return;
58383+}
58384+
58385+int gr_check_secure_terminal(struct task_struct *task)
58386+{
58387+ struct task_struct *p, *p2, *p3;
58388+ struct files_struct *files;
58389+ struct fdtable *fdt;
58390+ struct file *our_file = NULL, *file;
58391+ int i;
58392+
58393+ if (task->signal->tty == NULL)
58394+ return 1;
58395+
58396+ files = get_files_struct(task);
58397+ if (files != NULL) {
58398+ rcu_read_lock();
58399+ fdt = files_fdtable(files);
58400+ for (i=0; i < fdt->max_fds; i++) {
58401+ file = fcheck_files(files, i);
58402+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58403+ get_file(file);
58404+ our_file = file;
58405+ }
58406+ }
58407+ rcu_read_unlock();
58408+ put_files_struct(files);
58409+ }
58410+
58411+ if (our_file == NULL)
58412+ return 1;
58413+
58414+ read_lock(&tasklist_lock);
58415+ do_each_thread(p2, p) {
58416+ files = get_files_struct(p);
58417+ if (files == NULL ||
58418+ (p->signal && p->signal->tty == task->signal->tty)) {
58419+ if (files != NULL)
58420+ put_files_struct(files);
58421+ continue;
58422+ }
58423+ rcu_read_lock();
58424+ fdt = files_fdtable(files);
58425+ for (i=0; i < fdt->max_fds; i++) {
58426+ file = fcheck_files(files, i);
58427+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58428+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58429+ p3 = task;
58430+ while (p3->pid > 0) {
58431+ if (p3 == p)
58432+ break;
58433+ p3 = p3->real_parent;
58434+ }
58435+ if (p3 == p)
58436+ break;
58437+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58438+ gr_handle_alertkill(p);
58439+ rcu_read_unlock();
58440+ put_files_struct(files);
58441+ read_unlock(&tasklist_lock);
58442+ fput(our_file);
58443+ return 0;
58444+ }
58445+ }
58446+ rcu_read_unlock();
58447+ put_files_struct(files);
58448+ } while_each_thread(p2, p);
58449+ read_unlock(&tasklist_lock);
58450+
58451+ fput(our_file);
58452+ return 1;
58453+}
58454+
58455+ssize_t
58456+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58457+{
58458+ struct gr_arg_wrapper uwrap;
58459+ unsigned char *sprole_salt = NULL;
58460+ unsigned char *sprole_sum = NULL;
58461+ int error = sizeof (struct gr_arg_wrapper);
58462+ int error2 = 0;
58463+
58464+ mutex_lock(&gr_dev_mutex);
58465+
58466+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58467+ error = -EPERM;
58468+ goto out;
58469+ }
58470+
58471+ if (count != sizeof (struct gr_arg_wrapper)) {
58472+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58473+ error = -EINVAL;
58474+ goto out;
58475+ }
58476+
58477+
58478+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58479+ gr_auth_expires = 0;
58480+ gr_auth_attempts = 0;
58481+ }
58482+
58483+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58484+ error = -EFAULT;
58485+ goto out;
58486+ }
58487+
58488+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58489+ error = -EINVAL;
58490+ goto out;
58491+ }
58492+
58493+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58494+ error = -EFAULT;
58495+ goto out;
58496+ }
58497+
58498+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58499+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58500+ time_after(gr_auth_expires, get_seconds())) {
58501+ error = -EBUSY;
58502+ goto out;
58503+ }
58504+
58505+ /* if non-root trying to do anything other than use a special role,
58506+ do not attempt authentication, do not count towards authentication
58507+ locking
58508+ */
58509+
58510+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
58511+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58512+ current_uid()) {
58513+ error = -EPERM;
58514+ goto out;
58515+ }
58516+
58517+ /* ensure pw and special role name are null terminated */
58518+
58519+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
58520+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
58521+
58522+ /* Okay.
58523+ * We have our enough of the argument structure..(we have yet
58524+ * to copy_from_user the tables themselves) . Copy the tables
58525+ * only if we need them, i.e. for loading operations. */
58526+
58527+ switch (gr_usermode->mode) {
58528+ case GR_STATUS:
58529+ if (gr_status & GR_READY) {
58530+ error = 1;
58531+ if (!gr_check_secure_terminal(current))
58532+ error = 3;
58533+ } else
58534+ error = 2;
58535+ goto out;
58536+ case GR_SHUTDOWN:
58537+ if ((gr_status & GR_READY)
58538+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58539+ pax_open_kernel();
58540+ gr_status &= ~GR_READY;
58541+ pax_close_kernel();
58542+
58543+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
58544+ free_variables();
58545+ memset(gr_usermode, 0, sizeof (struct gr_arg));
58546+ memset(gr_system_salt, 0, GR_SALT_LEN);
58547+ memset(gr_system_sum, 0, GR_SHA_LEN);
58548+ } else if (gr_status & GR_READY) {
58549+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
58550+ error = -EPERM;
58551+ } else {
58552+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
58553+ error = -EAGAIN;
58554+ }
58555+ break;
58556+ case GR_ENABLE:
58557+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
58558+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
58559+ else {
58560+ if (gr_status & GR_READY)
58561+ error = -EAGAIN;
58562+ else
58563+ error = error2;
58564+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
58565+ }
58566+ break;
58567+ case GR_RELOAD:
58568+ if (!(gr_status & GR_READY)) {
58569+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
58570+ error = -EAGAIN;
58571+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58572+ lock_kernel();
58573+
58574+ pax_open_kernel();
58575+ gr_status &= ~GR_READY;
58576+ pax_close_kernel();
58577+
58578+ free_variables();
58579+ if (!(error2 = gracl_init(gr_usermode))) {
58580+ unlock_kernel();
58581+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
58582+ } else {
58583+ unlock_kernel();
58584+ error = error2;
58585+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58586+ }
58587+ } else {
58588+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58589+ error = -EPERM;
58590+ }
58591+ break;
58592+ case GR_SEGVMOD:
58593+ if (unlikely(!(gr_status & GR_READY))) {
58594+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
58595+ error = -EAGAIN;
58596+ break;
58597+ }
58598+
58599+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58600+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
58601+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
58602+ struct acl_subject_label *segvacl;
58603+ segvacl =
58604+ lookup_acl_subj_label(gr_usermode->segv_inode,
58605+ gr_usermode->segv_device,
58606+ current->role);
58607+ if (segvacl) {
58608+ segvacl->crashes = 0;
58609+ segvacl->expires = 0;
58610+ }
58611+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
58612+ gr_remove_uid(gr_usermode->segv_uid);
58613+ }
58614+ } else {
58615+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
58616+ error = -EPERM;
58617+ }
58618+ break;
58619+ case GR_SPROLE:
58620+ case GR_SPROLEPAM:
58621+ if (unlikely(!(gr_status & GR_READY))) {
58622+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
58623+ error = -EAGAIN;
58624+ break;
58625+ }
58626+
58627+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
58628+ current->role->expires = 0;
58629+ current->role->auth_attempts = 0;
58630+ }
58631+
58632+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58633+ time_after(current->role->expires, get_seconds())) {
58634+ error = -EBUSY;
58635+ goto out;
58636+ }
58637+
58638+ if (lookup_special_role_auth
58639+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
58640+ && ((!sprole_salt && !sprole_sum)
58641+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
58642+ char *p = "";
58643+ assign_special_role(gr_usermode->sp_role);
58644+ read_lock(&tasklist_lock);
58645+ if (current->real_parent)
58646+ p = current->real_parent->role->rolename;
58647+ read_unlock(&tasklist_lock);
58648+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
58649+ p, acl_sp_role_value);
58650+ } else {
58651+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
58652+ error = -EPERM;
58653+ if(!(current->role->auth_attempts++))
58654+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58655+
58656+ goto out;
58657+ }
58658+ break;
58659+ case GR_UNSPROLE:
58660+ if (unlikely(!(gr_status & GR_READY))) {
58661+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
58662+ error = -EAGAIN;
58663+ break;
58664+ }
58665+
58666+ if (current->role->roletype & GR_ROLE_SPECIAL) {
58667+ char *p = "";
58668+ int i = 0;
58669+
58670+ read_lock(&tasklist_lock);
58671+ if (current->real_parent) {
58672+ p = current->real_parent->role->rolename;
58673+ i = current->real_parent->acl_role_id;
58674+ }
58675+ read_unlock(&tasklist_lock);
58676+
58677+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
58678+ gr_set_acls(1);
58679+ } else {
58680+ error = -EPERM;
58681+ goto out;
58682+ }
58683+ break;
58684+ default:
58685+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
58686+ error = -EINVAL;
58687+ break;
58688+ }
58689+
58690+ if (error != -EPERM)
58691+ goto out;
58692+
58693+ if(!(gr_auth_attempts++))
58694+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58695+
58696+ out:
58697+ mutex_unlock(&gr_dev_mutex);
58698+ return error;
58699+}
58700+
58701+/* must be called with
58702+ rcu_read_lock();
58703+ read_lock(&tasklist_lock);
58704+ read_lock(&grsec_exec_file_lock);
58705+*/
58706+int gr_apply_subject_to_task(struct task_struct *task)
58707+{
58708+ struct acl_object_label *obj;
58709+ char *tmpname;
58710+ struct acl_subject_label *tmpsubj;
58711+ struct file *filp;
58712+ struct name_entry *nmatch;
58713+
58714+ filp = task->exec_file;
58715+ if (filp == NULL)
58716+ return 0;
58717+
58718+ /* the following is to apply the correct subject
58719+ on binaries running when the RBAC system
58720+ is enabled, when the binaries have been
58721+ replaced or deleted since their execution
58722+ -----
58723+ when the RBAC system starts, the inode/dev
58724+ from exec_file will be one the RBAC system
58725+ is unaware of. It only knows the inode/dev
58726+ of the present file on disk, or the absence
58727+ of it.
58728+ */
58729+ preempt_disable();
58730+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
58731+
58732+ nmatch = lookup_name_entry(tmpname);
58733+ preempt_enable();
58734+ tmpsubj = NULL;
58735+ if (nmatch) {
58736+ if (nmatch->deleted)
58737+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
58738+ else
58739+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
58740+ if (tmpsubj != NULL)
58741+ task->acl = tmpsubj;
58742+ }
58743+ if (tmpsubj == NULL)
58744+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
58745+ task->role);
58746+ if (task->acl) {
58747+ task->is_writable = 0;
58748+ /* ignore additional mmap checks for processes that are writable
58749+ by the default ACL */
58750+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58751+ if (unlikely(obj->mode & GR_WRITE))
58752+ task->is_writable = 1;
58753+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58754+ if (unlikely(obj->mode & GR_WRITE))
58755+ task->is_writable = 1;
58756+
58757+ gr_set_proc_res(task);
58758+
58759+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58760+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58761+#endif
58762+ } else {
58763+ return 1;
58764+ }
58765+
58766+ return 0;
58767+}
58768+
58769+int
58770+gr_set_acls(const int type)
58771+{
58772+ struct task_struct *task, *task2;
58773+ struct acl_role_label *role = current->role;
58774+ __u16 acl_role_id = current->acl_role_id;
58775+ const struct cred *cred;
58776+ int ret;
58777+
58778+ rcu_read_lock();
58779+ read_lock(&tasklist_lock);
58780+ read_lock(&grsec_exec_file_lock);
58781+ do_each_thread(task2, task) {
58782+ /* check to see if we're called from the exit handler,
58783+ if so, only replace ACLs that have inherited the admin
58784+ ACL */
58785+
58786+ if (type && (task->role != role ||
58787+ task->acl_role_id != acl_role_id))
58788+ continue;
58789+
58790+ task->acl_role_id = 0;
58791+ task->acl_sp_role = 0;
58792+
58793+ if (task->exec_file) {
58794+ cred = __task_cred(task);
58795+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
58796+
58797+ ret = gr_apply_subject_to_task(task);
58798+ if (ret) {
58799+ read_unlock(&grsec_exec_file_lock);
58800+ read_unlock(&tasklist_lock);
58801+ rcu_read_unlock();
58802+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
58803+ return ret;
58804+ }
58805+ } else {
58806+ // it's a kernel process
58807+ task->role = kernel_role;
58808+ task->acl = kernel_role->root_label;
58809+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
58810+ task->acl->mode &= ~GR_PROCFIND;
58811+#endif
58812+ }
58813+ } while_each_thread(task2, task);
58814+ read_unlock(&grsec_exec_file_lock);
58815+ read_unlock(&tasklist_lock);
58816+ rcu_read_unlock();
58817+
58818+ return 0;
58819+}
58820+
58821+void
58822+gr_learn_resource(const struct task_struct *task,
58823+ const int res, const unsigned long wanted, const int gt)
58824+{
58825+ struct acl_subject_label *acl;
58826+ const struct cred *cred;
58827+
58828+ if (unlikely((gr_status & GR_READY) &&
58829+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
58830+ goto skip_reslog;
58831+
58832+#ifdef CONFIG_GRKERNSEC_RESLOG
58833+ gr_log_resource(task, res, wanted, gt);
58834+#endif
58835+ skip_reslog:
58836+
58837+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
58838+ return;
58839+
58840+ acl = task->acl;
58841+
58842+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
58843+ !(acl->resmask & (1 << (unsigned short) res))))
58844+ return;
58845+
58846+ if (wanted >= acl->res[res].rlim_cur) {
58847+ unsigned long res_add;
58848+
58849+ res_add = wanted;
58850+ switch (res) {
58851+ case RLIMIT_CPU:
58852+ res_add += GR_RLIM_CPU_BUMP;
58853+ break;
58854+ case RLIMIT_FSIZE:
58855+ res_add += GR_RLIM_FSIZE_BUMP;
58856+ break;
58857+ case RLIMIT_DATA:
58858+ res_add += GR_RLIM_DATA_BUMP;
58859+ break;
58860+ case RLIMIT_STACK:
58861+ res_add += GR_RLIM_STACK_BUMP;
58862+ break;
58863+ case RLIMIT_CORE:
58864+ res_add += GR_RLIM_CORE_BUMP;
58865+ break;
58866+ case RLIMIT_RSS:
58867+ res_add += GR_RLIM_RSS_BUMP;
58868+ break;
58869+ case RLIMIT_NPROC:
58870+ res_add += GR_RLIM_NPROC_BUMP;
58871+ break;
58872+ case RLIMIT_NOFILE:
58873+ res_add += GR_RLIM_NOFILE_BUMP;
58874+ break;
58875+ case RLIMIT_MEMLOCK:
58876+ res_add += GR_RLIM_MEMLOCK_BUMP;
58877+ break;
58878+ case RLIMIT_AS:
58879+ res_add += GR_RLIM_AS_BUMP;
58880+ break;
58881+ case RLIMIT_LOCKS:
58882+ res_add += GR_RLIM_LOCKS_BUMP;
58883+ break;
58884+ case RLIMIT_SIGPENDING:
58885+ res_add += GR_RLIM_SIGPENDING_BUMP;
58886+ break;
58887+ case RLIMIT_MSGQUEUE:
58888+ res_add += GR_RLIM_MSGQUEUE_BUMP;
58889+ break;
58890+ case RLIMIT_NICE:
58891+ res_add += GR_RLIM_NICE_BUMP;
58892+ break;
58893+ case RLIMIT_RTPRIO:
58894+ res_add += GR_RLIM_RTPRIO_BUMP;
58895+ break;
58896+ case RLIMIT_RTTIME:
58897+ res_add += GR_RLIM_RTTIME_BUMP;
58898+ break;
58899+ }
58900+
58901+ acl->res[res].rlim_cur = res_add;
58902+
58903+ if (wanted > acl->res[res].rlim_max)
58904+ acl->res[res].rlim_max = res_add;
58905+
58906+ /* only log the subject filename, since resource logging is supported for
58907+ single-subject learning only */
58908+ rcu_read_lock();
58909+ cred = __task_cred(task);
58910+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
58911+ task->role->roletype, cred->uid, cred->gid, acl->filename,
58912+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
58913+ "", (unsigned long) res, &task->signal->saved_ip);
58914+ rcu_read_unlock();
58915+ }
58916+
58917+ return;
58918+}
58919+
58920+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
58921+void
58922+pax_set_initial_flags(struct linux_binprm *bprm)
58923+{
58924+ struct task_struct *task = current;
58925+ struct acl_subject_label *proc;
58926+ unsigned long flags;
58927+
58928+ if (unlikely(!(gr_status & GR_READY)))
58929+ return;
58930+
58931+ flags = pax_get_flags(task);
58932+
58933+ proc = task->acl;
58934+
58935+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
58936+ flags &= ~MF_PAX_PAGEEXEC;
58937+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
58938+ flags &= ~MF_PAX_SEGMEXEC;
58939+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
58940+ flags &= ~MF_PAX_RANDMMAP;
58941+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
58942+ flags &= ~MF_PAX_EMUTRAMP;
58943+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
58944+ flags &= ~MF_PAX_MPROTECT;
58945+
58946+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
58947+ flags |= MF_PAX_PAGEEXEC;
58948+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
58949+ flags |= MF_PAX_SEGMEXEC;
58950+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
58951+ flags |= MF_PAX_RANDMMAP;
58952+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
58953+ flags |= MF_PAX_EMUTRAMP;
58954+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
58955+ flags |= MF_PAX_MPROTECT;
58956+
58957+ pax_set_flags(task, flags);
58958+
58959+ return;
58960+}
58961+#endif
58962+
58963+#ifdef CONFIG_SYSCTL
58964+/* Eric Biederman likes breaking userland ABI and every inode-based security
58965+ system to save 35kb of memory */
58966+
58967+/* we modify the passed in filename, but adjust it back before returning */
58968+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
58969+{
58970+ struct name_entry *nmatch;
58971+ char *p, *lastp = NULL;
58972+ struct acl_object_label *obj = NULL, *tmp;
58973+ struct acl_subject_label *tmpsubj;
58974+ char c = '\0';
58975+
58976+ read_lock(&gr_inode_lock);
58977+
58978+ p = name + len - 1;
58979+ do {
58980+ nmatch = lookup_name_entry(name);
58981+ if (lastp != NULL)
58982+ *lastp = c;
58983+
58984+ if (nmatch == NULL)
58985+ goto next_component;
58986+ tmpsubj = current->acl;
58987+ do {
58988+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
58989+ if (obj != NULL) {
58990+ tmp = obj->globbed;
58991+ while (tmp) {
58992+ if (!glob_match(tmp->filename, name)) {
58993+ obj = tmp;
58994+ goto found_obj;
58995+ }
58996+ tmp = tmp->next;
58997+ }
58998+ goto found_obj;
58999+ }
59000+ } while ((tmpsubj = tmpsubj->parent_subject));
59001+next_component:
59002+ /* end case */
59003+ if (p == name)
59004+ break;
59005+
59006+ while (*p != '/')
59007+ p--;
59008+ if (p == name)
59009+ lastp = p + 1;
59010+ else {
59011+ lastp = p;
59012+ p--;
59013+ }
59014+ c = *lastp;
59015+ *lastp = '\0';
59016+ } while (1);
59017+found_obj:
59018+ read_unlock(&gr_inode_lock);
59019+ /* obj returned will always be non-null */
59020+ return obj;
59021+}
59022+
59023+/* returns 0 when allowing, non-zero on error
59024+ op of 0 is used for readdir, so we don't log the names of hidden files
59025+*/
59026+__u32
59027+gr_handle_sysctl(const struct ctl_table *table, const int op)
59028+{
59029+ ctl_table *tmp;
59030+ const char *proc_sys = "/proc/sys";
59031+ char *path;
59032+ struct acl_object_label *obj;
59033+ unsigned short len = 0, pos = 0, depth = 0, i;
59034+ __u32 err = 0;
59035+ __u32 mode = 0;
59036+
59037+ if (unlikely(!(gr_status & GR_READY)))
59038+ return 0;
59039+
59040+ /* for now, ignore operations on non-sysctl entries if it's not a
59041+ readdir*/
59042+ if (table->child != NULL && op != 0)
59043+ return 0;
59044+
59045+ mode |= GR_FIND;
59046+ /* it's only a read if it's an entry, read on dirs is for readdir */
59047+ if (op & MAY_READ)
59048+ mode |= GR_READ;
59049+ if (op & MAY_WRITE)
59050+ mode |= GR_WRITE;
59051+
59052+ preempt_disable();
59053+
59054+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59055+
59056+ /* it's only a read/write if it's an actual entry, not a dir
59057+ (which are opened for readdir)
59058+ */
59059+
59060+ /* convert the requested sysctl entry into a pathname */
59061+
59062+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59063+ len += strlen(tmp->procname);
59064+ len++;
59065+ depth++;
59066+ }
59067+
59068+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59069+ /* deny */
59070+ goto out;
59071+ }
59072+
59073+ memset(path, 0, PAGE_SIZE);
59074+
59075+ memcpy(path, proc_sys, strlen(proc_sys));
59076+
59077+ pos += strlen(proc_sys);
59078+
59079+ for (; depth > 0; depth--) {
59080+ path[pos] = '/';
59081+ pos++;
59082+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59083+ if (depth == i) {
59084+ memcpy(path + pos, tmp->procname,
59085+ strlen(tmp->procname));
59086+ pos += strlen(tmp->procname);
59087+ }
59088+ i++;
59089+ }
59090+ }
59091+
59092+ obj = gr_lookup_by_name(path, pos);
59093+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59094+
59095+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59096+ ((err & mode) != mode))) {
59097+ __u32 new_mode = mode;
59098+
59099+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59100+
59101+ err = 0;
59102+ gr_log_learn_sysctl(path, new_mode);
59103+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59104+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59105+ err = -ENOENT;
59106+ } else if (!(err & GR_FIND)) {
59107+ err = -ENOENT;
59108+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59109+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59110+ path, (mode & GR_READ) ? " reading" : "",
59111+ (mode & GR_WRITE) ? " writing" : "");
59112+ err = -EACCES;
59113+ } else if ((err & mode) != mode) {
59114+ err = -EACCES;
59115+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59116+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59117+ path, (mode & GR_READ) ? " reading" : "",
59118+ (mode & GR_WRITE) ? " writing" : "");
59119+ err = 0;
59120+ } else
59121+ err = 0;
59122+
59123+ out:
59124+ preempt_enable();
59125+
59126+ return err;
59127+}
59128+#endif
59129+
59130+int
59131+gr_handle_proc_ptrace(struct task_struct *task)
59132+{
59133+ struct file *filp;
59134+ struct task_struct *tmp = task;
59135+ struct task_struct *curtemp = current;
59136+ __u32 retmode;
59137+
59138+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59139+ if (unlikely(!(gr_status & GR_READY)))
59140+ return 0;
59141+#endif
59142+
59143+ read_lock(&tasklist_lock);
59144+ read_lock(&grsec_exec_file_lock);
59145+ filp = task->exec_file;
59146+
59147+ while (tmp->pid > 0) {
59148+ if (tmp == curtemp)
59149+ break;
59150+ tmp = tmp->real_parent;
59151+ }
59152+
59153+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59154+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59155+ read_unlock(&grsec_exec_file_lock);
59156+ read_unlock(&tasklist_lock);
59157+ return 1;
59158+ }
59159+
59160+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59161+ if (!(gr_status & GR_READY)) {
59162+ read_unlock(&grsec_exec_file_lock);
59163+ read_unlock(&tasklist_lock);
59164+ return 0;
59165+ }
59166+#endif
59167+
59168+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59169+ read_unlock(&grsec_exec_file_lock);
59170+ read_unlock(&tasklist_lock);
59171+
59172+ if (retmode & GR_NOPTRACE)
59173+ return 1;
59174+
59175+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59176+ && (current->acl != task->acl || (current->acl != current->role->root_label
59177+ && current->pid != task->pid)))
59178+ return 1;
59179+
59180+ return 0;
59181+}
59182+
59183+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59184+{
59185+ if (unlikely(!(gr_status & GR_READY)))
59186+ return;
59187+
59188+ if (!(current->role->roletype & GR_ROLE_GOD))
59189+ return;
59190+
59191+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59192+ p->role->rolename, gr_task_roletype_to_char(p),
59193+ p->acl->filename);
59194+}
59195+
59196+int
59197+gr_handle_ptrace(struct task_struct *task, const long request)
59198+{
59199+ struct task_struct *tmp = task;
59200+ struct task_struct *curtemp = current;
59201+ __u32 retmode;
59202+
59203+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59204+ if (unlikely(!(gr_status & GR_READY)))
59205+ return 0;
59206+#endif
59207+
59208+ read_lock(&tasklist_lock);
59209+ while (tmp->pid > 0) {
59210+ if (tmp == curtemp)
59211+ break;
59212+ tmp = tmp->real_parent;
59213+ }
59214+
59215+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59216+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59217+ read_unlock(&tasklist_lock);
59218+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59219+ return 1;
59220+ }
59221+ read_unlock(&tasklist_lock);
59222+
59223+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59224+ if (!(gr_status & GR_READY))
59225+ return 0;
59226+#endif
59227+
59228+ read_lock(&grsec_exec_file_lock);
59229+ if (unlikely(!task->exec_file)) {
59230+ read_unlock(&grsec_exec_file_lock);
59231+ return 0;
59232+ }
59233+
59234+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59235+ read_unlock(&grsec_exec_file_lock);
59236+
59237+ if (retmode & GR_NOPTRACE) {
59238+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59239+ return 1;
59240+ }
59241+
59242+ if (retmode & GR_PTRACERD) {
59243+ switch (request) {
59244+ case PTRACE_POKETEXT:
59245+ case PTRACE_POKEDATA:
59246+ case PTRACE_POKEUSR:
59247+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59248+ case PTRACE_SETREGS:
59249+ case PTRACE_SETFPREGS:
59250+#endif
59251+#ifdef CONFIG_X86
59252+ case PTRACE_SETFPXREGS:
59253+#endif
59254+#ifdef CONFIG_ALTIVEC
59255+ case PTRACE_SETVRREGS:
59256+#endif
59257+ return 1;
59258+ default:
59259+ return 0;
59260+ }
59261+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59262+ !(current->role->roletype & GR_ROLE_GOD) &&
59263+ (current->acl != task->acl)) {
59264+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59265+ return 1;
59266+ }
59267+
59268+ return 0;
59269+}
59270+
59271+static int is_writable_mmap(const struct file *filp)
59272+{
59273+ struct task_struct *task = current;
59274+ struct acl_object_label *obj, *obj2;
59275+
59276+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59277+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59278+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59279+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59280+ task->role->root_label);
59281+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59282+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59283+ return 1;
59284+ }
59285+ }
59286+ return 0;
59287+}
59288+
59289+int
59290+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59291+{
59292+ __u32 mode;
59293+
59294+ if (unlikely(!file || !(prot & PROT_EXEC)))
59295+ return 1;
59296+
59297+ if (is_writable_mmap(file))
59298+ return 0;
59299+
59300+ mode =
59301+ gr_search_file(file->f_path.dentry,
59302+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59303+ file->f_path.mnt);
59304+
59305+ if (!gr_tpe_allow(file))
59306+ return 0;
59307+
59308+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59309+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59310+ return 0;
59311+ } else if (unlikely(!(mode & GR_EXEC))) {
59312+ return 0;
59313+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59314+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59315+ return 1;
59316+ }
59317+
59318+ return 1;
59319+}
59320+
59321+int
59322+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59323+{
59324+ __u32 mode;
59325+
59326+ if (unlikely(!file || !(prot & PROT_EXEC)))
59327+ return 1;
59328+
59329+ if (is_writable_mmap(file))
59330+ return 0;
59331+
59332+ mode =
59333+ gr_search_file(file->f_path.dentry,
59334+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59335+ file->f_path.mnt);
59336+
59337+ if (!gr_tpe_allow(file))
59338+ return 0;
59339+
59340+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59341+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59342+ return 0;
59343+ } else if (unlikely(!(mode & GR_EXEC))) {
59344+ return 0;
59345+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59346+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59347+ return 1;
59348+ }
59349+
59350+ return 1;
59351+}
59352+
59353+void
59354+gr_acl_handle_psacct(struct task_struct *task, const long code)
59355+{
59356+ unsigned long runtime;
59357+ unsigned long cputime;
59358+ unsigned int wday, cday;
59359+ __u8 whr, chr;
59360+ __u8 wmin, cmin;
59361+ __u8 wsec, csec;
59362+ struct timespec timeval;
59363+
59364+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59365+ !(task->acl->mode & GR_PROCACCT)))
59366+ return;
59367+
59368+ do_posix_clock_monotonic_gettime(&timeval);
59369+ runtime = timeval.tv_sec - task->start_time.tv_sec;
59370+ wday = runtime / (3600 * 24);
59371+ runtime -= wday * (3600 * 24);
59372+ whr = runtime / 3600;
59373+ runtime -= whr * 3600;
59374+ wmin = runtime / 60;
59375+ runtime -= wmin * 60;
59376+ wsec = runtime;
59377+
59378+ cputime = (task->utime + task->stime) / HZ;
59379+ cday = cputime / (3600 * 24);
59380+ cputime -= cday * (3600 * 24);
59381+ chr = cputime / 3600;
59382+ cputime -= chr * 3600;
59383+ cmin = cputime / 60;
59384+ cputime -= cmin * 60;
59385+ csec = cputime;
59386+
59387+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59388+
59389+ return;
59390+}
59391+
59392+void gr_set_kernel_label(struct task_struct *task)
59393+{
59394+ if (gr_status & GR_READY) {
59395+ task->role = kernel_role;
59396+ task->acl = kernel_role->root_label;
59397+ }
59398+ return;
59399+}
59400+
59401+#ifdef CONFIG_TASKSTATS
59402+int gr_is_taskstats_denied(int pid)
59403+{
59404+ struct task_struct *task;
59405+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59406+ const struct cred *cred;
59407+#endif
59408+ int ret = 0;
59409+
59410+ /* restrict taskstats viewing to un-chrooted root users
59411+ who have the 'view' subject flag if the RBAC system is enabled
59412+ */
59413+
59414+ rcu_read_lock();
59415+ read_lock(&tasklist_lock);
59416+ task = find_task_by_vpid(pid);
59417+ if (task) {
59418+#ifdef CONFIG_GRKERNSEC_CHROOT
59419+ if (proc_is_chrooted(task))
59420+ ret = -EACCES;
59421+#endif
59422+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59423+ cred = __task_cred(task);
59424+#ifdef CONFIG_GRKERNSEC_PROC_USER
59425+ if (cred->uid != 0)
59426+ ret = -EACCES;
59427+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59428+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59429+ ret = -EACCES;
59430+#endif
59431+#endif
59432+ if (gr_status & GR_READY) {
59433+ if (!(task->acl->mode & GR_VIEW))
59434+ ret = -EACCES;
59435+ }
59436+ } else
59437+ ret = -ENOENT;
59438+
59439+ read_unlock(&tasklist_lock);
59440+ rcu_read_unlock();
59441+
59442+ return ret;
59443+}
59444+#endif
59445+
59446+/* AUXV entries are filled via a descendant of search_binary_handler
59447+ after we've already applied the subject for the target
59448+*/
59449+int gr_acl_enable_at_secure(void)
59450+{
59451+ if (unlikely(!(gr_status & GR_READY)))
59452+ return 0;
59453+
59454+ if (current->acl->mode & GR_ATSECURE)
59455+ return 1;
59456+
59457+ return 0;
59458+}
59459+
59460+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59461+{
59462+ struct task_struct *task = current;
59463+ struct dentry *dentry = file->f_path.dentry;
59464+ struct vfsmount *mnt = file->f_path.mnt;
59465+ struct acl_object_label *obj, *tmp;
59466+ struct acl_subject_label *subj;
59467+ unsigned int bufsize;
59468+ int is_not_root;
59469+ char *path;
59470+ dev_t dev = __get_dev(dentry);
59471+
59472+ if (unlikely(!(gr_status & GR_READY)))
59473+ return 1;
59474+
59475+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59476+ return 1;
59477+
59478+ /* ignore Eric Biederman */
59479+ if (IS_PRIVATE(dentry->d_inode))
59480+ return 1;
59481+
59482+ subj = task->acl;
59483+ do {
59484+ obj = lookup_acl_obj_label(ino, dev, subj);
59485+ if (obj != NULL)
59486+ return (obj->mode & GR_FIND) ? 1 : 0;
59487+ } while ((subj = subj->parent_subject));
59488+
59489+ /* this is purely an optimization since we're looking for an object
59490+ for the directory we're doing a readdir on
59491+ if it's possible for any globbed object to match the entry we're
59492+ filling into the directory, then the object we find here will be
59493+ an anchor point with attached globbed objects
59494+ */
59495+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59496+ if (obj->globbed == NULL)
59497+ return (obj->mode & GR_FIND) ? 1 : 0;
59498+
59499+ is_not_root = ((obj->filename[0] == '/') &&
59500+ (obj->filename[1] == '\0')) ? 0 : 1;
59501+ bufsize = PAGE_SIZE - namelen - is_not_root;
59502+
59503+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
59504+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
59505+ return 1;
59506+
59507+ preempt_disable();
59508+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59509+ bufsize);
59510+
59511+ bufsize = strlen(path);
59512+
59513+ /* if base is "/", don't append an additional slash */
59514+ if (is_not_root)
59515+ *(path + bufsize) = '/';
59516+ memcpy(path + bufsize + is_not_root, name, namelen);
59517+ *(path + bufsize + namelen + is_not_root) = '\0';
59518+
59519+ tmp = obj->globbed;
59520+ while (tmp) {
59521+ if (!glob_match(tmp->filename, path)) {
59522+ preempt_enable();
59523+ return (tmp->mode & GR_FIND) ? 1 : 0;
59524+ }
59525+ tmp = tmp->next;
59526+ }
59527+ preempt_enable();
59528+ return (obj->mode & GR_FIND) ? 1 : 0;
59529+}
59530+
59531+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
59532+EXPORT_SYMBOL(gr_acl_is_enabled);
59533+#endif
59534+EXPORT_SYMBOL(gr_learn_resource);
59535+EXPORT_SYMBOL(gr_set_kernel_label);
59536+#ifdef CONFIG_SECURITY
59537+EXPORT_SYMBOL(gr_check_user_change);
59538+EXPORT_SYMBOL(gr_check_group_change);
59539+#endif
59540+
59541diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
59542new file mode 100644
59543index 0000000..34fefda
59544--- /dev/null
59545+++ b/grsecurity/gracl_alloc.c
59546@@ -0,0 +1,105 @@
59547+#include <linux/kernel.h>
59548+#include <linux/mm.h>
59549+#include <linux/slab.h>
59550+#include <linux/vmalloc.h>
59551+#include <linux/gracl.h>
59552+#include <linux/grsecurity.h>
59553+
59554+static unsigned long alloc_stack_next = 1;
59555+static unsigned long alloc_stack_size = 1;
59556+static void **alloc_stack;
59557+
59558+static __inline__ int
59559+alloc_pop(void)
59560+{
59561+ if (alloc_stack_next == 1)
59562+ return 0;
59563+
59564+ kfree(alloc_stack[alloc_stack_next - 2]);
59565+
59566+ alloc_stack_next--;
59567+
59568+ return 1;
59569+}
59570+
59571+static __inline__ int
59572+alloc_push(void *buf)
59573+{
59574+ if (alloc_stack_next >= alloc_stack_size)
59575+ return 1;
59576+
59577+ alloc_stack[alloc_stack_next - 1] = buf;
59578+
59579+ alloc_stack_next++;
59580+
59581+ return 0;
59582+}
59583+
59584+void *
59585+acl_alloc(unsigned long len)
59586+{
59587+ void *ret = NULL;
59588+
59589+ if (!len || len > PAGE_SIZE)
59590+ goto out;
59591+
59592+ ret = kmalloc(len, GFP_KERNEL);
59593+
59594+ if (ret) {
59595+ if (alloc_push(ret)) {
59596+ kfree(ret);
59597+ ret = NULL;
59598+ }
59599+ }
59600+
59601+out:
59602+ return ret;
59603+}
59604+
59605+void *
59606+acl_alloc_num(unsigned long num, unsigned long len)
59607+{
59608+ if (!len || (num > (PAGE_SIZE / len)))
59609+ return NULL;
59610+
59611+ return acl_alloc(num * len);
59612+}
59613+
59614+void
59615+acl_free_all(void)
59616+{
59617+ if (gr_acl_is_enabled() || !alloc_stack)
59618+ return;
59619+
59620+ while (alloc_pop()) ;
59621+
59622+ if (alloc_stack) {
59623+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
59624+ kfree(alloc_stack);
59625+ else
59626+ vfree(alloc_stack);
59627+ }
59628+
59629+ alloc_stack = NULL;
59630+ alloc_stack_size = 1;
59631+ alloc_stack_next = 1;
59632+
59633+ return;
59634+}
59635+
59636+int
59637+acl_alloc_stack_init(unsigned long size)
59638+{
59639+ if ((size * sizeof (void *)) <= PAGE_SIZE)
59640+ alloc_stack =
59641+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
59642+ else
59643+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
59644+
59645+ alloc_stack_size = size;
59646+
59647+ if (!alloc_stack)
59648+ return 0;
59649+ else
59650+ return 1;
59651+}
59652diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
59653new file mode 100644
59654index 0000000..955ddfb
59655--- /dev/null
59656+++ b/grsecurity/gracl_cap.c
59657@@ -0,0 +1,101 @@
59658+#include <linux/kernel.h>
59659+#include <linux/module.h>
59660+#include <linux/sched.h>
59661+#include <linux/gracl.h>
59662+#include <linux/grsecurity.h>
59663+#include <linux/grinternal.h>
59664+
59665+extern const char *captab_log[];
59666+extern int captab_log_entries;
59667+
59668+int
59669+gr_acl_is_capable(const int cap)
59670+{
59671+ struct task_struct *task = current;
59672+ const struct cred *cred = current_cred();
59673+ struct acl_subject_label *curracl;
59674+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59675+ kernel_cap_t cap_audit = __cap_empty_set;
59676+
59677+ if (!gr_acl_is_enabled())
59678+ return 1;
59679+
59680+ curracl = task->acl;
59681+
59682+ cap_drop = curracl->cap_lower;
59683+ cap_mask = curracl->cap_mask;
59684+ cap_audit = curracl->cap_invert_audit;
59685+
59686+ while ((curracl = curracl->parent_subject)) {
59687+ /* if the cap isn't specified in the current computed mask but is specified in the
59688+ current level subject, and is lowered in the current level subject, then add
59689+ it to the set of dropped capabilities
59690+ otherwise, add the current level subject's mask to the current computed mask
59691+ */
59692+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59693+ cap_raise(cap_mask, cap);
59694+ if (cap_raised(curracl->cap_lower, cap))
59695+ cap_raise(cap_drop, cap);
59696+ if (cap_raised(curracl->cap_invert_audit, cap))
59697+ cap_raise(cap_audit, cap);
59698+ }
59699+ }
59700+
59701+ if (!cap_raised(cap_drop, cap)) {
59702+ if (cap_raised(cap_audit, cap))
59703+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
59704+ return 1;
59705+ }
59706+
59707+ curracl = task->acl;
59708+
59709+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
59710+ && cap_raised(cred->cap_effective, cap)) {
59711+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59712+ task->role->roletype, cred->uid,
59713+ cred->gid, task->exec_file ?
59714+ gr_to_filename(task->exec_file->f_path.dentry,
59715+ task->exec_file->f_path.mnt) : curracl->filename,
59716+ curracl->filename, 0UL,
59717+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
59718+ return 1;
59719+ }
59720+
59721+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
59722+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
59723+ return 0;
59724+}
59725+
59726+int
59727+gr_acl_is_capable_nolog(const int cap)
59728+{
59729+ struct acl_subject_label *curracl;
59730+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59731+
59732+ if (!gr_acl_is_enabled())
59733+ return 1;
59734+
59735+ curracl = current->acl;
59736+
59737+ cap_drop = curracl->cap_lower;
59738+ cap_mask = curracl->cap_mask;
59739+
59740+ while ((curracl = curracl->parent_subject)) {
59741+ /* if the cap isn't specified in the current computed mask but is specified in the
59742+ current level subject, and is lowered in the current level subject, then add
59743+ it to the set of dropped capabilities
59744+ otherwise, add the current level subject's mask to the current computed mask
59745+ */
59746+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59747+ cap_raise(cap_mask, cap);
59748+ if (cap_raised(curracl->cap_lower, cap))
59749+ cap_raise(cap_drop, cap);
59750+ }
59751+ }
59752+
59753+ if (!cap_raised(cap_drop, cap))
59754+ return 1;
59755+
59756+ return 0;
59757+}
59758+
59759diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
59760new file mode 100644
59761index 0000000..d5f210c
59762--- /dev/null
59763+++ b/grsecurity/gracl_fs.c
59764@@ -0,0 +1,433 @@
59765+#include <linux/kernel.h>
59766+#include <linux/sched.h>
59767+#include <linux/types.h>
59768+#include <linux/fs.h>
59769+#include <linux/file.h>
59770+#include <linux/stat.h>
59771+#include <linux/grsecurity.h>
59772+#include <linux/grinternal.h>
59773+#include <linux/gracl.h>
59774+
59775+__u32
59776+gr_acl_handle_hidden_file(const struct dentry * dentry,
59777+ const struct vfsmount * mnt)
59778+{
59779+ __u32 mode;
59780+
59781+ if (unlikely(!dentry->d_inode))
59782+ return GR_FIND;
59783+
59784+ mode =
59785+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
59786+
59787+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
59788+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59789+ return mode;
59790+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
59791+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59792+ return 0;
59793+ } else if (unlikely(!(mode & GR_FIND)))
59794+ return 0;
59795+
59796+ return GR_FIND;
59797+}
59798+
59799+__u32
59800+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59801+ int acc_mode)
59802+{
59803+ __u32 reqmode = GR_FIND;
59804+ __u32 mode;
59805+
59806+ if (unlikely(!dentry->d_inode))
59807+ return reqmode;
59808+
59809+ if (acc_mode & MAY_APPEND)
59810+ reqmode |= GR_APPEND;
59811+ else if (acc_mode & MAY_WRITE)
59812+ reqmode |= GR_WRITE;
59813+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
59814+ reqmode |= GR_READ;
59815+
59816+ mode =
59817+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59818+ mnt);
59819+
59820+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59821+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59822+ reqmode & GR_READ ? " reading" : "",
59823+ reqmode & GR_WRITE ? " writing" : reqmode &
59824+ GR_APPEND ? " appending" : "");
59825+ return reqmode;
59826+ } else
59827+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59828+ {
59829+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59830+ reqmode & GR_READ ? " reading" : "",
59831+ reqmode & GR_WRITE ? " writing" : reqmode &
59832+ GR_APPEND ? " appending" : "");
59833+ return 0;
59834+ } else if (unlikely((mode & reqmode) != reqmode))
59835+ return 0;
59836+
59837+ return reqmode;
59838+}
59839+
59840+__u32
59841+gr_acl_handle_creat(const struct dentry * dentry,
59842+ const struct dentry * p_dentry,
59843+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59844+ const int imode)
59845+{
59846+ __u32 reqmode = GR_WRITE | GR_CREATE;
59847+ __u32 mode;
59848+
59849+ if (acc_mode & MAY_APPEND)
59850+ reqmode |= GR_APPEND;
59851+ // if a directory was required or the directory already exists, then
59852+ // don't count this open as a read
59853+ if ((acc_mode & MAY_READ) &&
59854+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
59855+ reqmode |= GR_READ;
59856+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
59857+ reqmode |= GR_SETID;
59858+
59859+ mode =
59860+ gr_check_create(dentry, p_dentry, p_mnt,
59861+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59862+
59863+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59864+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59865+ reqmode & GR_READ ? " reading" : "",
59866+ reqmode & GR_WRITE ? " writing" : reqmode &
59867+ GR_APPEND ? " appending" : "");
59868+ return reqmode;
59869+ } else
59870+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59871+ {
59872+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59873+ reqmode & GR_READ ? " reading" : "",
59874+ reqmode & GR_WRITE ? " writing" : reqmode &
59875+ GR_APPEND ? " appending" : "");
59876+ return 0;
59877+ } else if (unlikely((mode & reqmode) != reqmode))
59878+ return 0;
59879+
59880+ return reqmode;
59881+}
59882+
59883+__u32
59884+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
59885+ const int fmode)
59886+{
59887+ __u32 mode, reqmode = GR_FIND;
59888+
59889+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
59890+ reqmode |= GR_EXEC;
59891+ if (fmode & S_IWOTH)
59892+ reqmode |= GR_WRITE;
59893+ if (fmode & S_IROTH)
59894+ reqmode |= GR_READ;
59895+
59896+ mode =
59897+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59898+ mnt);
59899+
59900+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59901+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59902+ reqmode & GR_READ ? " reading" : "",
59903+ reqmode & GR_WRITE ? " writing" : "",
59904+ reqmode & GR_EXEC ? " executing" : "");
59905+ return reqmode;
59906+ } else
59907+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59908+ {
59909+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59910+ reqmode & GR_READ ? " reading" : "",
59911+ reqmode & GR_WRITE ? " writing" : "",
59912+ reqmode & GR_EXEC ? " executing" : "");
59913+ return 0;
59914+ } else if (unlikely((mode & reqmode) != reqmode))
59915+ return 0;
59916+
59917+ return reqmode;
59918+}
59919+
59920+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
59921+{
59922+ __u32 mode;
59923+
59924+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
59925+
59926+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59927+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
59928+ return mode;
59929+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59930+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
59931+ return 0;
59932+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
59933+ return 0;
59934+
59935+ return (reqmode);
59936+}
59937+
59938+__u32
59939+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59940+{
59941+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
59942+}
59943+
59944+__u32
59945+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
59946+{
59947+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
59948+}
59949+
59950+__u32
59951+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
59952+{
59953+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
59954+}
59955+
59956+__u32
59957+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
59958+{
59959+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
59960+}
59961+
59962+__u32
59963+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
59964+ mode_t mode)
59965+{
59966+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
59967+ return 1;
59968+
59969+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59970+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59971+ GR_FCHMOD_ACL_MSG);
59972+ } else {
59973+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
59974+ }
59975+}
59976+
59977+__u32
59978+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
59979+ mode_t mode)
59980+{
59981+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59982+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59983+ GR_CHMOD_ACL_MSG);
59984+ } else {
59985+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
59986+ }
59987+}
59988+
59989+__u32
59990+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
59991+{
59992+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
59993+}
59994+
59995+__u32
59996+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
59997+{
59998+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
59999+}
60000+
60001+__u32
60002+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60003+{
60004+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60005+}
60006+
60007+__u32
60008+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60009+{
60010+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60011+ GR_UNIXCONNECT_ACL_MSG);
60012+}
60013+
60014+/* hardlinks require at minimum create and link permission,
60015+ any additional privilege required is based on the
60016+ privilege of the file being linked to
60017+*/
60018+__u32
60019+gr_acl_handle_link(const struct dentry * new_dentry,
60020+ const struct dentry * parent_dentry,
60021+ const struct vfsmount * parent_mnt,
60022+ const struct dentry * old_dentry,
60023+ const struct vfsmount * old_mnt, const char *to)
60024+{
60025+ __u32 mode;
60026+ __u32 needmode = GR_CREATE | GR_LINK;
60027+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60028+
60029+ mode =
60030+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60031+ old_mnt);
60032+
60033+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60034+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60035+ return mode;
60036+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60037+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60038+ return 0;
60039+ } else if (unlikely((mode & needmode) != needmode))
60040+ return 0;
60041+
60042+ return 1;
60043+}
60044+
60045+__u32
60046+gr_acl_handle_symlink(const struct dentry * new_dentry,
60047+ const struct dentry * parent_dentry,
60048+ const struct vfsmount * parent_mnt, const char *from)
60049+{
60050+ __u32 needmode = GR_WRITE | GR_CREATE;
60051+ __u32 mode;
60052+
60053+ mode =
60054+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
60055+ GR_CREATE | GR_AUDIT_CREATE |
60056+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60057+
60058+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60059+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60060+ return mode;
60061+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60062+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60063+ return 0;
60064+ } else if (unlikely((mode & needmode) != needmode))
60065+ return 0;
60066+
60067+ return (GR_WRITE | GR_CREATE);
60068+}
60069+
60070+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60071+{
60072+ __u32 mode;
60073+
60074+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60075+
60076+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60077+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60078+ return mode;
60079+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60080+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60081+ return 0;
60082+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60083+ return 0;
60084+
60085+ return (reqmode);
60086+}
60087+
60088+__u32
60089+gr_acl_handle_mknod(const struct dentry * new_dentry,
60090+ const struct dentry * parent_dentry,
60091+ const struct vfsmount * parent_mnt,
60092+ const int mode)
60093+{
60094+ __u32 reqmode = GR_WRITE | GR_CREATE;
60095+ if (unlikely(mode & (S_ISUID | S_ISGID)))
60096+ reqmode |= GR_SETID;
60097+
60098+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60099+ reqmode, GR_MKNOD_ACL_MSG);
60100+}
60101+
60102+__u32
60103+gr_acl_handle_mkdir(const struct dentry *new_dentry,
60104+ const struct dentry *parent_dentry,
60105+ const struct vfsmount *parent_mnt)
60106+{
60107+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60108+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60109+}
60110+
60111+#define RENAME_CHECK_SUCCESS(old, new) \
60112+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60113+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60114+
60115+int
60116+gr_acl_handle_rename(struct dentry *new_dentry,
60117+ struct dentry *parent_dentry,
60118+ const struct vfsmount *parent_mnt,
60119+ struct dentry *old_dentry,
60120+ struct inode *old_parent_inode,
60121+ struct vfsmount *old_mnt, const char *newname)
60122+{
60123+ __u32 comp1, comp2;
60124+ int error = 0;
60125+
60126+ if (unlikely(!gr_acl_is_enabled()))
60127+ return 0;
60128+
60129+ if (!new_dentry->d_inode) {
60130+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60131+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60132+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60133+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60134+ GR_DELETE | GR_AUDIT_DELETE |
60135+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60136+ GR_SUPPRESS, old_mnt);
60137+ } else {
60138+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60139+ GR_CREATE | GR_DELETE |
60140+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60141+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60142+ GR_SUPPRESS, parent_mnt);
60143+ comp2 =
60144+ gr_search_file(old_dentry,
60145+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60146+ GR_DELETE | GR_AUDIT_DELETE |
60147+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60148+ }
60149+
60150+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60151+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60152+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60153+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60154+ && !(comp2 & GR_SUPPRESS)) {
60155+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60156+ error = -EACCES;
60157+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60158+ error = -EACCES;
60159+
60160+ return error;
60161+}
60162+
60163+void
60164+gr_acl_handle_exit(void)
60165+{
60166+ u16 id;
60167+ char *rolename;
60168+ struct file *exec_file;
60169+
60170+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60171+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60172+ id = current->acl_role_id;
60173+ rolename = current->role->rolename;
60174+ gr_set_acls(1);
60175+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60176+ }
60177+
60178+ write_lock(&grsec_exec_file_lock);
60179+ exec_file = current->exec_file;
60180+ current->exec_file = NULL;
60181+ write_unlock(&grsec_exec_file_lock);
60182+
60183+ if (exec_file)
60184+ fput(exec_file);
60185+}
60186+
60187+int
60188+gr_acl_handle_procpidmem(const struct task_struct *task)
60189+{
60190+ if (unlikely(!gr_acl_is_enabled()))
60191+ return 0;
60192+
60193+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60194+ return -EACCES;
60195+
60196+ return 0;
60197+}
60198diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60199new file mode 100644
60200index 0000000..cd07b96
60201--- /dev/null
60202+++ b/grsecurity/gracl_ip.c
60203@@ -0,0 +1,382 @@
60204+#include <linux/kernel.h>
60205+#include <asm/uaccess.h>
60206+#include <asm/errno.h>
60207+#include <net/sock.h>
60208+#include <linux/file.h>
60209+#include <linux/fs.h>
60210+#include <linux/net.h>
60211+#include <linux/in.h>
60212+#include <linux/skbuff.h>
60213+#include <linux/ip.h>
60214+#include <linux/udp.h>
60215+#include <linux/smp_lock.h>
60216+#include <linux/types.h>
60217+#include <linux/sched.h>
60218+#include <linux/netdevice.h>
60219+#include <linux/inetdevice.h>
60220+#include <linux/gracl.h>
60221+#include <linux/grsecurity.h>
60222+#include <linux/grinternal.h>
60223+
60224+#define GR_BIND 0x01
60225+#define GR_CONNECT 0x02
60226+#define GR_INVERT 0x04
60227+#define GR_BINDOVERRIDE 0x08
60228+#define GR_CONNECTOVERRIDE 0x10
60229+#define GR_SOCK_FAMILY 0x20
60230+
60231+static const char * gr_protocols[IPPROTO_MAX] = {
60232+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60233+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60234+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60235+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60236+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60237+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60238+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60239+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60240+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60241+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60242+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60243+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60244+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60245+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60246+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60247+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60248+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60249+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60250+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60251+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60252+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60253+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60254+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60255+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60256+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60257+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60258+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60259+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60260+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60261+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60262+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60263+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60264+ };
60265+
60266+static const char * gr_socktypes[SOCK_MAX] = {
60267+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60268+ "unknown:7", "unknown:8", "unknown:9", "packet"
60269+ };
60270+
60271+static const char * gr_sockfamilies[AF_MAX+1] = {
60272+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60273+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60274+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60275+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60276+ };
60277+
60278+const char *
60279+gr_proto_to_name(unsigned char proto)
60280+{
60281+ return gr_protocols[proto];
60282+}
60283+
60284+const char *
60285+gr_socktype_to_name(unsigned char type)
60286+{
60287+ return gr_socktypes[type];
60288+}
60289+
60290+const char *
60291+gr_sockfamily_to_name(unsigned char family)
60292+{
60293+ return gr_sockfamilies[family];
60294+}
60295+
60296+int
60297+gr_search_socket(const int domain, const int type, const int protocol)
60298+{
60299+ struct acl_subject_label *curr;
60300+ const struct cred *cred = current_cred();
60301+
60302+ if (unlikely(!gr_acl_is_enabled()))
60303+ goto exit;
60304+
60305+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60306+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60307+ goto exit; // let the kernel handle it
60308+
60309+ curr = current->acl;
60310+
60311+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60312+ /* the family is allowed, if this is PF_INET allow it only if
60313+ the extra sock type/protocol checks pass */
60314+ if (domain == PF_INET)
60315+ goto inet_check;
60316+ goto exit;
60317+ } else {
60318+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60319+ __u32 fakeip = 0;
60320+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60321+ current->role->roletype, cred->uid,
60322+ cred->gid, current->exec_file ?
60323+ gr_to_filename(current->exec_file->f_path.dentry,
60324+ current->exec_file->f_path.mnt) :
60325+ curr->filename, curr->filename,
60326+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60327+ &current->signal->saved_ip);
60328+ goto exit;
60329+ }
60330+ goto exit_fail;
60331+ }
60332+
60333+inet_check:
60334+ /* the rest of this checking is for IPv4 only */
60335+ if (!curr->ips)
60336+ goto exit;
60337+
60338+ if ((curr->ip_type & (1 << type)) &&
60339+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60340+ goto exit;
60341+
60342+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60343+ /* we don't place acls on raw sockets , and sometimes
60344+ dgram/ip sockets are opened for ioctl and not
60345+ bind/connect, so we'll fake a bind learn log */
60346+ if (type == SOCK_RAW || type == SOCK_PACKET) {
60347+ __u32 fakeip = 0;
60348+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60349+ current->role->roletype, cred->uid,
60350+ cred->gid, current->exec_file ?
60351+ gr_to_filename(current->exec_file->f_path.dentry,
60352+ current->exec_file->f_path.mnt) :
60353+ curr->filename, curr->filename,
60354+ &fakeip, 0, type,
60355+ protocol, GR_CONNECT, &current->signal->saved_ip);
60356+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60357+ __u32 fakeip = 0;
60358+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60359+ current->role->roletype, cred->uid,
60360+ cred->gid, current->exec_file ?
60361+ gr_to_filename(current->exec_file->f_path.dentry,
60362+ current->exec_file->f_path.mnt) :
60363+ curr->filename, curr->filename,
60364+ &fakeip, 0, type,
60365+ protocol, GR_BIND, &current->signal->saved_ip);
60366+ }
60367+ /* we'll log when they use connect or bind */
60368+ goto exit;
60369+ }
60370+
60371+exit_fail:
60372+ if (domain == PF_INET)
60373+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60374+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
60375+ else
60376+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60377+ gr_socktype_to_name(type), protocol);
60378+
60379+ return 0;
60380+exit:
60381+ return 1;
60382+}
60383+
60384+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60385+{
60386+ if ((ip->mode & mode) &&
60387+ (ip_port >= ip->low) &&
60388+ (ip_port <= ip->high) &&
60389+ ((ntohl(ip_addr) & our_netmask) ==
60390+ (ntohl(our_addr) & our_netmask))
60391+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60392+ && (ip->type & (1 << type))) {
60393+ if (ip->mode & GR_INVERT)
60394+ return 2; // specifically denied
60395+ else
60396+ return 1; // allowed
60397+ }
60398+
60399+ return 0; // not specifically allowed, may continue parsing
60400+}
60401+
60402+static int
60403+gr_search_connectbind(const int full_mode, struct sock *sk,
60404+ struct sockaddr_in *addr, const int type)
60405+{
60406+ char iface[IFNAMSIZ] = {0};
60407+ struct acl_subject_label *curr;
60408+ struct acl_ip_label *ip;
60409+ struct inet_sock *isk;
60410+ struct net_device *dev;
60411+ struct in_device *idev;
60412+ unsigned long i;
60413+ int ret;
60414+ int mode = full_mode & (GR_BIND | GR_CONNECT);
60415+ __u32 ip_addr = 0;
60416+ __u32 our_addr;
60417+ __u32 our_netmask;
60418+ char *p;
60419+ __u16 ip_port = 0;
60420+ const struct cred *cred = current_cred();
60421+
60422+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60423+ return 0;
60424+
60425+ curr = current->acl;
60426+ isk = inet_sk(sk);
60427+
60428+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60429+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60430+ addr->sin_addr.s_addr = curr->inaddr_any_override;
60431+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60432+ struct sockaddr_in saddr;
60433+ int err;
60434+
60435+ saddr.sin_family = AF_INET;
60436+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
60437+ saddr.sin_port = isk->sport;
60438+
60439+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60440+ if (err)
60441+ return err;
60442+
60443+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60444+ if (err)
60445+ return err;
60446+ }
60447+
60448+ if (!curr->ips)
60449+ return 0;
60450+
60451+ ip_addr = addr->sin_addr.s_addr;
60452+ ip_port = ntohs(addr->sin_port);
60453+
60454+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60455+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60456+ current->role->roletype, cred->uid,
60457+ cred->gid, current->exec_file ?
60458+ gr_to_filename(current->exec_file->f_path.dentry,
60459+ current->exec_file->f_path.mnt) :
60460+ curr->filename, curr->filename,
60461+ &ip_addr, ip_port, type,
60462+ sk->sk_protocol, mode, &current->signal->saved_ip);
60463+ return 0;
60464+ }
60465+
60466+ for (i = 0; i < curr->ip_num; i++) {
60467+ ip = *(curr->ips + i);
60468+ if (ip->iface != NULL) {
60469+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
60470+ p = strchr(iface, ':');
60471+ if (p != NULL)
60472+ *p = '\0';
60473+ dev = dev_get_by_name(sock_net(sk), iface);
60474+ if (dev == NULL)
60475+ continue;
60476+ idev = in_dev_get(dev);
60477+ if (idev == NULL) {
60478+ dev_put(dev);
60479+ continue;
60480+ }
60481+ rcu_read_lock();
60482+ for_ifa(idev) {
60483+ if (!strcmp(ip->iface, ifa->ifa_label)) {
60484+ our_addr = ifa->ifa_address;
60485+ our_netmask = 0xffffffff;
60486+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60487+ if (ret == 1) {
60488+ rcu_read_unlock();
60489+ in_dev_put(idev);
60490+ dev_put(dev);
60491+ return 0;
60492+ } else if (ret == 2) {
60493+ rcu_read_unlock();
60494+ in_dev_put(idev);
60495+ dev_put(dev);
60496+ goto denied;
60497+ }
60498+ }
60499+ } endfor_ifa(idev);
60500+ rcu_read_unlock();
60501+ in_dev_put(idev);
60502+ dev_put(dev);
60503+ } else {
60504+ our_addr = ip->addr;
60505+ our_netmask = ip->netmask;
60506+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60507+ if (ret == 1)
60508+ return 0;
60509+ else if (ret == 2)
60510+ goto denied;
60511+ }
60512+ }
60513+
60514+denied:
60515+ if (mode == GR_BIND)
60516+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60517+ else if (mode == GR_CONNECT)
60518+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60519+
60520+ return -EACCES;
60521+}
60522+
60523+int
60524+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
60525+{
60526+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
60527+}
60528+
60529+int
60530+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
60531+{
60532+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
60533+}
60534+
60535+int gr_search_listen(struct socket *sock)
60536+{
60537+ struct sock *sk = sock->sk;
60538+ struct sockaddr_in addr;
60539+
60540+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60541+ addr.sin_port = inet_sk(sk)->sport;
60542+
60543+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60544+}
60545+
60546+int gr_search_accept(struct socket *sock)
60547+{
60548+ struct sock *sk = sock->sk;
60549+ struct sockaddr_in addr;
60550+
60551+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60552+ addr.sin_port = inet_sk(sk)->sport;
60553+
60554+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60555+}
60556+
60557+int
60558+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
60559+{
60560+ if (addr)
60561+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
60562+ else {
60563+ struct sockaddr_in sin;
60564+ const struct inet_sock *inet = inet_sk(sk);
60565+
60566+ sin.sin_addr.s_addr = inet->daddr;
60567+ sin.sin_port = inet->dport;
60568+
60569+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60570+ }
60571+}
60572+
60573+int
60574+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
60575+{
60576+ struct sockaddr_in sin;
60577+
60578+ if (unlikely(skb->len < sizeof (struct udphdr)))
60579+ return 0; // skip this packet
60580+
60581+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
60582+ sin.sin_port = udp_hdr(skb)->source;
60583+
60584+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60585+}
60586diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
60587new file mode 100644
60588index 0000000..34bdd46
60589--- /dev/null
60590+++ b/grsecurity/gracl_learn.c
60591@@ -0,0 +1,208 @@
60592+#include <linux/kernel.h>
60593+#include <linux/mm.h>
60594+#include <linux/sched.h>
60595+#include <linux/poll.h>
60596+#include <linux/smp_lock.h>
60597+#include <linux/string.h>
60598+#include <linux/file.h>
60599+#include <linux/types.h>
60600+#include <linux/vmalloc.h>
60601+#include <linux/grinternal.h>
60602+
60603+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
60604+ size_t count, loff_t *ppos);
60605+extern int gr_acl_is_enabled(void);
60606+
60607+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
60608+static int gr_learn_attached;
60609+
60610+/* use a 512k buffer */
60611+#define LEARN_BUFFER_SIZE (512 * 1024)
60612+
60613+static DEFINE_SPINLOCK(gr_learn_lock);
60614+static DEFINE_MUTEX(gr_learn_user_mutex);
60615+
60616+/* we need to maintain two buffers, so that the kernel context of grlearn
60617+ uses a semaphore around the userspace copying, and the other kernel contexts
60618+ use a spinlock when copying into the buffer, since they cannot sleep
60619+*/
60620+static char *learn_buffer;
60621+static char *learn_buffer_user;
60622+static int learn_buffer_len;
60623+static int learn_buffer_user_len;
60624+
60625+static ssize_t
60626+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
60627+{
60628+ DECLARE_WAITQUEUE(wait, current);
60629+ ssize_t retval = 0;
60630+
60631+ add_wait_queue(&learn_wait, &wait);
60632+ set_current_state(TASK_INTERRUPTIBLE);
60633+ do {
60634+ mutex_lock(&gr_learn_user_mutex);
60635+ spin_lock(&gr_learn_lock);
60636+ if (learn_buffer_len)
60637+ break;
60638+ spin_unlock(&gr_learn_lock);
60639+ mutex_unlock(&gr_learn_user_mutex);
60640+ if (file->f_flags & O_NONBLOCK) {
60641+ retval = -EAGAIN;
60642+ goto out;
60643+ }
60644+ if (signal_pending(current)) {
60645+ retval = -ERESTARTSYS;
60646+ goto out;
60647+ }
60648+
60649+ schedule();
60650+ } while (1);
60651+
60652+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
60653+ learn_buffer_user_len = learn_buffer_len;
60654+ retval = learn_buffer_len;
60655+ learn_buffer_len = 0;
60656+
60657+ spin_unlock(&gr_learn_lock);
60658+
60659+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
60660+ retval = -EFAULT;
60661+
60662+ mutex_unlock(&gr_learn_user_mutex);
60663+out:
60664+ set_current_state(TASK_RUNNING);
60665+ remove_wait_queue(&learn_wait, &wait);
60666+ return retval;
60667+}
60668+
60669+static unsigned int
60670+poll_learn(struct file * file, poll_table * wait)
60671+{
60672+ poll_wait(file, &learn_wait, wait);
60673+
60674+ if (learn_buffer_len)
60675+ return (POLLIN | POLLRDNORM);
60676+
60677+ return 0;
60678+}
60679+
60680+void
60681+gr_clear_learn_entries(void)
60682+{
60683+ char *tmp;
60684+
60685+ mutex_lock(&gr_learn_user_mutex);
60686+ spin_lock(&gr_learn_lock);
60687+ tmp = learn_buffer;
60688+ learn_buffer = NULL;
60689+ spin_unlock(&gr_learn_lock);
60690+ if (tmp)
60691+ vfree(tmp);
60692+ if (learn_buffer_user != NULL) {
60693+ vfree(learn_buffer_user);
60694+ learn_buffer_user = NULL;
60695+ }
60696+ learn_buffer_len = 0;
60697+ mutex_unlock(&gr_learn_user_mutex);
60698+
60699+ return;
60700+}
60701+
60702+void
60703+gr_add_learn_entry(const char *fmt, ...)
60704+{
60705+ va_list args;
60706+ unsigned int len;
60707+
60708+ if (!gr_learn_attached)
60709+ return;
60710+
60711+ spin_lock(&gr_learn_lock);
60712+
60713+ /* leave a gap at the end so we know when it's "full" but don't have to
60714+ compute the exact length of the string we're trying to append
60715+ */
60716+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
60717+ spin_unlock(&gr_learn_lock);
60718+ wake_up_interruptible(&learn_wait);
60719+ return;
60720+ }
60721+ if (learn_buffer == NULL) {
60722+ spin_unlock(&gr_learn_lock);
60723+ return;
60724+ }
60725+
60726+ va_start(args, fmt);
60727+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
60728+ va_end(args);
60729+
60730+ learn_buffer_len += len + 1;
60731+
60732+ spin_unlock(&gr_learn_lock);
60733+ wake_up_interruptible(&learn_wait);
60734+
60735+ return;
60736+}
60737+
60738+static int
60739+open_learn(struct inode *inode, struct file *file)
60740+{
60741+ if (file->f_mode & FMODE_READ && gr_learn_attached)
60742+ return -EBUSY;
60743+ if (file->f_mode & FMODE_READ) {
60744+ int retval = 0;
60745+ mutex_lock(&gr_learn_user_mutex);
60746+ if (learn_buffer == NULL)
60747+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
60748+ if (learn_buffer_user == NULL)
60749+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
60750+ if (learn_buffer == NULL) {
60751+ retval = -ENOMEM;
60752+ goto out_error;
60753+ }
60754+ if (learn_buffer_user == NULL) {
60755+ retval = -ENOMEM;
60756+ goto out_error;
60757+ }
60758+ learn_buffer_len = 0;
60759+ learn_buffer_user_len = 0;
60760+ gr_learn_attached = 1;
60761+out_error:
60762+ mutex_unlock(&gr_learn_user_mutex);
60763+ return retval;
60764+ }
60765+ return 0;
60766+}
60767+
60768+static int
60769+close_learn(struct inode *inode, struct file *file)
60770+{
60771+ if (file->f_mode & FMODE_READ) {
60772+ char *tmp = NULL;
60773+ mutex_lock(&gr_learn_user_mutex);
60774+ spin_lock(&gr_learn_lock);
60775+ tmp = learn_buffer;
60776+ learn_buffer = NULL;
60777+ spin_unlock(&gr_learn_lock);
60778+ if (tmp)
60779+ vfree(tmp);
60780+ if (learn_buffer_user != NULL) {
60781+ vfree(learn_buffer_user);
60782+ learn_buffer_user = NULL;
60783+ }
60784+ learn_buffer_len = 0;
60785+ learn_buffer_user_len = 0;
60786+ gr_learn_attached = 0;
60787+ mutex_unlock(&gr_learn_user_mutex);
60788+ }
60789+
60790+ return 0;
60791+}
60792+
60793+const struct file_operations grsec_fops = {
60794+ .read = read_learn,
60795+ .write = write_grsec_handler,
60796+ .open = open_learn,
60797+ .release = close_learn,
60798+ .poll = poll_learn,
60799+};
60800diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
60801new file mode 100644
60802index 0000000..70b2179
60803--- /dev/null
60804+++ b/grsecurity/gracl_res.c
60805@@ -0,0 +1,67 @@
60806+#include <linux/kernel.h>
60807+#include <linux/sched.h>
60808+#include <linux/gracl.h>
60809+#include <linux/grinternal.h>
60810+
60811+static const char *restab_log[] = {
60812+ [RLIMIT_CPU] = "RLIMIT_CPU",
60813+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
60814+ [RLIMIT_DATA] = "RLIMIT_DATA",
60815+ [RLIMIT_STACK] = "RLIMIT_STACK",
60816+ [RLIMIT_CORE] = "RLIMIT_CORE",
60817+ [RLIMIT_RSS] = "RLIMIT_RSS",
60818+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
60819+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
60820+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
60821+ [RLIMIT_AS] = "RLIMIT_AS",
60822+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
60823+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
60824+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
60825+ [RLIMIT_NICE] = "RLIMIT_NICE",
60826+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
60827+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
60828+ [GR_CRASH_RES] = "RLIMIT_CRASH"
60829+};
60830+
60831+void
60832+gr_log_resource(const struct task_struct *task,
60833+ const int res, const unsigned long wanted, const int gt)
60834+{
60835+ const struct cred *cred;
60836+ unsigned long rlim;
60837+
60838+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
60839+ return;
60840+
60841+ // not yet supported resource
60842+ if (unlikely(!restab_log[res]))
60843+ return;
60844+
60845+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
60846+ rlim = task->signal->rlim[res].rlim_max;
60847+ else
60848+ rlim = task->signal->rlim[res].rlim_cur;
60849+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
60850+ return;
60851+
60852+ rcu_read_lock();
60853+ cred = __task_cred(task);
60854+
60855+ if (res == RLIMIT_NPROC &&
60856+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
60857+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
60858+ goto out_rcu_unlock;
60859+ else if (res == RLIMIT_MEMLOCK &&
60860+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
60861+ goto out_rcu_unlock;
60862+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
60863+ goto out_rcu_unlock;
60864+ rcu_read_unlock();
60865+
60866+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
60867+
60868+ return;
60869+out_rcu_unlock:
60870+ rcu_read_unlock();
60871+ return;
60872+}
60873diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
60874new file mode 100644
60875index 0000000..1d1b734
60876--- /dev/null
60877+++ b/grsecurity/gracl_segv.c
60878@@ -0,0 +1,284 @@
60879+#include <linux/kernel.h>
60880+#include <linux/mm.h>
60881+#include <asm/uaccess.h>
60882+#include <asm/errno.h>
60883+#include <asm/mman.h>
60884+#include <net/sock.h>
60885+#include <linux/file.h>
60886+#include <linux/fs.h>
60887+#include <linux/net.h>
60888+#include <linux/in.h>
60889+#include <linux/smp_lock.h>
60890+#include <linux/slab.h>
60891+#include <linux/types.h>
60892+#include <linux/sched.h>
60893+#include <linux/timer.h>
60894+#include <linux/gracl.h>
60895+#include <linux/grsecurity.h>
60896+#include <linux/grinternal.h>
60897+
60898+static struct crash_uid *uid_set;
60899+static unsigned short uid_used;
60900+static DEFINE_SPINLOCK(gr_uid_lock);
60901+extern rwlock_t gr_inode_lock;
60902+extern struct acl_subject_label *
60903+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
60904+ struct acl_role_label *role);
60905+extern int gr_fake_force_sig(int sig, struct task_struct *t);
60906+
60907+int
60908+gr_init_uidset(void)
60909+{
60910+ uid_set =
60911+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
60912+ uid_used = 0;
60913+
60914+ return uid_set ? 1 : 0;
60915+}
60916+
60917+void
60918+gr_free_uidset(void)
60919+{
60920+ if (uid_set)
60921+ kfree(uid_set);
60922+
60923+ return;
60924+}
60925+
60926+int
60927+gr_find_uid(const uid_t uid)
60928+{
60929+ struct crash_uid *tmp = uid_set;
60930+ uid_t buid;
60931+ int low = 0, high = uid_used - 1, mid;
60932+
60933+ while (high >= low) {
60934+ mid = (low + high) >> 1;
60935+ buid = tmp[mid].uid;
60936+ if (buid == uid)
60937+ return mid;
60938+ if (buid > uid)
60939+ high = mid - 1;
60940+ if (buid < uid)
60941+ low = mid + 1;
60942+ }
60943+
60944+ return -1;
60945+}
60946+
60947+static __inline__ void
60948+gr_insertsort(void)
60949+{
60950+ unsigned short i, j;
60951+ struct crash_uid index;
60952+
60953+ for (i = 1; i < uid_used; i++) {
60954+ index = uid_set[i];
60955+ j = i;
60956+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
60957+ uid_set[j] = uid_set[j - 1];
60958+ j--;
60959+ }
60960+ uid_set[j] = index;
60961+ }
60962+
60963+ return;
60964+}
60965+
60966+static __inline__ void
60967+gr_insert_uid(const uid_t uid, const unsigned long expires)
60968+{
60969+ int loc;
60970+
60971+ if (uid_used == GR_UIDTABLE_MAX)
60972+ return;
60973+
60974+ loc = gr_find_uid(uid);
60975+
60976+ if (loc >= 0) {
60977+ uid_set[loc].expires = expires;
60978+ return;
60979+ }
60980+
60981+ uid_set[uid_used].uid = uid;
60982+ uid_set[uid_used].expires = expires;
60983+ uid_used++;
60984+
60985+ gr_insertsort();
60986+
60987+ return;
60988+}
60989+
60990+void
60991+gr_remove_uid(const unsigned short loc)
60992+{
60993+ unsigned short i;
60994+
60995+ for (i = loc + 1; i < uid_used; i++)
60996+ uid_set[i - 1] = uid_set[i];
60997+
60998+ uid_used--;
60999+
61000+ return;
61001+}
61002+
61003+int
61004+gr_check_crash_uid(const uid_t uid)
61005+{
61006+ int loc;
61007+ int ret = 0;
61008+
61009+ if (unlikely(!gr_acl_is_enabled()))
61010+ return 0;
61011+
61012+ spin_lock(&gr_uid_lock);
61013+ loc = gr_find_uid(uid);
61014+
61015+ if (loc < 0)
61016+ goto out_unlock;
61017+
61018+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
61019+ gr_remove_uid(loc);
61020+ else
61021+ ret = 1;
61022+
61023+out_unlock:
61024+ spin_unlock(&gr_uid_lock);
61025+ return ret;
61026+}
61027+
61028+static __inline__ int
61029+proc_is_setxid(const struct cred *cred)
61030+{
61031+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
61032+ cred->uid != cred->fsuid)
61033+ return 1;
61034+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61035+ cred->gid != cred->fsgid)
61036+ return 1;
61037+
61038+ return 0;
61039+}
61040+
61041+void
61042+gr_handle_crash(struct task_struct *task, const int sig)
61043+{
61044+ struct acl_subject_label *curr;
61045+ struct task_struct *tsk, *tsk2;
61046+ const struct cred *cred;
61047+ const struct cred *cred2;
61048+
61049+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61050+ return;
61051+
61052+ if (unlikely(!gr_acl_is_enabled()))
61053+ return;
61054+
61055+ curr = task->acl;
61056+
61057+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
61058+ return;
61059+
61060+ if (time_before_eq(curr->expires, get_seconds())) {
61061+ curr->expires = 0;
61062+ curr->crashes = 0;
61063+ }
61064+
61065+ curr->crashes++;
61066+
61067+ if (!curr->expires)
61068+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61069+
61070+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61071+ time_after(curr->expires, get_seconds())) {
61072+ rcu_read_lock();
61073+ cred = __task_cred(task);
61074+ if (cred->uid && proc_is_setxid(cred)) {
61075+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61076+ spin_lock(&gr_uid_lock);
61077+ gr_insert_uid(cred->uid, curr->expires);
61078+ spin_unlock(&gr_uid_lock);
61079+ curr->expires = 0;
61080+ curr->crashes = 0;
61081+ read_lock(&tasklist_lock);
61082+ do_each_thread(tsk2, tsk) {
61083+ cred2 = __task_cred(tsk);
61084+ if (tsk != task && cred2->uid == cred->uid)
61085+ gr_fake_force_sig(SIGKILL, tsk);
61086+ } while_each_thread(tsk2, tsk);
61087+ read_unlock(&tasklist_lock);
61088+ } else {
61089+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61090+ read_lock(&tasklist_lock);
61091+ read_lock(&grsec_exec_file_lock);
61092+ do_each_thread(tsk2, tsk) {
61093+ if (likely(tsk != task)) {
61094+ // if this thread has the same subject as the one that triggered
61095+ // RES_CRASH and it's the same binary, kill it
61096+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61097+ gr_fake_force_sig(SIGKILL, tsk);
61098+ }
61099+ } while_each_thread(tsk2, tsk);
61100+ read_unlock(&grsec_exec_file_lock);
61101+ read_unlock(&tasklist_lock);
61102+ }
61103+ rcu_read_unlock();
61104+ }
61105+
61106+ return;
61107+}
61108+
61109+int
61110+gr_check_crash_exec(const struct file *filp)
61111+{
61112+ struct acl_subject_label *curr;
61113+
61114+ if (unlikely(!gr_acl_is_enabled()))
61115+ return 0;
61116+
61117+ read_lock(&gr_inode_lock);
61118+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61119+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61120+ current->role);
61121+ read_unlock(&gr_inode_lock);
61122+
61123+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61124+ (!curr->crashes && !curr->expires))
61125+ return 0;
61126+
61127+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61128+ time_after(curr->expires, get_seconds()))
61129+ return 1;
61130+ else if (time_before_eq(curr->expires, get_seconds())) {
61131+ curr->crashes = 0;
61132+ curr->expires = 0;
61133+ }
61134+
61135+ return 0;
61136+}
61137+
61138+void
61139+gr_handle_alertkill(struct task_struct *task)
61140+{
61141+ struct acl_subject_label *curracl;
61142+ __u32 curr_ip;
61143+ struct task_struct *p, *p2;
61144+
61145+ if (unlikely(!gr_acl_is_enabled()))
61146+ return;
61147+
61148+ curracl = task->acl;
61149+ curr_ip = task->signal->curr_ip;
61150+
61151+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61152+ read_lock(&tasklist_lock);
61153+ do_each_thread(p2, p) {
61154+ if (p->signal->curr_ip == curr_ip)
61155+ gr_fake_force_sig(SIGKILL, p);
61156+ } while_each_thread(p2, p);
61157+ read_unlock(&tasklist_lock);
61158+ } else if (curracl->mode & GR_KILLPROC)
61159+ gr_fake_force_sig(SIGKILL, task);
61160+
61161+ return;
61162+}
61163diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61164new file mode 100644
61165index 0000000..9d83a69
61166--- /dev/null
61167+++ b/grsecurity/gracl_shm.c
61168@@ -0,0 +1,40 @@
61169+#include <linux/kernel.h>
61170+#include <linux/mm.h>
61171+#include <linux/sched.h>
61172+#include <linux/file.h>
61173+#include <linux/ipc.h>
61174+#include <linux/gracl.h>
61175+#include <linux/grsecurity.h>
61176+#include <linux/grinternal.h>
61177+
61178+int
61179+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61180+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61181+{
61182+ struct task_struct *task;
61183+
61184+ if (!gr_acl_is_enabled())
61185+ return 1;
61186+
61187+ rcu_read_lock();
61188+ read_lock(&tasklist_lock);
61189+
61190+ task = find_task_by_vpid(shm_cprid);
61191+
61192+ if (unlikely(!task))
61193+ task = find_task_by_vpid(shm_lapid);
61194+
61195+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61196+ (task->pid == shm_lapid)) &&
61197+ (task->acl->mode & GR_PROTSHM) &&
61198+ (task->acl != current->acl))) {
61199+ read_unlock(&tasklist_lock);
61200+ rcu_read_unlock();
61201+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61202+ return 0;
61203+ }
61204+ read_unlock(&tasklist_lock);
61205+ rcu_read_unlock();
61206+
61207+ return 1;
61208+}
61209diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61210new file mode 100644
61211index 0000000..bc0be01
61212--- /dev/null
61213+++ b/grsecurity/grsec_chdir.c
61214@@ -0,0 +1,19 @@
61215+#include <linux/kernel.h>
61216+#include <linux/sched.h>
61217+#include <linux/fs.h>
61218+#include <linux/file.h>
61219+#include <linux/grsecurity.h>
61220+#include <linux/grinternal.h>
61221+
61222+void
61223+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61224+{
61225+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61226+ if ((grsec_enable_chdir && grsec_enable_group &&
61227+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61228+ !grsec_enable_group)) {
61229+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61230+ }
61231+#endif
61232+ return;
61233+}
61234diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61235new file mode 100644
61236index 0000000..197bdd5
61237--- /dev/null
61238+++ b/grsecurity/grsec_chroot.c
61239@@ -0,0 +1,386 @@
61240+#include <linux/kernel.h>
61241+#include <linux/module.h>
61242+#include <linux/sched.h>
61243+#include <linux/file.h>
61244+#include <linux/fs.h>
61245+#include <linux/mount.h>
61246+#include <linux/types.h>
61247+#include <linux/pid_namespace.h>
61248+#include <linux/grsecurity.h>
61249+#include <linux/grinternal.h>
61250+
61251+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61252+{
61253+#ifdef CONFIG_GRKERNSEC
61254+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61255+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61256+ task->gr_is_chrooted = 1;
61257+ else
61258+ task->gr_is_chrooted = 0;
61259+
61260+ task->gr_chroot_dentry = path->dentry;
61261+#endif
61262+ return;
61263+}
61264+
61265+void gr_clear_chroot_entries(struct task_struct *task)
61266+{
61267+#ifdef CONFIG_GRKERNSEC
61268+ task->gr_is_chrooted = 0;
61269+ task->gr_chroot_dentry = NULL;
61270+#endif
61271+ return;
61272+}
61273+
61274+int
61275+gr_handle_chroot_unix(const pid_t pid)
61276+{
61277+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61278+ struct task_struct *p;
61279+
61280+ if (unlikely(!grsec_enable_chroot_unix))
61281+ return 1;
61282+
61283+ if (likely(!proc_is_chrooted(current)))
61284+ return 1;
61285+
61286+ rcu_read_lock();
61287+ read_lock(&tasklist_lock);
61288+
61289+ p = find_task_by_vpid_unrestricted(pid);
61290+ if (unlikely(p && !have_same_root(current, p))) {
61291+ read_unlock(&tasklist_lock);
61292+ rcu_read_unlock();
61293+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61294+ return 0;
61295+ }
61296+ read_unlock(&tasklist_lock);
61297+ rcu_read_unlock();
61298+#endif
61299+ return 1;
61300+}
61301+
61302+int
61303+gr_handle_chroot_nice(void)
61304+{
61305+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61306+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61307+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61308+ return -EPERM;
61309+ }
61310+#endif
61311+ return 0;
61312+}
61313+
61314+int
61315+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61316+{
61317+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61318+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61319+ && proc_is_chrooted(current)) {
61320+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61321+ return -EACCES;
61322+ }
61323+#endif
61324+ return 0;
61325+}
61326+
61327+int
61328+gr_handle_chroot_rawio(const struct inode *inode)
61329+{
61330+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61331+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61332+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61333+ return 1;
61334+#endif
61335+ return 0;
61336+}
61337+
61338+int
61339+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61340+{
61341+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61342+ struct task_struct *p;
61343+ int ret = 0;
61344+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61345+ return ret;
61346+
61347+ read_lock(&tasklist_lock);
61348+ do_each_pid_task(pid, type, p) {
61349+ if (!have_same_root(current, p)) {
61350+ ret = 1;
61351+ goto out;
61352+ }
61353+ } while_each_pid_task(pid, type, p);
61354+out:
61355+ read_unlock(&tasklist_lock);
61356+ return ret;
61357+#endif
61358+ return 0;
61359+}
61360+
61361+int
61362+gr_pid_is_chrooted(struct task_struct *p)
61363+{
61364+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61365+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61366+ return 0;
61367+
61368+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61369+ !have_same_root(current, p)) {
61370+ return 1;
61371+ }
61372+#endif
61373+ return 0;
61374+}
61375+
61376+EXPORT_SYMBOL(gr_pid_is_chrooted);
61377+
61378+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61379+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61380+{
61381+ struct dentry *dentry = (struct dentry *)u_dentry;
61382+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61383+ struct dentry *realroot;
61384+ struct vfsmount *realrootmnt;
61385+ struct dentry *currentroot;
61386+ struct vfsmount *currentmnt;
61387+ struct task_struct *reaper = &init_task;
61388+ int ret = 1;
61389+
61390+ read_lock(&reaper->fs->lock);
61391+ realrootmnt = mntget(reaper->fs->root.mnt);
61392+ realroot = dget(reaper->fs->root.dentry);
61393+ read_unlock(&reaper->fs->lock);
61394+
61395+ read_lock(&current->fs->lock);
61396+ currentmnt = mntget(current->fs->root.mnt);
61397+ currentroot = dget(current->fs->root.dentry);
61398+ read_unlock(&current->fs->lock);
61399+
61400+ spin_lock(&dcache_lock);
61401+ for (;;) {
61402+ if (unlikely((dentry == realroot && mnt == realrootmnt)
61403+ || (dentry == currentroot && mnt == currentmnt)))
61404+ break;
61405+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61406+ if (mnt->mnt_parent == mnt)
61407+ break;
61408+ dentry = mnt->mnt_mountpoint;
61409+ mnt = mnt->mnt_parent;
61410+ continue;
61411+ }
61412+ dentry = dentry->d_parent;
61413+ }
61414+ spin_unlock(&dcache_lock);
61415+
61416+ dput(currentroot);
61417+ mntput(currentmnt);
61418+
61419+ /* access is outside of chroot */
61420+ if (dentry == realroot && mnt == realrootmnt)
61421+ ret = 0;
61422+
61423+ dput(realroot);
61424+ mntput(realrootmnt);
61425+ return ret;
61426+}
61427+#endif
61428+
61429+int
61430+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61431+{
61432+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61433+ if (!grsec_enable_chroot_fchdir)
61434+ return 1;
61435+
61436+ if (!proc_is_chrooted(current))
61437+ return 1;
61438+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61439+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61440+ return 0;
61441+ }
61442+#endif
61443+ return 1;
61444+}
61445+
61446+int
61447+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61448+ const time_t shm_createtime)
61449+{
61450+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61451+ struct task_struct *p;
61452+ time_t starttime;
61453+
61454+ if (unlikely(!grsec_enable_chroot_shmat))
61455+ return 1;
61456+
61457+ if (likely(!proc_is_chrooted(current)))
61458+ return 1;
61459+
61460+ rcu_read_lock();
61461+ read_lock(&tasklist_lock);
61462+
61463+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61464+ starttime = p->start_time.tv_sec;
61465+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61466+ if (have_same_root(current, p)) {
61467+ goto allow;
61468+ } else {
61469+ read_unlock(&tasklist_lock);
61470+ rcu_read_unlock();
61471+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61472+ return 0;
61473+ }
61474+ }
61475+ /* creator exited, pid reuse, fall through to next check */
61476+ }
61477+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61478+ if (unlikely(!have_same_root(current, p))) {
61479+ read_unlock(&tasklist_lock);
61480+ rcu_read_unlock();
61481+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61482+ return 0;
61483+ }
61484+ }
61485+
61486+allow:
61487+ read_unlock(&tasklist_lock);
61488+ rcu_read_unlock();
61489+#endif
61490+ return 1;
61491+}
61492+
61493+void
61494+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61495+{
61496+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61497+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61498+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61499+#endif
61500+ return;
61501+}
61502+
61503+int
61504+gr_handle_chroot_mknod(const struct dentry *dentry,
61505+ const struct vfsmount *mnt, const int mode)
61506+{
61507+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61508+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
61509+ proc_is_chrooted(current)) {
61510+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
61511+ return -EPERM;
61512+ }
61513+#endif
61514+ return 0;
61515+}
61516+
61517+int
61518+gr_handle_chroot_mount(const struct dentry *dentry,
61519+ const struct vfsmount *mnt, const char *dev_name)
61520+{
61521+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61522+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
61523+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
61524+ return -EPERM;
61525+ }
61526+#endif
61527+ return 0;
61528+}
61529+
61530+int
61531+gr_handle_chroot_pivot(void)
61532+{
61533+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61534+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
61535+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
61536+ return -EPERM;
61537+ }
61538+#endif
61539+ return 0;
61540+}
61541+
61542+int
61543+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
61544+{
61545+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61546+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
61547+ !gr_is_outside_chroot(dentry, mnt)) {
61548+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
61549+ return -EPERM;
61550+ }
61551+#endif
61552+ return 0;
61553+}
61554+
61555+extern const char *captab_log[];
61556+extern int captab_log_entries;
61557+
61558+int
61559+gr_chroot_is_capable(const int cap)
61560+{
61561+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61562+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61563+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61564+ if (cap_raised(chroot_caps, cap)) {
61565+ const struct cred *creds = current_cred();
61566+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
61567+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
61568+ }
61569+ return 0;
61570+ }
61571+ }
61572+#endif
61573+ return 1;
61574+}
61575+
61576+int
61577+gr_chroot_is_capable_nolog(const int cap)
61578+{
61579+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61580+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61581+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61582+ if (cap_raised(chroot_caps, cap)) {
61583+ return 0;
61584+ }
61585+ }
61586+#endif
61587+ return 1;
61588+}
61589+
61590+int
61591+gr_handle_chroot_sysctl(const int op)
61592+{
61593+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61594+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
61595+ && (op & MAY_WRITE))
61596+ return -EACCES;
61597+#endif
61598+ return 0;
61599+}
61600+
61601+void
61602+gr_handle_chroot_chdir(struct path *path)
61603+{
61604+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61605+ if (grsec_enable_chroot_chdir)
61606+ set_fs_pwd(current->fs, path);
61607+#endif
61608+ return;
61609+}
61610+
61611+int
61612+gr_handle_chroot_chmod(const struct dentry *dentry,
61613+ const struct vfsmount *mnt, const int mode)
61614+{
61615+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61616+ /* allow chmod +s on directories, but not on files */
61617+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
61618+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
61619+ proc_is_chrooted(current)) {
61620+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
61621+ return -EPERM;
61622+ }
61623+#endif
61624+ return 0;
61625+}
61626diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
61627new file mode 100644
61628index 0000000..b81db5b
61629--- /dev/null
61630+++ b/grsecurity/grsec_disabled.c
61631@@ -0,0 +1,439 @@
61632+#include <linux/kernel.h>
61633+#include <linux/module.h>
61634+#include <linux/sched.h>
61635+#include <linux/file.h>
61636+#include <linux/fs.h>
61637+#include <linux/kdev_t.h>
61638+#include <linux/net.h>
61639+#include <linux/in.h>
61640+#include <linux/ip.h>
61641+#include <linux/skbuff.h>
61642+#include <linux/sysctl.h>
61643+
61644+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61645+void
61646+pax_set_initial_flags(struct linux_binprm *bprm)
61647+{
61648+ return;
61649+}
61650+#endif
61651+
61652+#ifdef CONFIG_SYSCTL
61653+__u32
61654+gr_handle_sysctl(const struct ctl_table * table, const int op)
61655+{
61656+ return 0;
61657+}
61658+#endif
61659+
61660+#ifdef CONFIG_TASKSTATS
61661+int gr_is_taskstats_denied(int pid)
61662+{
61663+ return 0;
61664+}
61665+#endif
61666+
61667+int
61668+gr_acl_is_enabled(void)
61669+{
61670+ return 0;
61671+}
61672+
61673+void
61674+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61675+{
61676+ return;
61677+}
61678+
61679+int
61680+gr_handle_rawio(const struct inode *inode)
61681+{
61682+ return 0;
61683+}
61684+
61685+void
61686+gr_acl_handle_psacct(struct task_struct *task, const long code)
61687+{
61688+ return;
61689+}
61690+
61691+int
61692+gr_handle_ptrace(struct task_struct *task, const long request)
61693+{
61694+ return 0;
61695+}
61696+
61697+int
61698+gr_handle_proc_ptrace(struct task_struct *task)
61699+{
61700+ return 0;
61701+}
61702+
61703+void
61704+gr_learn_resource(const struct task_struct *task,
61705+ const int res, const unsigned long wanted, const int gt)
61706+{
61707+ return;
61708+}
61709+
61710+int
61711+gr_set_acls(const int type)
61712+{
61713+ return 0;
61714+}
61715+
61716+int
61717+gr_check_hidden_task(const struct task_struct *tsk)
61718+{
61719+ return 0;
61720+}
61721+
61722+int
61723+gr_check_protected_task(const struct task_struct *task)
61724+{
61725+ return 0;
61726+}
61727+
61728+int
61729+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
61730+{
61731+ return 0;
61732+}
61733+
61734+void
61735+gr_copy_label(struct task_struct *tsk)
61736+{
61737+ return;
61738+}
61739+
61740+void
61741+gr_set_pax_flags(struct task_struct *task)
61742+{
61743+ return;
61744+}
61745+
61746+int
61747+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
61748+ const int unsafe_share)
61749+{
61750+ return 0;
61751+}
61752+
61753+void
61754+gr_handle_delete(const ino_t ino, const dev_t dev)
61755+{
61756+ return;
61757+}
61758+
61759+void
61760+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61761+{
61762+ return;
61763+}
61764+
61765+void
61766+gr_handle_crash(struct task_struct *task, const int sig)
61767+{
61768+ return;
61769+}
61770+
61771+int
61772+gr_check_crash_exec(const struct file *filp)
61773+{
61774+ return 0;
61775+}
61776+
61777+int
61778+gr_check_crash_uid(const uid_t uid)
61779+{
61780+ return 0;
61781+}
61782+
61783+void
61784+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61785+ struct dentry *old_dentry,
61786+ struct dentry *new_dentry,
61787+ struct vfsmount *mnt, const __u8 replace)
61788+{
61789+ return;
61790+}
61791+
61792+int
61793+gr_search_socket(const int family, const int type, const int protocol)
61794+{
61795+ return 1;
61796+}
61797+
61798+int
61799+gr_search_connectbind(const int mode, const struct socket *sock,
61800+ const struct sockaddr_in *addr)
61801+{
61802+ return 0;
61803+}
61804+
61805+void
61806+gr_handle_alertkill(struct task_struct *task)
61807+{
61808+ return;
61809+}
61810+
61811+__u32
61812+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
61813+{
61814+ return 1;
61815+}
61816+
61817+__u32
61818+gr_acl_handle_hidden_file(const struct dentry * dentry,
61819+ const struct vfsmount * mnt)
61820+{
61821+ return 1;
61822+}
61823+
61824+__u32
61825+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61826+ int acc_mode)
61827+{
61828+ return 1;
61829+}
61830+
61831+__u32
61832+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61833+{
61834+ return 1;
61835+}
61836+
61837+__u32
61838+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
61839+{
61840+ return 1;
61841+}
61842+
61843+int
61844+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
61845+ unsigned int *vm_flags)
61846+{
61847+ return 1;
61848+}
61849+
61850+__u32
61851+gr_acl_handle_truncate(const struct dentry * dentry,
61852+ const struct vfsmount * mnt)
61853+{
61854+ return 1;
61855+}
61856+
61857+__u32
61858+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
61859+{
61860+ return 1;
61861+}
61862+
61863+__u32
61864+gr_acl_handle_access(const struct dentry * dentry,
61865+ const struct vfsmount * mnt, const int fmode)
61866+{
61867+ return 1;
61868+}
61869+
61870+__u32
61871+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
61872+ mode_t mode)
61873+{
61874+ return 1;
61875+}
61876+
61877+__u32
61878+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
61879+ mode_t mode)
61880+{
61881+ return 1;
61882+}
61883+
61884+__u32
61885+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
61886+{
61887+ return 1;
61888+}
61889+
61890+__u32
61891+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
61892+{
61893+ return 1;
61894+}
61895+
61896+void
61897+grsecurity_init(void)
61898+{
61899+ return;
61900+}
61901+
61902+__u32
61903+gr_acl_handle_mknod(const struct dentry * new_dentry,
61904+ const struct dentry * parent_dentry,
61905+ const struct vfsmount * parent_mnt,
61906+ const int mode)
61907+{
61908+ return 1;
61909+}
61910+
61911+__u32
61912+gr_acl_handle_mkdir(const struct dentry * new_dentry,
61913+ const struct dentry * parent_dentry,
61914+ const struct vfsmount * parent_mnt)
61915+{
61916+ return 1;
61917+}
61918+
61919+__u32
61920+gr_acl_handle_symlink(const struct dentry * new_dentry,
61921+ const struct dentry * parent_dentry,
61922+ const struct vfsmount * parent_mnt, const char *from)
61923+{
61924+ return 1;
61925+}
61926+
61927+__u32
61928+gr_acl_handle_link(const struct dentry * new_dentry,
61929+ const struct dentry * parent_dentry,
61930+ const struct vfsmount * parent_mnt,
61931+ const struct dentry * old_dentry,
61932+ const struct vfsmount * old_mnt, const char *to)
61933+{
61934+ return 1;
61935+}
61936+
61937+int
61938+gr_acl_handle_rename(const struct dentry *new_dentry,
61939+ const struct dentry *parent_dentry,
61940+ const struct vfsmount *parent_mnt,
61941+ const struct dentry *old_dentry,
61942+ const struct inode *old_parent_inode,
61943+ const struct vfsmount *old_mnt, const char *newname)
61944+{
61945+ return 0;
61946+}
61947+
61948+int
61949+gr_acl_handle_filldir(const struct file *file, const char *name,
61950+ const int namelen, const ino_t ino)
61951+{
61952+ return 1;
61953+}
61954+
61955+int
61956+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61957+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61958+{
61959+ return 1;
61960+}
61961+
61962+int
61963+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
61964+{
61965+ return 0;
61966+}
61967+
61968+int
61969+gr_search_accept(const struct socket *sock)
61970+{
61971+ return 0;
61972+}
61973+
61974+int
61975+gr_search_listen(const struct socket *sock)
61976+{
61977+ return 0;
61978+}
61979+
61980+int
61981+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
61982+{
61983+ return 0;
61984+}
61985+
61986+__u32
61987+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
61988+{
61989+ return 1;
61990+}
61991+
61992+__u32
61993+gr_acl_handle_creat(const struct dentry * dentry,
61994+ const struct dentry * p_dentry,
61995+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61996+ const int imode)
61997+{
61998+ return 1;
61999+}
62000+
62001+void
62002+gr_acl_handle_exit(void)
62003+{
62004+ return;
62005+}
62006+
62007+int
62008+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62009+{
62010+ return 1;
62011+}
62012+
62013+void
62014+gr_set_role_label(const uid_t uid, const gid_t gid)
62015+{
62016+ return;
62017+}
62018+
62019+int
62020+gr_acl_handle_procpidmem(const struct task_struct *task)
62021+{
62022+ return 0;
62023+}
62024+
62025+int
62026+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62027+{
62028+ return 0;
62029+}
62030+
62031+int
62032+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62033+{
62034+ return 0;
62035+}
62036+
62037+void
62038+gr_set_kernel_label(struct task_struct *task)
62039+{
62040+ return;
62041+}
62042+
62043+int
62044+gr_check_user_change(int real, int effective, int fs)
62045+{
62046+ return 0;
62047+}
62048+
62049+int
62050+gr_check_group_change(int real, int effective, int fs)
62051+{
62052+ return 0;
62053+}
62054+
62055+int gr_acl_enable_at_secure(void)
62056+{
62057+ return 0;
62058+}
62059+
62060+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62061+{
62062+ return dentry->d_inode->i_sb->s_dev;
62063+}
62064+
62065+EXPORT_SYMBOL(gr_learn_resource);
62066+EXPORT_SYMBOL(gr_set_kernel_label);
62067+#ifdef CONFIG_SECURITY
62068+EXPORT_SYMBOL(gr_check_user_change);
62069+EXPORT_SYMBOL(gr_check_group_change);
62070+#endif
62071diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62072new file mode 100644
62073index 0000000..a96e155
62074--- /dev/null
62075+++ b/grsecurity/grsec_exec.c
62076@@ -0,0 +1,204 @@
62077+#include <linux/kernel.h>
62078+#include <linux/sched.h>
62079+#include <linux/file.h>
62080+#include <linux/binfmts.h>
62081+#include <linux/smp_lock.h>
62082+#include <linux/fs.h>
62083+#include <linux/types.h>
62084+#include <linux/grdefs.h>
62085+#include <linux/grinternal.h>
62086+#include <linux/capability.h>
62087+#include <linux/compat.h>
62088+#include <linux/module.h>
62089+
62090+#include <asm/uaccess.h>
62091+
62092+#ifdef CONFIG_GRKERNSEC_EXECLOG
62093+static char gr_exec_arg_buf[132];
62094+static DEFINE_MUTEX(gr_exec_arg_mutex);
62095+#endif
62096+
62097+void
62098+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62099+{
62100+#ifdef CONFIG_GRKERNSEC_EXECLOG
62101+ char *grarg = gr_exec_arg_buf;
62102+ unsigned int i, x, execlen = 0;
62103+ char c;
62104+
62105+ if (!((grsec_enable_execlog && grsec_enable_group &&
62106+ in_group_p(grsec_audit_gid))
62107+ || (grsec_enable_execlog && !grsec_enable_group)))
62108+ return;
62109+
62110+ mutex_lock(&gr_exec_arg_mutex);
62111+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62112+
62113+ if (unlikely(argv == NULL))
62114+ goto log;
62115+
62116+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62117+ const char __user *p;
62118+ unsigned int len;
62119+
62120+ if (copy_from_user(&p, argv + i, sizeof(p)))
62121+ goto log;
62122+ if (!p)
62123+ goto log;
62124+ len = strnlen_user(p, 128 - execlen);
62125+ if (len > 128 - execlen)
62126+ len = 128 - execlen;
62127+ else if (len > 0)
62128+ len--;
62129+ if (copy_from_user(grarg + execlen, p, len))
62130+ goto log;
62131+
62132+ /* rewrite unprintable characters */
62133+ for (x = 0; x < len; x++) {
62134+ c = *(grarg + execlen + x);
62135+ if (c < 32 || c > 126)
62136+ *(grarg + execlen + x) = ' ';
62137+ }
62138+
62139+ execlen += len;
62140+ *(grarg + execlen) = ' ';
62141+ *(grarg + execlen + 1) = '\0';
62142+ execlen++;
62143+ }
62144+
62145+ log:
62146+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62147+ bprm->file->f_path.mnt, grarg);
62148+ mutex_unlock(&gr_exec_arg_mutex);
62149+#endif
62150+ return;
62151+}
62152+
62153+#ifdef CONFIG_COMPAT
62154+void
62155+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62156+{
62157+#ifdef CONFIG_GRKERNSEC_EXECLOG
62158+ char *grarg = gr_exec_arg_buf;
62159+ unsigned int i, x, execlen = 0;
62160+ char c;
62161+
62162+ if (!((grsec_enable_execlog && grsec_enable_group &&
62163+ in_group_p(grsec_audit_gid))
62164+ || (grsec_enable_execlog && !grsec_enable_group)))
62165+ return;
62166+
62167+ mutex_lock(&gr_exec_arg_mutex);
62168+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62169+
62170+ if (unlikely(argv == NULL))
62171+ goto log;
62172+
62173+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62174+ compat_uptr_t p;
62175+ unsigned int len;
62176+
62177+ if (get_user(p, argv + i))
62178+ goto log;
62179+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62180+ if (len > 128 - execlen)
62181+ len = 128 - execlen;
62182+ else if (len > 0)
62183+ len--;
62184+ else
62185+ goto log;
62186+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62187+ goto log;
62188+
62189+ /* rewrite unprintable characters */
62190+ for (x = 0; x < len; x++) {
62191+ c = *(grarg + execlen + x);
62192+ if (c < 32 || c > 126)
62193+ *(grarg + execlen + x) = ' ';
62194+ }
62195+
62196+ execlen += len;
62197+ *(grarg + execlen) = ' ';
62198+ *(grarg + execlen + 1) = '\0';
62199+ execlen++;
62200+ }
62201+
62202+ log:
62203+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62204+ bprm->file->f_path.mnt, grarg);
62205+ mutex_unlock(&gr_exec_arg_mutex);
62206+#endif
62207+ return;
62208+}
62209+#endif
62210+
62211+#ifdef CONFIG_GRKERNSEC
62212+extern int gr_acl_is_capable(const int cap);
62213+extern int gr_acl_is_capable_nolog(const int cap);
62214+extern int gr_chroot_is_capable(const int cap);
62215+extern int gr_chroot_is_capable_nolog(const int cap);
62216+#endif
62217+
62218+const char *captab_log[] = {
62219+ "CAP_CHOWN",
62220+ "CAP_DAC_OVERRIDE",
62221+ "CAP_DAC_READ_SEARCH",
62222+ "CAP_FOWNER",
62223+ "CAP_FSETID",
62224+ "CAP_KILL",
62225+ "CAP_SETGID",
62226+ "CAP_SETUID",
62227+ "CAP_SETPCAP",
62228+ "CAP_LINUX_IMMUTABLE",
62229+ "CAP_NET_BIND_SERVICE",
62230+ "CAP_NET_BROADCAST",
62231+ "CAP_NET_ADMIN",
62232+ "CAP_NET_RAW",
62233+ "CAP_IPC_LOCK",
62234+ "CAP_IPC_OWNER",
62235+ "CAP_SYS_MODULE",
62236+ "CAP_SYS_RAWIO",
62237+ "CAP_SYS_CHROOT",
62238+ "CAP_SYS_PTRACE",
62239+ "CAP_SYS_PACCT",
62240+ "CAP_SYS_ADMIN",
62241+ "CAP_SYS_BOOT",
62242+ "CAP_SYS_NICE",
62243+ "CAP_SYS_RESOURCE",
62244+ "CAP_SYS_TIME",
62245+ "CAP_SYS_TTY_CONFIG",
62246+ "CAP_MKNOD",
62247+ "CAP_LEASE",
62248+ "CAP_AUDIT_WRITE",
62249+ "CAP_AUDIT_CONTROL",
62250+ "CAP_SETFCAP",
62251+ "CAP_MAC_OVERRIDE",
62252+ "CAP_MAC_ADMIN"
62253+};
62254+
62255+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62256+
62257+int gr_is_capable(const int cap)
62258+{
62259+#ifdef CONFIG_GRKERNSEC
62260+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62261+ return 1;
62262+ return 0;
62263+#else
62264+ return 1;
62265+#endif
62266+}
62267+
62268+int gr_is_capable_nolog(const int cap)
62269+{
62270+#ifdef CONFIG_GRKERNSEC
62271+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62272+ return 1;
62273+ return 0;
62274+#else
62275+ return 1;
62276+#endif
62277+}
62278+
62279+EXPORT_SYMBOL(gr_is_capable);
62280+EXPORT_SYMBOL(gr_is_capable_nolog);
62281diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62282new file mode 100644
62283index 0000000..d3ee748
62284--- /dev/null
62285+++ b/grsecurity/grsec_fifo.c
62286@@ -0,0 +1,24 @@
62287+#include <linux/kernel.h>
62288+#include <linux/sched.h>
62289+#include <linux/fs.h>
62290+#include <linux/file.h>
62291+#include <linux/grinternal.h>
62292+
62293+int
62294+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62295+ const struct dentry *dir, const int flag, const int acc_mode)
62296+{
62297+#ifdef CONFIG_GRKERNSEC_FIFO
62298+ const struct cred *cred = current_cred();
62299+
62300+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62301+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62302+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62303+ (cred->fsuid != dentry->d_inode->i_uid)) {
62304+ if (!inode_permission(dentry->d_inode, acc_mode))
62305+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62306+ return -EACCES;
62307+ }
62308+#endif
62309+ return 0;
62310+}
62311diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62312new file mode 100644
62313index 0000000..8ca18bf
62314--- /dev/null
62315+++ b/grsecurity/grsec_fork.c
62316@@ -0,0 +1,23 @@
62317+#include <linux/kernel.h>
62318+#include <linux/sched.h>
62319+#include <linux/grsecurity.h>
62320+#include <linux/grinternal.h>
62321+#include <linux/errno.h>
62322+
62323+void
62324+gr_log_forkfail(const int retval)
62325+{
62326+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62327+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62328+ switch (retval) {
62329+ case -EAGAIN:
62330+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62331+ break;
62332+ case -ENOMEM:
62333+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62334+ break;
62335+ }
62336+ }
62337+#endif
62338+ return;
62339+}
62340diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62341new file mode 100644
62342index 0000000..f813c26
62343--- /dev/null
62344+++ b/grsecurity/grsec_init.c
62345@@ -0,0 +1,270 @@
62346+#include <linux/kernel.h>
62347+#include <linux/sched.h>
62348+#include <linux/mm.h>
62349+#include <linux/smp_lock.h>
62350+#include <linux/gracl.h>
62351+#include <linux/slab.h>
62352+#include <linux/vmalloc.h>
62353+#include <linux/percpu.h>
62354+#include <linux/module.h>
62355+
62356+int grsec_enable_brute;
62357+int grsec_enable_link;
62358+int grsec_enable_dmesg;
62359+int grsec_enable_harden_ptrace;
62360+int grsec_enable_fifo;
62361+int grsec_enable_execlog;
62362+int grsec_enable_signal;
62363+int grsec_enable_forkfail;
62364+int grsec_enable_audit_ptrace;
62365+int grsec_enable_time;
62366+int grsec_enable_audit_textrel;
62367+int grsec_enable_group;
62368+int grsec_audit_gid;
62369+int grsec_enable_chdir;
62370+int grsec_enable_mount;
62371+int grsec_enable_rofs;
62372+int grsec_enable_chroot_findtask;
62373+int grsec_enable_chroot_mount;
62374+int grsec_enable_chroot_shmat;
62375+int grsec_enable_chroot_fchdir;
62376+int grsec_enable_chroot_double;
62377+int grsec_enable_chroot_pivot;
62378+int grsec_enable_chroot_chdir;
62379+int grsec_enable_chroot_chmod;
62380+int grsec_enable_chroot_mknod;
62381+int grsec_enable_chroot_nice;
62382+int grsec_enable_chroot_execlog;
62383+int grsec_enable_chroot_caps;
62384+int grsec_enable_chroot_sysctl;
62385+int grsec_enable_chroot_unix;
62386+int grsec_enable_tpe;
62387+int grsec_tpe_gid;
62388+int grsec_enable_blackhole;
62389+#ifdef CONFIG_IPV6_MODULE
62390+EXPORT_SYMBOL(grsec_enable_blackhole);
62391+#endif
62392+int grsec_lastack_retries;
62393+int grsec_enable_tpe_all;
62394+int grsec_enable_tpe_invert;
62395+int grsec_enable_socket_all;
62396+int grsec_socket_all_gid;
62397+int grsec_enable_socket_client;
62398+int grsec_socket_client_gid;
62399+int grsec_enable_socket_server;
62400+int grsec_socket_server_gid;
62401+int grsec_resource_logging;
62402+int grsec_disable_privio;
62403+int grsec_enable_log_rwxmaps;
62404+int grsec_lock;
62405+
62406+DEFINE_SPINLOCK(grsec_alert_lock);
62407+unsigned long grsec_alert_wtime = 0;
62408+unsigned long grsec_alert_fyet = 0;
62409+
62410+DEFINE_SPINLOCK(grsec_audit_lock);
62411+
62412+DEFINE_RWLOCK(grsec_exec_file_lock);
62413+
62414+char *gr_shared_page[4];
62415+
62416+char *gr_alert_log_fmt;
62417+char *gr_audit_log_fmt;
62418+char *gr_alert_log_buf;
62419+char *gr_audit_log_buf;
62420+
62421+extern struct gr_arg *gr_usermode;
62422+extern unsigned char *gr_system_salt;
62423+extern unsigned char *gr_system_sum;
62424+
62425+void __init
62426+grsecurity_init(void)
62427+{
62428+ int j;
62429+ /* create the per-cpu shared pages */
62430+
62431+#ifdef CONFIG_X86
62432+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62433+#endif
62434+
62435+ for (j = 0; j < 4; j++) {
62436+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62437+ if (gr_shared_page[j] == NULL) {
62438+ panic("Unable to allocate grsecurity shared page");
62439+ return;
62440+ }
62441+ }
62442+
62443+ /* allocate log buffers */
62444+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62445+ if (!gr_alert_log_fmt) {
62446+ panic("Unable to allocate grsecurity alert log format buffer");
62447+ return;
62448+ }
62449+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62450+ if (!gr_audit_log_fmt) {
62451+ panic("Unable to allocate grsecurity audit log format buffer");
62452+ return;
62453+ }
62454+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62455+ if (!gr_alert_log_buf) {
62456+ panic("Unable to allocate grsecurity alert log buffer");
62457+ return;
62458+ }
62459+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62460+ if (!gr_audit_log_buf) {
62461+ panic("Unable to allocate grsecurity audit log buffer");
62462+ return;
62463+ }
62464+
62465+ /* allocate memory for authentication structure */
62466+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62467+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62468+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62469+
62470+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62471+ panic("Unable to allocate grsecurity authentication structure");
62472+ return;
62473+ }
62474+
62475+
62476+#ifdef CONFIG_GRKERNSEC_IO
62477+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62478+ grsec_disable_privio = 1;
62479+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62480+ grsec_disable_privio = 1;
62481+#else
62482+ grsec_disable_privio = 0;
62483+#endif
62484+#endif
62485+
62486+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62487+ /* for backward compatibility, tpe_invert always defaults to on if
62488+ enabled in the kernel
62489+ */
62490+ grsec_enable_tpe_invert = 1;
62491+#endif
62492+
62493+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62494+#ifndef CONFIG_GRKERNSEC_SYSCTL
62495+ grsec_lock = 1;
62496+#endif
62497+
62498+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62499+ grsec_enable_audit_textrel = 1;
62500+#endif
62501+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62502+ grsec_enable_log_rwxmaps = 1;
62503+#endif
62504+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62505+ grsec_enable_group = 1;
62506+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
62507+#endif
62508+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62509+ grsec_enable_chdir = 1;
62510+#endif
62511+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62512+ grsec_enable_harden_ptrace = 1;
62513+#endif
62514+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62515+ grsec_enable_mount = 1;
62516+#endif
62517+#ifdef CONFIG_GRKERNSEC_LINK
62518+ grsec_enable_link = 1;
62519+#endif
62520+#ifdef CONFIG_GRKERNSEC_BRUTE
62521+ grsec_enable_brute = 1;
62522+#endif
62523+#ifdef CONFIG_GRKERNSEC_DMESG
62524+ grsec_enable_dmesg = 1;
62525+#endif
62526+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
62527+ grsec_enable_blackhole = 1;
62528+ grsec_lastack_retries = 4;
62529+#endif
62530+#ifdef CONFIG_GRKERNSEC_FIFO
62531+ grsec_enable_fifo = 1;
62532+#endif
62533+#ifdef CONFIG_GRKERNSEC_EXECLOG
62534+ grsec_enable_execlog = 1;
62535+#endif
62536+#ifdef CONFIG_GRKERNSEC_SIGNAL
62537+ grsec_enable_signal = 1;
62538+#endif
62539+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62540+ grsec_enable_forkfail = 1;
62541+#endif
62542+#ifdef CONFIG_GRKERNSEC_TIME
62543+ grsec_enable_time = 1;
62544+#endif
62545+#ifdef CONFIG_GRKERNSEC_RESLOG
62546+ grsec_resource_logging = 1;
62547+#endif
62548+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62549+ grsec_enable_chroot_findtask = 1;
62550+#endif
62551+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62552+ grsec_enable_chroot_unix = 1;
62553+#endif
62554+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62555+ grsec_enable_chroot_mount = 1;
62556+#endif
62557+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62558+ grsec_enable_chroot_fchdir = 1;
62559+#endif
62560+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62561+ grsec_enable_chroot_shmat = 1;
62562+#endif
62563+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62564+ grsec_enable_audit_ptrace = 1;
62565+#endif
62566+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62567+ grsec_enable_chroot_double = 1;
62568+#endif
62569+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62570+ grsec_enable_chroot_pivot = 1;
62571+#endif
62572+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62573+ grsec_enable_chroot_chdir = 1;
62574+#endif
62575+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62576+ grsec_enable_chroot_chmod = 1;
62577+#endif
62578+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62579+ grsec_enable_chroot_mknod = 1;
62580+#endif
62581+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62582+ grsec_enable_chroot_nice = 1;
62583+#endif
62584+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62585+ grsec_enable_chroot_execlog = 1;
62586+#endif
62587+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62588+ grsec_enable_chroot_caps = 1;
62589+#endif
62590+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62591+ grsec_enable_chroot_sysctl = 1;
62592+#endif
62593+#ifdef CONFIG_GRKERNSEC_TPE
62594+ grsec_enable_tpe = 1;
62595+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
62596+#ifdef CONFIG_GRKERNSEC_TPE_ALL
62597+ grsec_enable_tpe_all = 1;
62598+#endif
62599+#endif
62600+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62601+ grsec_enable_socket_all = 1;
62602+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
62603+#endif
62604+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62605+ grsec_enable_socket_client = 1;
62606+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
62607+#endif
62608+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62609+ grsec_enable_socket_server = 1;
62610+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
62611+#endif
62612+#endif
62613+
62614+ return;
62615+}
62616diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
62617new file mode 100644
62618index 0000000..3efe141
62619--- /dev/null
62620+++ b/grsecurity/grsec_link.c
62621@@ -0,0 +1,43 @@
62622+#include <linux/kernel.h>
62623+#include <linux/sched.h>
62624+#include <linux/fs.h>
62625+#include <linux/file.h>
62626+#include <linux/grinternal.h>
62627+
62628+int
62629+gr_handle_follow_link(const struct inode *parent,
62630+ const struct inode *inode,
62631+ const struct dentry *dentry, const struct vfsmount *mnt)
62632+{
62633+#ifdef CONFIG_GRKERNSEC_LINK
62634+ const struct cred *cred = current_cred();
62635+
62636+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
62637+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
62638+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
62639+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
62640+ return -EACCES;
62641+ }
62642+#endif
62643+ return 0;
62644+}
62645+
62646+int
62647+gr_handle_hardlink(const struct dentry *dentry,
62648+ const struct vfsmount *mnt,
62649+ struct inode *inode, const int mode, const char *to)
62650+{
62651+#ifdef CONFIG_GRKERNSEC_LINK
62652+ const struct cred *cred = current_cred();
62653+
62654+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
62655+ (!S_ISREG(mode) || (mode & S_ISUID) ||
62656+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
62657+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
62658+ !capable(CAP_FOWNER) && cred->uid) {
62659+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
62660+ return -EPERM;
62661+ }
62662+#endif
62663+ return 0;
62664+}
62665diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
62666new file mode 100644
62667index 0000000..a45d2e9
62668--- /dev/null
62669+++ b/grsecurity/grsec_log.c
62670@@ -0,0 +1,322 @@
62671+#include <linux/kernel.h>
62672+#include <linux/sched.h>
62673+#include <linux/file.h>
62674+#include <linux/tty.h>
62675+#include <linux/fs.h>
62676+#include <linux/grinternal.h>
62677+
62678+#ifdef CONFIG_TREE_PREEMPT_RCU
62679+#define DISABLE_PREEMPT() preempt_disable()
62680+#define ENABLE_PREEMPT() preempt_enable()
62681+#else
62682+#define DISABLE_PREEMPT()
62683+#define ENABLE_PREEMPT()
62684+#endif
62685+
62686+#define BEGIN_LOCKS(x) \
62687+ DISABLE_PREEMPT(); \
62688+ rcu_read_lock(); \
62689+ read_lock(&tasklist_lock); \
62690+ read_lock(&grsec_exec_file_lock); \
62691+ if (x != GR_DO_AUDIT) \
62692+ spin_lock(&grsec_alert_lock); \
62693+ else \
62694+ spin_lock(&grsec_audit_lock)
62695+
62696+#define END_LOCKS(x) \
62697+ if (x != GR_DO_AUDIT) \
62698+ spin_unlock(&grsec_alert_lock); \
62699+ else \
62700+ spin_unlock(&grsec_audit_lock); \
62701+ read_unlock(&grsec_exec_file_lock); \
62702+ read_unlock(&tasklist_lock); \
62703+ rcu_read_unlock(); \
62704+ ENABLE_PREEMPT(); \
62705+ if (x == GR_DONT_AUDIT) \
62706+ gr_handle_alertkill(current)
62707+
62708+enum {
62709+ FLOODING,
62710+ NO_FLOODING
62711+};
62712+
62713+extern char *gr_alert_log_fmt;
62714+extern char *gr_audit_log_fmt;
62715+extern char *gr_alert_log_buf;
62716+extern char *gr_audit_log_buf;
62717+
62718+static int gr_log_start(int audit)
62719+{
62720+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
62721+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
62722+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62723+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
62724+ unsigned long curr_secs = get_seconds();
62725+
62726+ if (audit == GR_DO_AUDIT)
62727+ goto set_fmt;
62728+
62729+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
62730+ grsec_alert_wtime = curr_secs;
62731+ grsec_alert_fyet = 0;
62732+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
62733+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
62734+ grsec_alert_fyet++;
62735+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
62736+ grsec_alert_wtime = curr_secs;
62737+ grsec_alert_fyet++;
62738+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
62739+ return FLOODING;
62740+ }
62741+ else return FLOODING;
62742+
62743+set_fmt:
62744+#endif
62745+ memset(buf, 0, PAGE_SIZE);
62746+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
62747+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
62748+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62749+ } else if (current->signal->curr_ip) {
62750+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
62751+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
62752+ } else if (gr_acl_is_enabled()) {
62753+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
62754+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62755+ } else {
62756+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
62757+ strcpy(buf, fmt);
62758+ }
62759+
62760+ return NO_FLOODING;
62761+}
62762+
62763+static void gr_log_middle(int audit, const char *msg, va_list ap)
62764+ __attribute__ ((format (printf, 2, 0)));
62765+
62766+static void gr_log_middle(int audit, const char *msg, va_list ap)
62767+{
62768+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62769+ unsigned int len = strlen(buf);
62770+
62771+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62772+
62773+ return;
62774+}
62775+
62776+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62777+ __attribute__ ((format (printf, 2, 3)));
62778+
62779+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62780+{
62781+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62782+ unsigned int len = strlen(buf);
62783+ va_list ap;
62784+
62785+ va_start(ap, msg);
62786+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62787+ va_end(ap);
62788+
62789+ return;
62790+}
62791+
62792+static void gr_log_end(int audit, int append_default)
62793+{
62794+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62795+
62796+ if (append_default) {
62797+ unsigned int len = strlen(buf);
62798+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
62799+ }
62800+
62801+ printk("%s\n", buf);
62802+
62803+ return;
62804+}
62805+
62806+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
62807+{
62808+ int logtype;
62809+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
62810+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
62811+ void *voidptr = NULL;
62812+ int num1 = 0, num2 = 0;
62813+ unsigned long ulong1 = 0, ulong2 = 0;
62814+ struct dentry *dentry = NULL;
62815+ struct vfsmount *mnt = NULL;
62816+ struct file *file = NULL;
62817+ struct task_struct *task = NULL;
62818+ const struct cred *cred, *pcred;
62819+ va_list ap;
62820+
62821+ BEGIN_LOCKS(audit);
62822+ logtype = gr_log_start(audit);
62823+ if (logtype == FLOODING) {
62824+ END_LOCKS(audit);
62825+ return;
62826+ }
62827+ va_start(ap, argtypes);
62828+ switch (argtypes) {
62829+ case GR_TTYSNIFF:
62830+ task = va_arg(ap, struct task_struct *);
62831+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
62832+ break;
62833+ case GR_SYSCTL_HIDDEN:
62834+ str1 = va_arg(ap, char *);
62835+ gr_log_middle_varargs(audit, msg, result, str1);
62836+ break;
62837+ case GR_RBAC:
62838+ dentry = va_arg(ap, struct dentry *);
62839+ mnt = va_arg(ap, struct vfsmount *);
62840+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
62841+ break;
62842+ case GR_RBAC_STR:
62843+ dentry = va_arg(ap, struct dentry *);
62844+ mnt = va_arg(ap, struct vfsmount *);
62845+ str1 = va_arg(ap, char *);
62846+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
62847+ break;
62848+ case GR_STR_RBAC:
62849+ str1 = va_arg(ap, char *);
62850+ dentry = va_arg(ap, struct dentry *);
62851+ mnt = va_arg(ap, struct vfsmount *);
62852+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
62853+ break;
62854+ case GR_RBAC_MODE2:
62855+ dentry = va_arg(ap, struct dentry *);
62856+ mnt = va_arg(ap, struct vfsmount *);
62857+ str1 = va_arg(ap, char *);
62858+ str2 = va_arg(ap, char *);
62859+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
62860+ break;
62861+ case GR_RBAC_MODE3:
62862+ dentry = va_arg(ap, struct dentry *);
62863+ mnt = va_arg(ap, struct vfsmount *);
62864+ str1 = va_arg(ap, char *);
62865+ str2 = va_arg(ap, char *);
62866+ str3 = va_arg(ap, char *);
62867+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
62868+ break;
62869+ case GR_FILENAME:
62870+ dentry = va_arg(ap, struct dentry *);
62871+ mnt = va_arg(ap, struct vfsmount *);
62872+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
62873+ break;
62874+ case GR_STR_FILENAME:
62875+ str1 = va_arg(ap, char *);
62876+ dentry = va_arg(ap, struct dentry *);
62877+ mnt = va_arg(ap, struct vfsmount *);
62878+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
62879+ break;
62880+ case GR_FILENAME_STR:
62881+ dentry = va_arg(ap, struct dentry *);
62882+ mnt = va_arg(ap, struct vfsmount *);
62883+ str1 = va_arg(ap, char *);
62884+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
62885+ break;
62886+ case GR_FILENAME_TWO_INT:
62887+ dentry = va_arg(ap, struct dentry *);
62888+ mnt = va_arg(ap, struct vfsmount *);
62889+ num1 = va_arg(ap, int);
62890+ num2 = va_arg(ap, int);
62891+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
62892+ break;
62893+ case GR_FILENAME_TWO_INT_STR:
62894+ dentry = va_arg(ap, struct dentry *);
62895+ mnt = va_arg(ap, struct vfsmount *);
62896+ num1 = va_arg(ap, int);
62897+ num2 = va_arg(ap, int);
62898+ str1 = va_arg(ap, char *);
62899+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
62900+ break;
62901+ case GR_TEXTREL:
62902+ file = va_arg(ap, struct file *);
62903+ ulong1 = va_arg(ap, unsigned long);
62904+ ulong2 = va_arg(ap, unsigned long);
62905+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
62906+ break;
62907+ case GR_PTRACE:
62908+ task = va_arg(ap, struct task_struct *);
62909+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
62910+ break;
62911+ case GR_RESOURCE:
62912+ task = va_arg(ap, struct task_struct *);
62913+ cred = __task_cred(task);
62914+ pcred = __task_cred(task->real_parent);
62915+ ulong1 = va_arg(ap, unsigned long);
62916+ str1 = va_arg(ap, char *);
62917+ ulong2 = va_arg(ap, unsigned long);
62918+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62919+ break;
62920+ case GR_CAP:
62921+ task = va_arg(ap, struct task_struct *);
62922+ cred = __task_cred(task);
62923+ pcred = __task_cred(task->real_parent);
62924+ str1 = va_arg(ap, char *);
62925+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62926+ break;
62927+ case GR_SIG:
62928+ str1 = va_arg(ap, char *);
62929+ voidptr = va_arg(ap, void *);
62930+ gr_log_middle_varargs(audit, msg, str1, voidptr);
62931+ break;
62932+ case GR_SIG2:
62933+ task = va_arg(ap, struct task_struct *);
62934+ cred = __task_cred(task);
62935+ pcred = __task_cred(task->real_parent);
62936+ num1 = va_arg(ap, int);
62937+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62938+ break;
62939+ case GR_CRASH1:
62940+ task = va_arg(ap, struct task_struct *);
62941+ cred = __task_cred(task);
62942+ pcred = __task_cred(task->real_parent);
62943+ ulong1 = va_arg(ap, unsigned long);
62944+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
62945+ break;
62946+ case GR_CRASH2:
62947+ task = va_arg(ap, struct task_struct *);
62948+ cred = __task_cred(task);
62949+ pcred = __task_cred(task->real_parent);
62950+ ulong1 = va_arg(ap, unsigned long);
62951+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
62952+ break;
62953+ case GR_RWXMAP:
62954+ file = va_arg(ap, struct file *);
62955+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
62956+ break;
62957+ case GR_PSACCT:
62958+ {
62959+ unsigned int wday, cday;
62960+ __u8 whr, chr;
62961+ __u8 wmin, cmin;
62962+ __u8 wsec, csec;
62963+ char cur_tty[64] = { 0 };
62964+ char parent_tty[64] = { 0 };
62965+
62966+ task = va_arg(ap, struct task_struct *);
62967+ wday = va_arg(ap, unsigned int);
62968+ cday = va_arg(ap, unsigned int);
62969+ whr = va_arg(ap, int);
62970+ chr = va_arg(ap, int);
62971+ wmin = va_arg(ap, int);
62972+ cmin = va_arg(ap, int);
62973+ wsec = va_arg(ap, int);
62974+ csec = va_arg(ap, int);
62975+ ulong1 = va_arg(ap, unsigned long);
62976+ cred = __task_cred(task);
62977+ pcred = __task_cred(task->real_parent);
62978+
62979+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62980+ }
62981+ break;
62982+ default:
62983+ gr_log_middle(audit, msg, ap);
62984+ }
62985+ va_end(ap);
62986+ // these don't need DEFAULTSECARGS printed on the end
62987+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
62988+ gr_log_end(audit, 0);
62989+ else
62990+ gr_log_end(audit, 1);
62991+ END_LOCKS(audit);
62992+}
62993diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
62994new file mode 100644
62995index 0000000..6c0416b
62996--- /dev/null
62997+++ b/grsecurity/grsec_mem.c
62998@@ -0,0 +1,33 @@
62999+#include <linux/kernel.h>
63000+#include <linux/sched.h>
63001+#include <linux/mm.h>
63002+#include <linux/mman.h>
63003+#include <linux/grinternal.h>
63004+
63005+void
63006+gr_handle_ioperm(void)
63007+{
63008+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63009+ return;
63010+}
63011+
63012+void
63013+gr_handle_iopl(void)
63014+{
63015+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63016+ return;
63017+}
63018+
63019+void
63020+gr_handle_mem_readwrite(u64 from, u64 to)
63021+{
63022+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63023+ return;
63024+}
63025+
63026+void
63027+gr_handle_vm86(void)
63028+{
63029+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63030+ return;
63031+}
63032diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63033new file mode 100644
63034index 0000000..2131422
63035--- /dev/null
63036+++ b/grsecurity/grsec_mount.c
63037@@ -0,0 +1,62 @@
63038+#include <linux/kernel.h>
63039+#include <linux/sched.h>
63040+#include <linux/mount.h>
63041+#include <linux/grsecurity.h>
63042+#include <linux/grinternal.h>
63043+
63044+void
63045+gr_log_remount(const char *devname, const int retval)
63046+{
63047+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63048+ if (grsec_enable_mount && (retval >= 0))
63049+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63050+#endif
63051+ return;
63052+}
63053+
63054+void
63055+gr_log_unmount(const char *devname, const int retval)
63056+{
63057+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63058+ if (grsec_enable_mount && (retval >= 0))
63059+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63060+#endif
63061+ return;
63062+}
63063+
63064+void
63065+gr_log_mount(const char *from, const char *to, const int retval)
63066+{
63067+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63068+ if (grsec_enable_mount && (retval >= 0))
63069+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63070+#endif
63071+ return;
63072+}
63073+
63074+int
63075+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63076+{
63077+#ifdef CONFIG_GRKERNSEC_ROFS
63078+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63079+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63080+ return -EPERM;
63081+ } else
63082+ return 0;
63083+#endif
63084+ return 0;
63085+}
63086+
63087+int
63088+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63089+{
63090+#ifdef CONFIG_GRKERNSEC_ROFS
63091+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63092+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63093+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63094+ return -EPERM;
63095+ } else
63096+ return 0;
63097+#endif
63098+ return 0;
63099+}
63100diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63101new file mode 100644
63102index 0000000..a3b12a0
63103--- /dev/null
63104+++ b/grsecurity/grsec_pax.c
63105@@ -0,0 +1,36 @@
63106+#include <linux/kernel.h>
63107+#include <linux/sched.h>
63108+#include <linux/mm.h>
63109+#include <linux/file.h>
63110+#include <linux/grinternal.h>
63111+#include <linux/grsecurity.h>
63112+
63113+void
63114+gr_log_textrel(struct vm_area_struct * vma)
63115+{
63116+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63117+ if (grsec_enable_audit_textrel)
63118+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63119+#endif
63120+ return;
63121+}
63122+
63123+void
63124+gr_log_rwxmmap(struct file *file)
63125+{
63126+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63127+ if (grsec_enable_log_rwxmaps)
63128+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63129+#endif
63130+ return;
63131+}
63132+
63133+void
63134+gr_log_rwxmprotect(struct file *file)
63135+{
63136+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63137+ if (grsec_enable_log_rwxmaps)
63138+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63139+#endif
63140+ return;
63141+}
63142diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63143new file mode 100644
63144index 0000000..472c1d6
63145--- /dev/null
63146+++ b/grsecurity/grsec_ptrace.c
63147@@ -0,0 +1,14 @@
63148+#include <linux/kernel.h>
63149+#include <linux/sched.h>
63150+#include <linux/grinternal.h>
63151+#include <linux/grsecurity.h>
63152+
63153+void
63154+gr_audit_ptrace(struct task_struct *task)
63155+{
63156+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63157+ if (grsec_enable_audit_ptrace)
63158+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63159+#endif
63160+ return;
63161+}
63162diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63163new file mode 100644
63164index 0000000..dc73fe9
63165--- /dev/null
63166+++ b/grsecurity/grsec_sig.c
63167@@ -0,0 +1,205 @@
63168+#include <linux/kernel.h>
63169+#include <linux/sched.h>
63170+#include <linux/delay.h>
63171+#include <linux/grsecurity.h>
63172+#include <linux/grinternal.h>
63173+#include <linux/hardirq.h>
63174+
63175+char *signames[] = {
63176+ [SIGSEGV] = "Segmentation fault",
63177+ [SIGILL] = "Illegal instruction",
63178+ [SIGABRT] = "Abort",
63179+ [SIGBUS] = "Invalid alignment/Bus error"
63180+};
63181+
63182+void
63183+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63184+{
63185+#ifdef CONFIG_GRKERNSEC_SIGNAL
63186+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63187+ (sig == SIGABRT) || (sig == SIGBUS))) {
63188+ if (t->pid == current->pid) {
63189+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63190+ } else {
63191+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63192+ }
63193+ }
63194+#endif
63195+ return;
63196+}
63197+
63198+int
63199+gr_handle_signal(const struct task_struct *p, const int sig)
63200+{
63201+#ifdef CONFIG_GRKERNSEC
63202+ if (current->pid > 1 && gr_check_protected_task(p)) {
63203+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63204+ return -EPERM;
63205+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63206+ return -EPERM;
63207+ }
63208+#endif
63209+ return 0;
63210+}
63211+
63212+#ifdef CONFIG_GRKERNSEC
63213+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63214+
63215+int gr_fake_force_sig(int sig, struct task_struct *t)
63216+{
63217+ unsigned long int flags;
63218+ int ret, blocked, ignored;
63219+ struct k_sigaction *action;
63220+
63221+ spin_lock_irqsave(&t->sighand->siglock, flags);
63222+ action = &t->sighand->action[sig-1];
63223+ ignored = action->sa.sa_handler == SIG_IGN;
63224+ blocked = sigismember(&t->blocked, sig);
63225+ if (blocked || ignored) {
63226+ action->sa.sa_handler = SIG_DFL;
63227+ if (blocked) {
63228+ sigdelset(&t->blocked, sig);
63229+ recalc_sigpending_and_wake(t);
63230+ }
63231+ }
63232+ if (action->sa.sa_handler == SIG_DFL)
63233+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63234+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63235+
63236+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63237+
63238+ return ret;
63239+}
63240+#endif
63241+
63242+#ifdef CONFIG_GRKERNSEC_BRUTE
63243+#define GR_USER_BAN_TIME (15 * 60)
63244+
63245+static int __get_dumpable(unsigned long mm_flags)
63246+{
63247+ int ret;
63248+
63249+ ret = mm_flags & MMF_DUMPABLE_MASK;
63250+ return (ret >= 2) ? 2 : ret;
63251+}
63252+#endif
63253+
63254+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63255+{
63256+#ifdef CONFIG_GRKERNSEC_BRUTE
63257+ uid_t uid = 0;
63258+
63259+ if (!grsec_enable_brute)
63260+ return;
63261+
63262+ rcu_read_lock();
63263+ read_lock(&tasklist_lock);
63264+ read_lock(&grsec_exec_file_lock);
63265+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63266+ p->real_parent->brute = 1;
63267+ else {
63268+ const struct cred *cred = __task_cred(p), *cred2;
63269+ struct task_struct *tsk, *tsk2;
63270+
63271+ if (!__get_dumpable(mm_flags) && cred->uid) {
63272+ struct user_struct *user;
63273+
63274+ uid = cred->uid;
63275+
63276+ /* this is put upon execution past expiration */
63277+ user = find_user(uid);
63278+ if (user == NULL)
63279+ goto unlock;
63280+ user->banned = 1;
63281+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63282+ if (user->ban_expires == ~0UL)
63283+ user->ban_expires--;
63284+
63285+ do_each_thread(tsk2, tsk) {
63286+ cred2 = __task_cred(tsk);
63287+ if (tsk != p && cred2->uid == uid)
63288+ gr_fake_force_sig(SIGKILL, tsk);
63289+ } while_each_thread(tsk2, tsk);
63290+ }
63291+ }
63292+unlock:
63293+ read_unlock(&grsec_exec_file_lock);
63294+ read_unlock(&tasklist_lock);
63295+ rcu_read_unlock();
63296+
63297+ if (uid)
63298+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63299+#endif
63300+ return;
63301+}
63302+
63303+void gr_handle_brute_check(void)
63304+{
63305+#ifdef CONFIG_GRKERNSEC_BRUTE
63306+ if (current->brute)
63307+ msleep(30 * 1000);
63308+#endif
63309+ return;
63310+}
63311+
63312+void gr_handle_kernel_exploit(void)
63313+{
63314+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63315+ const struct cred *cred;
63316+ struct task_struct *tsk, *tsk2;
63317+ struct user_struct *user;
63318+ uid_t uid;
63319+
63320+ if (in_irq() || in_serving_softirq() || in_nmi())
63321+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63322+
63323+ uid = current_uid();
63324+
63325+ if (uid == 0)
63326+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
63327+ else {
63328+ /* kill all the processes of this user, hold a reference
63329+ to their creds struct, and prevent them from creating
63330+ another process until system reset
63331+ */
63332+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63333+ /* we intentionally leak this ref */
63334+ user = get_uid(current->cred->user);
63335+ if (user) {
63336+ user->banned = 1;
63337+ user->ban_expires = ~0UL;
63338+ }
63339+
63340+ read_lock(&tasklist_lock);
63341+ do_each_thread(tsk2, tsk) {
63342+ cred = __task_cred(tsk);
63343+ if (cred->uid == uid)
63344+ gr_fake_force_sig(SIGKILL, tsk);
63345+ } while_each_thread(tsk2, tsk);
63346+ read_unlock(&tasklist_lock);
63347+ }
63348+#endif
63349+}
63350+
63351+int __gr_process_user_ban(struct user_struct *user)
63352+{
63353+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63354+ if (unlikely(user->banned)) {
63355+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63356+ user->banned = 0;
63357+ user->ban_expires = 0;
63358+ free_uid(user);
63359+ } else
63360+ return -EPERM;
63361+ }
63362+#endif
63363+ return 0;
63364+}
63365+
63366+int gr_process_user_ban(void)
63367+{
63368+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63369+ return __gr_process_user_ban(current->cred->user);
63370+#endif
63371+ return 0;
63372+}
63373diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63374new file mode 100644
63375index 0000000..7512ea9
63376--- /dev/null
63377+++ b/grsecurity/grsec_sock.c
63378@@ -0,0 +1,275 @@
63379+#include <linux/kernel.h>
63380+#include <linux/module.h>
63381+#include <linux/sched.h>
63382+#include <linux/file.h>
63383+#include <linux/net.h>
63384+#include <linux/in.h>
63385+#include <linux/ip.h>
63386+#include <net/sock.h>
63387+#include <net/inet_sock.h>
63388+#include <linux/grsecurity.h>
63389+#include <linux/grinternal.h>
63390+#include <linux/gracl.h>
63391+
63392+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63393+EXPORT_SYMBOL(gr_cap_rtnetlink);
63394+
63395+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63396+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63397+
63398+EXPORT_SYMBOL(gr_search_udp_recvmsg);
63399+EXPORT_SYMBOL(gr_search_udp_sendmsg);
63400+
63401+#ifdef CONFIG_UNIX_MODULE
63402+EXPORT_SYMBOL(gr_acl_handle_unix);
63403+EXPORT_SYMBOL(gr_acl_handle_mknod);
63404+EXPORT_SYMBOL(gr_handle_chroot_unix);
63405+EXPORT_SYMBOL(gr_handle_create);
63406+#endif
63407+
63408+#ifdef CONFIG_GRKERNSEC
63409+#define gr_conn_table_size 32749
63410+struct conn_table_entry {
63411+ struct conn_table_entry *next;
63412+ struct signal_struct *sig;
63413+};
63414+
63415+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63416+DEFINE_SPINLOCK(gr_conn_table_lock);
63417+
63418+extern const char * gr_socktype_to_name(unsigned char type);
63419+extern const char * gr_proto_to_name(unsigned char proto);
63420+extern const char * gr_sockfamily_to_name(unsigned char family);
63421+
63422+static __inline__ int
63423+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63424+{
63425+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63426+}
63427+
63428+static __inline__ int
63429+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63430+ __u16 sport, __u16 dport)
63431+{
63432+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63433+ sig->gr_sport == sport && sig->gr_dport == dport))
63434+ return 1;
63435+ else
63436+ return 0;
63437+}
63438+
63439+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63440+{
63441+ struct conn_table_entry **match;
63442+ unsigned int index;
63443+
63444+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63445+ sig->gr_sport, sig->gr_dport,
63446+ gr_conn_table_size);
63447+
63448+ newent->sig = sig;
63449+
63450+ match = &gr_conn_table[index];
63451+ newent->next = *match;
63452+ *match = newent;
63453+
63454+ return;
63455+}
63456+
63457+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63458+{
63459+ struct conn_table_entry *match, *last = NULL;
63460+ unsigned int index;
63461+
63462+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63463+ sig->gr_sport, sig->gr_dport,
63464+ gr_conn_table_size);
63465+
63466+ match = gr_conn_table[index];
63467+ while (match && !conn_match(match->sig,
63468+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63469+ sig->gr_dport)) {
63470+ last = match;
63471+ match = match->next;
63472+ }
63473+
63474+ if (match) {
63475+ if (last)
63476+ last->next = match->next;
63477+ else
63478+ gr_conn_table[index] = NULL;
63479+ kfree(match);
63480+ }
63481+
63482+ return;
63483+}
63484+
63485+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
63486+ __u16 sport, __u16 dport)
63487+{
63488+ struct conn_table_entry *match;
63489+ unsigned int index;
63490+
63491+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
63492+
63493+ match = gr_conn_table[index];
63494+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
63495+ match = match->next;
63496+
63497+ if (match)
63498+ return match->sig;
63499+ else
63500+ return NULL;
63501+}
63502+
63503+#endif
63504+
63505+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
63506+{
63507+#ifdef CONFIG_GRKERNSEC
63508+ struct signal_struct *sig = task->signal;
63509+ struct conn_table_entry *newent;
63510+
63511+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
63512+ if (newent == NULL)
63513+ return;
63514+ /* no bh lock needed since we are called with bh disabled */
63515+ spin_lock(&gr_conn_table_lock);
63516+ gr_del_task_from_ip_table_nolock(sig);
63517+ sig->gr_saddr = inet->rcv_saddr;
63518+ sig->gr_daddr = inet->daddr;
63519+ sig->gr_sport = inet->sport;
63520+ sig->gr_dport = inet->dport;
63521+ gr_add_to_task_ip_table_nolock(sig, newent);
63522+ spin_unlock(&gr_conn_table_lock);
63523+#endif
63524+ return;
63525+}
63526+
63527+void gr_del_task_from_ip_table(struct task_struct *task)
63528+{
63529+#ifdef CONFIG_GRKERNSEC
63530+ spin_lock_bh(&gr_conn_table_lock);
63531+ gr_del_task_from_ip_table_nolock(task->signal);
63532+ spin_unlock_bh(&gr_conn_table_lock);
63533+#endif
63534+ return;
63535+}
63536+
63537+void
63538+gr_attach_curr_ip(const struct sock *sk)
63539+{
63540+#ifdef CONFIG_GRKERNSEC
63541+ struct signal_struct *p, *set;
63542+ const struct inet_sock *inet = inet_sk(sk);
63543+
63544+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
63545+ return;
63546+
63547+ set = current->signal;
63548+
63549+ spin_lock_bh(&gr_conn_table_lock);
63550+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
63551+ inet->dport, inet->sport);
63552+ if (unlikely(p != NULL)) {
63553+ set->curr_ip = p->curr_ip;
63554+ set->used_accept = 1;
63555+ gr_del_task_from_ip_table_nolock(p);
63556+ spin_unlock_bh(&gr_conn_table_lock);
63557+ return;
63558+ }
63559+ spin_unlock_bh(&gr_conn_table_lock);
63560+
63561+ set->curr_ip = inet->daddr;
63562+ set->used_accept = 1;
63563+#endif
63564+ return;
63565+}
63566+
63567+int
63568+gr_handle_sock_all(const int family, const int type, const int protocol)
63569+{
63570+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63571+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
63572+ (family != AF_UNIX)) {
63573+ if (family == AF_INET)
63574+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
63575+ else
63576+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
63577+ return -EACCES;
63578+ }
63579+#endif
63580+ return 0;
63581+}
63582+
63583+int
63584+gr_handle_sock_server(const struct sockaddr *sck)
63585+{
63586+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63587+ if (grsec_enable_socket_server &&
63588+ in_group_p(grsec_socket_server_gid) &&
63589+ sck && (sck->sa_family != AF_UNIX) &&
63590+ (sck->sa_family != AF_LOCAL)) {
63591+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63592+ return -EACCES;
63593+ }
63594+#endif
63595+ return 0;
63596+}
63597+
63598+int
63599+gr_handle_sock_server_other(const struct sock *sck)
63600+{
63601+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63602+ if (grsec_enable_socket_server &&
63603+ in_group_p(grsec_socket_server_gid) &&
63604+ sck && (sck->sk_family != AF_UNIX) &&
63605+ (sck->sk_family != AF_LOCAL)) {
63606+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63607+ return -EACCES;
63608+ }
63609+#endif
63610+ return 0;
63611+}
63612+
63613+int
63614+gr_handle_sock_client(const struct sockaddr *sck)
63615+{
63616+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63617+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
63618+ sck && (sck->sa_family != AF_UNIX) &&
63619+ (sck->sa_family != AF_LOCAL)) {
63620+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
63621+ return -EACCES;
63622+ }
63623+#endif
63624+ return 0;
63625+}
63626+
63627+kernel_cap_t
63628+gr_cap_rtnetlink(struct sock *sock)
63629+{
63630+#ifdef CONFIG_GRKERNSEC
63631+ if (!gr_acl_is_enabled())
63632+ return current_cap();
63633+ else if (sock->sk_protocol == NETLINK_ISCSI &&
63634+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
63635+ gr_is_capable(CAP_SYS_ADMIN))
63636+ return current_cap();
63637+ else if (sock->sk_protocol == NETLINK_AUDIT &&
63638+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
63639+ gr_is_capable(CAP_AUDIT_WRITE) &&
63640+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
63641+ gr_is_capable(CAP_AUDIT_CONTROL))
63642+ return current_cap();
63643+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
63644+ ((sock->sk_protocol == NETLINK_ROUTE) ?
63645+ gr_is_capable_nolog(CAP_NET_ADMIN) :
63646+ gr_is_capable(CAP_NET_ADMIN)))
63647+ return current_cap();
63648+ else
63649+ return __cap_empty_set;
63650+#else
63651+ return current_cap();
63652+#endif
63653+}
63654diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
63655new file mode 100644
63656index 0000000..2753505
63657--- /dev/null
63658+++ b/grsecurity/grsec_sysctl.c
63659@@ -0,0 +1,479 @@
63660+#include <linux/kernel.h>
63661+#include <linux/sched.h>
63662+#include <linux/sysctl.h>
63663+#include <linux/grsecurity.h>
63664+#include <linux/grinternal.h>
63665+
63666+int
63667+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
63668+{
63669+#ifdef CONFIG_GRKERNSEC_SYSCTL
63670+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
63671+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
63672+ return -EACCES;
63673+ }
63674+#endif
63675+ return 0;
63676+}
63677+
63678+#ifdef CONFIG_GRKERNSEC_ROFS
63679+static int __maybe_unused one = 1;
63680+#endif
63681+
63682+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63683+ctl_table grsecurity_table[] = {
63684+#ifdef CONFIG_GRKERNSEC_SYSCTL
63685+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
63686+#ifdef CONFIG_GRKERNSEC_IO
63687+ {
63688+ .ctl_name = CTL_UNNUMBERED,
63689+ .procname = "disable_priv_io",
63690+ .data = &grsec_disable_privio,
63691+ .maxlen = sizeof(int),
63692+ .mode = 0600,
63693+ .proc_handler = &proc_dointvec,
63694+ },
63695+#endif
63696+#endif
63697+#ifdef CONFIG_GRKERNSEC_LINK
63698+ {
63699+ .ctl_name = CTL_UNNUMBERED,
63700+ .procname = "linking_restrictions",
63701+ .data = &grsec_enable_link,
63702+ .maxlen = sizeof(int),
63703+ .mode = 0600,
63704+ .proc_handler = &proc_dointvec,
63705+ },
63706+#endif
63707+#ifdef CONFIG_GRKERNSEC_BRUTE
63708+ {
63709+ .ctl_name = CTL_UNNUMBERED,
63710+ .procname = "deter_bruteforce",
63711+ .data = &grsec_enable_brute,
63712+ .maxlen = sizeof(int),
63713+ .mode = 0600,
63714+ .proc_handler = &proc_dointvec,
63715+ },
63716+#endif
63717+#ifdef CONFIG_GRKERNSEC_FIFO
63718+ {
63719+ .ctl_name = CTL_UNNUMBERED,
63720+ .procname = "fifo_restrictions",
63721+ .data = &grsec_enable_fifo,
63722+ .maxlen = sizeof(int),
63723+ .mode = 0600,
63724+ .proc_handler = &proc_dointvec,
63725+ },
63726+#endif
63727+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63728+ {
63729+ .ctl_name = CTL_UNNUMBERED,
63730+ .procname = "ip_blackhole",
63731+ .data = &grsec_enable_blackhole,
63732+ .maxlen = sizeof(int),
63733+ .mode = 0600,
63734+ .proc_handler = &proc_dointvec,
63735+ },
63736+ {
63737+ .ctl_name = CTL_UNNUMBERED,
63738+ .procname = "lastack_retries",
63739+ .data = &grsec_lastack_retries,
63740+ .maxlen = sizeof(int),
63741+ .mode = 0600,
63742+ .proc_handler = &proc_dointvec,
63743+ },
63744+#endif
63745+#ifdef CONFIG_GRKERNSEC_EXECLOG
63746+ {
63747+ .ctl_name = CTL_UNNUMBERED,
63748+ .procname = "exec_logging",
63749+ .data = &grsec_enable_execlog,
63750+ .maxlen = sizeof(int),
63751+ .mode = 0600,
63752+ .proc_handler = &proc_dointvec,
63753+ },
63754+#endif
63755+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63756+ {
63757+ .ctl_name = CTL_UNNUMBERED,
63758+ .procname = "rwxmap_logging",
63759+ .data = &grsec_enable_log_rwxmaps,
63760+ .maxlen = sizeof(int),
63761+ .mode = 0600,
63762+ .proc_handler = &proc_dointvec,
63763+ },
63764+#endif
63765+#ifdef CONFIG_GRKERNSEC_SIGNAL
63766+ {
63767+ .ctl_name = CTL_UNNUMBERED,
63768+ .procname = "signal_logging",
63769+ .data = &grsec_enable_signal,
63770+ .maxlen = sizeof(int),
63771+ .mode = 0600,
63772+ .proc_handler = &proc_dointvec,
63773+ },
63774+#endif
63775+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63776+ {
63777+ .ctl_name = CTL_UNNUMBERED,
63778+ .procname = "forkfail_logging",
63779+ .data = &grsec_enable_forkfail,
63780+ .maxlen = sizeof(int),
63781+ .mode = 0600,
63782+ .proc_handler = &proc_dointvec,
63783+ },
63784+#endif
63785+#ifdef CONFIG_GRKERNSEC_TIME
63786+ {
63787+ .ctl_name = CTL_UNNUMBERED,
63788+ .procname = "timechange_logging",
63789+ .data = &grsec_enable_time,
63790+ .maxlen = sizeof(int),
63791+ .mode = 0600,
63792+ .proc_handler = &proc_dointvec,
63793+ },
63794+#endif
63795+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63796+ {
63797+ .ctl_name = CTL_UNNUMBERED,
63798+ .procname = "chroot_deny_shmat",
63799+ .data = &grsec_enable_chroot_shmat,
63800+ .maxlen = sizeof(int),
63801+ .mode = 0600,
63802+ .proc_handler = &proc_dointvec,
63803+ },
63804+#endif
63805+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63806+ {
63807+ .ctl_name = CTL_UNNUMBERED,
63808+ .procname = "chroot_deny_unix",
63809+ .data = &grsec_enable_chroot_unix,
63810+ .maxlen = sizeof(int),
63811+ .mode = 0600,
63812+ .proc_handler = &proc_dointvec,
63813+ },
63814+#endif
63815+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63816+ {
63817+ .ctl_name = CTL_UNNUMBERED,
63818+ .procname = "chroot_deny_mount",
63819+ .data = &grsec_enable_chroot_mount,
63820+ .maxlen = sizeof(int),
63821+ .mode = 0600,
63822+ .proc_handler = &proc_dointvec,
63823+ },
63824+#endif
63825+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63826+ {
63827+ .ctl_name = CTL_UNNUMBERED,
63828+ .procname = "chroot_deny_fchdir",
63829+ .data = &grsec_enable_chroot_fchdir,
63830+ .maxlen = sizeof(int),
63831+ .mode = 0600,
63832+ .proc_handler = &proc_dointvec,
63833+ },
63834+#endif
63835+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63836+ {
63837+ .ctl_name = CTL_UNNUMBERED,
63838+ .procname = "chroot_deny_chroot",
63839+ .data = &grsec_enable_chroot_double,
63840+ .maxlen = sizeof(int),
63841+ .mode = 0600,
63842+ .proc_handler = &proc_dointvec,
63843+ },
63844+#endif
63845+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63846+ {
63847+ .ctl_name = CTL_UNNUMBERED,
63848+ .procname = "chroot_deny_pivot",
63849+ .data = &grsec_enable_chroot_pivot,
63850+ .maxlen = sizeof(int),
63851+ .mode = 0600,
63852+ .proc_handler = &proc_dointvec,
63853+ },
63854+#endif
63855+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63856+ {
63857+ .ctl_name = CTL_UNNUMBERED,
63858+ .procname = "chroot_enforce_chdir",
63859+ .data = &grsec_enable_chroot_chdir,
63860+ .maxlen = sizeof(int),
63861+ .mode = 0600,
63862+ .proc_handler = &proc_dointvec,
63863+ },
63864+#endif
63865+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63866+ {
63867+ .ctl_name = CTL_UNNUMBERED,
63868+ .procname = "chroot_deny_chmod",
63869+ .data = &grsec_enable_chroot_chmod,
63870+ .maxlen = sizeof(int),
63871+ .mode = 0600,
63872+ .proc_handler = &proc_dointvec,
63873+ },
63874+#endif
63875+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63876+ {
63877+ .ctl_name = CTL_UNNUMBERED,
63878+ .procname = "chroot_deny_mknod",
63879+ .data = &grsec_enable_chroot_mknod,
63880+ .maxlen = sizeof(int),
63881+ .mode = 0600,
63882+ .proc_handler = &proc_dointvec,
63883+ },
63884+#endif
63885+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63886+ {
63887+ .ctl_name = CTL_UNNUMBERED,
63888+ .procname = "chroot_restrict_nice",
63889+ .data = &grsec_enable_chroot_nice,
63890+ .maxlen = sizeof(int),
63891+ .mode = 0600,
63892+ .proc_handler = &proc_dointvec,
63893+ },
63894+#endif
63895+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63896+ {
63897+ .ctl_name = CTL_UNNUMBERED,
63898+ .procname = "chroot_execlog",
63899+ .data = &grsec_enable_chroot_execlog,
63900+ .maxlen = sizeof(int),
63901+ .mode = 0600,
63902+ .proc_handler = &proc_dointvec,
63903+ },
63904+#endif
63905+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63906+ {
63907+ .ctl_name = CTL_UNNUMBERED,
63908+ .procname = "chroot_caps",
63909+ .data = &grsec_enable_chroot_caps,
63910+ .maxlen = sizeof(int),
63911+ .mode = 0600,
63912+ .proc_handler = &proc_dointvec,
63913+ },
63914+#endif
63915+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63916+ {
63917+ .ctl_name = CTL_UNNUMBERED,
63918+ .procname = "chroot_deny_sysctl",
63919+ .data = &grsec_enable_chroot_sysctl,
63920+ .maxlen = sizeof(int),
63921+ .mode = 0600,
63922+ .proc_handler = &proc_dointvec,
63923+ },
63924+#endif
63925+#ifdef CONFIG_GRKERNSEC_TPE
63926+ {
63927+ .ctl_name = CTL_UNNUMBERED,
63928+ .procname = "tpe",
63929+ .data = &grsec_enable_tpe,
63930+ .maxlen = sizeof(int),
63931+ .mode = 0600,
63932+ .proc_handler = &proc_dointvec,
63933+ },
63934+ {
63935+ .ctl_name = CTL_UNNUMBERED,
63936+ .procname = "tpe_gid",
63937+ .data = &grsec_tpe_gid,
63938+ .maxlen = sizeof(int),
63939+ .mode = 0600,
63940+ .proc_handler = &proc_dointvec,
63941+ },
63942+#endif
63943+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63944+ {
63945+ .ctl_name = CTL_UNNUMBERED,
63946+ .procname = "tpe_invert",
63947+ .data = &grsec_enable_tpe_invert,
63948+ .maxlen = sizeof(int),
63949+ .mode = 0600,
63950+ .proc_handler = &proc_dointvec,
63951+ },
63952+#endif
63953+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63954+ {
63955+ .ctl_name = CTL_UNNUMBERED,
63956+ .procname = "tpe_restrict_all",
63957+ .data = &grsec_enable_tpe_all,
63958+ .maxlen = sizeof(int),
63959+ .mode = 0600,
63960+ .proc_handler = &proc_dointvec,
63961+ },
63962+#endif
63963+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63964+ {
63965+ .ctl_name = CTL_UNNUMBERED,
63966+ .procname = "socket_all",
63967+ .data = &grsec_enable_socket_all,
63968+ .maxlen = sizeof(int),
63969+ .mode = 0600,
63970+ .proc_handler = &proc_dointvec,
63971+ },
63972+ {
63973+ .ctl_name = CTL_UNNUMBERED,
63974+ .procname = "socket_all_gid",
63975+ .data = &grsec_socket_all_gid,
63976+ .maxlen = sizeof(int),
63977+ .mode = 0600,
63978+ .proc_handler = &proc_dointvec,
63979+ },
63980+#endif
63981+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63982+ {
63983+ .ctl_name = CTL_UNNUMBERED,
63984+ .procname = "socket_client",
63985+ .data = &grsec_enable_socket_client,
63986+ .maxlen = sizeof(int),
63987+ .mode = 0600,
63988+ .proc_handler = &proc_dointvec,
63989+ },
63990+ {
63991+ .ctl_name = CTL_UNNUMBERED,
63992+ .procname = "socket_client_gid",
63993+ .data = &grsec_socket_client_gid,
63994+ .maxlen = sizeof(int),
63995+ .mode = 0600,
63996+ .proc_handler = &proc_dointvec,
63997+ },
63998+#endif
63999+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64000+ {
64001+ .ctl_name = CTL_UNNUMBERED,
64002+ .procname = "socket_server",
64003+ .data = &grsec_enable_socket_server,
64004+ .maxlen = sizeof(int),
64005+ .mode = 0600,
64006+ .proc_handler = &proc_dointvec,
64007+ },
64008+ {
64009+ .ctl_name = CTL_UNNUMBERED,
64010+ .procname = "socket_server_gid",
64011+ .data = &grsec_socket_server_gid,
64012+ .maxlen = sizeof(int),
64013+ .mode = 0600,
64014+ .proc_handler = &proc_dointvec,
64015+ },
64016+#endif
64017+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64018+ {
64019+ .ctl_name = CTL_UNNUMBERED,
64020+ .procname = "audit_group",
64021+ .data = &grsec_enable_group,
64022+ .maxlen = sizeof(int),
64023+ .mode = 0600,
64024+ .proc_handler = &proc_dointvec,
64025+ },
64026+ {
64027+ .ctl_name = CTL_UNNUMBERED,
64028+ .procname = "audit_gid",
64029+ .data = &grsec_audit_gid,
64030+ .maxlen = sizeof(int),
64031+ .mode = 0600,
64032+ .proc_handler = &proc_dointvec,
64033+ },
64034+#endif
64035+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64036+ {
64037+ .ctl_name = CTL_UNNUMBERED,
64038+ .procname = "audit_chdir",
64039+ .data = &grsec_enable_chdir,
64040+ .maxlen = sizeof(int),
64041+ .mode = 0600,
64042+ .proc_handler = &proc_dointvec,
64043+ },
64044+#endif
64045+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64046+ {
64047+ .ctl_name = CTL_UNNUMBERED,
64048+ .procname = "audit_mount",
64049+ .data = &grsec_enable_mount,
64050+ .maxlen = sizeof(int),
64051+ .mode = 0600,
64052+ .proc_handler = &proc_dointvec,
64053+ },
64054+#endif
64055+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64056+ {
64057+ .ctl_name = CTL_UNNUMBERED,
64058+ .procname = "audit_textrel",
64059+ .data = &grsec_enable_audit_textrel,
64060+ .maxlen = sizeof(int),
64061+ .mode = 0600,
64062+ .proc_handler = &proc_dointvec,
64063+ },
64064+#endif
64065+#ifdef CONFIG_GRKERNSEC_DMESG
64066+ {
64067+ .ctl_name = CTL_UNNUMBERED,
64068+ .procname = "dmesg",
64069+ .data = &grsec_enable_dmesg,
64070+ .maxlen = sizeof(int),
64071+ .mode = 0600,
64072+ .proc_handler = &proc_dointvec,
64073+ },
64074+#endif
64075+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64076+ {
64077+ .ctl_name = CTL_UNNUMBERED,
64078+ .procname = "chroot_findtask",
64079+ .data = &grsec_enable_chroot_findtask,
64080+ .maxlen = sizeof(int),
64081+ .mode = 0600,
64082+ .proc_handler = &proc_dointvec,
64083+ },
64084+#endif
64085+#ifdef CONFIG_GRKERNSEC_RESLOG
64086+ {
64087+ .ctl_name = CTL_UNNUMBERED,
64088+ .procname = "resource_logging",
64089+ .data = &grsec_resource_logging,
64090+ .maxlen = sizeof(int),
64091+ .mode = 0600,
64092+ .proc_handler = &proc_dointvec,
64093+ },
64094+#endif
64095+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64096+ {
64097+ .ctl_name = CTL_UNNUMBERED,
64098+ .procname = "audit_ptrace",
64099+ .data = &grsec_enable_audit_ptrace,
64100+ .maxlen = sizeof(int),
64101+ .mode = 0600,
64102+ .proc_handler = &proc_dointvec,
64103+ },
64104+#endif
64105+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64106+ {
64107+ .ctl_name = CTL_UNNUMBERED,
64108+ .procname = "harden_ptrace",
64109+ .data = &grsec_enable_harden_ptrace,
64110+ .maxlen = sizeof(int),
64111+ .mode = 0600,
64112+ .proc_handler = &proc_dointvec,
64113+ },
64114+#endif
64115+ {
64116+ .ctl_name = CTL_UNNUMBERED,
64117+ .procname = "grsec_lock",
64118+ .data = &grsec_lock,
64119+ .maxlen = sizeof(int),
64120+ .mode = 0600,
64121+ .proc_handler = &proc_dointvec,
64122+ },
64123+#endif
64124+#ifdef CONFIG_GRKERNSEC_ROFS
64125+ {
64126+ .ctl_name = CTL_UNNUMBERED,
64127+ .procname = "romount_protect",
64128+ .data = &grsec_enable_rofs,
64129+ .maxlen = sizeof(int),
64130+ .mode = 0600,
64131+ .proc_handler = &proc_dointvec_minmax,
64132+ .extra1 = &one,
64133+ .extra2 = &one,
64134+ },
64135+#endif
64136+ { .ctl_name = 0 }
64137+};
64138+#endif
64139diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64140new file mode 100644
64141index 0000000..0dc13c3
64142--- /dev/null
64143+++ b/grsecurity/grsec_time.c
64144@@ -0,0 +1,16 @@
64145+#include <linux/kernel.h>
64146+#include <linux/sched.h>
64147+#include <linux/grinternal.h>
64148+#include <linux/module.h>
64149+
64150+void
64151+gr_log_timechange(void)
64152+{
64153+#ifdef CONFIG_GRKERNSEC_TIME
64154+ if (grsec_enable_time)
64155+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64156+#endif
64157+ return;
64158+}
64159+
64160+EXPORT_SYMBOL(gr_log_timechange);
64161diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64162new file mode 100644
64163index 0000000..4a78774
64164--- /dev/null
64165+++ b/grsecurity/grsec_tpe.c
64166@@ -0,0 +1,39 @@
64167+#include <linux/kernel.h>
64168+#include <linux/sched.h>
64169+#include <linux/file.h>
64170+#include <linux/fs.h>
64171+#include <linux/grinternal.h>
64172+
64173+extern int gr_acl_tpe_check(void);
64174+
64175+int
64176+gr_tpe_allow(const struct file *file)
64177+{
64178+#ifdef CONFIG_GRKERNSEC
64179+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64180+ const struct cred *cred = current_cred();
64181+
64182+ if (cred->uid && ((grsec_enable_tpe &&
64183+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64184+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
64185+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
64186+#else
64187+ in_group_p(grsec_tpe_gid)
64188+#endif
64189+ ) || gr_acl_tpe_check()) &&
64190+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
64191+ (inode->i_mode & S_IWOTH))))) {
64192+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64193+ return 0;
64194+ }
64195+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64196+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
64197+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
64198+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
64199+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64200+ return 0;
64201+ }
64202+#endif
64203+#endif
64204+ return 1;
64205+}
64206diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64207new file mode 100644
64208index 0000000..9f7b1ac
64209--- /dev/null
64210+++ b/grsecurity/grsum.c
64211@@ -0,0 +1,61 @@
64212+#include <linux/err.h>
64213+#include <linux/kernel.h>
64214+#include <linux/sched.h>
64215+#include <linux/mm.h>
64216+#include <linux/scatterlist.h>
64217+#include <linux/crypto.h>
64218+#include <linux/gracl.h>
64219+
64220+
64221+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64222+#error "crypto and sha256 must be built into the kernel"
64223+#endif
64224+
64225+int
64226+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64227+{
64228+ char *p;
64229+ struct crypto_hash *tfm;
64230+ struct hash_desc desc;
64231+ struct scatterlist sg;
64232+ unsigned char temp_sum[GR_SHA_LEN];
64233+ volatile int retval = 0;
64234+ volatile int dummy = 0;
64235+ unsigned int i;
64236+
64237+ sg_init_table(&sg, 1);
64238+
64239+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64240+ if (IS_ERR(tfm)) {
64241+ /* should never happen, since sha256 should be built in */
64242+ return 1;
64243+ }
64244+
64245+ desc.tfm = tfm;
64246+ desc.flags = 0;
64247+
64248+ crypto_hash_init(&desc);
64249+
64250+ p = salt;
64251+ sg_set_buf(&sg, p, GR_SALT_LEN);
64252+ crypto_hash_update(&desc, &sg, sg.length);
64253+
64254+ p = entry->pw;
64255+ sg_set_buf(&sg, p, strlen(p));
64256+
64257+ crypto_hash_update(&desc, &sg, sg.length);
64258+
64259+ crypto_hash_final(&desc, temp_sum);
64260+
64261+ memset(entry->pw, 0, GR_PW_LEN);
64262+
64263+ for (i = 0; i < GR_SHA_LEN; i++)
64264+ if (sum[i] != temp_sum[i])
64265+ retval = 1;
64266+ else
64267+ dummy = 1; // waste a cycle
64268+
64269+ crypto_free_hash(tfm);
64270+
64271+ return retval;
64272+}
64273diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64274index 3cd9ccd..fe16d47 100644
64275--- a/include/acpi/acpi_bus.h
64276+++ b/include/acpi/acpi_bus.h
64277@@ -107,7 +107,7 @@ struct acpi_device_ops {
64278 acpi_op_bind bind;
64279 acpi_op_unbind unbind;
64280 acpi_op_notify notify;
64281-};
64282+} __no_const;
64283
64284 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64285
64286diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64287index f4906f6..71feb73 100644
64288--- a/include/acpi/acpi_drivers.h
64289+++ b/include/acpi/acpi_drivers.h
64290@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64291 Dock Station
64292 -------------------------------------------------------------------------- */
64293 struct acpi_dock_ops {
64294- acpi_notify_handler handler;
64295- acpi_notify_handler uevent;
64296+ const acpi_notify_handler handler;
64297+ const acpi_notify_handler uevent;
64298 };
64299
64300 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64301@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64302 extern int register_dock_notifier(struct notifier_block *nb);
64303 extern void unregister_dock_notifier(struct notifier_block *nb);
64304 extern int register_hotplug_dock_device(acpi_handle handle,
64305- struct acpi_dock_ops *ops,
64306+ const struct acpi_dock_ops *ops,
64307 void *context);
64308 extern void unregister_hotplug_dock_device(acpi_handle handle);
64309 #else
64310@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64311 {
64312 }
64313 static inline int register_hotplug_dock_device(acpi_handle handle,
64314- struct acpi_dock_ops *ops,
64315+ const struct acpi_dock_ops *ops,
64316 void *context)
64317 {
64318 return -ENODEV;
64319diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64320index b7babf0..a9ac9fc 100644
64321--- a/include/asm-generic/atomic-long.h
64322+++ b/include/asm-generic/atomic-long.h
64323@@ -22,6 +22,12 @@
64324
64325 typedef atomic64_t atomic_long_t;
64326
64327+#ifdef CONFIG_PAX_REFCOUNT
64328+typedef atomic64_unchecked_t atomic_long_unchecked_t;
64329+#else
64330+typedef atomic64_t atomic_long_unchecked_t;
64331+#endif
64332+
64333 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64334
64335 static inline long atomic_long_read(atomic_long_t *l)
64336@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64337 return (long)atomic64_read(v);
64338 }
64339
64340+#ifdef CONFIG_PAX_REFCOUNT
64341+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64342+{
64343+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64344+
64345+ return (long)atomic64_read_unchecked(v);
64346+}
64347+#endif
64348+
64349 static inline void atomic_long_set(atomic_long_t *l, long i)
64350 {
64351 atomic64_t *v = (atomic64_t *)l;
64352@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64353 atomic64_set(v, i);
64354 }
64355
64356+#ifdef CONFIG_PAX_REFCOUNT
64357+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64358+{
64359+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64360+
64361+ atomic64_set_unchecked(v, i);
64362+}
64363+#endif
64364+
64365 static inline void atomic_long_inc(atomic_long_t *l)
64366 {
64367 atomic64_t *v = (atomic64_t *)l;
64368@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64369 atomic64_inc(v);
64370 }
64371
64372+#ifdef CONFIG_PAX_REFCOUNT
64373+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64374+{
64375+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64376+
64377+ atomic64_inc_unchecked(v);
64378+}
64379+#endif
64380+
64381 static inline void atomic_long_dec(atomic_long_t *l)
64382 {
64383 atomic64_t *v = (atomic64_t *)l;
64384@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64385 atomic64_dec(v);
64386 }
64387
64388+#ifdef CONFIG_PAX_REFCOUNT
64389+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64390+{
64391+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64392+
64393+ atomic64_dec_unchecked(v);
64394+}
64395+#endif
64396+
64397 static inline void atomic_long_add(long i, atomic_long_t *l)
64398 {
64399 atomic64_t *v = (atomic64_t *)l;
64400@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64401 atomic64_add(i, v);
64402 }
64403
64404+#ifdef CONFIG_PAX_REFCOUNT
64405+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64406+{
64407+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64408+
64409+ atomic64_add_unchecked(i, v);
64410+}
64411+#endif
64412+
64413 static inline void atomic_long_sub(long i, atomic_long_t *l)
64414 {
64415 atomic64_t *v = (atomic64_t *)l;
64416@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64417 return (long)atomic64_inc_return(v);
64418 }
64419
64420+#ifdef CONFIG_PAX_REFCOUNT
64421+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64422+{
64423+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64424+
64425+ return (long)atomic64_inc_return_unchecked(v);
64426+}
64427+#endif
64428+
64429 static inline long atomic_long_dec_return(atomic_long_t *l)
64430 {
64431 atomic64_t *v = (atomic64_t *)l;
64432@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64433
64434 typedef atomic_t atomic_long_t;
64435
64436+#ifdef CONFIG_PAX_REFCOUNT
64437+typedef atomic_unchecked_t atomic_long_unchecked_t;
64438+#else
64439+typedef atomic_t atomic_long_unchecked_t;
64440+#endif
64441+
64442 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
64443 static inline long atomic_long_read(atomic_long_t *l)
64444 {
64445@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64446 return (long)atomic_read(v);
64447 }
64448
64449+#ifdef CONFIG_PAX_REFCOUNT
64450+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64451+{
64452+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64453+
64454+ return (long)atomic_read_unchecked(v);
64455+}
64456+#endif
64457+
64458 static inline void atomic_long_set(atomic_long_t *l, long i)
64459 {
64460 atomic_t *v = (atomic_t *)l;
64461@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64462 atomic_set(v, i);
64463 }
64464
64465+#ifdef CONFIG_PAX_REFCOUNT
64466+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64467+{
64468+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64469+
64470+ atomic_set_unchecked(v, i);
64471+}
64472+#endif
64473+
64474 static inline void atomic_long_inc(atomic_long_t *l)
64475 {
64476 atomic_t *v = (atomic_t *)l;
64477@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64478 atomic_inc(v);
64479 }
64480
64481+#ifdef CONFIG_PAX_REFCOUNT
64482+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64483+{
64484+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64485+
64486+ atomic_inc_unchecked(v);
64487+}
64488+#endif
64489+
64490 static inline void atomic_long_dec(atomic_long_t *l)
64491 {
64492 atomic_t *v = (atomic_t *)l;
64493@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64494 atomic_dec(v);
64495 }
64496
64497+#ifdef CONFIG_PAX_REFCOUNT
64498+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64499+{
64500+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64501+
64502+ atomic_dec_unchecked(v);
64503+}
64504+#endif
64505+
64506 static inline void atomic_long_add(long i, atomic_long_t *l)
64507 {
64508 atomic_t *v = (atomic_t *)l;
64509@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64510 atomic_add(i, v);
64511 }
64512
64513+#ifdef CONFIG_PAX_REFCOUNT
64514+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64515+{
64516+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64517+
64518+ atomic_add_unchecked(i, v);
64519+}
64520+#endif
64521+
64522 static inline void atomic_long_sub(long i, atomic_long_t *l)
64523 {
64524 atomic_t *v = (atomic_t *)l;
64525@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64526 return (long)atomic_inc_return(v);
64527 }
64528
64529+#ifdef CONFIG_PAX_REFCOUNT
64530+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64531+{
64532+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64533+
64534+ return (long)atomic_inc_return_unchecked(v);
64535+}
64536+#endif
64537+
64538 static inline long atomic_long_dec_return(atomic_long_t *l)
64539 {
64540 atomic_t *v = (atomic_t *)l;
64541@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64542
64543 #endif /* BITS_PER_LONG == 64 */
64544
64545+#ifdef CONFIG_PAX_REFCOUNT
64546+static inline void pax_refcount_needs_these_functions(void)
64547+{
64548+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
64549+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
64550+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
64551+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
64552+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
64553+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
64554+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
64555+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
64556+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
64557+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
64558+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
64559+
64560+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
64561+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
64562+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
64563+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
64564+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
64565+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
64566+}
64567+#else
64568+#define atomic_read_unchecked(v) atomic_read(v)
64569+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
64570+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
64571+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
64572+#define atomic_inc_unchecked(v) atomic_inc(v)
64573+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
64574+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
64575+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
64576+#define atomic_dec_unchecked(v) atomic_dec(v)
64577+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
64578+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
64579+
64580+#define atomic_long_read_unchecked(v) atomic_long_read(v)
64581+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
64582+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
64583+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
64584+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
64585+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
64586+#endif
64587+
64588 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
64589diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
64590index b18ce4f..2ee2843 100644
64591--- a/include/asm-generic/atomic64.h
64592+++ b/include/asm-generic/atomic64.h
64593@@ -16,6 +16,8 @@ typedef struct {
64594 long long counter;
64595 } atomic64_t;
64596
64597+typedef atomic64_t atomic64_unchecked_t;
64598+
64599 #define ATOMIC64_INIT(i) { (i) }
64600
64601 extern long long atomic64_read(const atomic64_t *v);
64602@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
64603 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
64604 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
64605
64606+#define atomic64_read_unchecked(v) atomic64_read(v)
64607+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
64608+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
64609+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
64610+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
64611+#define atomic64_inc_unchecked(v) atomic64_inc(v)
64612+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
64613+#define atomic64_dec_unchecked(v) atomic64_dec(v)
64614+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
64615+
64616 #endif /* _ASM_GENERIC_ATOMIC64_H */
64617diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
64618index d48ddf0..656a0ac 100644
64619--- a/include/asm-generic/bug.h
64620+++ b/include/asm-generic/bug.h
64621@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
64622
64623 #else /* !CONFIG_BUG */
64624 #ifndef HAVE_ARCH_BUG
64625-#define BUG() do {} while(0)
64626+#define BUG() do { for (;;) ; } while(0)
64627 #endif
64628
64629 #ifndef HAVE_ARCH_BUG_ON
64630-#define BUG_ON(condition) do { if (condition) ; } while(0)
64631+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
64632 #endif
64633
64634 #ifndef HAVE_ARCH_WARN_ON
64635diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
64636index 1bfcfe5..e04c5c9 100644
64637--- a/include/asm-generic/cache.h
64638+++ b/include/asm-generic/cache.h
64639@@ -6,7 +6,7 @@
64640 * cache lines need to provide their own cache.h.
64641 */
64642
64643-#define L1_CACHE_SHIFT 5
64644-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
64645+#define L1_CACHE_SHIFT 5UL
64646+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
64647
64648 #endif /* __ASM_GENERIC_CACHE_H */
64649diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
64650index 6920695..41038bc 100644
64651--- a/include/asm-generic/dma-mapping-common.h
64652+++ b/include/asm-generic/dma-mapping-common.h
64653@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
64654 enum dma_data_direction dir,
64655 struct dma_attrs *attrs)
64656 {
64657- struct dma_map_ops *ops = get_dma_ops(dev);
64658+ const struct dma_map_ops *ops = get_dma_ops(dev);
64659 dma_addr_t addr;
64660
64661 kmemcheck_mark_initialized(ptr, size);
64662@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
64663 enum dma_data_direction dir,
64664 struct dma_attrs *attrs)
64665 {
64666- struct dma_map_ops *ops = get_dma_ops(dev);
64667+ const struct dma_map_ops *ops = get_dma_ops(dev);
64668
64669 BUG_ON(!valid_dma_direction(dir));
64670 if (ops->unmap_page)
64671@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
64672 int nents, enum dma_data_direction dir,
64673 struct dma_attrs *attrs)
64674 {
64675- struct dma_map_ops *ops = get_dma_ops(dev);
64676+ const struct dma_map_ops *ops = get_dma_ops(dev);
64677 int i, ents;
64678 struct scatterlist *s;
64679
64680@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
64681 int nents, enum dma_data_direction dir,
64682 struct dma_attrs *attrs)
64683 {
64684- struct dma_map_ops *ops = get_dma_ops(dev);
64685+ const struct dma_map_ops *ops = get_dma_ops(dev);
64686
64687 BUG_ON(!valid_dma_direction(dir));
64688 debug_dma_unmap_sg(dev, sg, nents, dir);
64689@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64690 size_t offset, size_t size,
64691 enum dma_data_direction dir)
64692 {
64693- struct dma_map_ops *ops = get_dma_ops(dev);
64694+ const struct dma_map_ops *ops = get_dma_ops(dev);
64695 dma_addr_t addr;
64696
64697 kmemcheck_mark_initialized(page_address(page) + offset, size);
64698@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64699 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
64700 size_t size, enum dma_data_direction dir)
64701 {
64702- struct dma_map_ops *ops = get_dma_ops(dev);
64703+ const struct dma_map_ops *ops = get_dma_ops(dev);
64704
64705 BUG_ON(!valid_dma_direction(dir));
64706 if (ops->unmap_page)
64707@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
64708 size_t size,
64709 enum dma_data_direction dir)
64710 {
64711- struct dma_map_ops *ops = get_dma_ops(dev);
64712+ const struct dma_map_ops *ops = get_dma_ops(dev);
64713
64714 BUG_ON(!valid_dma_direction(dir));
64715 if (ops->sync_single_for_cpu)
64716@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
64717 dma_addr_t addr, size_t size,
64718 enum dma_data_direction dir)
64719 {
64720- struct dma_map_ops *ops = get_dma_ops(dev);
64721+ const struct dma_map_ops *ops = get_dma_ops(dev);
64722
64723 BUG_ON(!valid_dma_direction(dir));
64724 if (ops->sync_single_for_device)
64725@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
64726 size_t size,
64727 enum dma_data_direction dir)
64728 {
64729- struct dma_map_ops *ops = get_dma_ops(dev);
64730+ const struct dma_map_ops *ops = get_dma_ops(dev);
64731
64732 BUG_ON(!valid_dma_direction(dir));
64733 if (ops->sync_single_range_for_cpu) {
64734@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
64735 size_t size,
64736 enum dma_data_direction dir)
64737 {
64738- struct dma_map_ops *ops = get_dma_ops(dev);
64739+ const struct dma_map_ops *ops = get_dma_ops(dev);
64740
64741 BUG_ON(!valid_dma_direction(dir));
64742 if (ops->sync_single_range_for_device) {
64743@@ -155,7 +155,7 @@ static inline void
64744 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
64745 int nelems, enum dma_data_direction dir)
64746 {
64747- struct dma_map_ops *ops = get_dma_ops(dev);
64748+ const struct dma_map_ops *ops = get_dma_ops(dev);
64749
64750 BUG_ON(!valid_dma_direction(dir));
64751 if (ops->sync_sg_for_cpu)
64752@@ -167,7 +167,7 @@ static inline void
64753 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
64754 int nelems, enum dma_data_direction dir)
64755 {
64756- struct dma_map_ops *ops = get_dma_ops(dev);
64757+ const struct dma_map_ops *ops = get_dma_ops(dev);
64758
64759 BUG_ON(!valid_dma_direction(dir));
64760 if (ops->sync_sg_for_device)
64761diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
64762index 0d68a1e..b74a761 100644
64763--- a/include/asm-generic/emergency-restart.h
64764+++ b/include/asm-generic/emergency-restart.h
64765@@ -1,7 +1,7 @@
64766 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
64767 #define _ASM_GENERIC_EMERGENCY_RESTART_H
64768
64769-static inline void machine_emergency_restart(void)
64770+static inline __noreturn void machine_emergency_restart(void)
64771 {
64772 machine_restart(NULL);
64773 }
64774diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
64775index 3c2344f..4590a7d 100644
64776--- a/include/asm-generic/futex.h
64777+++ b/include/asm-generic/futex.h
64778@@ -6,7 +6,7 @@
64779 #include <asm/errno.h>
64780
64781 static inline int
64782-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64783+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
64784 {
64785 int op = (encoded_op >> 28) & 7;
64786 int cmp = (encoded_op >> 24) & 15;
64787@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64788 }
64789
64790 static inline int
64791-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
64792+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
64793 {
64794 return -ENOSYS;
64795 }
64796diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
64797index 1ca3efc..e3dc852 100644
64798--- a/include/asm-generic/int-l64.h
64799+++ b/include/asm-generic/int-l64.h
64800@@ -46,6 +46,8 @@ typedef unsigned int u32;
64801 typedef signed long s64;
64802 typedef unsigned long u64;
64803
64804+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
64805+
64806 #define S8_C(x) x
64807 #define U8_C(x) x ## U
64808 #define S16_C(x) x
64809diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
64810index f394147..b6152b9 100644
64811--- a/include/asm-generic/int-ll64.h
64812+++ b/include/asm-generic/int-ll64.h
64813@@ -51,6 +51,8 @@ typedef unsigned int u32;
64814 typedef signed long long s64;
64815 typedef unsigned long long u64;
64816
64817+typedef unsigned long long intoverflow_t;
64818+
64819 #define S8_C(x) x
64820 #define U8_C(x) x ## U
64821 #define S16_C(x) x
64822diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
64823index e5f234a..cdb16b3 100644
64824--- a/include/asm-generic/kmap_types.h
64825+++ b/include/asm-generic/kmap_types.h
64826@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
64827 KMAP_D(16) KM_IRQ_PTE,
64828 KMAP_D(17) KM_NMI,
64829 KMAP_D(18) KM_NMI_PTE,
64830-KMAP_D(19) KM_TYPE_NR
64831+KMAP_D(19) KM_CLEARPAGE,
64832+KMAP_D(20) KM_TYPE_NR
64833 };
64834
64835 #undef KMAP_D
64836diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
64837index 725612b..9cc513a 100644
64838--- a/include/asm-generic/pgtable-nopmd.h
64839+++ b/include/asm-generic/pgtable-nopmd.h
64840@@ -1,14 +1,19 @@
64841 #ifndef _PGTABLE_NOPMD_H
64842 #define _PGTABLE_NOPMD_H
64843
64844-#ifndef __ASSEMBLY__
64845-
64846 #include <asm-generic/pgtable-nopud.h>
64847
64848-struct mm_struct;
64849-
64850 #define __PAGETABLE_PMD_FOLDED
64851
64852+#define PMD_SHIFT PUD_SHIFT
64853+#define PTRS_PER_PMD 1
64854+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
64855+#define PMD_MASK (~(PMD_SIZE-1))
64856+
64857+#ifndef __ASSEMBLY__
64858+
64859+struct mm_struct;
64860+
64861 /*
64862 * Having the pmd type consist of a pud gets the size right, and allows
64863 * us to conceptually access the pud entry that this pmd is folded into
64864@@ -16,11 +21,6 @@ struct mm_struct;
64865 */
64866 typedef struct { pud_t pud; } pmd_t;
64867
64868-#define PMD_SHIFT PUD_SHIFT
64869-#define PTRS_PER_PMD 1
64870-#define PMD_SIZE (1UL << PMD_SHIFT)
64871-#define PMD_MASK (~(PMD_SIZE-1))
64872-
64873 /*
64874 * The "pud_xxx()" functions here are trivial for a folded two-level
64875 * setup: the pmd is never bad, and a pmd always exists (as it's folded
64876diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
64877index 810431d..ccc3638 100644
64878--- a/include/asm-generic/pgtable-nopud.h
64879+++ b/include/asm-generic/pgtable-nopud.h
64880@@ -1,10 +1,15 @@
64881 #ifndef _PGTABLE_NOPUD_H
64882 #define _PGTABLE_NOPUD_H
64883
64884-#ifndef __ASSEMBLY__
64885-
64886 #define __PAGETABLE_PUD_FOLDED
64887
64888+#define PUD_SHIFT PGDIR_SHIFT
64889+#define PTRS_PER_PUD 1
64890+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
64891+#define PUD_MASK (~(PUD_SIZE-1))
64892+
64893+#ifndef __ASSEMBLY__
64894+
64895 /*
64896 * Having the pud type consist of a pgd gets the size right, and allows
64897 * us to conceptually access the pgd entry that this pud is folded into
64898@@ -12,11 +17,6 @@
64899 */
64900 typedef struct { pgd_t pgd; } pud_t;
64901
64902-#define PUD_SHIFT PGDIR_SHIFT
64903-#define PTRS_PER_PUD 1
64904-#define PUD_SIZE (1UL << PUD_SHIFT)
64905-#define PUD_MASK (~(PUD_SIZE-1))
64906-
64907 /*
64908 * The "pgd_xxx()" functions here are trivial for a folded two-level
64909 * setup: the pud is never bad, and a pud always exists (as it's folded
64910diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
64911index e2bd73e..fea8ed3 100644
64912--- a/include/asm-generic/pgtable.h
64913+++ b/include/asm-generic/pgtable.h
64914@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
64915 unsigned long size);
64916 #endif
64917
64918+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
64919+static inline unsigned long pax_open_kernel(void) { return 0; }
64920+#endif
64921+
64922+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
64923+static inline unsigned long pax_close_kernel(void) { return 0; }
64924+#endif
64925+
64926 #endif /* !__ASSEMBLY__ */
64927
64928 #endif /* _ASM_GENERIC_PGTABLE_H */
64929diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
64930index b6e818f..21aa58a 100644
64931--- a/include/asm-generic/vmlinux.lds.h
64932+++ b/include/asm-generic/vmlinux.lds.h
64933@@ -199,6 +199,7 @@
64934 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
64935 VMLINUX_SYMBOL(__start_rodata) = .; \
64936 *(.rodata) *(.rodata.*) \
64937+ *(.data.read_only) \
64938 *(__vermagic) /* Kernel version magic */ \
64939 *(__markers_strings) /* Markers: strings */ \
64940 *(__tracepoints_strings)/* Tracepoints: strings */ \
64941@@ -656,22 +657,24 @@
64942 * section in the linker script will go there too. @phdr should have
64943 * a leading colon.
64944 *
64945- * Note that this macros defines __per_cpu_load as an absolute symbol.
64946+ * Note that this macros defines per_cpu_load as an absolute symbol.
64947 * If there is no need to put the percpu section at a predetermined
64948 * address, use PERCPU().
64949 */
64950 #define PERCPU_VADDR(vaddr, phdr) \
64951- VMLINUX_SYMBOL(__per_cpu_load) = .; \
64952- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
64953+ per_cpu_load = .; \
64954+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
64955 - LOAD_OFFSET) { \
64956+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
64957 VMLINUX_SYMBOL(__per_cpu_start) = .; \
64958 *(.data.percpu.first) \
64959- *(.data.percpu.page_aligned) \
64960 *(.data.percpu) \
64961+ . = ALIGN(PAGE_SIZE); \
64962+ *(.data.percpu.page_aligned) \
64963 *(.data.percpu.shared_aligned) \
64964 VMLINUX_SYMBOL(__per_cpu_end) = .; \
64965 } phdr \
64966- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
64967+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
64968
64969 /**
64970 * PERCPU - define output section for percpu area, simple version
64971diff --git a/include/drm/drmP.h b/include/drm/drmP.h
64972index 66713c6..98c0460 100644
64973--- a/include/drm/drmP.h
64974+++ b/include/drm/drmP.h
64975@@ -71,6 +71,7 @@
64976 #include <linux/workqueue.h>
64977 #include <linux/poll.h>
64978 #include <asm/pgalloc.h>
64979+#include <asm/local.h>
64980 #include "drm.h"
64981
64982 #include <linux/idr.h>
64983@@ -814,7 +815,7 @@ struct drm_driver {
64984 void (*vgaarb_irq)(struct drm_device *dev, bool state);
64985
64986 /* Driver private ops for this object */
64987- struct vm_operations_struct *gem_vm_ops;
64988+ const struct vm_operations_struct *gem_vm_ops;
64989
64990 int major;
64991 int minor;
64992@@ -917,7 +918,7 @@ struct drm_device {
64993
64994 /** \name Usage Counters */
64995 /*@{ */
64996- int open_count; /**< Outstanding files open */
64997+ local_t open_count; /**< Outstanding files open */
64998 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
64999 atomic_t vma_count; /**< Outstanding vma areas open */
65000 int buf_use; /**< Buffers in use -- cannot alloc */
65001@@ -928,7 +929,7 @@ struct drm_device {
65002 /*@{ */
65003 unsigned long counters;
65004 enum drm_stat_type types[15];
65005- atomic_t counts[15];
65006+ atomic_unchecked_t counts[15];
65007 /*@} */
65008
65009 struct list_head filelist;
65010@@ -1016,7 +1017,7 @@ struct drm_device {
65011 struct pci_controller *hose;
65012 #endif
65013 struct drm_sg_mem *sg; /**< Scatter gather memory */
65014- unsigned int num_crtcs; /**< Number of CRTCs on this device */
65015+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
65016 void *dev_private; /**< device private data */
65017 void *mm_private;
65018 struct address_space *dev_mapping;
65019@@ -1042,11 +1043,11 @@ struct drm_device {
65020 spinlock_t object_name_lock;
65021 struct idr object_name_idr;
65022 atomic_t object_count;
65023- atomic_t object_memory;
65024+ atomic_unchecked_t object_memory;
65025 atomic_t pin_count;
65026- atomic_t pin_memory;
65027+ atomic_unchecked_t pin_memory;
65028 atomic_t gtt_count;
65029- atomic_t gtt_memory;
65030+ atomic_unchecked_t gtt_memory;
65031 uint32_t gtt_total;
65032 uint32_t invalidate_domains; /* domains pending invalidation */
65033 uint32_t flush_domains; /* domains pending flush */
65034diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65035index b29e201..3413cc9 100644
65036--- a/include/drm/drm_crtc_helper.h
65037+++ b/include/drm/drm_crtc_helper.h
65038@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65039
65040 /* reload the current crtc LUT */
65041 void (*load_lut)(struct drm_crtc *crtc);
65042-};
65043+} __no_const;
65044
65045 struct drm_encoder_helper_funcs {
65046 void (*dpms)(struct drm_encoder *encoder, int mode);
65047@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65048 struct drm_connector *connector);
65049 /* disable encoder when not in use - more explicit than dpms off */
65050 void (*disable)(struct drm_encoder *encoder);
65051-};
65052+} __no_const;
65053
65054 struct drm_connector_helper_funcs {
65055 int (*get_modes)(struct drm_connector *connector);
65056diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65057index b199170..6f9e64c 100644
65058--- a/include/drm/ttm/ttm_memory.h
65059+++ b/include/drm/ttm/ttm_memory.h
65060@@ -47,7 +47,7 @@
65061
65062 struct ttm_mem_shrink {
65063 int (*do_shrink) (struct ttm_mem_shrink *);
65064-};
65065+} __no_const;
65066
65067 /**
65068 * struct ttm_mem_global - Global memory accounting structure.
65069diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65070index e86dfca..40cc55f 100644
65071--- a/include/linux/a.out.h
65072+++ b/include/linux/a.out.h
65073@@ -39,6 +39,14 @@ enum machine_type {
65074 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65075 };
65076
65077+/* Constants for the N_FLAGS field */
65078+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65079+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65080+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65081+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65082+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65083+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65084+
65085 #if !defined (N_MAGIC)
65086 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65087 #endif
65088diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65089index 817b237..62c10bc 100644
65090--- a/include/linux/atmdev.h
65091+++ b/include/linux/atmdev.h
65092@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65093 #endif
65094
65095 struct k_atm_aal_stats {
65096-#define __HANDLE_ITEM(i) atomic_t i
65097+#define __HANDLE_ITEM(i) atomic_unchecked_t i
65098 __AAL_STAT_ITEMS
65099 #undef __HANDLE_ITEM
65100 };
65101diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65102index 0f5f578..8c4f884 100644
65103--- a/include/linux/backlight.h
65104+++ b/include/linux/backlight.h
65105@@ -36,18 +36,18 @@ struct backlight_device;
65106 struct fb_info;
65107
65108 struct backlight_ops {
65109- unsigned int options;
65110+ const unsigned int options;
65111
65112 #define BL_CORE_SUSPENDRESUME (1 << 0)
65113
65114 /* Notify the backlight driver some property has changed */
65115- int (*update_status)(struct backlight_device *);
65116+ int (* const update_status)(struct backlight_device *);
65117 /* Return the current backlight brightness (accounting for power,
65118 fb_blank etc.) */
65119- int (*get_brightness)(struct backlight_device *);
65120+ int (* const get_brightness)(struct backlight_device *);
65121 /* Check if given framebuffer device is the one bound to this backlight;
65122 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65123- int (*check_fb)(struct fb_info *);
65124+ int (* const check_fb)(struct fb_info *);
65125 };
65126
65127 /* This structure defines all the properties of a backlight */
65128@@ -86,7 +86,7 @@ struct backlight_device {
65129 registered this device has been unloaded, and if class_get_devdata()
65130 points to something in the body of that driver, it is also invalid. */
65131 struct mutex ops_lock;
65132- struct backlight_ops *ops;
65133+ const struct backlight_ops *ops;
65134
65135 /* The framebuffer notifier block */
65136 struct notifier_block fb_notif;
65137@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65138 }
65139
65140 extern struct backlight_device *backlight_device_register(const char *name,
65141- struct device *dev, void *devdata, struct backlight_ops *ops);
65142+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65143 extern void backlight_device_unregister(struct backlight_device *bd);
65144 extern void backlight_force_update(struct backlight_device *bd,
65145 enum backlight_update_reason reason);
65146diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65147index a3d802e..482f69c 100644
65148--- a/include/linux/binfmts.h
65149+++ b/include/linux/binfmts.h
65150@@ -83,6 +83,7 @@ struct linux_binfmt {
65151 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65152 int (*load_shlib)(struct file *);
65153 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65154+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65155 unsigned long min_coredump; /* minimal dump size */
65156 int hasvdso;
65157 };
65158diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65159index a06bfab..4fa38bb 100644
65160--- a/include/linux/blkdev.h
65161+++ b/include/linux/blkdev.h
65162@@ -1278,7 +1278,7 @@ struct block_device_operations {
65163 int (*revalidate_disk) (struct gendisk *);
65164 int (*getgeo)(struct block_device *, struct hd_geometry *);
65165 struct module *owner;
65166-};
65167+} __do_const;
65168
65169 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65170 unsigned long);
65171diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65172index 3b73b99..629d21b 100644
65173--- a/include/linux/blktrace_api.h
65174+++ b/include/linux/blktrace_api.h
65175@@ -160,7 +160,7 @@ struct blk_trace {
65176 struct dentry *dir;
65177 struct dentry *dropped_file;
65178 struct dentry *msg_file;
65179- atomic_t dropped;
65180+ atomic_unchecked_t dropped;
65181 };
65182
65183 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65184diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65185index 83195fb..0b0f77d 100644
65186--- a/include/linux/byteorder/little_endian.h
65187+++ b/include/linux/byteorder/little_endian.h
65188@@ -42,51 +42,51 @@
65189
65190 static inline __le64 __cpu_to_le64p(const __u64 *p)
65191 {
65192- return (__force __le64)*p;
65193+ return (__force const __le64)*p;
65194 }
65195 static inline __u64 __le64_to_cpup(const __le64 *p)
65196 {
65197- return (__force __u64)*p;
65198+ return (__force const __u64)*p;
65199 }
65200 static inline __le32 __cpu_to_le32p(const __u32 *p)
65201 {
65202- return (__force __le32)*p;
65203+ return (__force const __le32)*p;
65204 }
65205 static inline __u32 __le32_to_cpup(const __le32 *p)
65206 {
65207- return (__force __u32)*p;
65208+ return (__force const __u32)*p;
65209 }
65210 static inline __le16 __cpu_to_le16p(const __u16 *p)
65211 {
65212- return (__force __le16)*p;
65213+ return (__force const __le16)*p;
65214 }
65215 static inline __u16 __le16_to_cpup(const __le16 *p)
65216 {
65217- return (__force __u16)*p;
65218+ return (__force const __u16)*p;
65219 }
65220 static inline __be64 __cpu_to_be64p(const __u64 *p)
65221 {
65222- return (__force __be64)__swab64p(p);
65223+ return (__force const __be64)__swab64p(p);
65224 }
65225 static inline __u64 __be64_to_cpup(const __be64 *p)
65226 {
65227- return __swab64p((__u64 *)p);
65228+ return __swab64p((const __u64 *)p);
65229 }
65230 static inline __be32 __cpu_to_be32p(const __u32 *p)
65231 {
65232- return (__force __be32)__swab32p(p);
65233+ return (__force const __be32)__swab32p(p);
65234 }
65235 static inline __u32 __be32_to_cpup(const __be32 *p)
65236 {
65237- return __swab32p((__u32 *)p);
65238+ return __swab32p((const __u32 *)p);
65239 }
65240 static inline __be16 __cpu_to_be16p(const __u16 *p)
65241 {
65242- return (__force __be16)__swab16p(p);
65243+ return (__force const __be16)__swab16p(p);
65244 }
65245 static inline __u16 __be16_to_cpup(const __be16 *p)
65246 {
65247- return __swab16p((__u16 *)p);
65248+ return __swab16p((const __u16 *)p);
65249 }
65250 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65251 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65252diff --git a/include/linux/cache.h b/include/linux/cache.h
65253index 97e2488..e7576b9 100644
65254--- a/include/linux/cache.h
65255+++ b/include/linux/cache.h
65256@@ -16,6 +16,10 @@
65257 #define __read_mostly
65258 #endif
65259
65260+#ifndef __read_only
65261+#define __read_only __read_mostly
65262+#endif
65263+
65264 #ifndef ____cacheline_aligned
65265 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65266 #endif
65267diff --git a/include/linux/capability.h b/include/linux/capability.h
65268index c8f2a5f7..1618a5c 100644
65269--- a/include/linux/capability.h
65270+++ b/include/linux/capability.h
65271@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65272 (security_real_capable_noaudit((t), (cap)) == 0)
65273
65274 extern int capable(int cap);
65275+int capable_nolog(int cap);
65276
65277 /* audit system wants to get cap info from files as well */
65278 struct dentry;
65279diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65280index 450fa59..86019fb 100644
65281--- a/include/linux/compiler-gcc4.h
65282+++ b/include/linux/compiler-gcc4.h
65283@@ -36,4 +36,16 @@
65284 the kernel context */
65285 #define __cold __attribute__((__cold__))
65286
65287+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65288+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65289+#define __bos0(ptr) __bos((ptr), 0)
65290+#define __bos1(ptr) __bos((ptr), 1)
65291+
65292+#if __GNUC_MINOR__ >= 5
65293+#ifdef CONSTIFY_PLUGIN
65294+#define __no_const __attribute__((no_const))
65295+#define __do_const __attribute__((do_const))
65296+#endif
65297+#endif
65298+
65299 #endif
65300diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65301index 04fb513..fd6477b 100644
65302--- a/include/linux/compiler.h
65303+++ b/include/linux/compiler.h
65304@@ -5,11 +5,14 @@
65305
65306 #ifdef __CHECKER__
65307 # define __user __attribute__((noderef, address_space(1)))
65308+# define __force_user __force __user
65309 # define __kernel /* default address space */
65310+# define __force_kernel __force __kernel
65311 # define __safe __attribute__((safe))
65312 # define __force __attribute__((force))
65313 # define __nocast __attribute__((nocast))
65314 # define __iomem __attribute__((noderef, address_space(2)))
65315+# define __force_iomem __force __iomem
65316 # define __acquires(x) __attribute__((context(x,0,1)))
65317 # define __releases(x) __attribute__((context(x,1,0)))
65318 # define __acquire(x) __context__(x,1)
65319@@ -17,13 +20,34 @@
65320 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65321 extern void __chk_user_ptr(const volatile void __user *);
65322 extern void __chk_io_ptr(const volatile void __iomem *);
65323+#elif defined(CHECKER_PLUGIN)
65324+//# define __user
65325+//# define __force_user
65326+//# define __kernel
65327+//# define __force_kernel
65328+# define __safe
65329+# define __force
65330+# define __nocast
65331+# define __iomem
65332+# define __force_iomem
65333+# define __chk_user_ptr(x) (void)0
65334+# define __chk_io_ptr(x) (void)0
65335+# define __builtin_warning(x, y...) (1)
65336+# define __acquires(x)
65337+# define __releases(x)
65338+# define __acquire(x) (void)0
65339+# define __release(x) (void)0
65340+# define __cond_lock(x,c) (c)
65341 #else
65342 # define __user
65343+# define __force_user
65344 # define __kernel
65345+# define __force_kernel
65346 # define __safe
65347 # define __force
65348 # define __nocast
65349 # define __iomem
65350+# define __force_iomem
65351 # define __chk_user_ptr(x) (void)0
65352 # define __chk_io_ptr(x) (void)0
65353 # define __builtin_warning(x, y...) (1)
65354@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65355 # define __attribute_const__ /* unimplemented */
65356 #endif
65357
65358+#ifndef __no_const
65359+# define __no_const
65360+#endif
65361+
65362+#ifndef __do_const
65363+# define __do_const
65364+#endif
65365+
65366 /*
65367 * Tell gcc if a function is cold. The compiler will assume any path
65368 * directly leading to the call is unlikely.
65369@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65370 #define __cold
65371 #endif
65372
65373+#ifndef __alloc_size
65374+#define __alloc_size(...)
65375+#endif
65376+
65377+#ifndef __bos
65378+#define __bos(ptr, arg)
65379+#endif
65380+
65381+#ifndef __bos0
65382+#define __bos0(ptr)
65383+#endif
65384+
65385+#ifndef __bos1
65386+#define __bos1(ptr)
65387+#endif
65388+
65389 /* Simple shorthand for a section definition */
65390 #ifndef __section
65391 # define __section(S) __attribute__ ((__section__(#S)))
65392@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65393 * use is to mediate communication between process-level code and irq/NMI
65394 * handlers, all running on the same CPU.
65395 */
65396-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65397+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65398+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65399
65400 #endif /* __LINUX_COMPILER_H */
65401diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65402index fd92988..a3164bd 100644
65403--- a/include/linux/crypto.h
65404+++ b/include/linux/crypto.h
65405@@ -394,7 +394,7 @@ struct cipher_tfm {
65406 const u8 *key, unsigned int keylen);
65407 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65408 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65409-};
65410+} __no_const;
65411
65412 struct hash_tfm {
65413 int (*init)(struct hash_desc *desc);
65414@@ -415,13 +415,13 @@ struct compress_tfm {
65415 int (*cot_decompress)(struct crypto_tfm *tfm,
65416 const u8 *src, unsigned int slen,
65417 u8 *dst, unsigned int *dlen);
65418-};
65419+} __no_const;
65420
65421 struct rng_tfm {
65422 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
65423 unsigned int dlen);
65424 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
65425-};
65426+} __no_const;
65427
65428 #define crt_ablkcipher crt_u.ablkcipher
65429 #define crt_aead crt_u.aead
65430diff --git a/include/linux/dcache.h b/include/linux/dcache.h
65431index 30b93b2..cd7a8db 100644
65432--- a/include/linux/dcache.h
65433+++ b/include/linux/dcache.h
65434@@ -119,6 +119,8 @@ struct dentry {
65435 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
65436 };
65437
65438+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
65439+
65440 /*
65441 * dentry->d_lock spinlock nesting subclasses:
65442 *
65443diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
65444index 3e9bd6a..f4e1aa0 100644
65445--- a/include/linux/decompress/mm.h
65446+++ b/include/linux/decompress/mm.h
65447@@ -78,7 +78,7 @@ static void free(void *where)
65448 * warnings when not needed (indeed large_malloc / large_free are not
65449 * needed by inflate */
65450
65451-#define malloc(a) kmalloc(a, GFP_KERNEL)
65452+#define malloc(a) kmalloc((a), GFP_KERNEL)
65453 #define free(a) kfree(a)
65454
65455 #define large_malloc(a) vmalloc(a)
65456diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
65457index 91b7618..92a93d32 100644
65458--- a/include/linux/dma-mapping.h
65459+++ b/include/linux/dma-mapping.h
65460@@ -16,51 +16,51 @@ enum dma_data_direction {
65461 };
65462
65463 struct dma_map_ops {
65464- void* (*alloc_coherent)(struct device *dev, size_t size,
65465+ void* (* const alloc_coherent)(struct device *dev, size_t size,
65466 dma_addr_t *dma_handle, gfp_t gfp);
65467- void (*free_coherent)(struct device *dev, size_t size,
65468+ void (* const free_coherent)(struct device *dev, size_t size,
65469 void *vaddr, dma_addr_t dma_handle);
65470- dma_addr_t (*map_page)(struct device *dev, struct page *page,
65471+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
65472 unsigned long offset, size_t size,
65473 enum dma_data_direction dir,
65474 struct dma_attrs *attrs);
65475- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
65476+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
65477 size_t size, enum dma_data_direction dir,
65478 struct dma_attrs *attrs);
65479- int (*map_sg)(struct device *dev, struct scatterlist *sg,
65480+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
65481 int nents, enum dma_data_direction dir,
65482 struct dma_attrs *attrs);
65483- void (*unmap_sg)(struct device *dev,
65484+ void (* const unmap_sg)(struct device *dev,
65485 struct scatterlist *sg, int nents,
65486 enum dma_data_direction dir,
65487 struct dma_attrs *attrs);
65488- void (*sync_single_for_cpu)(struct device *dev,
65489+ void (* const sync_single_for_cpu)(struct device *dev,
65490 dma_addr_t dma_handle, size_t size,
65491 enum dma_data_direction dir);
65492- void (*sync_single_for_device)(struct device *dev,
65493+ void (* const sync_single_for_device)(struct device *dev,
65494 dma_addr_t dma_handle, size_t size,
65495 enum dma_data_direction dir);
65496- void (*sync_single_range_for_cpu)(struct device *dev,
65497+ void (* const sync_single_range_for_cpu)(struct device *dev,
65498 dma_addr_t dma_handle,
65499 unsigned long offset,
65500 size_t size,
65501 enum dma_data_direction dir);
65502- void (*sync_single_range_for_device)(struct device *dev,
65503+ void (* const sync_single_range_for_device)(struct device *dev,
65504 dma_addr_t dma_handle,
65505 unsigned long offset,
65506 size_t size,
65507 enum dma_data_direction dir);
65508- void (*sync_sg_for_cpu)(struct device *dev,
65509+ void (* const sync_sg_for_cpu)(struct device *dev,
65510 struct scatterlist *sg, int nents,
65511 enum dma_data_direction dir);
65512- void (*sync_sg_for_device)(struct device *dev,
65513+ void (* const sync_sg_for_device)(struct device *dev,
65514 struct scatterlist *sg, int nents,
65515 enum dma_data_direction dir);
65516- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
65517- int (*dma_supported)(struct device *dev, u64 mask);
65518+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
65519+ int (* const dma_supported)(struct device *dev, u64 mask);
65520 int (*set_dma_mask)(struct device *dev, u64 mask);
65521 int is_phys;
65522-};
65523+} __do_const;
65524
65525 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
65526
65527diff --git a/include/linux/dst.h b/include/linux/dst.h
65528index e26fed8..b976d9f 100644
65529--- a/include/linux/dst.h
65530+++ b/include/linux/dst.h
65531@@ -380,7 +380,7 @@ struct dst_node
65532 struct thread_pool *pool;
65533
65534 /* Transaction IDs live here */
65535- atomic_long_t gen;
65536+ atomic_long_unchecked_t gen;
65537
65538 /*
65539 * How frequently and how many times transaction
65540diff --git a/include/linux/elf.h b/include/linux/elf.h
65541index 90a4ed0..d652617 100644
65542--- a/include/linux/elf.h
65543+++ b/include/linux/elf.h
65544@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
65545 #define PT_GNU_EH_FRAME 0x6474e550
65546
65547 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
65548+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
65549+
65550+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
65551+
65552+/* Constants for the e_flags field */
65553+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65554+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
65555+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
65556+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
65557+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65558+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65559
65560 /* These constants define the different elf file types */
65561 #define ET_NONE 0
65562@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
65563 #define DT_DEBUG 21
65564 #define DT_TEXTREL 22
65565 #define DT_JMPREL 23
65566+#define DT_FLAGS 30
65567+ #define DF_TEXTREL 0x00000004
65568 #define DT_ENCODING 32
65569 #define OLD_DT_LOOS 0x60000000
65570 #define DT_LOOS 0x6000000d
65571@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
65572 #define PF_W 0x2
65573 #define PF_X 0x1
65574
65575+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
65576+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
65577+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
65578+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
65579+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
65580+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
65581+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
65582+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
65583+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
65584+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
65585+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
65586+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
65587+
65588 typedef struct elf32_phdr{
65589 Elf32_Word p_type;
65590 Elf32_Off p_offset;
65591@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
65592 #define EI_OSABI 7
65593 #define EI_PAD 8
65594
65595+#define EI_PAX 14
65596+
65597 #define ELFMAG0 0x7f /* EI_MAG */
65598 #define ELFMAG1 'E'
65599 #define ELFMAG2 'L'
65600@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
65601 #define elf_phdr elf32_phdr
65602 #define elf_note elf32_note
65603 #define elf_addr_t Elf32_Off
65604+#define elf_dyn Elf32_Dyn
65605
65606 #else
65607
65608@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
65609 #define elf_phdr elf64_phdr
65610 #define elf_note elf64_note
65611 #define elf_addr_t Elf64_Off
65612+#define elf_dyn Elf64_Dyn
65613
65614 #endif
65615
65616diff --git a/include/linux/fs.h b/include/linux/fs.h
65617index 1b9a47a..6fe2934 100644
65618--- a/include/linux/fs.h
65619+++ b/include/linux/fs.h
65620@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
65621 unsigned long, unsigned long);
65622
65623 struct address_space_operations {
65624- int (*writepage)(struct page *page, struct writeback_control *wbc);
65625- int (*readpage)(struct file *, struct page *);
65626- void (*sync_page)(struct page *);
65627+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
65628+ int (* const readpage)(struct file *, struct page *);
65629+ void (* const sync_page)(struct page *);
65630
65631 /* Write back some dirty pages from this mapping. */
65632- int (*writepages)(struct address_space *, struct writeback_control *);
65633+ int (* const writepages)(struct address_space *, struct writeback_control *);
65634
65635 /* Set a page dirty. Return true if this dirtied it */
65636- int (*set_page_dirty)(struct page *page);
65637+ int (* const set_page_dirty)(struct page *page);
65638
65639- int (*readpages)(struct file *filp, struct address_space *mapping,
65640+ int (* const readpages)(struct file *filp, struct address_space *mapping,
65641 struct list_head *pages, unsigned nr_pages);
65642
65643- int (*write_begin)(struct file *, struct address_space *mapping,
65644+ int (* const write_begin)(struct file *, struct address_space *mapping,
65645 loff_t pos, unsigned len, unsigned flags,
65646 struct page **pagep, void **fsdata);
65647- int (*write_end)(struct file *, struct address_space *mapping,
65648+ int (* const write_end)(struct file *, struct address_space *mapping,
65649 loff_t pos, unsigned len, unsigned copied,
65650 struct page *page, void *fsdata);
65651
65652 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
65653- sector_t (*bmap)(struct address_space *, sector_t);
65654- void (*invalidatepage) (struct page *, unsigned long);
65655- int (*releasepage) (struct page *, gfp_t);
65656- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
65657+ sector_t (* const bmap)(struct address_space *, sector_t);
65658+ void (* const invalidatepage) (struct page *, unsigned long);
65659+ int (* const releasepage) (struct page *, gfp_t);
65660+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
65661 loff_t offset, unsigned long nr_segs);
65662- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
65663+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
65664 void **, unsigned long *);
65665 /* migrate the contents of a page to the specified target */
65666- int (*migratepage) (struct address_space *,
65667+ int (* const migratepage) (struct address_space *,
65668 struct page *, struct page *);
65669- int (*launder_page) (struct page *);
65670- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
65671+ int (* const launder_page) (struct page *);
65672+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
65673 unsigned long);
65674- int (*error_remove_page)(struct address_space *, struct page *);
65675+ int (* const error_remove_page)(struct address_space *, struct page *);
65676 };
65677
65678 /*
65679@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
65680 typedef struct files_struct *fl_owner_t;
65681
65682 struct file_lock_operations {
65683- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65684- void (*fl_release_private)(struct file_lock *);
65685+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65686+ void (* const fl_release_private)(struct file_lock *);
65687 };
65688
65689 struct lock_manager_operations {
65690- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
65691- void (*fl_notify)(struct file_lock *); /* unblock callback */
65692- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
65693- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65694- void (*fl_release_private)(struct file_lock *);
65695- void (*fl_break)(struct file_lock *);
65696- int (*fl_mylease)(struct file_lock *, struct file_lock *);
65697- int (*fl_change)(struct file_lock **, int);
65698+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
65699+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
65700+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
65701+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65702+ void (* const fl_release_private)(struct file_lock *);
65703+ void (* const fl_break)(struct file_lock *);
65704+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
65705+ int (* const fl_change)(struct file_lock **, int);
65706 };
65707
65708 struct lock_manager {
65709@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
65710 unsigned int fi_flags; /* Flags as passed from user */
65711 unsigned int fi_extents_mapped; /* Number of mapped extents */
65712 unsigned int fi_extents_max; /* Size of fiemap_extent array */
65713- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
65714+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
65715 * array */
65716 };
65717 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
65718@@ -1512,7 +1512,8 @@ struct file_operations {
65719 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
65720 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
65721 int (*setlease)(struct file *, long, struct file_lock **);
65722-};
65723+} __do_const;
65724+typedef struct file_operations __no_const file_operations_no_const;
65725
65726 struct inode_operations {
65727 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
65728@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
65729 unsigned long, loff_t *);
65730
65731 struct super_operations {
65732- struct inode *(*alloc_inode)(struct super_block *sb);
65733- void (*destroy_inode)(struct inode *);
65734+ struct inode *(* const alloc_inode)(struct super_block *sb);
65735+ void (* const destroy_inode)(struct inode *);
65736
65737- void (*dirty_inode) (struct inode *);
65738- int (*write_inode) (struct inode *, int);
65739- void (*drop_inode) (struct inode *);
65740- void (*delete_inode) (struct inode *);
65741- void (*put_super) (struct super_block *);
65742- void (*write_super) (struct super_block *);
65743- int (*sync_fs)(struct super_block *sb, int wait);
65744- int (*freeze_fs) (struct super_block *);
65745- int (*unfreeze_fs) (struct super_block *);
65746- int (*statfs) (struct dentry *, struct kstatfs *);
65747- int (*remount_fs) (struct super_block *, int *, char *);
65748- void (*clear_inode) (struct inode *);
65749- void (*umount_begin) (struct super_block *);
65750+ void (* const dirty_inode) (struct inode *);
65751+ int (* const write_inode) (struct inode *, int);
65752+ void (* const drop_inode) (struct inode *);
65753+ void (* const delete_inode) (struct inode *);
65754+ void (* const put_super) (struct super_block *);
65755+ void (* const write_super) (struct super_block *);
65756+ int (* const sync_fs)(struct super_block *sb, int wait);
65757+ int (* const freeze_fs) (struct super_block *);
65758+ int (* const unfreeze_fs) (struct super_block *);
65759+ int (* const statfs) (struct dentry *, struct kstatfs *);
65760+ int (* const remount_fs) (struct super_block *, int *, char *);
65761+ void (* const clear_inode) (struct inode *);
65762+ void (* const umount_begin) (struct super_block *);
65763
65764- int (*show_options)(struct seq_file *, struct vfsmount *);
65765- int (*show_stats)(struct seq_file *, struct vfsmount *);
65766+ int (* const show_options)(struct seq_file *, struct vfsmount *);
65767+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
65768 #ifdef CONFIG_QUOTA
65769- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
65770- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65771+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
65772+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65773 #endif
65774- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65775+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65776 };
65777
65778 /*
65779diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
65780index 78a05bf..2a7d3e1 100644
65781--- a/include/linux/fs_struct.h
65782+++ b/include/linux/fs_struct.h
65783@@ -4,7 +4,7 @@
65784 #include <linux/path.h>
65785
65786 struct fs_struct {
65787- int users;
65788+ atomic_t users;
65789 rwlock_t lock;
65790 int umask;
65791 int in_exec;
65792diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
65793index 7be0c6f..2f63a2b 100644
65794--- a/include/linux/fscache-cache.h
65795+++ b/include/linux/fscache-cache.h
65796@@ -116,7 +116,7 @@ struct fscache_operation {
65797 #endif
65798 };
65799
65800-extern atomic_t fscache_op_debug_id;
65801+extern atomic_unchecked_t fscache_op_debug_id;
65802 extern const struct slow_work_ops fscache_op_slow_work_ops;
65803
65804 extern void fscache_enqueue_operation(struct fscache_operation *);
65805@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
65806 fscache_operation_release_t release)
65807 {
65808 atomic_set(&op->usage, 1);
65809- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
65810+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
65811 op->release = release;
65812 INIT_LIST_HEAD(&op->pend_link);
65813 fscache_set_op_state(op, "Init");
65814diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
65815index 4ec5e67..42f1eb9 100644
65816--- a/include/linux/ftrace_event.h
65817+++ b/include/linux/ftrace_event.h
65818@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
65819 int filter_type);
65820 extern int trace_define_common_fields(struct ftrace_event_call *call);
65821
65822-#define is_signed_type(type) (((type)(-1)) < 0)
65823+#define is_signed_type(type) (((type)(-1)) < (type)1)
65824
65825 int trace_set_clr_event(const char *system, const char *event, int set);
65826
65827diff --git a/include/linux/genhd.h b/include/linux/genhd.h
65828index 297df45..b6a74ff 100644
65829--- a/include/linux/genhd.h
65830+++ b/include/linux/genhd.h
65831@@ -161,7 +161,7 @@ struct gendisk {
65832
65833 struct timer_rand_state *random;
65834
65835- atomic_t sync_io; /* RAID */
65836+ atomic_unchecked_t sync_io; /* RAID */
65837 struct work_struct async_notify;
65838 #ifdef CONFIG_BLK_DEV_INTEGRITY
65839 struct blk_integrity *integrity;
65840diff --git a/include/linux/gracl.h b/include/linux/gracl.h
65841new file mode 100644
65842index 0000000..0dc3943
65843--- /dev/null
65844+++ b/include/linux/gracl.h
65845@@ -0,0 +1,317 @@
65846+#ifndef GR_ACL_H
65847+#define GR_ACL_H
65848+
65849+#include <linux/grdefs.h>
65850+#include <linux/resource.h>
65851+#include <linux/capability.h>
65852+#include <linux/dcache.h>
65853+#include <asm/resource.h>
65854+
65855+/* Major status information */
65856+
65857+#define GR_VERSION "grsecurity 2.2.2"
65858+#define GRSECURITY_VERSION 0x2202
65859+
65860+enum {
65861+ GR_SHUTDOWN = 0,
65862+ GR_ENABLE = 1,
65863+ GR_SPROLE = 2,
65864+ GR_RELOAD = 3,
65865+ GR_SEGVMOD = 4,
65866+ GR_STATUS = 5,
65867+ GR_UNSPROLE = 6,
65868+ GR_PASSSET = 7,
65869+ GR_SPROLEPAM = 8,
65870+};
65871+
65872+/* Password setup definitions
65873+ * kernel/grhash.c */
65874+enum {
65875+ GR_PW_LEN = 128,
65876+ GR_SALT_LEN = 16,
65877+ GR_SHA_LEN = 32,
65878+};
65879+
65880+enum {
65881+ GR_SPROLE_LEN = 64,
65882+};
65883+
65884+enum {
65885+ GR_NO_GLOB = 0,
65886+ GR_REG_GLOB,
65887+ GR_CREATE_GLOB
65888+};
65889+
65890+#define GR_NLIMITS 32
65891+
65892+/* Begin Data Structures */
65893+
65894+struct sprole_pw {
65895+ unsigned char *rolename;
65896+ unsigned char salt[GR_SALT_LEN];
65897+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
65898+};
65899+
65900+struct name_entry {
65901+ __u32 key;
65902+ ino_t inode;
65903+ dev_t device;
65904+ char *name;
65905+ __u16 len;
65906+ __u8 deleted;
65907+ struct name_entry *prev;
65908+ struct name_entry *next;
65909+};
65910+
65911+struct inodev_entry {
65912+ struct name_entry *nentry;
65913+ struct inodev_entry *prev;
65914+ struct inodev_entry *next;
65915+};
65916+
65917+struct acl_role_db {
65918+ struct acl_role_label **r_hash;
65919+ __u32 r_size;
65920+};
65921+
65922+struct inodev_db {
65923+ struct inodev_entry **i_hash;
65924+ __u32 i_size;
65925+};
65926+
65927+struct name_db {
65928+ struct name_entry **n_hash;
65929+ __u32 n_size;
65930+};
65931+
65932+struct crash_uid {
65933+ uid_t uid;
65934+ unsigned long expires;
65935+};
65936+
65937+struct gr_hash_struct {
65938+ void **table;
65939+ void **nametable;
65940+ void *first;
65941+ __u32 table_size;
65942+ __u32 used_size;
65943+ int type;
65944+};
65945+
65946+/* Userspace Grsecurity ACL data structures */
65947+
65948+struct acl_subject_label {
65949+ char *filename;
65950+ ino_t inode;
65951+ dev_t device;
65952+ __u32 mode;
65953+ kernel_cap_t cap_mask;
65954+ kernel_cap_t cap_lower;
65955+ kernel_cap_t cap_invert_audit;
65956+
65957+ struct rlimit res[GR_NLIMITS];
65958+ __u32 resmask;
65959+
65960+ __u8 user_trans_type;
65961+ __u8 group_trans_type;
65962+ uid_t *user_transitions;
65963+ gid_t *group_transitions;
65964+ __u16 user_trans_num;
65965+ __u16 group_trans_num;
65966+
65967+ __u32 sock_families[2];
65968+ __u32 ip_proto[8];
65969+ __u32 ip_type;
65970+ struct acl_ip_label **ips;
65971+ __u32 ip_num;
65972+ __u32 inaddr_any_override;
65973+
65974+ __u32 crashes;
65975+ unsigned long expires;
65976+
65977+ struct acl_subject_label *parent_subject;
65978+ struct gr_hash_struct *hash;
65979+ struct acl_subject_label *prev;
65980+ struct acl_subject_label *next;
65981+
65982+ struct acl_object_label **obj_hash;
65983+ __u32 obj_hash_size;
65984+ __u16 pax_flags;
65985+};
65986+
65987+struct role_allowed_ip {
65988+ __u32 addr;
65989+ __u32 netmask;
65990+
65991+ struct role_allowed_ip *prev;
65992+ struct role_allowed_ip *next;
65993+};
65994+
65995+struct role_transition {
65996+ char *rolename;
65997+
65998+ struct role_transition *prev;
65999+ struct role_transition *next;
66000+};
66001+
66002+struct acl_role_label {
66003+ char *rolename;
66004+ uid_t uidgid;
66005+ __u16 roletype;
66006+
66007+ __u16 auth_attempts;
66008+ unsigned long expires;
66009+
66010+ struct acl_subject_label *root_label;
66011+ struct gr_hash_struct *hash;
66012+
66013+ struct acl_role_label *prev;
66014+ struct acl_role_label *next;
66015+
66016+ struct role_transition *transitions;
66017+ struct role_allowed_ip *allowed_ips;
66018+ uid_t *domain_children;
66019+ __u16 domain_child_num;
66020+
66021+ struct acl_subject_label **subj_hash;
66022+ __u32 subj_hash_size;
66023+};
66024+
66025+struct user_acl_role_db {
66026+ struct acl_role_label **r_table;
66027+ __u32 num_pointers; /* Number of allocations to track */
66028+ __u32 num_roles; /* Number of roles */
66029+ __u32 num_domain_children; /* Number of domain children */
66030+ __u32 num_subjects; /* Number of subjects */
66031+ __u32 num_objects; /* Number of objects */
66032+};
66033+
66034+struct acl_object_label {
66035+ char *filename;
66036+ ino_t inode;
66037+ dev_t device;
66038+ __u32 mode;
66039+
66040+ struct acl_subject_label *nested;
66041+ struct acl_object_label *globbed;
66042+
66043+ /* next two structures not used */
66044+
66045+ struct acl_object_label *prev;
66046+ struct acl_object_label *next;
66047+};
66048+
66049+struct acl_ip_label {
66050+ char *iface;
66051+ __u32 addr;
66052+ __u32 netmask;
66053+ __u16 low, high;
66054+ __u8 mode;
66055+ __u32 type;
66056+ __u32 proto[8];
66057+
66058+ /* next two structures not used */
66059+
66060+ struct acl_ip_label *prev;
66061+ struct acl_ip_label *next;
66062+};
66063+
66064+struct gr_arg {
66065+ struct user_acl_role_db role_db;
66066+ unsigned char pw[GR_PW_LEN];
66067+ unsigned char salt[GR_SALT_LEN];
66068+ unsigned char sum[GR_SHA_LEN];
66069+ unsigned char sp_role[GR_SPROLE_LEN];
66070+ struct sprole_pw *sprole_pws;
66071+ dev_t segv_device;
66072+ ino_t segv_inode;
66073+ uid_t segv_uid;
66074+ __u16 num_sprole_pws;
66075+ __u16 mode;
66076+};
66077+
66078+struct gr_arg_wrapper {
66079+ struct gr_arg *arg;
66080+ __u32 version;
66081+ __u32 size;
66082+};
66083+
66084+struct subject_map {
66085+ struct acl_subject_label *user;
66086+ struct acl_subject_label *kernel;
66087+ struct subject_map *prev;
66088+ struct subject_map *next;
66089+};
66090+
66091+struct acl_subj_map_db {
66092+ struct subject_map **s_hash;
66093+ __u32 s_size;
66094+};
66095+
66096+/* End Data Structures Section */
66097+
66098+/* Hash functions generated by empirical testing by Brad Spengler
66099+ Makes good use of the low bits of the inode. Generally 0-1 times
66100+ in loop for successful match. 0-3 for unsuccessful match.
66101+ Shift/add algorithm with modulus of table size and an XOR*/
66102+
66103+static __inline__ unsigned int
66104+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66105+{
66106+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
66107+}
66108+
66109+ static __inline__ unsigned int
66110+shash(const struct acl_subject_label *userp, const unsigned int sz)
66111+{
66112+ return ((const unsigned long)userp % sz);
66113+}
66114+
66115+static __inline__ unsigned int
66116+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66117+{
66118+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66119+}
66120+
66121+static __inline__ unsigned int
66122+nhash(const char *name, const __u16 len, const unsigned int sz)
66123+{
66124+ return full_name_hash((const unsigned char *)name, len) % sz;
66125+}
66126+
66127+#define FOR_EACH_ROLE_START(role) \
66128+ role = role_list; \
66129+ while (role) {
66130+
66131+#define FOR_EACH_ROLE_END(role) \
66132+ role = role->prev; \
66133+ }
66134+
66135+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66136+ subj = NULL; \
66137+ iter = 0; \
66138+ while (iter < role->subj_hash_size) { \
66139+ if (subj == NULL) \
66140+ subj = role->subj_hash[iter]; \
66141+ if (subj == NULL) { \
66142+ iter++; \
66143+ continue; \
66144+ }
66145+
66146+#define FOR_EACH_SUBJECT_END(subj,iter) \
66147+ subj = subj->next; \
66148+ if (subj == NULL) \
66149+ iter++; \
66150+ }
66151+
66152+
66153+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66154+ subj = role->hash->first; \
66155+ while (subj != NULL) {
66156+
66157+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66158+ subj = subj->next; \
66159+ }
66160+
66161+#endif
66162+
66163diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66164new file mode 100644
66165index 0000000..323ecf2
66166--- /dev/null
66167+++ b/include/linux/gralloc.h
66168@@ -0,0 +1,9 @@
66169+#ifndef __GRALLOC_H
66170+#define __GRALLOC_H
66171+
66172+void acl_free_all(void);
66173+int acl_alloc_stack_init(unsigned long size);
66174+void *acl_alloc(unsigned long len);
66175+void *acl_alloc_num(unsigned long num, unsigned long len);
66176+
66177+#endif
66178diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66179new file mode 100644
66180index 0000000..70d6cd5
66181--- /dev/null
66182+++ b/include/linux/grdefs.h
66183@@ -0,0 +1,140 @@
66184+#ifndef GRDEFS_H
66185+#define GRDEFS_H
66186+
66187+/* Begin grsecurity status declarations */
66188+
66189+enum {
66190+ GR_READY = 0x01,
66191+ GR_STATUS_INIT = 0x00 // disabled state
66192+};
66193+
66194+/* Begin ACL declarations */
66195+
66196+/* Role flags */
66197+
66198+enum {
66199+ GR_ROLE_USER = 0x0001,
66200+ GR_ROLE_GROUP = 0x0002,
66201+ GR_ROLE_DEFAULT = 0x0004,
66202+ GR_ROLE_SPECIAL = 0x0008,
66203+ GR_ROLE_AUTH = 0x0010,
66204+ GR_ROLE_NOPW = 0x0020,
66205+ GR_ROLE_GOD = 0x0040,
66206+ GR_ROLE_LEARN = 0x0080,
66207+ GR_ROLE_TPE = 0x0100,
66208+ GR_ROLE_DOMAIN = 0x0200,
66209+ GR_ROLE_PAM = 0x0400,
66210+ GR_ROLE_PERSIST = 0x800
66211+};
66212+
66213+/* ACL Subject and Object mode flags */
66214+enum {
66215+ GR_DELETED = 0x80000000
66216+};
66217+
66218+/* ACL Object-only mode flags */
66219+enum {
66220+ GR_READ = 0x00000001,
66221+ GR_APPEND = 0x00000002,
66222+ GR_WRITE = 0x00000004,
66223+ GR_EXEC = 0x00000008,
66224+ GR_FIND = 0x00000010,
66225+ GR_INHERIT = 0x00000020,
66226+ GR_SETID = 0x00000040,
66227+ GR_CREATE = 0x00000080,
66228+ GR_DELETE = 0x00000100,
66229+ GR_LINK = 0x00000200,
66230+ GR_AUDIT_READ = 0x00000400,
66231+ GR_AUDIT_APPEND = 0x00000800,
66232+ GR_AUDIT_WRITE = 0x00001000,
66233+ GR_AUDIT_EXEC = 0x00002000,
66234+ GR_AUDIT_FIND = 0x00004000,
66235+ GR_AUDIT_INHERIT= 0x00008000,
66236+ GR_AUDIT_SETID = 0x00010000,
66237+ GR_AUDIT_CREATE = 0x00020000,
66238+ GR_AUDIT_DELETE = 0x00040000,
66239+ GR_AUDIT_LINK = 0x00080000,
66240+ GR_PTRACERD = 0x00100000,
66241+ GR_NOPTRACE = 0x00200000,
66242+ GR_SUPPRESS = 0x00400000,
66243+ GR_NOLEARN = 0x00800000,
66244+ GR_INIT_TRANSFER= 0x01000000
66245+};
66246+
66247+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66248+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66249+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66250+
66251+/* ACL subject-only mode flags */
66252+enum {
66253+ GR_KILL = 0x00000001,
66254+ GR_VIEW = 0x00000002,
66255+ GR_PROTECTED = 0x00000004,
66256+ GR_LEARN = 0x00000008,
66257+ GR_OVERRIDE = 0x00000010,
66258+ /* just a placeholder, this mode is only used in userspace */
66259+ GR_DUMMY = 0x00000020,
66260+ GR_PROTSHM = 0x00000040,
66261+ GR_KILLPROC = 0x00000080,
66262+ GR_KILLIPPROC = 0x00000100,
66263+ /* just a placeholder, this mode is only used in userspace */
66264+ GR_NOTROJAN = 0x00000200,
66265+ GR_PROTPROCFD = 0x00000400,
66266+ GR_PROCACCT = 0x00000800,
66267+ GR_RELAXPTRACE = 0x00001000,
66268+ GR_NESTED = 0x00002000,
66269+ GR_INHERITLEARN = 0x00004000,
66270+ GR_PROCFIND = 0x00008000,
66271+ GR_POVERRIDE = 0x00010000,
66272+ GR_KERNELAUTH = 0x00020000,
66273+ GR_ATSECURE = 0x00040000,
66274+ GR_SHMEXEC = 0x00080000
66275+};
66276+
66277+enum {
66278+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66279+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66280+ GR_PAX_ENABLE_MPROTECT = 0x0004,
66281+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
66282+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66283+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66284+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66285+ GR_PAX_DISABLE_MPROTECT = 0x0400,
66286+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
66287+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66288+};
66289+
66290+enum {
66291+ GR_ID_USER = 0x01,
66292+ GR_ID_GROUP = 0x02,
66293+};
66294+
66295+enum {
66296+ GR_ID_ALLOW = 0x01,
66297+ GR_ID_DENY = 0x02,
66298+};
66299+
66300+#define GR_CRASH_RES 31
66301+#define GR_UIDTABLE_MAX 500
66302+
66303+/* begin resource learning section */
66304+enum {
66305+ GR_RLIM_CPU_BUMP = 60,
66306+ GR_RLIM_FSIZE_BUMP = 50000,
66307+ GR_RLIM_DATA_BUMP = 10000,
66308+ GR_RLIM_STACK_BUMP = 1000,
66309+ GR_RLIM_CORE_BUMP = 10000,
66310+ GR_RLIM_RSS_BUMP = 500000,
66311+ GR_RLIM_NPROC_BUMP = 1,
66312+ GR_RLIM_NOFILE_BUMP = 5,
66313+ GR_RLIM_MEMLOCK_BUMP = 50000,
66314+ GR_RLIM_AS_BUMP = 500000,
66315+ GR_RLIM_LOCKS_BUMP = 2,
66316+ GR_RLIM_SIGPENDING_BUMP = 5,
66317+ GR_RLIM_MSGQUEUE_BUMP = 10000,
66318+ GR_RLIM_NICE_BUMP = 1,
66319+ GR_RLIM_RTPRIO_BUMP = 1,
66320+ GR_RLIM_RTTIME_BUMP = 1000000
66321+};
66322+
66323+#endif
66324diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66325new file mode 100644
66326index 0000000..e5817d7
66327--- /dev/null
66328+++ b/include/linux/grinternal.h
66329@@ -0,0 +1,218 @@
66330+#ifndef __GRINTERNAL_H
66331+#define __GRINTERNAL_H
66332+
66333+#ifdef CONFIG_GRKERNSEC
66334+
66335+#include <linux/fs.h>
66336+#include <linux/mnt_namespace.h>
66337+#include <linux/nsproxy.h>
66338+#include <linux/gracl.h>
66339+#include <linux/grdefs.h>
66340+#include <linux/grmsg.h>
66341+
66342+void gr_add_learn_entry(const char *fmt, ...)
66343+ __attribute__ ((format (printf, 1, 2)));
66344+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66345+ const struct vfsmount *mnt);
66346+__u32 gr_check_create(const struct dentry *new_dentry,
66347+ const struct dentry *parent,
66348+ const struct vfsmount *mnt, const __u32 mode);
66349+int gr_check_protected_task(const struct task_struct *task);
66350+__u32 to_gr_audit(const __u32 reqmode);
66351+int gr_set_acls(const int type);
66352+int gr_apply_subject_to_task(struct task_struct *task);
66353+int gr_acl_is_enabled(void);
66354+char gr_roletype_to_char(void);
66355+
66356+void gr_handle_alertkill(struct task_struct *task);
66357+char *gr_to_filename(const struct dentry *dentry,
66358+ const struct vfsmount *mnt);
66359+char *gr_to_filename1(const struct dentry *dentry,
66360+ const struct vfsmount *mnt);
66361+char *gr_to_filename2(const struct dentry *dentry,
66362+ const struct vfsmount *mnt);
66363+char *gr_to_filename3(const struct dentry *dentry,
66364+ const struct vfsmount *mnt);
66365+
66366+extern int grsec_enable_harden_ptrace;
66367+extern int grsec_enable_link;
66368+extern int grsec_enable_fifo;
66369+extern int grsec_enable_shm;
66370+extern int grsec_enable_execlog;
66371+extern int grsec_enable_signal;
66372+extern int grsec_enable_audit_ptrace;
66373+extern int grsec_enable_forkfail;
66374+extern int grsec_enable_time;
66375+extern int grsec_enable_rofs;
66376+extern int grsec_enable_chroot_shmat;
66377+extern int grsec_enable_chroot_mount;
66378+extern int grsec_enable_chroot_double;
66379+extern int grsec_enable_chroot_pivot;
66380+extern int grsec_enable_chroot_chdir;
66381+extern int grsec_enable_chroot_chmod;
66382+extern int grsec_enable_chroot_mknod;
66383+extern int grsec_enable_chroot_fchdir;
66384+extern int grsec_enable_chroot_nice;
66385+extern int grsec_enable_chroot_execlog;
66386+extern int grsec_enable_chroot_caps;
66387+extern int grsec_enable_chroot_sysctl;
66388+extern int grsec_enable_chroot_unix;
66389+extern int grsec_enable_tpe;
66390+extern int grsec_tpe_gid;
66391+extern int grsec_enable_tpe_all;
66392+extern int grsec_enable_tpe_invert;
66393+extern int grsec_enable_socket_all;
66394+extern int grsec_socket_all_gid;
66395+extern int grsec_enable_socket_client;
66396+extern int grsec_socket_client_gid;
66397+extern int grsec_enable_socket_server;
66398+extern int grsec_socket_server_gid;
66399+extern int grsec_audit_gid;
66400+extern int grsec_enable_group;
66401+extern int grsec_enable_audit_textrel;
66402+extern int grsec_enable_log_rwxmaps;
66403+extern int grsec_enable_mount;
66404+extern int grsec_enable_chdir;
66405+extern int grsec_resource_logging;
66406+extern int grsec_enable_blackhole;
66407+extern int grsec_lastack_retries;
66408+extern int grsec_enable_brute;
66409+extern int grsec_lock;
66410+
66411+extern spinlock_t grsec_alert_lock;
66412+extern unsigned long grsec_alert_wtime;
66413+extern unsigned long grsec_alert_fyet;
66414+
66415+extern spinlock_t grsec_audit_lock;
66416+
66417+extern rwlock_t grsec_exec_file_lock;
66418+
66419+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
66420+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
66421+ (tsk)->exec_file->f_vfsmnt) : "/")
66422+
66423+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
66424+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
66425+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66426+
66427+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
66428+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
66429+ (tsk)->exec_file->f_vfsmnt) : "/")
66430+
66431+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
66432+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
66433+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66434+
66435+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
66436+
66437+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
66438+
66439+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
66440+ (task)->pid, (cred)->uid, \
66441+ (cred)->euid, (cred)->gid, (cred)->egid, \
66442+ gr_parent_task_fullpath(task), \
66443+ (task)->real_parent->comm, (task)->real_parent->pid, \
66444+ (pcred)->uid, (pcred)->euid, \
66445+ (pcred)->gid, (pcred)->egid
66446+
66447+#define GR_CHROOT_CAPS {{ \
66448+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
66449+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
66450+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
66451+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
66452+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
66453+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
66454+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
66455+
66456+#define security_learn(normal_msg,args...) \
66457+({ \
66458+ read_lock(&grsec_exec_file_lock); \
66459+ gr_add_learn_entry(normal_msg "\n", ## args); \
66460+ read_unlock(&grsec_exec_file_lock); \
66461+})
66462+
66463+enum {
66464+ GR_DO_AUDIT,
66465+ GR_DONT_AUDIT,
66466+ GR_DONT_AUDIT_GOOD
66467+};
66468+
66469+enum {
66470+ GR_TTYSNIFF,
66471+ GR_RBAC,
66472+ GR_RBAC_STR,
66473+ GR_STR_RBAC,
66474+ GR_RBAC_MODE2,
66475+ GR_RBAC_MODE3,
66476+ GR_FILENAME,
66477+ GR_SYSCTL_HIDDEN,
66478+ GR_NOARGS,
66479+ GR_ONE_INT,
66480+ GR_ONE_INT_TWO_STR,
66481+ GR_ONE_STR,
66482+ GR_STR_INT,
66483+ GR_TWO_STR_INT,
66484+ GR_TWO_INT,
66485+ GR_TWO_U64,
66486+ GR_THREE_INT,
66487+ GR_FIVE_INT_TWO_STR,
66488+ GR_TWO_STR,
66489+ GR_THREE_STR,
66490+ GR_FOUR_STR,
66491+ GR_STR_FILENAME,
66492+ GR_FILENAME_STR,
66493+ GR_FILENAME_TWO_INT,
66494+ GR_FILENAME_TWO_INT_STR,
66495+ GR_TEXTREL,
66496+ GR_PTRACE,
66497+ GR_RESOURCE,
66498+ GR_CAP,
66499+ GR_SIG,
66500+ GR_SIG2,
66501+ GR_CRASH1,
66502+ GR_CRASH2,
66503+ GR_PSACCT,
66504+ GR_RWXMAP
66505+};
66506+
66507+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
66508+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
66509+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
66510+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
66511+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
66512+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
66513+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
66514+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
66515+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
66516+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
66517+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
66518+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
66519+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
66520+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
66521+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
66522+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
66523+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
66524+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
66525+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
66526+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
66527+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
66528+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
66529+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
66530+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
66531+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
66532+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
66533+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
66534+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
66535+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
66536+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
66537+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
66538+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
66539+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
66540+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
66541+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
66542+
66543+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
66544+
66545+#endif
66546+
66547+#endif
66548diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
66549new file mode 100644
66550index 0000000..9d5fd4a
66551--- /dev/null
66552+++ b/include/linux/grmsg.h
66553@@ -0,0 +1,108 @@
66554+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
66555+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
66556+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
66557+#define GR_STOPMOD_MSG "denied modification of module state by "
66558+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
66559+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
66560+#define GR_IOPERM_MSG "denied use of ioperm() by "
66561+#define GR_IOPL_MSG "denied use of iopl() by "
66562+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
66563+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
66564+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
66565+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
66566+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
66567+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
66568+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
66569+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
66570+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
66571+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
66572+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
66573+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
66574+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
66575+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
66576+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
66577+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
66578+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
66579+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
66580+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
66581+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
66582+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
66583+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
66584+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
66585+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
66586+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
66587+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
66588+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
66589+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
66590+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
66591+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
66592+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
66593+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
66594+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
66595+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
66596+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
66597+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
66598+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
66599+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
66600+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
66601+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
66602+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
66603+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
66604+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
66605+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
66606+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
66607+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
66608+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
66609+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
66610+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
66611+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
66612+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
66613+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
66614+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
66615+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
66616+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
66617+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
66618+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
66619+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
66620+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
66621+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
66622+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
66623+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
66624+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
66625+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
66626+#define GR_FAILFORK_MSG "failed fork with errno %s by "
66627+#define GR_NICE_CHROOT_MSG "denied priority change by "
66628+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
66629+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
66630+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
66631+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
66632+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
66633+#define GR_TIME_MSG "time set by "
66634+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
66635+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
66636+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
66637+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
66638+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
66639+#define GR_BIND_MSG "denied bind() by "
66640+#define GR_CONNECT_MSG "denied connect() by "
66641+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
66642+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
66643+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
66644+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
66645+#define GR_CAP_ACL_MSG "use of %s denied for "
66646+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
66647+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
66648+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
66649+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
66650+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
66651+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
66652+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
66653+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
66654+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
66655+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
66656+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
66657+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
66658+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
66659+#define GR_VM86_MSG "denied use of vm86 by "
66660+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
66661+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
66662diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
66663new file mode 100644
66664index 0000000..24676f4
66665--- /dev/null
66666+++ b/include/linux/grsecurity.h
66667@@ -0,0 +1,218 @@
66668+#ifndef GR_SECURITY_H
66669+#define GR_SECURITY_H
66670+#include <linux/fs.h>
66671+#include <linux/fs_struct.h>
66672+#include <linux/binfmts.h>
66673+#include <linux/gracl.h>
66674+#include <linux/compat.h>
66675+
66676+/* notify of brain-dead configs */
66677+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66678+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
66679+#endif
66680+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
66681+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
66682+#endif
66683+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66684+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66685+#endif
66686+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66687+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66688+#endif
66689+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
66690+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
66691+#endif
66692+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
66693+#error "CONFIG_PAX enabled, but no PaX options are enabled."
66694+#endif
66695+
66696+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
66697+void gr_handle_brute_check(void);
66698+void gr_handle_kernel_exploit(void);
66699+int gr_process_user_ban(void);
66700+
66701+char gr_roletype_to_char(void);
66702+
66703+int gr_acl_enable_at_secure(void);
66704+
66705+int gr_check_user_change(int real, int effective, int fs);
66706+int gr_check_group_change(int real, int effective, int fs);
66707+
66708+void gr_del_task_from_ip_table(struct task_struct *p);
66709+
66710+int gr_pid_is_chrooted(struct task_struct *p);
66711+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
66712+int gr_handle_chroot_nice(void);
66713+int gr_handle_chroot_sysctl(const int op);
66714+int gr_handle_chroot_setpriority(struct task_struct *p,
66715+ const int niceval);
66716+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
66717+int gr_handle_chroot_chroot(const struct dentry *dentry,
66718+ const struct vfsmount *mnt);
66719+void gr_handle_chroot_chdir(struct path *path);
66720+int gr_handle_chroot_chmod(const struct dentry *dentry,
66721+ const struct vfsmount *mnt, const int mode);
66722+int gr_handle_chroot_mknod(const struct dentry *dentry,
66723+ const struct vfsmount *mnt, const int mode);
66724+int gr_handle_chroot_mount(const struct dentry *dentry,
66725+ const struct vfsmount *mnt,
66726+ const char *dev_name);
66727+int gr_handle_chroot_pivot(void);
66728+int gr_handle_chroot_unix(const pid_t pid);
66729+
66730+int gr_handle_rawio(const struct inode *inode);
66731+
66732+void gr_handle_ioperm(void);
66733+void gr_handle_iopl(void);
66734+
66735+int gr_tpe_allow(const struct file *file);
66736+
66737+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
66738+void gr_clear_chroot_entries(struct task_struct *task);
66739+
66740+void gr_log_forkfail(const int retval);
66741+void gr_log_timechange(void);
66742+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
66743+void gr_log_chdir(const struct dentry *dentry,
66744+ const struct vfsmount *mnt);
66745+void gr_log_chroot_exec(const struct dentry *dentry,
66746+ const struct vfsmount *mnt);
66747+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
66748+#ifdef CONFIG_COMPAT
66749+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
66750+#endif
66751+void gr_log_remount(const char *devname, const int retval);
66752+void gr_log_unmount(const char *devname, const int retval);
66753+void gr_log_mount(const char *from, const char *to, const int retval);
66754+void gr_log_textrel(struct vm_area_struct *vma);
66755+void gr_log_rwxmmap(struct file *file);
66756+void gr_log_rwxmprotect(struct file *file);
66757+
66758+int gr_handle_follow_link(const struct inode *parent,
66759+ const struct inode *inode,
66760+ const struct dentry *dentry,
66761+ const struct vfsmount *mnt);
66762+int gr_handle_fifo(const struct dentry *dentry,
66763+ const struct vfsmount *mnt,
66764+ const struct dentry *dir, const int flag,
66765+ const int acc_mode);
66766+int gr_handle_hardlink(const struct dentry *dentry,
66767+ const struct vfsmount *mnt,
66768+ struct inode *inode,
66769+ const int mode, const char *to);
66770+
66771+int gr_is_capable(const int cap);
66772+int gr_is_capable_nolog(const int cap);
66773+void gr_learn_resource(const struct task_struct *task, const int limit,
66774+ const unsigned long wanted, const int gt);
66775+void gr_copy_label(struct task_struct *tsk);
66776+void gr_handle_crash(struct task_struct *task, const int sig);
66777+int gr_handle_signal(const struct task_struct *p, const int sig);
66778+int gr_check_crash_uid(const uid_t uid);
66779+int gr_check_protected_task(const struct task_struct *task);
66780+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
66781+int gr_acl_handle_mmap(const struct file *file,
66782+ const unsigned long prot);
66783+int gr_acl_handle_mprotect(const struct file *file,
66784+ const unsigned long prot);
66785+int gr_check_hidden_task(const struct task_struct *tsk);
66786+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
66787+ const struct vfsmount *mnt);
66788+__u32 gr_acl_handle_utime(const struct dentry *dentry,
66789+ const struct vfsmount *mnt);
66790+__u32 gr_acl_handle_access(const struct dentry *dentry,
66791+ const struct vfsmount *mnt, const int fmode);
66792+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
66793+ const struct vfsmount *mnt, mode_t mode);
66794+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
66795+ const struct vfsmount *mnt, mode_t mode);
66796+__u32 gr_acl_handle_chown(const struct dentry *dentry,
66797+ const struct vfsmount *mnt);
66798+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
66799+ const struct vfsmount *mnt);
66800+int gr_handle_ptrace(struct task_struct *task, const long request);
66801+int gr_handle_proc_ptrace(struct task_struct *task);
66802+__u32 gr_acl_handle_execve(const struct dentry *dentry,
66803+ const struct vfsmount *mnt);
66804+int gr_check_crash_exec(const struct file *filp);
66805+int gr_acl_is_enabled(void);
66806+void gr_set_kernel_label(struct task_struct *task);
66807+void gr_set_role_label(struct task_struct *task, const uid_t uid,
66808+ const gid_t gid);
66809+int gr_set_proc_label(const struct dentry *dentry,
66810+ const struct vfsmount *mnt,
66811+ const int unsafe_share);
66812+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
66813+ const struct vfsmount *mnt);
66814+__u32 gr_acl_handle_open(const struct dentry *dentry,
66815+ const struct vfsmount *mnt, int acc_mode);
66816+__u32 gr_acl_handle_creat(const struct dentry *dentry,
66817+ const struct dentry *p_dentry,
66818+ const struct vfsmount *p_mnt,
66819+ int open_flags, int acc_mode, const int imode);
66820+void gr_handle_create(const struct dentry *dentry,
66821+ const struct vfsmount *mnt);
66822+void gr_handle_proc_create(const struct dentry *dentry,
66823+ const struct inode *inode);
66824+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
66825+ const struct dentry *parent_dentry,
66826+ const struct vfsmount *parent_mnt,
66827+ const int mode);
66828+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
66829+ const struct dentry *parent_dentry,
66830+ const struct vfsmount *parent_mnt);
66831+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
66832+ const struct vfsmount *mnt);
66833+void gr_handle_delete(const ino_t ino, const dev_t dev);
66834+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
66835+ const struct vfsmount *mnt);
66836+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
66837+ const struct dentry *parent_dentry,
66838+ const struct vfsmount *parent_mnt,
66839+ const char *from);
66840+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
66841+ const struct dentry *parent_dentry,
66842+ const struct vfsmount *parent_mnt,
66843+ const struct dentry *old_dentry,
66844+ const struct vfsmount *old_mnt, const char *to);
66845+int gr_acl_handle_rename(struct dentry *new_dentry,
66846+ struct dentry *parent_dentry,
66847+ const struct vfsmount *parent_mnt,
66848+ struct dentry *old_dentry,
66849+ struct inode *old_parent_inode,
66850+ struct vfsmount *old_mnt, const char *newname);
66851+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
66852+ struct dentry *old_dentry,
66853+ struct dentry *new_dentry,
66854+ struct vfsmount *mnt, const __u8 replace);
66855+__u32 gr_check_link(const struct dentry *new_dentry,
66856+ const struct dentry *parent_dentry,
66857+ const struct vfsmount *parent_mnt,
66858+ const struct dentry *old_dentry,
66859+ const struct vfsmount *old_mnt);
66860+int gr_acl_handle_filldir(const struct file *file, const char *name,
66861+ const unsigned int namelen, const ino_t ino);
66862+
66863+__u32 gr_acl_handle_unix(const struct dentry *dentry,
66864+ const struct vfsmount *mnt);
66865+void gr_acl_handle_exit(void);
66866+void gr_acl_handle_psacct(struct task_struct *task, const long code);
66867+int gr_acl_handle_procpidmem(const struct task_struct *task);
66868+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
66869+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
66870+void gr_audit_ptrace(struct task_struct *task);
66871+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
66872+
66873+#ifdef CONFIG_GRKERNSEC
66874+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
66875+void gr_handle_vm86(void);
66876+void gr_handle_mem_readwrite(u64 from, u64 to);
66877+
66878+extern int grsec_enable_dmesg;
66879+extern int grsec_disable_privio;
66880+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66881+extern int grsec_enable_chroot_findtask;
66882+#endif
66883+#endif
66884+
66885+#endif
66886diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
66887index 6a87154..a3ce57b 100644
66888--- a/include/linux/hdpu_features.h
66889+++ b/include/linux/hdpu_features.h
66890@@ -3,7 +3,7 @@
66891 struct cpustate_t {
66892 spinlock_t lock;
66893 int excl;
66894- int open_count;
66895+ atomic_t open_count;
66896 unsigned char cached_val;
66897 int inited;
66898 unsigned long *set_addr;
66899diff --git a/include/linux/highmem.h b/include/linux/highmem.h
66900index 211ff44..00ab6d7 100644
66901--- a/include/linux/highmem.h
66902+++ b/include/linux/highmem.h
66903@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
66904 kunmap_atomic(kaddr, KM_USER0);
66905 }
66906
66907+static inline void sanitize_highpage(struct page *page)
66908+{
66909+ void *kaddr;
66910+ unsigned long flags;
66911+
66912+ local_irq_save(flags);
66913+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
66914+ clear_page(kaddr);
66915+ kunmap_atomic(kaddr, KM_CLEARPAGE);
66916+ local_irq_restore(flags);
66917+}
66918+
66919 static inline void zero_user_segments(struct page *page,
66920 unsigned start1, unsigned end1,
66921 unsigned start2, unsigned end2)
66922diff --git a/include/linux/i2c.h b/include/linux/i2c.h
66923index 7b40cda..24eb44e 100644
66924--- a/include/linux/i2c.h
66925+++ b/include/linux/i2c.h
66926@@ -325,6 +325,7 @@ struct i2c_algorithm {
66927 /* To determine what the adapter supports */
66928 u32 (*functionality) (struct i2c_adapter *);
66929 };
66930+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
66931
66932 /*
66933 * i2c_adapter is the structure used to identify a physical i2c bus along
66934diff --git a/include/linux/i2o.h b/include/linux/i2o.h
66935index 4c4e57d..f3c5303 100644
66936--- a/include/linux/i2o.h
66937+++ b/include/linux/i2o.h
66938@@ -564,7 +564,7 @@ struct i2o_controller {
66939 struct i2o_device *exec; /* Executive */
66940 #if BITS_PER_LONG == 64
66941 spinlock_t context_list_lock; /* lock for context_list */
66942- atomic_t context_list_counter; /* needed for unique contexts */
66943+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
66944 struct list_head context_list; /* list of context id's
66945 and pointers */
66946 #endif
66947diff --git a/include/linux/init_task.h b/include/linux/init_task.h
66948index 21a6f5d..dc42eab 100644
66949--- a/include/linux/init_task.h
66950+++ b/include/linux/init_task.h
66951@@ -83,6 +83,12 @@ extern struct group_info init_groups;
66952 #define INIT_IDS
66953 #endif
66954
66955+#ifdef CONFIG_X86
66956+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
66957+#else
66958+#define INIT_TASK_THREAD_INFO
66959+#endif
66960+
66961 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
66962 /*
66963 * Because of the reduced scope of CAP_SETPCAP when filesystem
66964@@ -156,6 +162,7 @@ extern struct cred init_cred;
66965 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
66966 .comm = "swapper", \
66967 .thread = INIT_THREAD, \
66968+ INIT_TASK_THREAD_INFO \
66969 .fs = &init_fs, \
66970 .files = &init_files, \
66971 .signal = &init_signals, \
66972diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
66973index 4f0a72a..a849599 100644
66974--- a/include/linux/intel-iommu.h
66975+++ b/include/linux/intel-iommu.h
66976@@ -296,7 +296,7 @@ struct iommu_flush {
66977 u8 fm, u64 type);
66978 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
66979 unsigned int size_order, u64 type);
66980-};
66981+} __no_const;
66982
66983 enum {
66984 SR_DMAR_FECTL_REG,
66985diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
66986index c739150..be577b5 100644
66987--- a/include/linux/interrupt.h
66988+++ b/include/linux/interrupt.h
66989@@ -369,7 +369,7 @@ enum
66990 /* map softirq index to softirq name. update 'softirq_to_name' in
66991 * kernel/softirq.c when adding a new softirq.
66992 */
66993-extern char *softirq_to_name[NR_SOFTIRQS];
66994+extern const char * const softirq_to_name[NR_SOFTIRQS];
66995
66996 /* softirq mask and active fields moved to irq_cpustat_t in
66997 * asm/hardirq.h to get better cache usage. KAO
66998@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
66999
67000 struct softirq_action
67001 {
67002- void (*action)(struct softirq_action *);
67003+ void (*action)(void);
67004 };
67005
67006 asmlinkage void do_softirq(void);
67007 asmlinkage void __do_softirq(void);
67008-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67009+extern void open_softirq(int nr, void (*action)(void));
67010 extern void softirq_init(void);
67011 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67012 extern void raise_softirq_irqoff(unsigned int nr);
67013diff --git a/include/linux/irq.h b/include/linux/irq.h
67014index 9e5f45a..025865b 100644
67015--- a/include/linux/irq.h
67016+++ b/include/linux/irq.h
67017@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67018 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67019 bool boot)
67020 {
67021+#ifdef CONFIG_CPUMASK_OFFSTACK
67022 gfp_t gfp = GFP_ATOMIC;
67023
67024 if (boot)
67025 gfp = GFP_NOWAIT;
67026
67027-#ifdef CONFIG_CPUMASK_OFFSTACK
67028 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67029 return false;
67030
67031diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67032index 7922742..27306a2 100644
67033--- a/include/linux/kallsyms.h
67034+++ b/include/linux/kallsyms.h
67035@@ -15,7 +15,8 @@
67036
67037 struct module;
67038
67039-#ifdef CONFIG_KALLSYMS
67040+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67041+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67042 /* Lookup the address for a symbol. Returns 0 if not found. */
67043 unsigned long kallsyms_lookup_name(const char *name);
67044
67045@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67046 /* Stupid that this does nothing, but I didn't create this mess. */
67047 #define __print_symbol(fmt, addr)
67048 #endif /*CONFIG_KALLSYMS*/
67049+#else /* when included by kallsyms.c, vsnprintf.c, or
67050+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67051+extern void __print_symbol(const char *fmt, unsigned long address);
67052+extern int sprint_symbol(char *buffer, unsigned long address);
67053+const char *kallsyms_lookup(unsigned long addr,
67054+ unsigned long *symbolsize,
67055+ unsigned long *offset,
67056+ char **modname, char *namebuf);
67057+#endif
67058
67059 /* This macro allows us to keep printk typechecking */
67060 static void __check_printsym_format(const char *fmt, ...)
67061diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67062index 6adcc29..13369e8 100644
67063--- a/include/linux/kgdb.h
67064+++ b/include/linux/kgdb.h
67065@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67066
67067 extern int kgdb_connected;
67068
67069-extern atomic_t kgdb_setting_breakpoint;
67070-extern atomic_t kgdb_cpu_doing_single_step;
67071+extern atomic_unchecked_t kgdb_setting_breakpoint;
67072+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67073
67074 extern struct task_struct *kgdb_usethread;
67075 extern struct task_struct *kgdb_contthread;
67076@@ -235,7 +235,7 @@ struct kgdb_arch {
67077 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67078 void (*remove_all_hw_break)(void);
67079 void (*correct_hw_break)(void);
67080-};
67081+} __do_const;
67082
67083 /**
67084 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67085@@ -257,14 +257,14 @@ struct kgdb_io {
67086 int (*init) (void);
67087 void (*pre_exception) (void);
67088 void (*post_exception) (void);
67089-};
67090+} __do_const;
67091
67092-extern struct kgdb_arch arch_kgdb_ops;
67093+extern const struct kgdb_arch arch_kgdb_ops;
67094
67095 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67096
67097-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67098-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67099+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67100+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67101
67102 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67103 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67104diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67105index 384ca8b..83dd97d 100644
67106--- a/include/linux/kmod.h
67107+++ b/include/linux/kmod.h
67108@@ -31,6 +31,8 @@
67109 * usually useless though. */
67110 extern int __request_module(bool wait, const char *name, ...) \
67111 __attribute__((format(printf, 2, 3)));
67112+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67113+ __attribute__((format(printf, 3, 4)));
67114 #define request_module(mod...) __request_module(true, mod)
67115 #define request_module_nowait(mod...) __request_module(false, mod)
67116 #define try_then_request_module(x, mod...) \
67117diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67118index 58ae8e0..3950d3c 100644
67119--- a/include/linux/kobject.h
67120+++ b/include/linux/kobject.h
67121@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67122
67123 struct kobj_type {
67124 void (*release)(struct kobject *kobj);
67125- struct sysfs_ops *sysfs_ops;
67126+ const struct sysfs_ops *sysfs_ops;
67127 struct attribute **default_attrs;
67128 };
67129
67130@@ -118,9 +118,9 @@ struct kobj_uevent_env {
67131 };
67132
67133 struct kset_uevent_ops {
67134- int (*filter)(struct kset *kset, struct kobject *kobj);
67135- const char *(*name)(struct kset *kset, struct kobject *kobj);
67136- int (*uevent)(struct kset *kset, struct kobject *kobj,
67137+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67138+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67139+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67140 struct kobj_uevent_env *env);
67141 };
67142
67143@@ -132,7 +132,7 @@ struct kobj_attribute {
67144 const char *buf, size_t count);
67145 };
67146
67147-extern struct sysfs_ops kobj_sysfs_ops;
67148+extern const struct sysfs_ops kobj_sysfs_ops;
67149
67150 /**
67151 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67152@@ -155,14 +155,14 @@ struct kset {
67153 struct list_head list;
67154 spinlock_t list_lock;
67155 struct kobject kobj;
67156- struct kset_uevent_ops *uevent_ops;
67157+ const struct kset_uevent_ops *uevent_ops;
67158 };
67159
67160 extern void kset_init(struct kset *kset);
67161 extern int __must_check kset_register(struct kset *kset);
67162 extern void kset_unregister(struct kset *kset);
67163 extern struct kset * __must_check kset_create_and_add(const char *name,
67164- struct kset_uevent_ops *u,
67165+ const struct kset_uevent_ops *u,
67166 struct kobject *parent_kobj);
67167
67168 static inline struct kset *to_kset(struct kobject *kobj)
67169diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67170index c728a50..752d821 100644
67171--- a/include/linux/kvm_host.h
67172+++ b/include/linux/kvm_host.h
67173@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67174 void vcpu_load(struct kvm_vcpu *vcpu);
67175 void vcpu_put(struct kvm_vcpu *vcpu);
67176
67177-int kvm_init(void *opaque, unsigned int vcpu_size,
67178+int kvm_init(const void *opaque, unsigned int vcpu_size,
67179 struct module *module);
67180 void kvm_exit(void);
67181
67182@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67183 struct kvm_guest_debug *dbg);
67184 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67185
67186-int kvm_arch_init(void *opaque);
67187+int kvm_arch_init(const void *opaque);
67188 void kvm_arch_exit(void);
67189
67190 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67191diff --git a/include/linux/libata.h b/include/linux/libata.h
67192index a069916..223edde 100644
67193--- a/include/linux/libata.h
67194+++ b/include/linux/libata.h
67195@@ -525,11 +525,11 @@ struct ata_ioports {
67196
67197 struct ata_host {
67198 spinlock_t lock;
67199- struct device *dev;
67200+ struct device *dev;
67201 void __iomem * const *iomap;
67202 unsigned int n_ports;
67203 void *private_data;
67204- struct ata_port_operations *ops;
67205+ const struct ata_port_operations *ops;
67206 unsigned long flags;
67207 #ifdef CONFIG_ATA_ACPI
67208 acpi_handle acpi_handle;
67209@@ -710,7 +710,7 @@ struct ata_link {
67210
67211 struct ata_port {
67212 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67213- struct ata_port_operations *ops;
67214+ const struct ata_port_operations *ops;
67215 spinlock_t *lock;
67216 /* Flags owned by the EH context. Only EH should touch these once the
67217 port is active */
67218@@ -884,7 +884,7 @@ struct ata_port_operations {
67219 * fields must be pointers.
67220 */
67221 const struct ata_port_operations *inherits;
67222-};
67223+} __do_const;
67224
67225 struct ata_port_info {
67226 unsigned long flags;
67227@@ -892,7 +892,7 @@ struct ata_port_info {
67228 unsigned long pio_mask;
67229 unsigned long mwdma_mask;
67230 unsigned long udma_mask;
67231- struct ata_port_operations *port_ops;
67232+ const struct ata_port_operations *port_ops;
67233 void *private_data;
67234 };
67235
67236@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67237 extern const unsigned long sata_deb_timing_hotplug[];
67238 extern const unsigned long sata_deb_timing_long[];
67239
67240-extern struct ata_port_operations ata_dummy_port_ops;
67241+extern const struct ata_port_operations ata_dummy_port_ops;
67242 extern const struct ata_port_info ata_dummy_port_info;
67243
67244 static inline const unsigned long *
67245@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67246 struct scsi_host_template *sht);
67247 extern void ata_host_detach(struct ata_host *host);
67248 extern void ata_host_init(struct ata_host *, struct device *,
67249- unsigned long, struct ata_port_operations *);
67250+ unsigned long, const struct ata_port_operations *);
67251 extern int ata_scsi_detect(struct scsi_host_template *sht);
67252 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67253 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67254diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67255index fbc48f8..0886e57 100644
67256--- a/include/linux/lockd/bind.h
67257+++ b/include/linux/lockd/bind.h
67258@@ -23,13 +23,13 @@ struct svc_rqst;
67259 * This is the set of functions for lockd->nfsd communication
67260 */
67261 struct nlmsvc_binding {
67262- __be32 (*fopen)(struct svc_rqst *,
67263+ __be32 (* const fopen)(struct svc_rqst *,
67264 struct nfs_fh *,
67265 struct file **);
67266- void (*fclose)(struct file *);
67267+ void (* const fclose)(struct file *);
67268 };
67269
67270-extern struct nlmsvc_binding * nlmsvc_ops;
67271+extern const struct nlmsvc_binding * nlmsvc_ops;
67272
67273 /*
67274 * Similar to nfs_client_initdata, but without the NFS-specific
67275diff --git a/include/linux/mca.h b/include/linux/mca.h
67276index 3797270..7765ede 100644
67277--- a/include/linux/mca.h
67278+++ b/include/linux/mca.h
67279@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67280 int region);
67281 void * (*mca_transform_memory)(struct mca_device *,
67282 void *memory);
67283-};
67284+} __no_const;
67285
67286 struct mca_bus {
67287 u64 default_dma_mask;
67288diff --git a/include/linux/memory.h b/include/linux/memory.h
67289index 37fa19b..b597c85 100644
67290--- a/include/linux/memory.h
67291+++ b/include/linux/memory.h
67292@@ -108,7 +108,7 @@ struct memory_accessor {
67293 size_t count);
67294 ssize_t (*write)(struct memory_accessor *, const char *buf,
67295 off_t offset, size_t count);
67296-};
67297+} __no_const;
67298
67299 /*
67300 * Kernel text modification mutex, used for code patching. Users of this lock
67301diff --git a/include/linux/mm.h b/include/linux/mm.h
67302index 11e5be6..1ff2423 100644
67303--- a/include/linux/mm.h
67304+++ b/include/linux/mm.h
67305@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67306
67307 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67308 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67309+
67310+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67311+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67312+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67313+#else
67314 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67315+#endif
67316+
67317 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67318 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67319
67320@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67321 int set_page_dirty_lock(struct page *page);
67322 int clear_page_dirty_for_io(struct page *page);
67323
67324-/* Is the vma a continuation of the stack vma above it? */
67325-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67326-{
67327- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67328-}
67329-
67330 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67331 unsigned long old_addr, struct vm_area_struct *new_vma,
67332 unsigned long new_addr, unsigned long len);
67333@@ -890,6 +891,8 @@ struct shrinker {
67334 extern void register_shrinker(struct shrinker *);
67335 extern void unregister_shrinker(struct shrinker *);
67336
67337+pgprot_t vm_get_page_prot(unsigned long vm_flags);
67338+
67339 int vma_wants_writenotify(struct vm_area_struct *vma);
67340
67341 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67342@@ -1162,6 +1165,7 @@ out:
67343 }
67344
67345 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67346+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67347
67348 extern unsigned long do_brk(unsigned long, unsigned long);
67349
67350@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67351 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67352 struct vm_area_struct **pprev);
67353
67354+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67355+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67356+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67357+
67358 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67359 NULL if none. Assume start_addr < end_addr. */
67360 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67361@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67362 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67363 }
67364
67365-pgprot_t vm_get_page_prot(unsigned long vm_flags);
67366 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67367 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67368 unsigned long pfn, unsigned long size, pgprot_t);
67369@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67370 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67371 extern int sysctl_memory_failure_early_kill;
67372 extern int sysctl_memory_failure_recovery;
67373-extern atomic_long_t mce_bad_pages;
67374+extern atomic_long_unchecked_t mce_bad_pages;
67375+
67376+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67377+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67378+#else
67379+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67380+#endif
67381
67382 #endif /* __KERNEL__ */
67383 #endif /* _LINUX_MM_H */
67384diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67385index 9d12ed5..8023125 100644
67386--- a/include/linux/mm_types.h
67387+++ b/include/linux/mm_types.h
67388@@ -186,6 +186,8 @@ struct vm_area_struct {
67389 #ifdef CONFIG_NUMA
67390 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67391 #endif
67392+
67393+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67394 };
67395
67396 struct core_thread {
67397@@ -287,6 +289,24 @@ struct mm_struct {
67398 #ifdef CONFIG_MMU_NOTIFIER
67399 struct mmu_notifier_mm *mmu_notifier_mm;
67400 #endif
67401+
67402+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67403+ unsigned long pax_flags;
67404+#endif
67405+
67406+#ifdef CONFIG_PAX_DLRESOLVE
67407+ unsigned long call_dl_resolve;
67408+#endif
67409+
67410+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
67411+ unsigned long call_syscall;
67412+#endif
67413+
67414+#ifdef CONFIG_PAX_ASLR
67415+ unsigned long delta_mmap; /* randomized offset */
67416+ unsigned long delta_stack; /* randomized offset */
67417+#endif
67418+
67419 };
67420
67421 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
67422diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
67423index 4e02ee2..afb159e 100644
67424--- a/include/linux/mmu_notifier.h
67425+++ b/include/linux/mmu_notifier.h
67426@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
67427 */
67428 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
67429 ({ \
67430- pte_t __pte; \
67431+ pte_t ___pte; \
67432 struct vm_area_struct *___vma = __vma; \
67433 unsigned long ___address = __address; \
67434- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
67435+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
67436 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
67437- __pte; \
67438+ ___pte; \
67439 })
67440
67441 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
67442diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
67443index 6c31a2a..4b0e930 100644
67444--- a/include/linux/mmzone.h
67445+++ b/include/linux/mmzone.h
67446@@ -350,7 +350,7 @@ struct zone {
67447 unsigned long flags; /* zone flags, see below */
67448
67449 /* Zone statistics */
67450- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67451+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67452
67453 /*
67454 * prev_priority holds the scanning priority for this zone. It is
67455diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
67456index f58e9d8..3503935 100644
67457--- a/include/linux/mod_devicetable.h
67458+++ b/include/linux/mod_devicetable.h
67459@@ -12,7 +12,7 @@
67460 typedef unsigned long kernel_ulong_t;
67461 #endif
67462
67463-#define PCI_ANY_ID (~0)
67464+#define PCI_ANY_ID ((__u16)~0)
67465
67466 struct pci_device_id {
67467 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
67468@@ -131,7 +131,7 @@ struct usb_device_id {
67469 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
67470 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
67471
67472-#define HID_ANY_ID (~0)
67473+#define HID_ANY_ID (~0U)
67474
67475 struct hid_device_id {
67476 __u16 bus;
67477diff --git a/include/linux/module.h b/include/linux/module.h
67478index 482efc8..642032b 100644
67479--- a/include/linux/module.h
67480+++ b/include/linux/module.h
67481@@ -16,6 +16,7 @@
67482 #include <linux/kobject.h>
67483 #include <linux/moduleparam.h>
67484 #include <linux/tracepoint.h>
67485+#include <linux/fs.h>
67486
67487 #include <asm/local.h>
67488 #include <asm/module.h>
67489@@ -287,16 +288,16 @@ struct module
67490 int (*init)(void);
67491
67492 /* If this is non-NULL, vfree after init() returns */
67493- void *module_init;
67494+ void *module_init_rx, *module_init_rw;
67495
67496 /* Here is the actual code + data, vfree'd on unload. */
67497- void *module_core;
67498+ void *module_core_rx, *module_core_rw;
67499
67500 /* Here are the sizes of the init and core sections */
67501- unsigned int init_size, core_size;
67502+ unsigned int init_size_rw, core_size_rw;
67503
67504 /* The size of the executable code in each section. */
67505- unsigned int init_text_size, core_text_size;
67506+ unsigned int init_size_rx, core_size_rx;
67507
67508 /* Arch-specific module values */
67509 struct mod_arch_specific arch;
67510@@ -345,6 +346,10 @@ struct module
67511 #ifdef CONFIG_EVENT_TRACING
67512 struct ftrace_event_call *trace_events;
67513 unsigned int num_trace_events;
67514+ struct file_operations trace_id;
67515+ struct file_operations trace_enable;
67516+ struct file_operations trace_format;
67517+ struct file_operations trace_filter;
67518 #endif
67519 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
67520 unsigned long *ftrace_callsites;
67521@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
67522 bool is_module_address(unsigned long addr);
67523 bool is_module_text_address(unsigned long addr);
67524
67525+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
67526+{
67527+
67528+#ifdef CONFIG_PAX_KERNEXEC
67529+ if (ktla_ktva(addr) >= (unsigned long)start &&
67530+ ktla_ktva(addr) < (unsigned long)start + size)
67531+ return 1;
67532+#endif
67533+
67534+ return ((void *)addr >= start && (void *)addr < start + size);
67535+}
67536+
67537+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
67538+{
67539+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
67540+}
67541+
67542+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
67543+{
67544+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
67545+}
67546+
67547+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
67548+{
67549+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
67550+}
67551+
67552+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
67553+{
67554+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
67555+}
67556+
67557 static inline int within_module_core(unsigned long addr, struct module *mod)
67558 {
67559- return (unsigned long)mod->module_core <= addr &&
67560- addr < (unsigned long)mod->module_core + mod->core_size;
67561+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
67562 }
67563
67564 static inline int within_module_init(unsigned long addr, struct module *mod)
67565 {
67566- return (unsigned long)mod->module_init <= addr &&
67567- addr < (unsigned long)mod->module_init + mod->init_size;
67568+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
67569 }
67570
67571 /* Search for module by name: must hold module_mutex. */
67572diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
67573index c1f40c2..682ca53 100644
67574--- a/include/linux/moduleloader.h
67575+++ b/include/linux/moduleloader.h
67576@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
67577 sections. Returns NULL on failure. */
67578 void *module_alloc(unsigned long size);
67579
67580+#ifdef CONFIG_PAX_KERNEXEC
67581+void *module_alloc_exec(unsigned long size);
67582+#else
67583+#define module_alloc_exec(x) module_alloc(x)
67584+#endif
67585+
67586 /* Free memory returned from module_alloc. */
67587 void module_free(struct module *mod, void *module_region);
67588
67589+#ifdef CONFIG_PAX_KERNEXEC
67590+void module_free_exec(struct module *mod, void *module_region);
67591+#else
67592+#define module_free_exec(x, y) module_free((x), (y))
67593+#endif
67594+
67595 /* Apply the given relocation to the (simplified) ELF. Return -error
67596 or 0. */
67597 int apply_relocate(Elf_Shdr *sechdrs,
67598diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
67599index 82a9124..8a5f622 100644
67600--- a/include/linux/moduleparam.h
67601+++ b/include/linux/moduleparam.h
67602@@ -132,7 +132,7 @@ struct kparam_array
67603
67604 /* Actually copy string: maxlen param is usually sizeof(string). */
67605 #define module_param_string(name, string, len, perm) \
67606- static const struct kparam_string __param_string_##name \
67607+ static const struct kparam_string __param_string_##name __used \
67608 = { len, string }; \
67609 __module_param_call(MODULE_PARAM_PREFIX, name, \
67610 param_set_copystring, param_get_string, \
67611@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
67612
67613 /* Comma-separated array: *nump is set to number they actually specified. */
67614 #define module_param_array_named(name, array, type, nump, perm) \
67615- static const struct kparam_array __param_arr_##name \
67616+ static const struct kparam_array __param_arr_##name __used \
67617 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
67618 sizeof(array[0]), array }; \
67619 __module_param_call(MODULE_PARAM_PREFIX, name, \
67620diff --git a/include/linux/mutex.h b/include/linux/mutex.h
67621index 878cab4..c92cb3e 100644
67622--- a/include/linux/mutex.h
67623+++ b/include/linux/mutex.h
67624@@ -51,7 +51,7 @@ struct mutex {
67625 spinlock_t wait_lock;
67626 struct list_head wait_list;
67627 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
67628- struct thread_info *owner;
67629+ struct task_struct *owner;
67630 #endif
67631 #ifdef CONFIG_DEBUG_MUTEXES
67632 const char *name;
67633diff --git a/include/linux/namei.h b/include/linux/namei.h
67634index ec0f607..d19e675 100644
67635--- a/include/linux/namei.h
67636+++ b/include/linux/namei.h
67637@@ -22,7 +22,7 @@ struct nameidata {
67638 unsigned int flags;
67639 int last_type;
67640 unsigned depth;
67641- char *saved_names[MAX_NESTED_LINKS + 1];
67642+ const char *saved_names[MAX_NESTED_LINKS + 1];
67643
67644 /* Intent data */
67645 union {
67646@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
67647 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
67648 extern void unlock_rename(struct dentry *, struct dentry *);
67649
67650-static inline void nd_set_link(struct nameidata *nd, char *path)
67651+static inline void nd_set_link(struct nameidata *nd, const char *path)
67652 {
67653 nd->saved_names[nd->depth] = path;
67654 }
67655
67656-static inline char *nd_get_link(struct nameidata *nd)
67657+static inline const char *nd_get_link(const struct nameidata *nd)
67658 {
67659 return nd->saved_names[nd->depth];
67660 }
67661diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
67662index 9d7e8f7..04428c5 100644
67663--- a/include/linux/netdevice.h
67664+++ b/include/linux/netdevice.h
67665@@ -637,6 +637,7 @@ struct net_device_ops {
67666 u16 xid);
67667 #endif
67668 };
67669+typedef struct net_device_ops __no_const net_device_ops_no_const;
67670
67671 /*
67672 * The DEVICE structure.
67673diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
67674new file mode 100644
67675index 0000000..33f4af8
67676--- /dev/null
67677+++ b/include/linux/netfilter/xt_gradm.h
67678@@ -0,0 +1,9 @@
67679+#ifndef _LINUX_NETFILTER_XT_GRADM_H
67680+#define _LINUX_NETFILTER_XT_GRADM_H 1
67681+
67682+struct xt_gradm_mtinfo {
67683+ __u16 flags;
67684+ __u16 invflags;
67685+};
67686+
67687+#endif
67688diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
67689index b359c4a..c08b334 100644
67690--- a/include/linux/nodemask.h
67691+++ b/include/linux/nodemask.h
67692@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
67693
67694 #define any_online_node(mask) \
67695 ({ \
67696- int node; \
67697- for_each_node_mask(node, (mask)) \
67698- if (node_online(node)) \
67699+ int __node; \
67700+ for_each_node_mask(__node, (mask)) \
67701+ if (node_online(__node)) \
67702 break; \
67703- node; \
67704+ __node; \
67705 })
67706
67707 #define num_online_nodes() num_node_state(N_ONLINE)
67708diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
67709index 5171639..7cf4235 100644
67710--- a/include/linux/oprofile.h
67711+++ b/include/linux/oprofile.h
67712@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
67713 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
67714 char const * name, ulong * val);
67715
67716-/** Create a file for read-only access to an atomic_t. */
67717+/** Create a file for read-only access to an atomic_unchecked_t. */
67718 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
67719- char const * name, atomic_t * val);
67720+ char const * name, atomic_unchecked_t * val);
67721
67722 /** create a directory */
67723 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
67724diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
67725index 3c62ed4..8924c7c 100644
67726--- a/include/linux/pagemap.h
67727+++ b/include/linux/pagemap.h
67728@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
67729 if (((unsigned long)uaddr & PAGE_MASK) !=
67730 ((unsigned long)end & PAGE_MASK))
67731 ret = __get_user(c, end);
67732+ (void)c;
67733 }
67734+ (void)c;
67735 return ret;
67736 }
67737
67738diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
67739index 81c9689..a567a55 100644
67740--- a/include/linux/perf_event.h
67741+++ b/include/linux/perf_event.h
67742@@ -476,7 +476,7 @@ struct hw_perf_event {
67743 struct hrtimer hrtimer;
67744 };
67745 };
67746- atomic64_t prev_count;
67747+ atomic64_unchecked_t prev_count;
67748 u64 sample_period;
67749 u64 last_period;
67750 atomic64_t period_left;
67751@@ -557,7 +557,7 @@ struct perf_event {
67752 const struct pmu *pmu;
67753
67754 enum perf_event_active_state state;
67755- atomic64_t count;
67756+ atomic64_unchecked_t count;
67757
67758 /*
67759 * These are the total time in nanoseconds that the event
67760@@ -595,8 +595,8 @@ struct perf_event {
67761 * These accumulate total time (in nanoseconds) that children
67762 * events have been enabled and running, respectively.
67763 */
67764- atomic64_t child_total_time_enabled;
67765- atomic64_t child_total_time_running;
67766+ atomic64_unchecked_t child_total_time_enabled;
67767+ atomic64_unchecked_t child_total_time_running;
67768
67769 /*
67770 * Protect attach/detach and child_list:
67771diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
67772index b43a9e0..b77d869 100644
67773--- a/include/linux/pipe_fs_i.h
67774+++ b/include/linux/pipe_fs_i.h
67775@@ -46,9 +46,9 @@ struct pipe_inode_info {
67776 wait_queue_head_t wait;
67777 unsigned int nrbufs, curbuf;
67778 struct page *tmp_page;
67779- unsigned int readers;
67780- unsigned int writers;
67781- unsigned int waiting_writers;
67782+ atomic_t readers;
67783+ atomic_t writers;
67784+ atomic_t waiting_writers;
67785 unsigned int r_counter;
67786 unsigned int w_counter;
67787 struct fasync_struct *fasync_readers;
67788diff --git a/include/linux/poison.h b/include/linux/poison.h
67789index 34066ff..e95d744 100644
67790--- a/include/linux/poison.h
67791+++ b/include/linux/poison.h
67792@@ -19,8 +19,8 @@
67793 * under normal circumstances, used to verify that nobody uses
67794 * non-initialized list entries.
67795 */
67796-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
67797-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
67798+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
67799+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
67800
67801 /********** include/linux/timer.h **********/
67802 /*
67803diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
67804index 4f71bf4..77ffa64 100644
67805--- a/include/linux/posix-timers.h
67806+++ b/include/linux/posix-timers.h
67807@@ -67,7 +67,7 @@ struct k_itimer {
67808 };
67809
67810 struct k_clock {
67811- int res; /* in nanoseconds */
67812+ const int res; /* in nanoseconds */
67813 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
67814 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
67815 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
67816diff --git a/include/linux/preempt.h b/include/linux/preempt.h
67817index 72b1a10..13303a9 100644
67818--- a/include/linux/preempt.h
67819+++ b/include/linux/preempt.h
67820@@ -110,7 +110,7 @@ struct preempt_ops {
67821 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
67822 void (*sched_out)(struct preempt_notifier *notifier,
67823 struct task_struct *next);
67824-};
67825+} __no_const;
67826
67827 /**
67828 * preempt_notifier - key for installing preemption notifiers
67829diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
67830index 379eaed..1bf73e3 100644
67831--- a/include/linux/proc_fs.h
67832+++ b/include/linux/proc_fs.h
67833@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
67834 return proc_create_data(name, mode, parent, proc_fops, NULL);
67835 }
67836
67837+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
67838+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
67839+{
67840+#ifdef CONFIG_GRKERNSEC_PROC_USER
67841+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
67842+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67843+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
67844+#else
67845+ return proc_create_data(name, mode, parent, proc_fops, NULL);
67846+#endif
67847+}
67848+
67849+
67850 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
67851 mode_t mode, struct proc_dir_entry *base,
67852 read_proc_t *read_proc, void * data)
67853@@ -256,7 +269,7 @@ union proc_op {
67854 int (*proc_show)(struct seq_file *m,
67855 struct pid_namespace *ns, struct pid *pid,
67856 struct task_struct *task);
67857-};
67858+} __no_const;
67859
67860 struct ctl_table_header;
67861 struct ctl_table;
67862diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
67863index 7456d7d..6c1cfc9 100644
67864--- a/include/linux/ptrace.h
67865+++ b/include/linux/ptrace.h
67866@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
67867 extern void exit_ptrace(struct task_struct *tracer);
67868 #define PTRACE_MODE_READ 1
67869 #define PTRACE_MODE_ATTACH 2
67870-/* Returns 0 on success, -errno on denial. */
67871-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
67872 /* Returns true on success, false on denial. */
67873 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
67874+/* Returns true on success, false on denial. */
67875+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
67876
67877 static inline int ptrace_reparented(struct task_struct *child)
67878 {
67879diff --git a/include/linux/random.h b/include/linux/random.h
67880index 2948046..3262567 100644
67881--- a/include/linux/random.h
67882+++ b/include/linux/random.h
67883@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
67884 u32 random32(void);
67885 void srandom32(u32 seed);
67886
67887+static inline unsigned long pax_get_random_long(void)
67888+{
67889+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
67890+}
67891+
67892 #endif /* __KERNEL___ */
67893
67894 #endif /* _LINUX_RANDOM_H */
67895diff --git a/include/linux/reboot.h b/include/linux/reboot.h
67896index 988e55f..17cb4ef 100644
67897--- a/include/linux/reboot.h
67898+++ b/include/linux/reboot.h
67899@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
67900 * Architecture-specific implementations of sys_reboot commands.
67901 */
67902
67903-extern void machine_restart(char *cmd);
67904-extern void machine_halt(void);
67905-extern void machine_power_off(void);
67906+extern void machine_restart(char *cmd) __noreturn;
67907+extern void machine_halt(void) __noreturn;
67908+extern void machine_power_off(void) __noreturn;
67909
67910 extern void machine_shutdown(void);
67911 struct pt_regs;
67912@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
67913 */
67914
67915 extern void kernel_restart_prepare(char *cmd);
67916-extern void kernel_restart(char *cmd);
67917-extern void kernel_halt(void);
67918-extern void kernel_power_off(void);
67919+extern void kernel_restart(char *cmd) __noreturn;
67920+extern void kernel_halt(void) __noreturn;
67921+extern void kernel_power_off(void) __noreturn;
67922
67923 void ctrl_alt_del(void);
67924
67925@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
67926 * Emergency restart, callable from an interrupt handler.
67927 */
67928
67929-extern void emergency_restart(void);
67930+extern void emergency_restart(void) __noreturn;
67931 #include <asm/emergency-restart.h>
67932
67933 #endif
67934diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
67935index dd31e7b..5b03c5c 100644
67936--- a/include/linux/reiserfs_fs.h
67937+++ b/include/linux/reiserfs_fs.h
67938@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67939 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
67940
67941 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67942-#define get_generation(s) atomic_read (&fs_generation(s))
67943+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67944 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67945 #define __fs_changed(gen,s) (gen != get_generation (s))
67946 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
67947@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
67948 */
67949
67950 struct item_operations {
67951- int (*bytes_number) (struct item_head * ih, int block_size);
67952- void (*decrement_key) (struct cpu_key *);
67953- int (*is_left_mergeable) (struct reiserfs_key * ih,
67954+ int (* const bytes_number) (struct item_head * ih, int block_size);
67955+ void (* const decrement_key) (struct cpu_key *);
67956+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
67957 unsigned long bsize);
67958- void (*print_item) (struct item_head *, char *item);
67959- void (*check_item) (struct item_head *, char *item);
67960+ void (* const print_item) (struct item_head *, char *item);
67961+ void (* const check_item) (struct item_head *, char *item);
67962
67963- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67964+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67965 int is_affected, int insert_size);
67966- int (*check_left) (struct virtual_item * vi, int free,
67967+ int (* const check_left) (struct virtual_item * vi, int free,
67968 int start_skip, int end_skip);
67969- int (*check_right) (struct virtual_item * vi, int free);
67970- int (*part_size) (struct virtual_item * vi, int from, int to);
67971- int (*unit_num) (struct virtual_item * vi);
67972- void (*print_vi) (struct virtual_item * vi);
67973+ int (* const check_right) (struct virtual_item * vi, int free);
67974+ int (* const part_size) (struct virtual_item * vi, int from, int to);
67975+ int (* const unit_num) (struct virtual_item * vi);
67976+ void (* const print_vi) (struct virtual_item * vi);
67977 };
67978
67979-extern struct item_operations *item_ops[TYPE_ANY + 1];
67980+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
67981
67982 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
67983 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
67984diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
67985index dab68bb..0688727 100644
67986--- a/include/linux/reiserfs_fs_sb.h
67987+++ b/include/linux/reiserfs_fs_sb.h
67988@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
67989 /* Comment? -Hans */
67990 wait_queue_head_t s_wait;
67991 /* To be obsoleted soon by per buffer seals.. -Hans */
67992- atomic_t s_generation_counter; // increased by one every time the
67993+ atomic_unchecked_t s_generation_counter; // increased by one every time the
67994 // tree gets re-balanced
67995 unsigned long s_properties; /* File system properties. Currently holds
67996 on-disk FS format */
67997diff --git a/include/linux/relay.h b/include/linux/relay.h
67998index 14a86bc..17d0700 100644
67999--- a/include/linux/relay.h
68000+++ b/include/linux/relay.h
68001@@ -159,7 +159,7 @@ struct rchan_callbacks
68002 * The callback should return 0 if successful, negative if not.
68003 */
68004 int (*remove_buf_file)(struct dentry *dentry);
68005-};
68006+} __no_const;
68007
68008 /*
68009 * CONFIG_RELAY kernel API, kernel/relay.c
68010diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68011index 3392c59..a746428 100644
68012--- a/include/linux/rfkill.h
68013+++ b/include/linux/rfkill.h
68014@@ -144,6 +144,7 @@ struct rfkill_ops {
68015 void (*query)(struct rfkill *rfkill, void *data);
68016 int (*set_block)(void *data, bool blocked);
68017 };
68018+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68019
68020 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68021 /**
68022diff --git a/include/linux/sched.h b/include/linux/sched.h
68023index 71849bf..40217dc 100644
68024--- a/include/linux/sched.h
68025+++ b/include/linux/sched.h
68026@@ -101,6 +101,7 @@ struct bio;
68027 struct fs_struct;
68028 struct bts_context;
68029 struct perf_event_context;
68030+struct linux_binprm;
68031
68032 /*
68033 * List of flags we want to share for kernel threads,
68034@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68035 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68036 asmlinkage void __schedule(void);
68037 asmlinkage void schedule(void);
68038-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68039+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68040
68041 struct nsproxy;
68042 struct user_namespace;
68043@@ -371,9 +372,12 @@ struct user_namespace;
68044 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68045
68046 extern int sysctl_max_map_count;
68047+extern unsigned long sysctl_heap_stack_gap;
68048
68049 #include <linux/aio.h>
68050
68051+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68052+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68053 extern unsigned long
68054 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68055 unsigned long, unsigned long);
68056@@ -666,6 +670,16 @@ struct signal_struct {
68057 struct tty_audit_buf *tty_audit_buf;
68058 #endif
68059
68060+#ifdef CONFIG_GRKERNSEC
68061+ u32 curr_ip;
68062+ u32 saved_ip;
68063+ u32 gr_saddr;
68064+ u32 gr_daddr;
68065+ u16 gr_sport;
68066+ u16 gr_dport;
68067+ u8 used_accept:1;
68068+#endif
68069+
68070 int oom_adj; /* OOM kill score adjustment (bit shift) */
68071 };
68072
68073@@ -723,6 +737,11 @@ struct user_struct {
68074 struct key *session_keyring; /* UID's default session keyring */
68075 #endif
68076
68077+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68078+ unsigned int banned;
68079+ unsigned long ban_expires;
68080+#endif
68081+
68082 /* Hash table maintenance information */
68083 struct hlist_node uidhash_node;
68084 uid_t uid;
68085@@ -1328,8 +1347,8 @@ struct task_struct {
68086 struct list_head thread_group;
68087
68088 struct completion *vfork_done; /* for vfork() */
68089- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68090- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68091+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68092+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68093
68094 cputime_t utime, stime, utimescaled, stimescaled;
68095 cputime_t gtime;
68096@@ -1343,16 +1362,6 @@ struct task_struct {
68097 struct task_cputime cputime_expires;
68098 struct list_head cpu_timers[3];
68099
68100-/* process credentials */
68101- const struct cred *real_cred; /* objective and real subjective task
68102- * credentials (COW) */
68103- const struct cred *cred; /* effective (overridable) subjective task
68104- * credentials (COW) */
68105- struct mutex cred_guard_mutex; /* guard against foreign influences on
68106- * credential calculations
68107- * (notably. ptrace) */
68108- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68109-
68110 char comm[TASK_COMM_LEN]; /* executable name excluding path
68111 - access with [gs]et_task_comm (which lock
68112 it with task_lock())
68113@@ -1369,6 +1378,10 @@ struct task_struct {
68114 #endif
68115 /* CPU-specific state of this task */
68116 struct thread_struct thread;
68117+/* thread_info moved to task_struct */
68118+#ifdef CONFIG_X86
68119+ struct thread_info tinfo;
68120+#endif
68121 /* filesystem information */
68122 struct fs_struct *fs;
68123 /* open file information */
68124@@ -1436,6 +1449,15 @@ struct task_struct {
68125 int hardirq_context;
68126 int softirq_context;
68127 #endif
68128+
68129+/* process credentials */
68130+ const struct cred *real_cred; /* objective and real subjective task
68131+ * credentials (COW) */
68132+ struct mutex cred_guard_mutex; /* guard against foreign influences on
68133+ * credential calculations
68134+ * (notably. ptrace) */
68135+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68136+
68137 #ifdef CONFIG_LOCKDEP
68138 # define MAX_LOCK_DEPTH 48UL
68139 u64 curr_chain_key;
68140@@ -1456,6 +1478,9 @@ struct task_struct {
68141
68142 struct backing_dev_info *backing_dev_info;
68143
68144+ const struct cred *cred; /* effective (overridable) subjective task
68145+ * credentials (COW) */
68146+
68147 struct io_context *io_context;
68148
68149 unsigned long ptrace_message;
68150@@ -1519,6 +1544,21 @@ struct task_struct {
68151 unsigned long default_timer_slack_ns;
68152
68153 struct list_head *scm_work_list;
68154+
68155+#ifdef CONFIG_GRKERNSEC
68156+ /* grsecurity */
68157+ struct dentry *gr_chroot_dentry;
68158+ struct acl_subject_label *acl;
68159+ struct acl_role_label *role;
68160+ struct file *exec_file;
68161+ u16 acl_role_id;
68162+ /* is this the task that authenticated to the special role */
68163+ u8 acl_sp_role;
68164+ u8 is_writable;
68165+ u8 brute;
68166+ u8 gr_is_chrooted;
68167+#endif
68168+
68169 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68170 /* Index of current stored adress in ret_stack */
68171 int curr_ret_stack;
68172@@ -1542,6 +1582,57 @@ struct task_struct {
68173 #endif /* CONFIG_TRACING */
68174 };
68175
68176+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68177+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68178+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68179+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68180+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68181+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68182+
68183+#ifdef CONFIG_PAX_SOFTMODE
68184+extern int pax_softmode;
68185+#endif
68186+
68187+extern int pax_check_flags(unsigned long *);
68188+
68189+/* if tsk != current then task_lock must be held on it */
68190+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68191+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68192+{
68193+ if (likely(tsk->mm))
68194+ return tsk->mm->pax_flags;
68195+ else
68196+ return 0UL;
68197+}
68198+
68199+/* if tsk != current then task_lock must be held on it */
68200+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68201+{
68202+ if (likely(tsk->mm)) {
68203+ tsk->mm->pax_flags = flags;
68204+ return 0;
68205+ }
68206+ return -EINVAL;
68207+}
68208+#endif
68209+
68210+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68211+extern void pax_set_initial_flags(struct linux_binprm *bprm);
68212+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68213+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68214+#endif
68215+
68216+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68217+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68218+extern void pax_report_refcount_overflow(struct pt_regs *regs);
68219+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68220+
68221+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68222+extern void pax_track_stack(void);
68223+#else
68224+static inline void pax_track_stack(void) {}
68225+#endif
68226+
68227 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68228 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68229
68230@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68231 #define PF_DUMPCORE 0x00000200 /* dumped core */
68232 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68233 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68234-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68235+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68236 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68237 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68238 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68239@@ -1978,7 +2069,9 @@ void yield(void);
68240 extern struct exec_domain default_exec_domain;
68241
68242 union thread_union {
68243+#ifndef CONFIG_X86
68244 struct thread_info thread_info;
68245+#endif
68246 unsigned long stack[THREAD_SIZE/sizeof(long)];
68247 };
68248
68249@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
68250 */
68251
68252 extern struct task_struct *find_task_by_vpid(pid_t nr);
68253+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68254 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68255 struct pid_namespace *ns);
68256
68257@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68258 extern void exit_itimers(struct signal_struct *);
68259 extern void flush_itimer_signals(void);
68260
68261-extern NORET_TYPE void do_group_exit(int);
68262+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68263
68264 extern void daemonize(const char *, ...);
68265 extern int allow_signal(int);
68266@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68267
68268 #endif
68269
68270-static inline int object_is_on_stack(void *obj)
68271+static inline int object_starts_on_stack(void *obj)
68272 {
68273- void *stack = task_stack_page(current);
68274+ const void *stack = task_stack_page(current);
68275
68276 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68277 }
68278
68279+#ifdef CONFIG_PAX_USERCOPY
68280+extern int object_is_on_stack(const void *obj, unsigned long len);
68281+#endif
68282+
68283 extern void thread_info_cache_init(void);
68284
68285 #ifdef CONFIG_DEBUG_STACK_USAGE
68286diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68287index 1ee2c05..81b7ec4 100644
68288--- a/include/linux/screen_info.h
68289+++ b/include/linux/screen_info.h
68290@@ -42,7 +42,8 @@ struct screen_info {
68291 __u16 pages; /* 0x32 */
68292 __u16 vesa_attributes; /* 0x34 */
68293 __u32 capabilities; /* 0x36 */
68294- __u8 _reserved[6]; /* 0x3a */
68295+ __u16 vesapm_size; /* 0x3a */
68296+ __u8 _reserved[4]; /* 0x3c */
68297 } __attribute__((packed));
68298
68299 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68300diff --git a/include/linux/security.h b/include/linux/security.h
68301index d40d23f..253bd14 100644
68302--- a/include/linux/security.h
68303+++ b/include/linux/security.h
68304@@ -34,6 +34,7 @@
68305 #include <linux/key.h>
68306 #include <linux/xfrm.h>
68307 #include <linux/gfp.h>
68308+#include <linux/grsecurity.h>
68309 #include <net/flow.h>
68310
68311 /* Maximum number of letters for an LSM name string */
68312diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68313index 8366d8f..2307490 100644
68314--- a/include/linux/seq_file.h
68315+++ b/include/linux/seq_file.h
68316@@ -32,6 +32,7 @@ struct seq_operations {
68317 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68318 int (*show) (struct seq_file *m, void *v);
68319 };
68320+typedef struct seq_operations __no_const seq_operations_no_const;
68321
68322 #define SEQ_SKIP 1
68323
68324diff --git a/include/linux/shm.h b/include/linux/shm.h
68325index eca6235..c7417ed 100644
68326--- a/include/linux/shm.h
68327+++ b/include/linux/shm.h
68328@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68329 pid_t shm_cprid;
68330 pid_t shm_lprid;
68331 struct user_struct *mlock_user;
68332+#ifdef CONFIG_GRKERNSEC
68333+ time_t shm_createtime;
68334+ pid_t shm_lapid;
68335+#endif
68336 };
68337
68338 /* shm_mode upper byte flags */
68339diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68340index bcdd660..6e12e11 100644
68341--- a/include/linux/skbuff.h
68342+++ b/include/linux/skbuff.h
68343@@ -14,6 +14,7 @@
68344 #ifndef _LINUX_SKBUFF_H
68345 #define _LINUX_SKBUFF_H
68346
68347+#include <linux/const.h>
68348 #include <linux/kernel.h>
68349 #include <linux/kmemcheck.h>
68350 #include <linux/compiler.h>
68351@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68352 */
68353 static inline int skb_queue_empty(const struct sk_buff_head *list)
68354 {
68355- return list->next == (struct sk_buff *)list;
68356+ return list->next == (const struct sk_buff *)list;
68357 }
68358
68359 /**
68360@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
68361 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68362 const struct sk_buff *skb)
68363 {
68364- return (skb->next == (struct sk_buff *) list);
68365+ return (skb->next == (const struct sk_buff *) list);
68366 }
68367
68368 /**
68369@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68370 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
68371 const struct sk_buff *skb)
68372 {
68373- return (skb->prev == (struct sk_buff *) list);
68374+ return (skb->prev == (const struct sk_buff *) list);
68375 }
68376
68377 /**
68378@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
68379 * headroom, you should not reduce this.
68380 */
68381 #ifndef NET_SKB_PAD
68382-#define NET_SKB_PAD 32
68383+#define NET_SKB_PAD (_AC(32,UL))
68384 #endif
68385
68386 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
68387diff --git a/include/linux/slab.h b/include/linux/slab.h
68388index 2da8372..a3be824 100644
68389--- a/include/linux/slab.h
68390+++ b/include/linux/slab.h
68391@@ -11,12 +11,20 @@
68392
68393 #include <linux/gfp.h>
68394 #include <linux/types.h>
68395+#include <linux/err.h>
68396
68397 /*
68398 * Flags to pass to kmem_cache_create().
68399 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
68400 */
68401 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
68402+
68403+#ifdef CONFIG_PAX_USERCOPY
68404+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
68405+#else
68406+#define SLAB_USERCOPY 0x00000000UL
68407+#endif
68408+
68409 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
68410 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
68411 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
68412@@ -82,10 +90,13 @@
68413 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
68414 * Both make kfree a no-op.
68415 */
68416-#define ZERO_SIZE_PTR ((void *)16)
68417+#define ZERO_SIZE_PTR \
68418+({ \
68419+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
68420+ (void *)(-MAX_ERRNO-1L); \
68421+})
68422
68423-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
68424- (unsigned long)ZERO_SIZE_PTR)
68425+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
68426
68427 /*
68428 * struct kmem_cache related prototypes
68429@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
68430 void kfree(const void *);
68431 void kzfree(const void *);
68432 size_t ksize(const void *);
68433+void check_object_size(const void *ptr, unsigned long n, bool to);
68434
68435 /*
68436 * Allocator specific definitions. These are mainly used to establish optimized
68437@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
68438
68439 void __init kmem_cache_init_late(void);
68440
68441+#define kmalloc(x, y) \
68442+({ \
68443+ void *___retval; \
68444+ intoverflow_t ___x = (intoverflow_t)x; \
68445+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
68446+ ___retval = NULL; \
68447+ else \
68448+ ___retval = kmalloc((size_t)___x, (y)); \
68449+ ___retval; \
68450+})
68451+
68452+#define kmalloc_node(x, y, z) \
68453+({ \
68454+ void *___retval; \
68455+ intoverflow_t ___x = (intoverflow_t)x; \
68456+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
68457+ ___retval = NULL; \
68458+ else \
68459+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
68460+ ___retval; \
68461+})
68462+
68463+#define kzalloc(x, y) \
68464+({ \
68465+ void *___retval; \
68466+ intoverflow_t ___x = (intoverflow_t)x; \
68467+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
68468+ ___retval = NULL; \
68469+ else \
68470+ ___retval = kzalloc((size_t)___x, (y)); \
68471+ ___retval; \
68472+})
68473+
68474 #endif /* _LINUX_SLAB_H */
68475diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
68476index 850d057..d9dfe3c 100644
68477--- a/include/linux/slab_def.h
68478+++ b/include/linux/slab_def.h
68479@@ -69,10 +69,10 @@ struct kmem_cache {
68480 unsigned long node_allocs;
68481 unsigned long node_frees;
68482 unsigned long node_overflow;
68483- atomic_t allochit;
68484- atomic_t allocmiss;
68485- atomic_t freehit;
68486- atomic_t freemiss;
68487+ atomic_unchecked_t allochit;
68488+ atomic_unchecked_t allocmiss;
68489+ atomic_unchecked_t freehit;
68490+ atomic_unchecked_t freemiss;
68491
68492 /*
68493 * If debugging is enabled, then the allocator can add additional
68494diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
68495index 5ad70a6..57f9f65 100644
68496--- a/include/linux/slub_def.h
68497+++ b/include/linux/slub_def.h
68498@@ -86,7 +86,7 @@ struct kmem_cache {
68499 struct kmem_cache_order_objects max;
68500 struct kmem_cache_order_objects min;
68501 gfp_t allocflags; /* gfp flags to use on each alloc */
68502- int refcount; /* Refcount for slab cache destroy */
68503+ atomic_t refcount; /* Refcount for slab cache destroy */
68504 void (*ctor)(void *);
68505 int inuse; /* Offset to metadata */
68506 int align; /* Alignment */
68507@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
68508 #endif
68509
68510 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
68511-void *__kmalloc(size_t size, gfp_t flags);
68512+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
68513
68514 #ifdef CONFIG_KMEMTRACE
68515 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
68516diff --git a/include/linux/sonet.h b/include/linux/sonet.h
68517index 67ad11f..0bbd8af 100644
68518--- a/include/linux/sonet.h
68519+++ b/include/linux/sonet.h
68520@@ -61,7 +61,7 @@ struct sonet_stats {
68521 #include <asm/atomic.h>
68522
68523 struct k_sonet_stats {
68524-#define __HANDLE_ITEM(i) atomic_t i
68525+#define __HANDLE_ITEM(i) atomic_unchecked_t i
68526 __SONET_ITEMS
68527 #undef __HANDLE_ITEM
68528 };
68529diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
68530index 6f52b4d..5500323 100644
68531--- a/include/linux/sunrpc/cache.h
68532+++ b/include/linux/sunrpc/cache.h
68533@@ -125,7 +125,7 @@ struct cache_detail {
68534 */
68535 struct cache_req {
68536 struct cache_deferred_req *(*defer)(struct cache_req *req);
68537-};
68538+} __no_const;
68539 /* this must be embedded in a deferred_request that is being
68540 * delayed awaiting cache-fill
68541 */
68542diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
68543index 8ed9642..101ceab 100644
68544--- a/include/linux/sunrpc/clnt.h
68545+++ b/include/linux/sunrpc/clnt.h
68546@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
68547 {
68548 switch (sap->sa_family) {
68549 case AF_INET:
68550- return ntohs(((struct sockaddr_in *)sap)->sin_port);
68551+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
68552 case AF_INET6:
68553- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
68554+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
68555 }
68556 return 0;
68557 }
68558@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
68559 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
68560 const struct sockaddr *src)
68561 {
68562- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
68563+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
68564 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
68565
68566 dsin->sin_family = ssin->sin_family;
68567@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
68568 if (sa->sa_family != AF_INET6)
68569 return 0;
68570
68571- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
68572+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
68573 }
68574
68575 #endif /* __KERNEL__ */
68576diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
68577index c14fe86..393245e 100644
68578--- a/include/linux/sunrpc/svc_rdma.h
68579+++ b/include/linux/sunrpc/svc_rdma.h
68580@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
68581 extern unsigned int svcrdma_max_requests;
68582 extern unsigned int svcrdma_max_req_size;
68583
68584-extern atomic_t rdma_stat_recv;
68585-extern atomic_t rdma_stat_read;
68586-extern atomic_t rdma_stat_write;
68587-extern atomic_t rdma_stat_sq_starve;
68588-extern atomic_t rdma_stat_rq_starve;
68589-extern atomic_t rdma_stat_rq_poll;
68590-extern atomic_t rdma_stat_rq_prod;
68591-extern atomic_t rdma_stat_sq_poll;
68592-extern atomic_t rdma_stat_sq_prod;
68593+extern atomic_unchecked_t rdma_stat_recv;
68594+extern atomic_unchecked_t rdma_stat_read;
68595+extern atomic_unchecked_t rdma_stat_write;
68596+extern atomic_unchecked_t rdma_stat_sq_starve;
68597+extern atomic_unchecked_t rdma_stat_rq_starve;
68598+extern atomic_unchecked_t rdma_stat_rq_poll;
68599+extern atomic_unchecked_t rdma_stat_rq_prod;
68600+extern atomic_unchecked_t rdma_stat_sq_poll;
68601+extern atomic_unchecked_t rdma_stat_sq_prod;
68602
68603 #define RPCRDMA_VERSION 1
68604
68605diff --git a/include/linux/suspend.h b/include/linux/suspend.h
68606index 5e781d8..1e62818 100644
68607--- a/include/linux/suspend.h
68608+++ b/include/linux/suspend.h
68609@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
68610 * which require special recovery actions in that situation.
68611 */
68612 struct platform_suspend_ops {
68613- int (*valid)(suspend_state_t state);
68614- int (*begin)(suspend_state_t state);
68615- int (*prepare)(void);
68616- int (*prepare_late)(void);
68617- int (*enter)(suspend_state_t state);
68618- void (*wake)(void);
68619- void (*finish)(void);
68620- void (*end)(void);
68621- void (*recover)(void);
68622+ int (* const valid)(suspend_state_t state);
68623+ int (* const begin)(suspend_state_t state);
68624+ int (* const prepare)(void);
68625+ int (* const prepare_late)(void);
68626+ int (* const enter)(suspend_state_t state);
68627+ void (* const wake)(void);
68628+ void (* const finish)(void);
68629+ void (* const end)(void);
68630+ void (* const recover)(void);
68631 };
68632
68633 #ifdef CONFIG_SUSPEND
68634@@ -120,7 +120,7 @@ struct platform_suspend_ops {
68635 * suspend_set_ops - set platform dependent suspend operations
68636 * @ops: The new suspend operations to set.
68637 */
68638-extern void suspend_set_ops(struct platform_suspend_ops *ops);
68639+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
68640 extern int suspend_valid_only_mem(suspend_state_t state);
68641
68642 /**
68643@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
68644 #else /* !CONFIG_SUSPEND */
68645 #define suspend_valid_only_mem NULL
68646
68647-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
68648+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
68649 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
68650 #endif /* !CONFIG_SUSPEND */
68651
68652@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
68653 * platforms which require special recovery actions in that situation.
68654 */
68655 struct platform_hibernation_ops {
68656- int (*begin)(void);
68657- void (*end)(void);
68658- int (*pre_snapshot)(void);
68659- void (*finish)(void);
68660- int (*prepare)(void);
68661- int (*enter)(void);
68662- void (*leave)(void);
68663- int (*pre_restore)(void);
68664- void (*restore_cleanup)(void);
68665- void (*recover)(void);
68666+ int (* const begin)(void);
68667+ void (* const end)(void);
68668+ int (* const pre_snapshot)(void);
68669+ void (* const finish)(void);
68670+ int (* const prepare)(void);
68671+ int (* const enter)(void);
68672+ void (* const leave)(void);
68673+ int (* const pre_restore)(void);
68674+ void (* const restore_cleanup)(void);
68675+ void (* const recover)(void);
68676 };
68677
68678 #ifdef CONFIG_HIBERNATION
68679@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
68680 extern void swsusp_unset_page_free(struct page *);
68681 extern unsigned long get_safe_page(gfp_t gfp_mask);
68682
68683-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
68684+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
68685 extern int hibernate(void);
68686 extern bool system_entering_hibernation(void);
68687 #else /* CONFIG_HIBERNATION */
68688@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
68689 static inline void swsusp_set_page_free(struct page *p) {}
68690 static inline void swsusp_unset_page_free(struct page *p) {}
68691
68692-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
68693+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
68694 static inline int hibernate(void) { return -ENOSYS; }
68695 static inline bool system_entering_hibernation(void) { return false; }
68696 #endif /* CONFIG_HIBERNATION */
68697diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
68698index 0eb6942..a805cb6 100644
68699--- a/include/linux/sysctl.h
68700+++ b/include/linux/sysctl.h
68701@@ -164,7 +164,11 @@ enum
68702 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
68703 };
68704
68705-
68706+#ifdef CONFIG_PAX_SOFTMODE
68707+enum {
68708+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
68709+};
68710+#endif
68711
68712 /* CTL_VM names: */
68713 enum
68714@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
68715
68716 extern int proc_dostring(struct ctl_table *, int,
68717 void __user *, size_t *, loff_t *);
68718+extern int proc_dostring_modpriv(struct ctl_table *, int,
68719+ void __user *, size_t *, loff_t *);
68720 extern int proc_dointvec(struct ctl_table *, int,
68721 void __user *, size_t *, loff_t *);
68722 extern int proc_dointvec_minmax(struct ctl_table *, int,
68723@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
68724
68725 extern ctl_handler sysctl_data;
68726 extern ctl_handler sysctl_string;
68727+extern ctl_handler sysctl_string_modpriv;
68728 extern ctl_handler sysctl_intvec;
68729 extern ctl_handler sysctl_jiffies;
68730 extern ctl_handler sysctl_ms_jiffies;
68731diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
68732index 9d68fed..71f02cc 100644
68733--- a/include/linux/sysfs.h
68734+++ b/include/linux/sysfs.h
68735@@ -75,8 +75,8 @@ struct bin_attribute {
68736 };
68737
68738 struct sysfs_ops {
68739- ssize_t (*show)(struct kobject *, struct attribute *,char *);
68740- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
68741+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
68742+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
68743 };
68744
68745 struct sysfs_dirent;
68746diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
68747index a8cc4e1..98d3b85 100644
68748--- a/include/linux/thread_info.h
68749+++ b/include/linux/thread_info.h
68750@@ -23,7 +23,7 @@ struct restart_block {
68751 };
68752 /* For futex_wait and futex_wait_requeue_pi */
68753 struct {
68754- u32 *uaddr;
68755+ u32 __user *uaddr;
68756 u32 val;
68757 u32 flags;
68758 u32 bitset;
68759diff --git a/include/linux/tty.h b/include/linux/tty.h
68760index e9c57e9..ee6d489 100644
68761--- a/include/linux/tty.h
68762+++ b/include/linux/tty.h
68763@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
68764 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
68765 extern void tty_ldisc_enable(struct tty_struct *tty);
68766
68767-
68768 /* n_tty.c */
68769 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
68770
68771diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
68772index 0c4ee9b..9f7c426 100644
68773--- a/include/linux/tty_ldisc.h
68774+++ b/include/linux/tty_ldisc.h
68775@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
68776
68777 struct module *owner;
68778
68779- int refcount;
68780+ atomic_t refcount;
68781 };
68782
68783 struct tty_ldisc {
68784diff --git a/include/linux/types.h b/include/linux/types.h
68785index c42724f..d190eee 100644
68786--- a/include/linux/types.h
68787+++ b/include/linux/types.h
68788@@ -191,10 +191,26 @@ typedef struct {
68789 volatile int counter;
68790 } atomic_t;
68791
68792+#ifdef CONFIG_PAX_REFCOUNT
68793+typedef struct {
68794+ volatile int counter;
68795+} atomic_unchecked_t;
68796+#else
68797+typedef atomic_t atomic_unchecked_t;
68798+#endif
68799+
68800 #ifdef CONFIG_64BIT
68801 typedef struct {
68802 volatile long counter;
68803 } atomic64_t;
68804+
68805+#ifdef CONFIG_PAX_REFCOUNT
68806+typedef struct {
68807+ volatile long counter;
68808+} atomic64_unchecked_t;
68809+#else
68810+typedef atomic64_t atomic64_unchecked_t;
68811+#endif
68812 #endif
68813
68814 struct ustat {
68815diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
68816index 6b58367..53a3e8e 100644
68817--- a/include/linux/uaccess.h
68818+++ b/include/linux/uaccess.h
68819@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68820 long ret; \
68821 mm_segment_t old_fs = get_fs(); \
68822 \
68823- set_fs(KERNEL_DS); \
68824 pagefault_disable(); \
68825- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
68826- pagefault_enable(); \
68827+ set_fs(KERNEL_DS); \
68828+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
68829 set_fs(old_fs); \
68830+ pagefault_enable(); \
68831 ret; \
68832 })
68833
68834@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68835 * Safely read from address @src to the buffer at @dst. If a kernel fault
68836 * happens, handle that and return -EFAULT.
68837 */
68838-extern long probe_kernel_read(void *dst, void *src, size_t size);
68839+extern long probe_kernel_read(void *dst, const void *src, size_t size);
68840
68841 /*
68842 * probe_kernel_write(): safely attempt to write to a location
68843@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
68844 * Safely write to address @dst from the buffer at @src. If a kernel fault
68845 * happens, handle that and return -EFAULT.
68846 */
68847-extern long probe_kernel_write(void *dst, void *src, size_t size);
68848+extern long probe_kernel_write(void *dst, const void *src, size_t size);
68849
68850 #endif /* __LINUX_UACCESS_H__ */
68851diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
68852index 99c1b4d..bb94261 100644
68853--- a/include/linux/unaligned/access_ok.h
68854+++ b/include/linux/unaligned/access_ok.h
68855@@ -6,32 +6,32 @@
68856
68857 static inline u16 get_unaligned_le16(const void *p)
68858 {
68859- return le16_to_cpup((__le16 *)p);
68860+ return le16_to_cpup((const __le16 *)p);
68861 }
68862
68863 static inline u32 get_unaligned_le32(const void *p)
68864 {
68865- return le32_to_cpup((__le32 *)p);
68866+ return le32_to_cpup((const __le32 *)p);
68867 }
68868
68869 static inline u64 get_unaligned_le64(const void *p)
68870 {
68871- return le64_to_cpup((__le64 *)p);
68872+ return le64_to_cpup((const __le64 *)p);
68873 }
68874
68875 static inline u16 get_unaligned_be16(const void *p)
68876 {
68877- return be16_to_cpup((__be16 *)p);
68878+ return be16_to_cpup((const __be16 *)p);
68879 }
68880
68881 static inline u32 get_unaligned_be32(const void *p)
68882 {
68883- return be32_to_cpup((__be32 *)p);
68884+ return be32_to_cpup((const __be32 *)p);
68885 }
68886
68887 static inline u64 get_unaligned_be64(const void *p)
68888 {
68889- return be64_to_cpup((__be64 *)p);
68890+ return be64_to_cpup((const __be64 *)p);
68891 }
68892
68893 static inline void put_unaligned_le16(u16 val, void *p)
68894diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
68895index 79b9837..b5a56f9 100644
68896--- a/include/linux/vermagic.h
68897+++ b/include/linux/vermagic.h
68898@@ -26,9 +26,35 @@
68899 #define MODULE_ARCH_VERMAGIC ""
68900 #endif
68901
68902+#ifdef CONFIG_PAX_REFCOUNT
68903+#define MODULE_PAX_REFCOUNT "REFCOUNT "
68904+#else
68905+#define MODULE_PAX_REFCOUNT ""
68906+#endif
68907+
68908+#ifdef CONSTIFY_PLUGIN
68909+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
68910+#else
68911+#define MODULE_CONSTIFY_PLUGIN ""
68912+#endif
68913+
68914+#ifdef STACKLEAK_PLUGIN
68915+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
68916+#else
68917+#define MODULE_STACKLEAK_PLUGIN ""
68918+#endif
68919+
68920+#ifdef CONFIG_GRKERNSEC
68921+#define MODULE_GRSEC "GRSEC "
68922+#else
68923+#define MODULE_GRSEC ""
68924+#endif
68925+
68926 #define VERMAGIC_STRING \
68927 UTS_RELEASE " " \
68928 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
68929 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
68930- MODULE_ARCH_VERMAGIC
68931+ MODULE_ARCH_VERMAGIC \
68932+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
68933+ MODULE_GRSEC
68934
68935diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
68936index 819a634..462ac12 100644
68937--- a/include/linux/vmalloc.h
68938+++ b/include/linux/vmalloc.h
68939@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
68940 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
68941 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
68942 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
68943+
68944+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
68945+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
68946+#endif
68947+
68948 /* bits [20..32] reserved for arch specific ioremap internals */
68949
68950 /*
68951@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
68952
68953 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
68954
68955+#define vmalloc(x) \
68956+({ \
68957+ void *___retval; \
68958+ intoverflow_t ___x = (intoverflow_t)x; \
68959+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
68960+ ___retval = NULL; \
68961+ else \
68962+ ___retval = vmalloc((unsigned long)___x); \
68963+ ___retval; \
68964+})
68965+
68966+#define __vmalloc(x, y, z) \
68967+({ \
68968+ void *___retval; \
68969+ intoverflow_t ___x = (intoverflow_t)x; \
68970+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
68971+ ___retval = NULL; \
68972+ else \
68973+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
68974+ ___retval; \
68975+})
68976+
68977+#define vmalloc_user(x) \
68978+({ \
68979+ void *___retval; \
68980+ intoverflow_t ___x = (intoverflow_t)x; \
68981+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
68982+ ___retval = NULL; \
68983+ else \
68984+ ___retval = vmalloc_user((unsigned long)___x); \
68985+ ___retval; \
68986+})
68987+
68988+#define vmalloc_exec(x) \
68989+({ \
68990+ void *___retval; \
68991+ intoverflow_t ___x = (intoverflow_t)x; \
68992+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
68993+ ___retval = NULL; \
68994+ else \
68995+ ___retval = vmalloc_exec((unsigned long)___x); \
68996+ ___retval; \
68997+})
68998+
68999+#define vmalloc_node(x, y) \
69000+({ \
69001+ void *___retval; \
69002+ intoverflow_t ___x = (intoverflow_t)x; \
69003+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69004+ ___retval = NULL; \
69005+ else \
69006+ ___retval = vmalloc_node((unsigned long)___x, (y));\
69007+ ___retval; \
69008+})
69009+
69010+#define vmalloc_32(x) \
69011+({ \
69012+ void *___retval; \
69013+ intoverflow_t ___x = (intoverflow_t)x; \
69014+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69015+ ___retval = NULL; \
69016+ else \
69017+ ___retval = vmalloc_32((unsigned long)___x); \
69018+ ___retval; \
69019+})
69020+
69021+#define vmalloc_32_user(x) \
69022+({ \
69023+ void *___retval; \
69024+ intoverflow_t ___x = (intoverflow_t)x; \
69025+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69026+ ___retval = NULL; \
69027+ else \
69028+ ___retval = vmalloc_32_user((unsigned long)___x);\
69029+ ___retval; \
69030+})
69031+
69032 #endif /* _LINUX_VMALLOC_H */
69033diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69034index 13070d6..aa4159a 100644
69035--- a/include/linux/vmstat.h
69036+++ b/include/linux/vmstat.h
69037@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69038 /*
69039 * Zone based page accounting with per cpu differentials.
69040 */
69041-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69042+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69043
69044 static inline void zone_page_state_add(long x, struct zone *zone,
69045 enum zone_stat_item item)
69046 {
69047- atomic_long_add(x, &zone->vm_stat[item]);
69048- atomic_long_add(x, &vm_stat[item]);
69049+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69050+ atomic_long_add_unchecked(x, &vm_stat[item]);
69051 }
69052
69053 static inline unsigned long global_page_state(enum zone_stat_item item)
69054 {
69055- long x = atomic_long_read(&vm_stat[item]);
69056+ long x = atomic_long_read_unchecked(&vm_stat[item]);
69057 #ifdef CONFIG_SMP
69058 if (x < 0)
69059 x = 0;
69060@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
69061 static inline unsigned long zone_page_state(struct zone *zone,
69062 enum zone_stat_item item)
69063 {
69064- long x = atomic_long_read(&zone->vm_stat[item]);
69065+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69066 #ifdef CONFIG_SMP
69067 if (x < 0)
69068 x = 0;
69069@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
69070 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
69071 enum zone_stat_item item)
69072 {
69073- long x = atomic_long_read(&zone->vm_stat[item]);
69074+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69075
69076 #ifdef CONFIG_SMP
69077 int cpu;
69078@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
69079
69080 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
69081 {
69082- atomic_long_inc(&zone->vm_stat[item]);
69083- atomic_long_inc(&vm_stat[item]);
69084+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
69085+ atomic_long_inc_unchecked(&vm_stat[item]);
69086 }
69087
69088 static inline void __inc_zone_page_state(struct page *page,
69089@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
69090
69091 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
69092 {
69093- atomic_long_dec(&zone->vm_stat[item]);
69094- atomic_long_dec(&vm_stat[item]);
69095+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
69096+ atomic_long_dec_unchecked(&vm_stat[item]);
69097 }
69098
69099 static inline void __dec_zone_page_state(struct page *page,
69100diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
69101index eed5fcc..5080d24 100644
69102--- a/include/media/saa7146_vv.h
69103+++ b/include/media/saa7146_vv.h
69104@@ -167,7 +167,7 @@ struct saa7146_ext_vv
69105 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
69106
69107 /* the extension can override this */
69108- struct v4l2_ioctl_ops ops;
69109+ v4l2_ioctl_ops_no_const ops;
69110 /* pointer to the saa7146 core ops */
69111 const struct v4l2_ioctl_ops *core_ops;
69112
69113diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
69114index 73c9867..2da8837 100644
69115--- a/include/media/v4l2-dev.h
69116+++ b/include/media/v4l2-dev.h
69117@@ -34,7 +34,7 @@ struct v4l2_device;
69118 #define V4L2_FL_UNREGISTERED (0)
69119
69120 struct v4l2_file_operations {
69121- struct module *owner;
69122+ struct module * const owner;
69123 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
69124 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
69125 unsigned int (*poll) (struct file *, struct poll_table_struct *);
69126@@ -46,6 +46,7 @@ struct v4l2_file_operations {
69127 int (*open) (struct file *);
69128 int (*release) (struct file *);
69129 };
69130+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69131
69132 /*
69133 * Newer version of video_device, handled by videodev2.c
69134diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69135index 5d5d550..f559ef1 100644
69136--- a/include/media/v4l2-device.h
69137+++ b/include/media/v4l2-device.h
69138@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69139 this function returns 0. If the name ends with a digit (e.g. cx18),
69140 then the name will be set to cx18-0 since cx180 looks really odd. */
69141 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69142- atomic_t *instance);
69143+ atomic_unchecked_t *instance);
69144
69145 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69146 Since the parent disappears this ensures that v4l2_dev doesn't have an
69147diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69148index 7a4529d..7244290 100644
69149--- a/include/media/v4l2-ioctl.h
69150+++ b/include/media/v4l2-ioctl.h
69151@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69152 long (*vidioc_default) (struct file *file, void *fh,
69153 int cmd, void *arg);
69154 };
69155+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69156
69157
69158 /* v4l debugging and diagnostics */
69159diff --git a/include/net/flow.h b/include/net/flow.h
69160index 809970b..c3df4f3 100644
69161--- a/include/net/flow.h
69162+++ b/include/net/flow.h
69163@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69164 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69165 u8 dir, flow_resolve_t resolver);
69166 extern void flow_cache_flush(void);
69167-extern atomic_t flow_cache_genid;
69168+extern atomic_unchecked_t flow_cache_genid;
69169
69170 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69171 {
69172diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69173index 15e1f8fe..668837c 100644
69174--- a/include/net/inetpeer.h
69175+++ b/include/net/inetpeer.h
69176@@ -24,7 +24,7 @@ struct inet_peer
69177 __u32 dtime; /* the time of last use of not
69178 * referenced entries */
69179 atomic_t refcnt;
69180- atomic_t rid; /* Frag reception counter */
69181+ atomic_unchecked_t rid; /* Frag reception counter */
69182 __u32 tcp_ts;
69183 unsigned long tcp_ts_stamp;
69184 };
69185diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69186index 98978e7..2243a3d 100644
69187--- a/include/net/ip_vs.h
69188+++ b/include/net/ip_vs.h
69189@@ -365,7 +365,7 @@ struct ip_vs_conn {
69190 struct ip_vs_conn *control; /* Master control connection */
69191 atomic_t n_control; /* Number of controlled ones */
69192 struct ip_vs_dest *dest; /* real server */
69193- atomic_t in_pkts; /* incoming packet counter */
69194+ atomic_unchecked_t in_pkts; /* incoming packet counter */
69195
69196 /* packet transmitter for different forwarding methods. If it
69197 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69198@@ -466,7 +466,7 @@ struct ip_vs_dest {
69199 union nf_inet_addr addr; /* IP address of the server */
69200 __be16 port; /* port number of the server */
69201 volatile unsigned flags; /* dest status flags */
69202- atomic_t conn_flags; /* flags to copy to conn */
69203+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
69204 atomic_t weight; /* server weight */
69205
69206 atomic_t refcnt; /* reference counter */
69207diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69208index 69b610a..fe3962c 100644
69209--- a/include/net/irda/ircomm_core.h
69210+++ b/include/net/irda/ircomm_core.h
69211@@ -51,7 +51,7 @@ typedef struct {
69212 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69213 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69214 struct ircomm_info *);
69215-} call_t;
69216+} __no_const call_t;
69217
69218 struct ircomm_cb {
69219 irda_queue_t queue;
69220diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69221index eea2e61..08c692d 100644
69222--- a/include/net/irda/ircomm_tty.h
69223+++ b/include/net/irda/ircomm_tty.h
69224@@ -35,6 +35,7 @@
69225 #include <linux/termios.h>
69226 #include <linux/timer.h>
69227 #include <linux/tty.h> /* struct tty_struct */
69228+#include <asm/local.h>
69229
69230 #include <net/irda/irias_object.h>
69231 #include <net/irda/ircomm_core.h>
69232@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69233 unsigned short close_delay;
69234 unsigned short closing_wait; /* time to wait before closing */
69235
69236- int open_count;
69237- int blocked_open; /* # of blocked opens */
69238+ local_t open_count;
69239+ local_t blocked_open; /* # of blocked opens */
69240
69241 /* Protect concurent access to :
69242 * o self->open_count
69243diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69244index f82a1e8..82d81e8 100644
69245--- a/include/net/iucv/af_iucv.h
69246+++ b/include/net/iucv/af_iucv.h
69247@@ -87,7 +87,7 @@ struct iucv_sock {
69248 struct iucv_sock_list {
69249 struct hlist_head head;
69250 rwlock_t lock;
69251- atomic_t autobind_name;
69252+ atomic_unchecked_t autobind_name;
69253 };
69254
69255 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
69256diff --git a/include/net/lapb.h b/include/net/lapb.h
69257index 96cb5dd..25e8d4f 100644
69258--- a/include/net/lapb.h
69259+++ b/include/net/lapb.h
69260@@ -95,7 +95,7 @@ struct lapb_cb {
69261 struct sk_buff_head write_queue;
69262 struct sk_buff_head ack_queue;
69263 unsigned char window;
69264- struct lapb_register_struct callbacks;
69265+ struct lapb_register_struct *callbacks;
69266
69267 /* FRMR control information */
69268 struct lapb_frame frmr_data;
69269diff --git a/include/net/neighbour.h b/include/net/neighbour.h
69270index 3817fda..cdb2343 100644
69271--- a/include/net/neighbour.h
69272+++ b/include/net/neighbour.h
69273@@ -131,7 +131,7 @@ struct neigh_ops
69274 int (*connected_output)(struct sk_buff*);
69275 int (*hh_output)(struct sk_buff*);
69276 int (*queue_xmit)(struct sk_buff*);
69277-};
69278+} __do_const;
69279
69280 struct pneigh_entry
69281 {
69282diff --git a/include/net/netlink.h b/include/net/netlink.h
69283index c344646..4778c71 100644
69284--- a/include/net/netlink.h
69285+++ b/include/net/netlink.h
69286@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
69287 {
69288 return (remaining >= (int) sizeof(struct nlmsghdr) &&
69289 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
69290- nlh->nlmsg_len <= remaining);
69291+ nlh->nlmsg_len <= (unsigned int)remaining);
69292 }
69293
69294 /**
69295@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
69296 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
69297 {
69298 if (mark)
69299- skb_trim(skb, (unsigned char *) mark - skb->data);
69300+ skb_trim(skb, (const unsigned char *) mark - skb->data);
69301 }
69302
69303 /**
69304diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
69305index 9a4b8b7..e49e077 100644
69306--- a/include/net/netns/ipv4.h
69307+++ b/include/net/netns/ipv4.h
69308@@ -54,7 +54,7 @@ struct netns_ipv4 {
69309 int current_rt_cache_rebuild_count;
69310
69311 struct timer_list rt_secret_timer;
69312- atomic_t rt_genid;
69313+ atomic_unchecked_t rt_genid;
69314
69315 #ifdef CONFIG_IP_MROUTE
69316 struct sock *mroute_sk;
69317diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
69318index 8a6d529..171f401 100644
69319--- a/include/net/sctp/sctp.h
69320+++ b/include/net/sctp/sctp.h
69321@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
69322
69323 #else /* SCTP_DEBUG */
69324
69325-#define SCTP_DEBUG_PRINTK(whatever...)
69326-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
69327+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
69328+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
69329 #define SCTP_ENABLE_DEBUG
69330 #define SCTP_DISABLE_DEBUG
69331 #define SCTP_ASSERT(expr, str, func)
69332diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
69333index d97f689..f3b90ab 100644
69334--- a/include/net/secure_seq.h
69335+++ b/include/net/secure_seq.h
69336@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
69337 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
69338 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
69339 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
69340- __be16 dport);
69341+ __be16 dport);
69342 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
69343 __be16 sport, __be16 dport);
69344 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69345- __be16 sport, __be16 dport);
69346+ __be16 sport, __be16 dport);
69347 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
69348- __be16 sport, __be16 dport);
69349+ __be16 sport, __be16 dport);
69350 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69351- __be16 sport, __be16 dport);
69352+ __be16 sport, __be16 dport);
69353
69354 #endif /* _NET_SECURE_SEQ */
69355diff --git a/include/net/sock.h b/include/net/sock.h
69356index 9f96394..76fc9c7 100644
69357--- a/include/net/sock.h
69358+++ b/include/net/sock.h
69359@@ -272,7 +272,7 @@ struct sock {
69360 rwlock_t sk_callback_lock;
69361 int sk_err,
69362 sk_err_soft;
69363- atomic_t sk_drops;
69364+ atomic_unchecked_t sk_drops;
69365 unsigned short sk_ack_backlog;
69366 unsigned short sk_max_ack_backlog;
69367 __u32 sk_priority;
69368@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
69369 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
69370 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
69371 #else
69372-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
69373+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
69374 int inc)
69375 {
69376 }
69377diff --git a/include/net/tcp.h b/include/net/tcp.h
69378index 6cfe18b..dd21acb 100644
69379--- a/include/net/tcp.h
69380+++ b/include/net/tcp.h
69381@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
69382 struct tcp_seq_afinfo {
69383 char *name;
69384 sa_family_t family;
69385- struct file_operations seq_fops;
69386- struct seq_operations seq_ops;
69387+ file_operations_no_const seq_fops;
69388+ seq_operations_no_const seq_ops;
69389 };
69390
69391 struct tcp_iter_state {
69392diff --git a/include/net/udp.h b/include/net/udp.h
69393index f98abd2..b4b042f 100644
69394--- a/include/net/udp.h
69395+++ b/include/net/udp.h
69396@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
69397 char *name;
69398 sa_family_t family;
69399 struct udp_table *udp_table;
69400- struct file_operations seq_fops;
69401- struct seq_operations seq_ops;
69402+ file_operations_no_const seq_fops;
69403+ seq_operations_no_const seq_ops;
69404 };
69405
69406 struct udp_iter_state {
69407diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
69408index cbb822e..e9c1cbe 100644
69409--- a/include/rdma/iw_cm.h
69410+++ b/include/rdma/iw_cm.h
69411@@ -129,7 +129,7 @@ struct iw_cm_verbs {
69412 int backlog);
69413
69414 int (*destroy_listen)(struct iw_cm_id *cm_id);
69415-};
69416+} __no_const;
69417
69418 /**
69419 * iw_create_cm_id - Create an IW CM identifier.
69420diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
69421index 09a124b..caa8ca8 100644
69422--- a/include/scsi/libfc.h
69423+++ b/include/scsi/libfc.h
69424@@ -675,6 +675,7 @@ struct libfc_function_template {
69425 */
69426 void (*disc_stop_final) (struct fc_lport *);
69427 };
69428+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
69429
69430 /* information used by the discovery layer */
69431 struct fc_disc {
69432@@ -707,7 +708,7 @@ struct fc_lport {
69433 struct fc_disc disc;
69434
69435 /* Operational Information */
69436- struct libfc_function_template tt;
69437+ libfc_function_template_no_const tt;
69438 u8 link_up;
69439 u8 qfull;
69440 enum fc_lport_state state;
69441diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
69442index de8e180..f15e0d7 100644
69443--- a/include/scsi/scsi_device.h
69444+++ b/include/scsi/scsi_device.h
69445@@ -156,9 +156,9 @@ struct scsi_device {
69446 unsigned int max_device_blocked; /* what device_blocked counts down from */
69447 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
69448
69449- atomic_t iorequest_cnt;
69450- atomic_t iodone_cnt;
69451- atomic_t ioerr_cnt;
69452+ atomic_unchecked_t iorequest_cnt;
69453+ atomic_unchecked_t iodone_cnt;
69454+ atomic_unchecked_t ioerr_cnt;
69455
69456 struct device sdev_gendev,
69457 sdev_dev;
69458diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
69459index fc50bd6..81ba9cb 100644
69460--- a/include/scsi/scsi_transport_fc.h
69461+++ b/include/scsi/scsi_transport_fc.h
69462@@ -708,7 +708,7 @@ struct fc_function_template {
69463 unsigned long show_host_system_hostname:1;
69464
69465 unsigned long disable_target_scan:1;
69466-};
69467+} __do_const;
69468
69469
69470 /**
69471diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
69472index 3dae3f7..8440d6f 100644
69473--- a/include/sound/ac97_codec.h
69474+++ b/include/sound/ac97_codec.h
69475@@ -419,15 +419,15 @@
69476 struct snd_ac97;
69477
69478 struct snd_ac97_build_ops {
69479- int (*build_3d) (struct snd_ac97 *ac97);
69480- int (*build_specific) (struct snd_ac97 *ac97);
69481- int (*build_spdif) (struct snd_ac97 *ac97);
69482- int (*build_post_spdif) (struct snd_ac97 *ac97);
69483+ int (* const build_3d) (struct snd_ac97 *ac97);
69484+ int (* const build_specific) (struct snd_ac97 *ac97);
69485+ int (* const build_spdif) (struct snd_ac97 *ac97);
69486+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
69487 #ifdef CONFIG_PM
69488- void (*suspend) (struct snd_ac97 *ac97);
69489- void (*resume) (struct snd_ac97 *ac97);
69490+ void (* const suspend) (struct snd_ac97 *ac97);
69491+ void (* const resume) (struct snd_ac97 *ac97);
69492 #endif
69493- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69494+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69495 };
69496
69497 struct snd_ac97_bus_ops {
69498@@ -477,7 +477,7 @@ struct snd_ac97_template {
69499
69500 struct snd_ac97 {
69501 /* -- lowlevel (hardware) driver specific -- */
69502- struct snd_ac97_build_ops * build_ops;
69503+ const struct snd_ac97_build_ops * build_ops;
69504 void *private_data;
69505 void (*private_free) (struct snd_ac97 *ac97);
69506 /* --- */
69507diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
69508index 891cf1a..a94ba2b 100644
69509--- a/include/sound/ak4xxx-adda.h
69510+++ b/include/sound/ak4xxx-adda.h
69511@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
69512 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
69513 unsigned char val);
69514 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69515-};
69516+} __no_const;
69517
69518 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
69519
69520diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
69521index 8c05e47..2b5df97 100644
69522--- a/include/sound/hwdep.h
69523+++ b/include/sound/hwdep.h
69524@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
69525 struct snd_hwdep_dsp_status *status);
69526 int (*dsp_load)(struct snd_hwdep *hw,
69527 struct snd_hwdep_dsp_image *image);
69528-};
69529+} __no_const;
69530
69531 struct snd_hwdep {
69532 struct snd_card *card;
69533diff --git a/include/sound/info.h b/include/sound/info.h
69534index 112e894..6fda5b5 100644
69535--- a/include/sound/info.h
69536+++ b/include/sound/info.h
69537@@ -44,7 +44,7 @@ struct snd_info_entry_text {
69538 struct snd_info_buffer *buffer);
69539 void (*write)(struct snd_info_entry *entry,
69540 struct snd_info_buffer *buffer);
69541-};
69542+} __no_const;
69543
69544 struct snd_info_entry_ops {
69545 int (*open)(struct snd_info_entry *entry,
69546diff --git a/include/sound/pcm.h b/include/sound/pcm.h
69547index de6d981..590a550 100644
69548--- a/include/sound/pcm.h
69549+++ b/include/sound/pcm.h
69550@@ -80,6 +80,7 @@ struct snd_pcm_ops {
69551 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
69552 int (*ack)(struct snd_pcm_substream *substream);
69553 };
69554+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
69555
69556 /*
69557 *
69558diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
69559index 736eac7..fe8a80f 100644
69560--- a/include/sound/sb16_csp.h
69561+++ b/include/sound/sb16_csp.h
69562@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
69563 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
69564 int (*csp_stop) (struct snd_sb_csp * p);
69565 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
69566-};
69567+} __no_const;
69568
69569 /*
69570 * CSP private data
69571diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
69572index 444cd6b..3327cc5 100644
69573--- a/include/sound/ymfpci.h
69574+++ b/include/sound/ymfpci.h
69575@@ -358,7 +358,7 @@ struct snd_ymfpci {
69576 spinlock_t reg_lock;
69577 spinlock_t voice_lock;
69578 wait_queue_head_t interrupt_sleep;
69579- atomic_t interrupt_sleep_count;
69580+ atomic_unchecked_t interrupt_sleep_count;
69581 struct snd_info_entry *proc_entry;
69582 const struct firmware *dsp_microcode;
69583 const struct firmware *controller_microcode;
69584diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
69585index b89f9db..f097b38 100644
69586--- a/include/trace/events/irq.h
69587+++ b/include/trace/events/irq.h
69588@@ -34,7 +34,7 @@
69589 */
69590 TRACE_EVENT(irq_handler_entry,
69591
69592- TP_PROTO(int irq, struct irqaction *action),
69593+ TP_PROTO(int irq, const struct irqaction *action),
69594
69595 TP_ARGS(irq, action),
69596
69597@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
69598 */
69599 TRACE_EVENT(irq_handler_exit,
69600
69601- TP_PROTO(int irq, struct irqaction *action, int ret),
69602+ TP_PROTO(int irq, const struct irqaction *action, int ret),
69603
69604 TP_ARGS(irq, action, ret),
69605
69606@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
69607 */
69608 TRACE_EVENT(softirq_entry,
69609
69610- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69611+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69612
69613 TP_ARGS(h, vec),
69614
69615@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
69616 */
69617 TRACE_EVENT(softirq_exit,
69618
69619- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69620+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69621
69622 TP_ARGS(h, vec),
69623
69624diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
69625index 0993a22..32ba2fe 100644
69626--- a/include/video/uvesafb.h
69627+++ b/include/video/uvesafb.h
69628@@ -177,6 +177,7 @@ struct uvesafb_par {
69629 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
69630 u8 pmi_setpal; /* PMI for palette changes */
69631 u16 *pmi_base; /* protected mode interface location */
69632+ u8 *pmi_code; /* protected mode code location */
69633 void *pmi_start;
69634 void *pmi_pal;
69635 u8 *vbe_state_orig; /*
69636diff --git a/init/Kconfig b/init/Kconfig
69637index d72691b..3996e54 100644
69638--- a/init/Kconfig
69639+++ b/init/Kconfig
69640@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
69641
69642 config COMPAT_BRK
69643 bool "Disable heap randomization"
69644- default y
69645+ default n
69646 help
69647 Randomizing heap placement makes heap exploits harder, but it
69648 also breaks ancient binaries (including anything libc5 based).
69649diff --git a/init/do_mounts.c b/init/do_mounts.c
69650index bb008d0..4fa3933 100644
69651--- a/init/do_mounts.c
69652+++ b/init/do_mounts.c
69653@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
69654
69655 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
69656 {
69657- int err = sys_mount(name, "/root", fs, flags, data);
69658+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
69659 if (err)
69660 return err;
69661
69662- sys_chdir("/root");
69663+ sys_chdir((__force const char __user *)"/root");
69664 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
69665 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
69666 current->fs->pwd.mnt->mnt_sb->s_type->name,
69667@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
69668 va_start(args, fmt);
69669 vsprintf(buf, fmt, args);
69670 va_end(args);
69671- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
69672+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
69673 if (fd >= 0) {
69674 sys_ioctl(fd, FDEJECT, 0);
69675 sys_close(fd);
69676 }
69677 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
69678- fd = sys_open("/dev/console", O_RDWR, 0);
69679+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
69680 if (fd >= 0) {
69681 sys_ioctl(fd, TCGETS, (long)&termios);
69682 termios.c_lflag &= ~ICANON;
69683 sys_ioctl(fd, TCSETSF, (long)&termios);
69684- sys_read(fd, &c, 1);
69685+ sys_read(fd, (char __user *)&c, 1);
69686 termios.c_lflag |= ICANON;
69687 sys_ioctl(fd, TCSETSF, (long)&termios);
69688 sys_close(fd);
69689@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
69690 mount_root();
69691 out:
69692 devtmpfs_mount("dev");
69693- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69694- sys_chroot(".");
69695+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
69696+ sys_chroot((__force char __user *)".");
69697 }
69698diff --git a/init/do_mounts.h b/init/do_mounts.h
69699index f5b978a..69dbfe8 100644
69700--- a/init/do_mounts.h
69701+++ b/init/do_mounts.h
69702@@ -15,15 +15,15 @@ extern int root_mountflags;
69703
69704 static inline int create_dev(char *name, dev_t dev)
69705 {
69706- sys_unlink(name);
69707- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
69708+ sys_unlink((char __force_user *)name);
69709+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
69710 }
69711
69712 #if BITS_PER_LONG == 32
69713 static inline u32 bstat(char *name)
69714 {
69715 struct stat64 stat;
69716- if (sys_stat64(name, &stat) != 0)
69717+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
69718 return 0;
69719 if (!S_ISBLK(stat.st_mode))
69720 return 0;
69721@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
69722 static inline u32 bstat(char *name)
69723 {
69724 struct stat stat;
69725- if (sys_newstat(name, &stat) != 0)
69726+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
69727 return 0;
69728 if (!S_ISBLK(stat.st_mode))
69729 return 0;
69730diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
69731index 614241b..4da046b 100644
69732--- a/init/do_mounts_initrd.c
69733+++ b/init/do_mounts_initrd.c
69734@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
69735 sys_close(old_fd);sys_close(root_fd);
69736 sys_close(0);sys_close(1);sys_close(2);
69737 sys_setsid();
69738- (void) sys_open("/dev/console",O_RDWR,0);
69739+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
69740 (void) sys_dup(0);
69741 (void) sys_dup(0);
69742 return kernel_execve(shell, argv, envp_init);
69743@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
69744 create_dev("/dev/root.old", Root_RAM0);
69745 /* mount initrd on rootfs' /root */
69746 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
69747- sys_mkdir("/old", 0700);
69748- root_fd = sys_open("/", 0, 0);
69749- old_fd = sys_open("/old", 0, 0);
69750+ sys_mkdir((const char __force_user *)"/old", 0700);
69751+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
69752+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
69753 /* move initrd over / and chdir/chroot in initrd root */
69754- sys_chdir("/root");
69755- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69756- sys_chroot(".");
69757+ sys_chdir((const char __force_user *)"/root");
69758+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
69759+ sys_chroot((const char __force_user *)".");
69760
69761 /*
69762 * In case that a resume from disk is carried out by linuxrc or one of
69763@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
69764
69765 /* move initrd to rootfs' /old */
69766 sys_fchdir(old_fd);
69767- sys_mount("/", ".", NULL, MS_MOVE, NULL);
69768+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
69769 /* switch root and cwd back to / of rootfs */
69770 sys_fchdir(root_fd);
69771- sys_chroot(".");
69772+ sys_chroot((const char __force_user *)".");
69773 sys_close(old_fd);
69774 sys_close(root_fd);
69775
69776 if (new_decode_dev(real_root_dev) == Root_RAM0) {
69777- sys_chdir("/old");
69778+ sys_chdir((const char __force_user *)"/old");
69779 return;
69780 }
69781
69782@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
69783 mount_root();
69784
69785 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
69786- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
69787+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
69788 if (!error)
69789 printk("okay\n");
69790 else {
69791- int fd = sys_open("/dev/root.old", O_RDWR, 0);
69792+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
69793 if (error == -ENOENT)
69794 printk("/initrd does not exist. Ignored.\n");
69795 else
69796 printk("failed\n");
69797 printk(KERN_NOTICE "Unmounting old root\n");
69798- sys_umount("/old", MNT_DETACH);
69799+ sys_umount((char __force_user *)"/old", MNT_DETACH);
69800 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
69801 if (fd < 0) {
69802 error = fd;
69803@@ -119,11 +119,11 @@ int __init initrd_load(void)
69804 * mounted in the normal path.
69805 */
69806 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
69807- sys_unlink("/initrd.image");
69808+ sys_unlink((const char __force_user *)"/initrd.image");
69809 handle_initrd();
69810 return 1;
69811 }
69812 }
69813- sys_unlink("/initrd.image");
69814+ sys_unlink((const char __force_user *)"/initrd.image");
69815 return 0;
69816 }
69817diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
69818index 69aebbf..c0bf6a7 100644
69819--- a/init/do_mounts_md.c
69820+++ b/init/do_mounts_md.c
69821@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
69822 partitioned ? "_d" : "", minor,
69823 md_setup_args[ent].device_names);
69824
69825- fd = sys_open(name, 0, 0);
69826+ fd = sys_open((char __force_user *)name, 0, 0);
69827 if (fd < 0) {
69828 printk(KERN_ERR "md: open failed - cannot start "
69829 "array %s\n", name);
69830@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
69831 * array without it
69832 */
69833 sys_close(fd);
69834- fd = sys_open(name, 0, 0);
69835+ fd = sys_open((char __force_user *)name, 0, 0);
69836 sys_ioctl(fd, BLKRRPART, 0);
69837 }
69838 sys_close(fd);
69839@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
69840
69841 wait_for_device_probe();
69842
69843- fd = sys_open("/dev/md0", 0, 0);
69844+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
69845 if (fd >= 0) {
69846 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
69847 sys_close(fd);
69848diff --git a/init/initramfs.c b/init/initramfs.c
69849index 1fd59b8..a01b079 100644
69850--- a/init/initramfs.c
69851+++ b/init/initramfs.c
69852@@ -74,7 +74,7 @@ static void __init free_hash(void)
69853 }
69854 }
69855
69856-static long __init do_utime(char __user *filename, time_t mtime)
69857+static long __init do_utime(__force char __user *filename, time_t mtime)
69858 {
69859 struct timespec t[2];
69860
69861@@ -109,7 +109,7 @@ static void __init dir_utime(void)
69862 struct dir_entry *de, *tmp;
69863 list_for_each_entry_safe(de, tmp, &dir_list, list) {
69864 list_del(&de->list);
69865- do_utime(de->name, de->mtime);
69866+ do_utime((char __force_user *)de->name, de->mtime);
69867 kfree(de->name);
69868 kfree(de);
69869 }
69870@@ -271,7 +271,7 @@ static int __init maybe_link(void)
69871 if (nlink >= 2) {
69872 char *old = find_link(major, minor, ino, mode, collected);
69873 if (old)
69874- return (sys_link(old, collected) < 0) ? -1 : 1;
69875+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
69876 }
69877 return 0;
69878 }
69879@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
69880 {
69881 struct stat st;
69882
69883- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
69884+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
69885 if (S_ISDIR(st.st_mode))
69886- sys_rmdir(path);
69887+ sys_rmdir((char __force_user *)path);
69888 else
69889- sys_unlink(path);
69890+ sys_unlink((char __force_user *)path);
69891 }
69892 }
69893
69894@@ -305,7 +305,7 @@ static int __init do_name(void)
69895 int openflags = O_WRONLY|O_CREAT;
69896 if (ml != 1)
69897 openflags |= O_TRUNC;
69898- wfd = sys_open(collected, openflags, mode);
69899+ wfd = sys_open((char __force_user *)collected, openflags, mode);
69900
69901 if (wfd >= 0) {
69902 sys_fchown(wfd, uid, gid);
69903@@ -317,17 +317,17 @@ static int __init do_name(void)
69904 }
69905 }
69906 } else if (S_ISDIR(mode)) {
69907- sys_mkdir(collected, mode);
69908- sys_chown(collected, uid, gid);
69909- sys_chmod(collected, mode);
69910+ sys_mkdir((char __force_user *)collected, mode);
69911+ sys_chown((char __force_user *)collected, uid, gid);
69912+ sys_chmod((char __force_user *)collected, mode);
69913 dir_add(collected, mtime);
69914 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
69915 S_ISFIFO(mode) || S_ISSOCK(mode)) {
69916 if (maybe_link() == 0) {
69917- sys_mknod(collected, mode, rdev);
69918- sys_chown(collected, uid, gid);
69919- sys_chmod(collected, mode);
69920- do_utime(collected, mtime);
69921+ sys_mknod((char __force_user *)collected, mode, rdev);
69922+ sys_chown((char __force_user *)collected, uid, gid);
69923+ sys_chmod((char __force_user *)collected, mode);
69924+ do_utime((char __force_user *)collected, mtime);
69925 }
69926 }
69927 return 0;
69928@@ -336,15 +336,15 @@ static int __init do_name(void)
69929 static int __init do_copy(void)
69930 {
69931 if (count >= body_len) {
69932- sys_write(wfd, victim, body_len);
69933+ sys_write(wfd, (char __force_user *)victim, body_len);
69934 sys_close(wfd);
69935- do_utime(vcollected, mtime);
69936+ do_utime((char __force_user *)vcollected, mtime);
69937 kfree(vcollected);
69938 eat(body_len);
69939 state = SkipIt;
69940 return 0;
69941 } else {
69942- sys_write(wfd, victim, count);
69943+ sys_write(wfd, (char __force_user *)victim, count);
69944 body_len -= count;
69945 eat(count);
69946 return 1;
69947@@ -355,9 +355,9 @@ static int __init do_symlink(void)
69948 {
69949 collected[N_ALIGN(name_len) + body_len] = '\0';
69950 clean_path(collected, 0);
69951- sys_symlink(collected + N_ALIGN(name_len), collected);
69952- sys_lchown(collected, uid, gid);
69953- do_utime(collected, mtime);
69954+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
69955+ sys_lchown((char __force_user *)collected, uid, gid);
69956+ do_utime((char __force_user *)collected, mtime);
69957 state = SkipIt;
69958 next_state = Reset;
69959 return 0;
69960diff --git a/init/main.c b/init/main.c
69961index 1eb4bd5..da8c6f5 100644
69962--- a/init/main.c
69963+++ b/init/main.c
69964@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
69965 #ifdef CONFIG_TC
69966 extern void tc_init(void);
69967 #endif
69968+extern void grsecurity_init(void);
69969
69970 enum system_states system_state __read_mostly;
69971 EXPORT_SYMBOL(system_state);
69972@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
69973
69974 __setup("reset_devices", set_reset_devices);
69975
69976+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
69977+extern char pax_enter_kernel_user[];
69978+extern char pax_exit_kernel_user[];
69979+extern pgdval_t clone_pgd_mask;
69980+#endif
69981+
69982+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
69983+static int __init setup_pax_nouderef(char *str)
69984+{
69985+#ifdef CONFIG_X86_32
69986+ unsigned int cpu;
69987+ struct desc_struct *gdt;
69988+
69989+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
69990+ gdt = get_cpu_gdt_table(cpu);
69991+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
69992+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
69993+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
69994+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
69995+ }
69996+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
69997+#else
69998+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
69999+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70000+ clone_pgd_mask = ~(pgdval_t)0UL;
70001+#endif
70002+
70003+ return 0;
70004+}
70005+early_param("pax_nouderef", setup_pax_nouderef);
70006+#endif
70007+
70008+#ifdef CONFIG_PAX_SOFTMODE
70009+int pax_softmode;
70010+
70011+static int __init setup_pax_softmode(char *str)
70012+{
70013+ get_option(&str, &pax_softmode);
70014+ return 1;
70015+}
70016+__setup("pax_softmode=", setup_pax_softmode);
70017+#endif
70018+
70019 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70020 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70021 static const char *panic_later, *panic_param;
70022@@ -705,52 +749,53 @@ int initcall_debug;
70023 core_param(initcall_debug, initcall_debug, bool, 0644);
70024
70025 static char msgbuf[64];
70026-static struct boot_trace_call call;
70027-static struct boot_trace_ret ret;
70028+static struct boot_trace_call trace_call;
70029+static struct boot_trace_ret trace_ret;
70030
70031 int do_one_initcall(initcall_t fn)
70032 {
70033 int count = preempt_count();
70034 ktime_t calltime, delta, rettime;
70035+ const char *msg1 = "", *msg2 = "";
70036
70037 if (initcall_debug) {
70038- call.caller = task_pid_nr(current);
70039- printk("calling %pF @ %i\n", fn, call.caller);
70040+ trace_call.caller = task_pid_nr(current);
70041+ printk("calling %pF @ %i\n", fn, trace_call.caller);
70042 calltime = ktime_get();
70043- trace_boot_call(&call, fn);
70044+ trace_boot_call(&trace_call, fn);
70045 enable_boot_trace();
70046 }
70047
70048- ret.result = fn();
70049+ trace_ret.result = fn();
70050
70051 if (initcall_debug) {
70052 disable_boot_trace();
70053 rettime = ktime_get();
70054 delta = ktime_sub(rettime, calltime);
70055- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70056- trace_boot_ret(&ret, fn);
70057+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70058+ trace_boot_ret(&trace_ret, fn);
70059 printk("initcall %pF returned %d after %Ld usecs\n", fn,
70060- ret.result, ret.duration);
70061+ trace_ret.result, trace_ret.duration);
70062 }
70063
70064 msgbuf[0] = 0;
70065
70066- if (ret.result && ret.result != -ENODEV && initcall_debug)
70067- sprintf(msgbuf, "error code %d ", ret.result);
70068+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
70069+ sprintf(msgbuf, "error code %d ", trace_ret.result);
70070
70071 if (preempt_count() != count) {
70072- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
70073+ msg1 = " preemption imbalance";
70074 preempt_count() = count;
70075 }
70076 if (irqs_disabled()) {
70077- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
70078+ msg2 = " disabled interrupts";
70079 local_irq_enable();
70080 }
70081- if (msgbuf[0]) {
70082- printk("initcall %pF returned with %s\n", fn, msgbuf);
70083+ if (msgbuf[0] || *msg1 || *msg2) {
70084+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
70085 }
70086
70087- return ret.result;
70088+ return trace_ret.result;
70089 }
70090
70091
70092@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
70093 if (!ramdisk_execute_command)
70094 ramdisk_execute_command = "/init";
70095
70096- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
70097+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
70098 ramdisk_execute_command = NULL;
70099 prepare_namespace();
70100 }
70101
70102+ grsecurity_init();
70103+
70104 /*
70105 * Ok, we have completed the initial bootup, and
70106 * we're essentially up and running. Get rid of the
70107diff --git a/init/noinitramfs.c b/init/noinitramfs.c
70108index f4c1a3a..96c19bd 100644
70109--- a/init/noinitramfs.c
70110+++ b/init/noinitramfs.c
70111@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
70112 {
70113 int err;
70114
70115- err = sys_mkdir("/dev", 0755);
70116+ err = sys_mkdir((const char __user *)"/dev", 0755);
70117 if (err < 0)
70118 goto out;
70119
70120@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
70121 if (err < 0)
70122 goto out;
70123
70124- err = sys_mkdir("/root", 0700);
70125+ err = sys_mkdir((const char __user *)"/root", 0700);
70126 if (err < 0)
70127 goto out;
70128
70129diff --git a/ipc/mqueue.c b/ipc/mqueue.c
70130index d01bc14..8df81db 100644
70131--- a/ipc/mqueue.c
70132+++ b/ipc/mqueue.c
70133@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70134 mq_bytes = (mq_msg_tblsz +
70135 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70136
70137+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70138 spin_lock(&mq_lock);
70139 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70140 u->mq_bytes + mq_bytes >
70141diff --git a/ipc/msg.c b/ipc/msg.c
70142index 779f762..4af9e36 100644
70143--- a/ipc/msg.c
70144+++ b/ipc/msg.c
70145@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70146 return security_msg_queue_associate(msq, msgflg);
70147 }
70148
70149+static struct ipc_ops msg_ops = {
70150+ .getnew = newque,
70151+ .associate = msg_security,
70152+ .more_checks = NULL
70153+};
70154+
70155 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70156 {
70157 struct ipc_namespace *ns;
70158- struct ipc_ops msg_ops;
70159 struct ipc_params msg_params;
70160
70161 ns = current->nsproxy->ipc_ns;
70162
70163- msg_ops.getnew = newque;
70164- msg_ops.associate = msg_security;
70165- msg_ops.more_checks = NULL;
70166-
70167 msg_params.key = key;
70168 msg_params.flg = msgflg;
70169
70170diff --git a/ipc/sem.c b/ipc/sem.c
70171index b781007..f738b04 100644
70172--- a/ipc/sem.c
70173+++ b/ipc/sem.c
70174@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70175 return 0;
70176 }
70177
70178+static struct ipc_ops sem_ops = {
70179+ .getnew = newary,
70180+ .associate = sem_security,
70181+ .more_checks = sem_more_checks
70182+};
70183+
70184 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70185 {
70186 struct ipc_namespace *ns;
70187- struct ipc_ops sem_ops;
70188 struct ipc_params sem_params;
70189
70190 ns = current->nsproxy->ipc_ns;
70191@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70192 if (nsems < 0 || nsems > ns->sc_semmsl)
70193 return -EINVAL;
70194
70195- sem_ops.getnew = newary;
70196- sem_ops.associate = sem_security;
70197- sem_ops.more_checks = sem_more_checks;
70198-
70199 sem_params.key = key;
70200 sem_params.flg = semflg;
70201 sem_params.u.nsems = nsems;
70202@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70203 ushort* sem_io = fast_sem_io;
70204 int nsems;
70205
70206+ pax_track_stack();
70207+
70208 sma = sem_lock_check(ns, semid);
70209 if (IS_ERR(sma))
70210 return PTR_ERR(sma);
70211@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70212 unsigned long jiffies_left = 0;
70213 struct ipc_namespace *ns;
70214
70215+ pax_track_stack();
70216+
70217 ns = current->nsproxy->ipc_ns;
70218
70219 if (nsops < 1 || semid < 0)
70220diff --git a/ipc/shm.c b/ipc/shm.c
70221index d30732c..7379456 100644
70222--- a/ipc/shm.c
70223+++ b/ipc/shm.c
70224@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70225 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70226 #endif
70227
70228+#ifdef CONFIG_GRKERNSEC
70229+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70230+ const time_t shm_createtime, const uid_t cuid,
70231+ const int shmid);
70232+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70233+ const time_t shm_createtime);
70234+#endif
70235+
70236 void shm_init_ns(struct ipc_namespace *ns)
70237 {
70238 ns->shm_ctlmax = SHMMAX;
70239@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70240 shp->shm_lprid = 0;
70241 shp->shm_atim = shp->shm_dtim = 0;
70242 shp->shm_ctim = get_seconds();
70243+#ifdef CONFIG_GRKERNSEC
70244+ {
70245+ struct timespec timeval;
70246+ do_posix_clock_monotonic_gettime(&timeval);
70247+
70248+ shp->shm_createtime = timeval.tv_sec;
70249+ }
70250+#endif
70251 shp->shm_segsz = size;
70252 shp->shm_nattch = 0;
70253 shp->shm_file = file;
70254@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
70255 return 0;
70256 }
70257
70258+static struct ipc_ops shm_ops = {
70259+ .getnew = newseg,
70260+ .associate = shm_security,
70261+ .more_checks = shm_more_checks
70262+};
70263+
70264 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
70265 {
70266 struct ipc_namespace *ns;
70267- struct ipc_ops shm_ops;
70268 struct ipc_params shm_params;
70269
70270 ns = current->nsproxy->ipc_ns;
70271
70272- shm_ops.getnew = newseg;
70273- shm_ops.associate = shm_security;
70274- shm_ops.more_checks = shm_more_checks;
70275-
70276 shm_params.key = key;
70277 shm_params.flg = shmflg;
70278 shm_params.u.size = size;
70279@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
70280 if (err)
70281 goto out_unlock;
70282
70283+#ifdef CONFIG_GRKERNSEC
70284+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
70285+ shp->shm_perm.cuid, shmid) ||
70286+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
70287+ err = -EACCES;
70288+ goto out_unlock;
70289+ }
70290+#endif
70291+
70292 path.dentry = dget(shp->shm_file->f_path.dentry);
70293 path.mnt = shp->shm_file->f_path.mnt;
70294 shp->shm_nattch++;
70295+#ifdef CONFIG_GRKERNSEC
70296+ shp->shm_lapid = current->pid;
70297+#endif
70298 size = i_size_read(path.dentry->d_inode);
70299 shm_unlock(shp);
70300
70301diff --git a/kernel/acct.c b/kernel/acct.c
70302index a6605ca..ca91111 100644
70303--- a/kernel/acct.c
70304+++ b/kernel/acct.c
70305@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
70306 */
70307 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
70308 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
70309- file->f_op->write(file, (char *)&ac,
70310+ file->f_op->write(file, (char __force_user *)&ac,
70311 sizeof(acct_t), &file->f_pos);
70312 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
70313 set_fs(fs);
70314diff --git a/kernel/audit.c b/kernel/audit.c
70315index 5feed23..513b02c 100644
70316--- a/kernel/audit.c
70317+++ b/kernel/audit.c
70318@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
70319 3) suppressed due to audit_rate_limit
70320 4) suppressed due to audit_backlog_limit
70321 */
70322-static atomic_t audit_lost = ATOMIC_INIT(0);
70323+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
70324
70325 /* The netlink socket. */
70326 static struct sock *audit_sock;
70327@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
70328 unsigned long now;
70329 int print;
70330
70331- atomic_inc(&audit_lost);
70332+ atomic_inc_unchecked(&audit_lost);
70333
70334 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
70335
70336@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
70337 printk(KERN_WARNING
70338 "audit: audit_lost=%d audit_rate_limit=%d "
70339 "audit_backlog_limit=%d\n",
70340- atomic_read(&audit_lost),
70341+ atomic_read_unchecked(&audit_lost),
70342 audit_rate_limit,
70343 audit_backlog_limit);
70344 audit_panic(message);
70345@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70346 status_set.pid = audit_pid;
70347 status_set.rate_limit = audit_rate_limit;
70348 status_set.backlog_limit = audit_backlog_limit;
70349- status_set.lost = atomic_read(&audit_lost);
70350+ status_set.lost = atomic_read_unchecked(&audit_lost);
70351 status_set.backlog = skb_queue_len(&audit_skb_queue);
70352 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
70353 &status_set, sizeof(status_set));
70354@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70355 spin_unlock_irq(&tsk->sighand->siglock);
70356 }
70357 read_unlock(&tasklist_lock);
70358- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
70359- &s, sizeof(s));
70360+
70361+ if (!err)
70362+ audit_send_reply(NETLINK_CB(skb).pid, seq,
70363+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
70364 break;
70365 }
70366 case AUDIT_TTY_SET: {
70367diff --git a/kernel/auditsc.c b/kernel/auditsc.c
70368index 267e484..f8e295a 100644
70369--- a/kernel/auditsc.c
70370+++ b/kernel/auditsc.c
70371@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
70372 }
70373
70374 /* global counter which is incremented every time something logs in */
70375-static atomic_t session_id = ATOMIC_INIT(0);
70376+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
70377
70378 /**
70379 * audit_set_loginuid - set a task's audit_context loginuid
70380@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
70381 */
70382 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
70383 {
70384- unsigned int sessionid = atomic_inc_return(&session_id);
70385+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
70386 struct audit_context *context = task->audit_context;
70387
70388 if (context && context->in_syscall) {
70389diff --git a/kernel/capability.c b/kernel/capability.c
70390index 8a944f5..db5001e 100644
70391--- a/kernel/capability.c
70392+++ b/kernel/capability.c
70393@@ -305,10 +305,26 @@ int capable(int cap)
70394 BUG();
70395 }
70396
70397- if (security_capable(cap) == 0) {
70398+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
70399 current->flags |= PF_SUPERPRIV;
70400 return 1;
70401 }
70402 return 0;
70403 }
70404+
70405+int capable_nolog(int cap)
70406+{
70407+ if (unlikely(!cap_valid(cap))) {
70408+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
70409+ BUG();
70410+ }
70411+
70412+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
70413+ current->flags |= PF_SUPERPRIV;
70414+ return 1;
70415+ }
70416+ return 0;
70417+}
70418+
70419 EXPORT_SYMBOL(capable);
70420+EXPORT_SYMBOL(capable_nolog);
70421diff --git a/kernel/cgroup.c b/kernel/cgroup.c
70422index 1fbcc74..7000012 100644
70423--- a/kernel/cgroup.c
70424+++ b/kernel/cgroup.c
70425@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
70426 struct hlist_head *hhead;
70427 struct cg_cgroup_link *link;
70428
70429+ pax_track_stack();
70430+
70431 /* First see if we already have a cgroup group that matches
70432 * the desired set */
70433 read_lock(&css_set_lock);
70434diff --git a/kernel/compat.c b/kernel/compat.c
70435index 8bc5578..186e44a 100644
70436--- a/kernel/compat.c
70437+++ b/kernel/compat.c
70438@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
70439 mm_segment_t oldfs;
70440 long ret;
70441
70442- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
70443+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
70444 oldfs = get_fs();
70445 set_fs(KERNEL_DS);
70446 ret = hrtimer_nanosleep_restart(restart);
70447@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
70448 oldfs = get_fs();
70449 set_fs(KERNEL_DS);
70450 ret = hrtimer_nanosleep(&tu,
70451- rmtp ? (struct timespec __user *)&rmt : NULL,
70452+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
70453 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
70454 set_fs(oldfs);
70455
70456@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
70457 mm_segment_t old_fs = get_fs();
70458
70459 set_fs(KERNEL_DS);
70460- ret = sys_sigpending((old_sigset_t __user *) &s);
70461+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
70462 set_fs(old_fs);
70463 if (ret == 0)
70464 ret = put_user(s, set);
70465@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
70466 old_fs = get_fs();
70467 set_fs(KERNEL_DS);
70468 ret = sys_sigprocmask(how,
70469- set ? (old_sigset_t __user *) &s : NULL,
70470- oset ? (old_sigset_t __user *) &s : NULL);
70471+ set ? (old_sigset_t __force_user *) &s : NULL,
70472+ oset ? (old_sigset_t __force_user *) &s : NULL);
70473 set_fs(old_fs);
70474 if (ret == 0)
70475 if (oset)
70476@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
70477 mm_segment_t old_fs = get_fs();
70478
70479 set_fs(KERNEL_DS);
70480- ret = sys_old_getrlimit(resource, &r);
70481+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
70482 set_fs(old_fs);
70483
70484 if (!ret) {
70485@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
70486 mm_segment_t old_fs = get_fs();
70487
70488 set_fs(KERNEL_DS);
70489- ret = sys_getrusage(who, (struct rusage __user *) &r);
70490+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
70491 set_fs(old_fs);
70492
70493 if (ret)
70494@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
70495 set_fs (KERNEL_DS);
70496 ret = sys_wait4(pid,
70497 (stat_addr ?
70498- (unsigned int __user *) &status : NULL),
70499- options, (struct rusage __user *) &r);
70500+ (unsigned int __force_user *) &status : NULL),
70501+ options, (struct rusage __force_user *) &r);
70502 set_fs (old_fs);
70503
70504 if (ret > 0) {
70505@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
70506 memset(&info, 0, sizeof(info));
70507
70508 set_fs(KERNEL_DS);
70509- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
70510- uru ? (struct rusage __user *)&ru : NULL);
70511+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
70512+ uru ? (struct rusage __force_user *)&ru : NULL);
70513 set_fs(old_fs);
70514
70515 if ((ret < 0) || (info.si_signo == 0))
70516@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
70517 oldfs = get_fs();
70518 set_fs(KERNEL_DS);
70519 err = sys_timer_settime(timer_id, flags,
70520- (struct itimerspec __user *) &newts,
70521- (struct itimerspec __user *) &oldts);
70522+ (struct itimerspec __force_user *) &newts,
70523+ (struct itimerspec __force_user *) &oldts);
70524 set_fs(oldfs);
70525 if (!err && old && put_compat_itimerspec(old, &oldts))
70526 return -EFAULT;
70527@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
70528 oldfs = get_fs();
70529 set_fs(KERNEL_DS);
70530 err = sys_timer_gettime(timer_id,
70531- (struct itimerspec __user *) &ts);
70532+ (struct itimerspec __force_user *) &ts);
70533 set_fs(oldfs);
70534 if (!err && put_compat_itimerspec(setting, &ts))
70535 return -EFAULT;
70536@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
70537 oldfs = get_fs();
70538 set_fs(KERNEL_DS);
70539 err = sys_clock_settime(which_clock,
70540- (struct timespec __user *) &ts);
70541+ (struct timespec __force_user *) &ts);
70542 set_fs(oldfs);
70543 return err;
70544 }
70545@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
70546 oldfs = get_fs();
70547 set_fs(KERNEL_DS);
70548 err = sys_clock_gettime(which_clock,
70549- (struct timespec __user *) &ts);
70550+ (struct timespec __force_user *) &ts);
70551 set_fs(oldfs);
70552 if (!err && put_compat_timespec(&ts, tp))
70553 return -EFAULT;
70554@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
70555 oldfs = get_fs();
70556 set_fs(KERNEL_DS);
70557 err = sys_clock_getres(which_clock,
70558- (struct timespec __user *) &ts);
70559+ (struct timespec __force_user *) &ts);
70560 set_fs(oldfs);
70561 if (!err && tp && put_compat_timespec(&ts, tp))
70562 return -EFAULT;
70563@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
70564 long err;
70565 mm_segment_t oldfs;
70566 struct timespec tu;
70567- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
70568+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
70569
70570- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
70571+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
70572 oldfs = get_fs();
70573 set_fs(KERNEL_DS);
70574 err = clock_nanosleep_restart(restart);
70575@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
70576 oldfs = get_fs();
70577 set_fs(KERNEL_DS);
70578 err = sys_clock_nanosleep(which_clock, flags,
70579- (struct timespec __user *) &in,
70580- (struct timespec __user *) &out);
70581+ (struct timespec __force_user *) &in,
70582+ (struct timespec __force_user *) &out);
70583 set_fs(oldfs);
70584
70585 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
70586diff --git a/kernel/configs.c b/kernel/configs.c
70587index abaee68..047facd 100644
70588--- a/kernel/configs.c
70589+++ b/kernel/configs.c
70590@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
70591 struct proc_dir_entry *entry;
70592
70593 /* create the current config file */
70594+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
70595+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
70596+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
70597+ &ikconfig_file_ops);
70598+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70599+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
70600+ &ikconfig_file_ops);
70601+#endif
70602+#else
70603 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
70604 &ikconfig_file_ops);
70605+#endif
70606+
70607 if (!entry)
70608 return -ENOMEM;
70609
70610diff --git a/kernel/cpu.c b/kernel/cpu.c
70611index 7e8b6ac..8921388 100644
70612--- a/kernel/cpu.c
70613+++ b/kernel/cpu.c
70614@@ -19,7 +19,7 @@
70615 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
70616 static DEFINE_MUTEX(cpu_add_remove_lock);
70617
70618-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
70619+static RAW_NOTIFIER_HEAD(cpu_chain);
70620
70621 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
70622 * Should always be manipulated under cpu_add_remove_lock
70623diff --git a/kernel/cred.c b/kernel/cred.c
70624index 0b5b5fc..419b86a 100644
70625--- a/kernel/cred.c
70626+++ b/kernel/cred.c
70627@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
70628 */
70629 void __put_cred(struct cred *cred)
70630 {
70631+ pax_track_stack();
70632+
70633 kdebug("__put_cred(%p{%d,%d})", cred,
70634 atomic_read(&cred->usage),
70635 read_cred_subscribers(cred));
70636@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
70637 {
70638 struct cred *cred;
70639
70640+ pax_track_stack();
70641+
70642 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
70643 atomic_read(&tsk->cred->usage),
70644 read_cred_subscribers(tsk->cred));
70645@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct task_struct *task)
70646 {
70647 const struct cred *cred;
70648
70649+ pax_track_stack();
70650+
70651 rcu_read_lock();
70652
70653 do {
70654@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
70655 {
70656 struct cred *new;
70657
70658+ pax_track_stack();
70659+
70660 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
70661 if (!new)
70662 return NULL;
70663@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
70664 const struct cred *old;
70665 struct cred *new;
70666
70667+ pax_track_stack();
70668+
70669 validate_process_creds();
70670
70671 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70672@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
70673 struct thread_group_cred *tgcred = NULL;
70674 struct cred *new;
70675
70676+ pax_track_stack();
70677+
70678 #ifdef CONFIG_KEYS
70679 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
70680 if (!tgcred)
70681@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
70682 struct cred *new;
70683 int ret;
70684
70685+ pax_track_stack();
70686+
70687 mutex_init(&p->cred_guard_mutex);
70688
70689 if (
70690@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
70691 struct task_struct *task = current;
70692 const struct cred *old = task->real_cred;
70693
70694+ pax_track_stack();
70695+
70696 kdebug("commit_creds(%p{%d,%d})", new,
70697 atomic_read(&new->usage),
70698 read_cred_subscribers(new));
70699@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
70700
70701 get_cred(new); /* we will require a ref for the subj creds too */
70702
70703+ gr_set_role_label(task, new->uid, new->gid);
70704+
70705 /* dumpability changes */
70706 if (old->euid != new->euid ||
70707 old->egid != new->egid ||
70708@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
70709 key_fsgid_changed(task);
70710
70711 /* do it
70712- * - What if a process setreuid()'s and this brings the
70713- * new uid over his NPROC rlimit? We can check this now
70714- * cheaply with the new uid cache, so if it matters
70715- * we should be checking for it. -DaveM
70716+ * RLIMIT_NPROC limits on user->processes have already been checked
70717+ * in set_user().
70718 */
70719 alter_cred_subscribers(new, 2);
70720 if (new->user != old->user)
70721@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
70722 */
70723 void abort_creds(struct cred *new)
70724 {
70725+ pax_track_stack();
70726+
70727 kdebug("abort_creds(%p{%d,%d})", new,
70728 atomic_read(&new->usage),
70729 read_cred_subscribers(new));
70730@@ -629,6 +647,8 @@ const struct cred *override_creds(const struct cred *new)
70731 {
70732 const struct cred *old = current->cred;
70733
70734+ pax_track_stack();
70735+
70736 kdebug("override_creds(%p{%d,%d})", new,
70737 atomic_read(&new->usage),
70738 read_cred_subscribers(new));
70739@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old)
70740 {
70741 const struct cred *override = current->cred;
70742
70743+ pax_track_stack();
70744+
70745 kdebug("revert_creds(%p{%d,%d})", old,
70746 atomic_read(&old->usage),
70747 read_cred_subscribers(old));
70748@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
70749 const struct cred *old;
70750 struct cred *new;
70751
70752+ pax_track_stack();
70753+
70754 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70755 if (!new)
70756 return NULL;
70757@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
70758 */
70759 int set_security_override(struct cred *new, u32 secid)
70760 {
70761+ pax_track_stack();
70762+
70763 return security_kernel_act_as(new, secid);
70764 }
70765 EXPORT_SYMBOL(set_security_override);
70766@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
70767 u32 secid;
70768 int ret;
70769
70770+ pax_track_stack();
70771+
70772 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
70773 if (ret < 0)
70774 return ret;
70775diff --git a/kernel/exit.c b/kernel/exit.c
70776index 0f8fae3..9344a56 100644
70777--- a/kernel/exit.c
70778+++ b/kernel/exit.c
70779@@ -55,6 +55,10 @@
70780 #include <asm/pgtable.h>
70781 #include <asm/mmu_context.h>
70782
70783+#ifdef CONFIG_GRKERNSEC
70784+extern rwlock_t grsec_exec_file_lock;
70785+#endif
70786+
70787 static void exit_mm(struct task_struct * tsk);
70788
70789 static void __unhash_process(struct task_struct *p)
70790@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
70791 struct task_struct *leader;
70792 int zap_leader;
70793 repeat:
70794+#ifdef CONFIG_NET
70795+ gr_del_task_from_ip_table(p);
70796+#endif
70797+
70798 tracehook_prepare_release_task(p);
70799 /* don't need to get the RCU readlock here - the process is dead and
70800 * can't be modifying its own credentials */
70801@@ -397,7 +405,7 @@ int allow_signal(int sig)
70802 * know it'll be handled, so that they don't get converted to
70803 * SIGKILL or just silently dropped.
70804 */
70805- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
70806+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
70807 recalc_sigpending();
70808 spin_unlock_irq(&current->sighand->siglock);
70809 return 0;
70810@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
70811 vsnprintf(current->comm, sizeof(current->comm), name, args);
70812 va_end(args);
70813
70814+#ifdef CONFIG_GRKERNSEC
70815+ write_lock(&grsec_exec_file_lock);
70816+ if (current->exec_file) {
70817+ fput(current->exec_file);
70818+ current->exec_file = NULL;
70819+ }
70820+ write_unlock(&grsec_exec_file_lock);
70821+#endif
70822+
70823+ gr_set_kernel_label(current);
70824+
70825 /*
70826 * If we were started as result of loading a module, close all of the
70827 * user space pages. We don't need them, and if we didn't close them
70828@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
70829 struct task_struct *tsk = current;
70830 int group_dead;
70831
70832- profile_task_exit(tsk);
70833-
70834- WARN_ON(atomic_read(&tsk->fs_excl));
70835-
70836+ /*
70837+ * Check this first since set_fs() below depends on
70838+ * current_thread_info(), which we better not access when we're in
70839+ * interrupt context. Other than that, we want to do the set_fs()
70840+ * as early as possible.
70841+ */
70842 if (unlikely(in_interrupt()))
70843 panic("Aiee, killing interrupt handler!");
70844- if (unlikely(!tsk->pid))
70845- panic("Attempted to kill the idle task!");
70846
70847 /*
70848- * If do_exit is called because this processes oopsed, it's possible
70849+ * If do_exit is called because this processes Oops'ed, it's possible
70850 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
70851 * continuing. Amongst other possible reasons, this is to prevent
70852 * mm_release()->clear_child_tid() from writing to a user-controlled
70853@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
70854 */
70855 set_fs(USER_DS);
70856
70857+ profile_task_exit(tsk);
70858+
70859+ WARN_ON(atomic_read(&tsk->fs_excl));
70860+
70861+ if (unlikely(!tsk->pid))
70862+ panic("Attempted to kill the idle task!");
70863+
70864 tracehook_report_exit(&code);
70865
70866 validate_creds_for_do_exit(tsk);
70867@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
70868 tsk->exit_code = code;
70869 taskstats_exit(tsk, group_dead);
70870
70871+ gr_acl_handle_psacct(tsk, code);
70872+ gr_acl_handle_exit();
70873+
70874 exit_mm(tsk);
70875
70876 if (group_dead)
70877@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
70878
70879 if (unlikely(wo->wo_flags & WNOWAIT)) {
70880 int exit_code = p->exit_code;
70881- int why, status;
70882+ int why;
70883
70884 get_task_struct(p);
70885 read_unlock(&tasklist_lock);
70886diff --git a/kernel/fork.c b/kernel/fork.c
70887index 4bde56f..29a9bab 100644
70888--- a/kernel/fork.c
70889+++ b/kernel/fork.c
70890@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
70891 *stackend = STACK_END_MAGIC; /* for overflow detection */
70892
70893 #ifdef CONFIG_CC_STACKPROTECTOR
70894- tsk->stack_canary = get_random_int();
70895+ tsk->stack_canary = pax_get_random_long();
70896 #endif
70897
70898 /* One for us, one for whoever does the "release_task()" (usually parent) */
70899@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70900 mm->locked_vm = 0;
70901 mm->mmap = NULL;
70902 mm->mmap_cache = NULL;
70903- mm->free_area_cache = oldmm->mmap_base;
70904- mm->cached_hole_size = ~0UL;
70905+ mm->free_area_cache = oldmm->free_area_cache;
70906+ mm->cached_hole_size = oldmm->cached_hole_size;
70907 mm->map_count = 0;
70908 cpumask_clear(mm_cpumask(mm));
70909 mm->mm_rb = RB_ROOT;
70910@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70911 tmp->vm_flags &= ~VM_LOCKED;
70912 tmp->vm_mm = mm;
70913 tmp->vm_next = tmp->vm_prev = NULL;
70914+ tmp->vm_mirror = NULL;
70915 anon_vma_link(tmp);
70916 file = tmp->vm_file;
70917 if (file) {
70918@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70919 if (retval)
70920 goto out;
70921 }
70922+
70923+#ifdef CONFIG_PAX_SEGMEXEC
70924+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
70925+ struct vm_area_struct *mpnt_m;
70926+
70927+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
70928+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
70929+
70930+ if (!mpnt->vm_mirror)
70931+ continue;
70932+
70933+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
70934+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
70935+ mpnt->vm_mirror = mpnt_m;
70936+ } else {
70937+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
70938+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
70939+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
70940+ mpnt->vm_mirror->vm_mirror = mpnt;
70941+ }
70942+ }
70943+ BUG_ON(mpnt_m);
70944+ }
70945+#endif
70946+
70947 /* a new mm has just been created */
70948 arch_dup_mmap(oldmm, mm);
70949 retval = 0;
70950@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
70951 write_unlock(&fs->lock);
70952 return -EAGAIN;
70953 }
70954- fs->users++;
70955+ atomic_inc(&fs->users);
70956 write_unlock(&fs->lock);
70957 return 0;
70958 }
70959 tsk->fs = copy_fs_struct(fs);
70960 if (!tsk->fs)
70961 return -ENOMEM;
70962+ gr_set_chroot_entries(tsk, &tsk->fs->root);
70963 return 0;
70964 }
70965
70966@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70967 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
70968 #endif
70969 retval = -EAGAIN;
70970+
70971+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
70972+
70973 if (atomic_read(&p->real_cred->user->processes) >=
70974 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
70975- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
70976- p->real_cred->user != INIT_USER)
70977+ if (p->real_cred->user != INIT_USER &&
70978+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
70979 goto bad_fork_free;
70980 }
70981+ current->flags &= ~PF_NPROC_EXCEEDED;
70982
70983 retval = copy_creds(p, clone_flags);
70984 if (retval < 0)
70985@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70986 goto bad_fork_free_pid;
70987 }
70988
70989+ gr_copy_label(p);
70990+
70991 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
70992 /*
70993 * Clear TID on mm_release()?
70994@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
70995 bad_fork_free:
70996 free_task(p);
70997 fork_out:
70998+ gr_log_forkfail(retval);
70999+
71000 return ERR_PTR(retval);
71001 }
71002
71003@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
71004 if (clone_flags & CLONE_PARENT_SETTID)
71005 put_user(nr, parent_tidptr);
71006
71007+ gr_handle_brute_check();
71008+
71009 if (clone_flags & CLONE_VFORK) {
71010 p->vfork_done = &vfork;
71011 init_completion(&vfork);
71012@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
71013 return 0;
71014
71015 /* don't need lock here; in the worst case we'll do useless copy */
71016- if (fs->users == 1)
71017+ if (atomic_read(&fs->users) == 1)
71018 return 0;
71019
71020 *new_fsp = copy_fs_struct(fs);
71021@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
71022 fs = current->fs;
71023 write_lock(&fs->lock);
71024 current->fs = new_fs;
71025- if (--fs->users)
71026+ gr_set_chroot_entries(current, &current->fs->root);
71027+ if (atomic_dec_return(&fs->users))
71028 new_fs = NULL;
71029 else
71030 new_fs = fs;
71031diff --git a/kernel/futex.c b/kernel/futex.c
71032index fb98c9f..f158c0c 100644
71033--- a/kernel/futex.c
71034+++ b/kernel/futex.c
71035@@ -54,6 +54,7 @@
71036 #include <linux/mount.h>
71037 #include <linux/pagemap.h>
71038 #include <linux/syscalls.h>
71039+#include <linux/ptrace.h>
71040 #include <linux/signal.h>
71041 #include <linux/module.h>
71042 #include <linux/magic.h>
71043@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
71044 struct page *page;
71045 int err, ro = 0;
71046
71047+#ifdef CONFIG_PAX_SEGMEXEC
71048+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
71049+ return -EFAULT;
71050+#endif
71051+
71052 /*
71053 * The futex address must be "naturally" aligned.
71054 */
71055@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
71056 struct futex_q q;
71057 int ret;
71058
71059+ pax_track_stack();
71060+
71061 if (!bitset)
71062 return -EINVAL;
71063
71064@@ -1871,7 +1879,7 @@ retry:
71065
71066 restart = &current_thread_info()->restart_block;
71067 restart->fn = futex_wait_restart;
71068- restart->futex.uaddr = (u32 *)uaddr;
71069+ restart->futex.uaddr = uaddr;
71070 restart->futex.val = val;
71071 restart->futex.time = abs_time->tv64;
71072 restart->futex.bitset = bitset;
71073@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
71074 struct futex_q q;
71075 int res, ret;
71076
71077+ pax_track_stack();
71078+
71079 if (!bitset)
71080 return -EINVAL;
71081
71082@@ -2407,7 +2417,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
71083 {
71084 struct robust_list_head __user *head;
71085 unsigned long ret;
71086+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
71087 const struct cred *cred = current_cred(), *pcred;
71088+#endif
71089
71090 if (!futex_cmpxchg_enabled)
71091 return -ENOSYS;
71092@@ -2423,11 +2435,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
71093 if (!p)
71094 goto err_unlock;
71095 ret = -EPERM;
71096+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71097+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
71098+ goto err_unlock;
71099+#else
71100 pcred = __task_cred(p);
71101 if (cred->euid != pcred->euid &&
71102 cred->euid != pcred->uid &&
71103 !capable(CAP_SYS_PTRACE))
71104 goto err_unlock;
71105+#endif
71106 head = p->robust_list;
71107 rcu_read_unlock();
71108 }
71109@@ -2489,7 +2506,7 @@ retry:
71110 */
71111 static inline int fetch_robust_entry(struct robust_list __user **entry,
71112 struct robust_list __user * __user *head,
71113- int *pi)
71114+ unsigned int *pi)
71115 {
71116 unsigned long uentry;
71117
71118@@ -2670,6 +2687,7 @@ static int __init futex_init(void)
71119 {
71120 u32 curval;
71121 int i;
71122+ mm_segment_t oldfs;
71123
71124 /*
71125 * This will fail and we want it. Some arch implementations do
71126@@ -2681,7 +2699,10 @@ static int __init futex_init(void)
71127 * implementation, the non functional ones will return
71128 * -ENOSYS.
71129 */
71130+ oldfs = get_fs();
71131+ set_fs(USER_DS);
71132 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
71133+ set_fs(oldfs);
71134 if (curval == -EFAULT)
71135 futex_cmpxchg_enabled = 1;
71136
71137diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
71138index 2357165..8d70cee 100644
71139--- a/kernel/futex_compat.c
71140+++ b/kernel/futex_compat.c
71141@@ -10,6 +10,7 @@
71142 #include <linux/compat.h>
71143 #include <linux/nsproxy.h>
71144 #include <linux/futex.h>
71145+#include <linux/ptrace.h>
71146
71147 #include <asm/uaccess.h>
71148
71149@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71150 {
71151 struct compat_robust_list_head __user *head;
71152 unsigned long ret;
71153- const struct cred *cred = current_cred(), *pcred;
71154+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
71155+ const struct cred *cred = current_cred();
71156+ const struct cred *pcred;
71157+#endif
71158
71159 if (!futex_cmpxchg_enabled)
71160 return -ENOSYS;
71161@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71162 if (!p)
71163 goto err_unlock;
71164 ret = -EPERM;
71165+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71166+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
71167+ goto err_unlock;
71168+#else
71169 pcred = __task_cred(p);
71170 if (cred->euid != pcred->euid &&
71171 cred->euid != pcred->uid &&
71172 !capable(CAP_SYS_PTRACE))
71173 goto err_unlock;
71174+#endif
71175 head = p->compat_robust_list;
71176 read_unlock(&tasklist_lock);
71177 }
71178diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
71179index 9b22d03..6295b62 100644
71180--- a/kernel/gcov/base.c
71181+++ b/kernel/gcov/base.c
71182@@ -102,11 +102,6 @@ void gcov_enable_events(void)
71183 }
71184
71185 #ifdef CONFIG_MODULES
71186-static inline int within(void *addr, void *start, unsigned long size)
71187-{
71188- return ((addr >= start) && (addr < start + size));
71189-}
71190-
71191 /* Update list and generate events when modules are unloaded. */
71192 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71193 void *data)
71194@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71195 prev = NULL;
71196 /* Remove entries located in module from linked list. */
71197 for (info = gcov_info_head; info; info = info->next) {
71198- if (within(info, mod->module_core, mod->core_size)) {
71199+ if (within_module_core_rw((unsigned long)info, mod)) {
71200 if (prev)
71201 prev->next = info->next;
71202 else
71203diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
71204index a6e9d00..a0da4f9 100644
71205--- a/kernel/hrtimer.c
71206+++ b/kernel/hrtimer.c
71207@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
71208 local_irq_restore(flags);
71209 }
71210
71211-static void run_hrtimer_softirq(struct softirq_action *h)
71212+static void run_hrtimer_softirq(void)
71213 {
71214 hrtimer_peek_ahead_timers();
71215 }
71216diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
71217index 8b6b8b6..6bc87df 100644
71218--- a/kernel/kallsyms.c
71219+++ b/kernel/kallsyms.c
71220@@ -11,6 +11,9 @@
71221 * Changed the compression method from stem compression to "table lookup"
71222 * compression (see scripts/kallsyms.c for a more complete description)
71223 */
71224+#ifdef CONFIG_GRKERNSEC_HIDESYM
71225+#define __INCLUDED_BY_HIDESYM 1
71226+#endif
71227 #include <linux/kallsyms.h>
71228 #include <linux/module.h>
71229 #include <linux/init.h>
71230@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
71231
71232 static inline int is_kernel_inittext(unsigned long addr)
71233 {
71234+ if (system_state != SYSTEM_BOOTING)
71235+ return 0;
71236+
71237 if (addr >= (unsigned long)_sinittext
71238 && addr <= (unsigned long)_einittext)
71239 return 1;
71240 return 0;
71241 }
71242
71243+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71244+#ifdef CONFIG_MODULES
71245+static inline int is_module_text(unsigned long addr)
71246+{
71247+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
71248+ return 1;
71249+
71250+ addr = ktla_ktva(addr);
71251+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
71252+}
71253+#else
71254+static inline int is_module_text(unsigned long addr)
71255+{
71256+ return 0;
71257+}
71258+#endif
71259+#endif
71260+
71261 static inline int is_kernel_text(unsigned long addr)
71262 {
71263 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
71264@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
71265
71266 static inline int is_kernel(unsigned long addr)
71267 {
71268+
71269+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71270+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
71271+ return 1;
71272+
71273+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
71274+#else
71275 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
71276+#endif
71277+
71278 return 1;
71279 return in_gate_area_no_task(addr);
71280 }
71281
71282 static int is_ksym_addr(unsigned long addr)
71283 {
71284+
71285+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71286+ if (is_module_text(addr))
71287+ return 0;
71288+#endif
71289+
71290 if (all_var)
71291 return is_kernel(addr);
71292
71293@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
71294
71295 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
71296 {
71297- iter->name[0] = '\0';
71298 iter->nameoff = get_symbol_offset(new_pos);
71299 iter->pos = new_pos;
71300 }
71301@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
71302 {
71303 struct kallsym_iter *iter = m->private;
71304
71305+#ifdef CONFIG_GRKERNSEC_HIDESYM
71306+ if (current_uid())
71307+ return 0;
71308+#endif
71309+
71310 /* Some debugging symbols have no name. Ignore them. */
71311 if (!iter->name[0])
71312 return 0;
71313@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
71314 struct kallsym_iter *iter;
71315 int ret;
71316
71317- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
71318+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
71319 if (!iter)
71320 return -ENOMEM;
71321 reset_iter(iter, 0);
71322diff --git a/kernel/kexec.c b/kernel/kexec.c
71323index f336e21..9c1c20b 100644
71324--- a/kernel/kexec.c
71325+++ b/kernel/kexec.c
71326@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
71327 unsigned long flags)
71328 {
71329 struct compat_kexec_segment in;
71330- struct kexec_segment out, __user *ksegments;
71331+ struct kexec_segment out;
71332+ struct kexec_segment __user *ksegments;
71333 unsigned long i, result;
71334
71335 /* Don't allow clients that don't understand the native
71336diff --git a/kernel/kgdb.c b/kernel/kgdb.c
71337index 53dae4b..9ba3743 100644
71338--- a/kernel/kgdb.c
71339+++ b/kernel/kgdb.c
71340@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
71341 /* Guard for recursive entry */
71342 static int exception_level;
71343
71344-static struct kgdb_io *kgdb_io_ops;
71345+static const struct kgdb_io *kgdb_io_ops;
71346 static DEFINE_SPINLOCK(kgdb_registration_lock);
71347
71348 /* kgdb console driver is loaded */
71349@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
71350 */
71351 static atomic_t passive_cpu_wait[NR_CPUS];
71352 static atomic_t cpu_in_kgdb[NR_CPUS];
71353-atomic_t kgdb_setting_breakpoint;
71354+atomic_unchecked_t kgdb_setting_breakpoint;
71355
71356 struct task_struct *kgdb_usethread;
71357 struct task_struct *kgdb_contthread;
71358@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
71359 sizeof(unsigned long)];
71360
71361 /* to keep track of the CPU which is doing the single stepping*/
71362-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71363+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71364
71365 /*
71366 * If you are debugging a problem where roundup (the collection of
71367@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
71368 return 0;
71369 if (kgdb_connected)
71370 return 1;
71371- if (atomic_read(&kgdb_setting_breakpoint))
71372+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
71373 return 1;
71374 if (print_wait)
71375 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
71376@@ -1426,8 +1426,8 @@ acquirelock:
71377 * instance of the exception handler wanted to come into the
71378 * debugger on a different CPU via a single step
71379 */
71380- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
71381- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
71382+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
71383+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
71384
71385 atomic_set(&kgdb_active, -1);
71386 touch_softlockup_watchdog();
71387@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
71388 *
71389 * Register it with the KGDB core.
71390 */
71391-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
71392+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
71393 {
71394 int err;
71395
71396@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
71397 *
71398 * Unregister it with the KGDB core.
71399 */
71400-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
71401+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
71402 {
71403 BUG_ON(kgdb_connected);
71404
71405@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
71406 */
71407 void kgdb_breakpoint(void)
71408 {
71409- atomic_set(&kgdb_setting_breakpoint, 1);
71410+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
71411 wmb(); /* Sync point before breakpoint */
71412 arch_kgdb_breakpoint();
71413 wmb(); /* Sync point after breakpoint */
71414- atomic_set(&kgdb_setting_breakpoint, 0);
71415+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
71416 }
71417 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
71418
71419diff --git a/kernel/kmod.c b/kernel/kmod.c
71420index d206078..e27ba6a 100644
71421--- a/kernel/kmod.c
71422+++ b/kernel/kmod.c
71423@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
71424 * If module auto-loading support is disabled then this function
71425 * becomes a no-operation.
71426 */
71427-int __request_module(bool wait, const char *fmt, ...)
71428+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
71429 {
71430- va_list args;
71431 char module_name[MODULE_NAME_LEN];
71432 unsigned int max_modprobes;
71433 int ret;
71434- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
71435+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
71436 static char *envp[] = { "HOME=/",
71437 "TERM=linux",
71438 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
71439@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
71440 if (ret)
71441 return ret;
71442
71443- va_start(args, fmt);
71444- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
71445- va_end(args);
71446+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
71447 if (ret >= MODULE_NAME_LEN)
71448 return -ENAMETOOLONG;
71449
71450+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71451+ if (!current_uid()) {
71452+ /* hack to workaround consolekit/udisks stupidity */
71453+ read_lock(&tasklist_lock);
71454+ if (!strcmp(current->comm, "mount") &&
71455+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
71456+ read_unlock(&tasklist_lock);
71457+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
71458+ return -EPERM;
71459+ }
71460+ read_unlock(&tasklist_lock);
71461+ }
71462+#endif
71463+
71464 /* If modprobe needs a service that is in a module, we get a recursive
71465 * loop. Limit the number of running kmod threads to max_threads/2 or
71466 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
71467@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
71468 atomic_dec(&kmod_concurrent);
71469 return ret;
71470 }
71471+
71472+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
71473+{
71474+ va_list args;
71475+ int ret;
71476+
71477+ va_start(args, fmt);
71478+ ret = ____request_module(wait, module_param, fmt, args);
71479+ va_end(args);
71480+
71481+ return ret;
71482+}
71483+
71484+int __request_module(bool wait, const char *fmt, ...)
71485+{
71486+ va_list args;
71487+ int ret;
71488+
71489+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71490+ if (current_uid()) {
71491+ char module_param[MODULE_NAME_LEN];
71492+
71493+ memset(module_param, 0, sizeof(module_param));
71494+
71495+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
71496+
71497+ va_start(args, fmt);
71498+ ret = ____request_module(wait, module_param, fmt, args);
71499+ va_end(args);
71500+
71501+ return ret;
71502+ }
71503+#endif
71504+
71505+ va_start(args, fmt);
71506+ ret = ____request_module(wait, NULL, fmt, args);
71507+ va_end(args);
71508+
71509+ return ret;
71510+}
71511+
71512+
71513 EXPORT_SYMBOL(__request_module);
71514 #endif /* CONFIG_MODULES */
71515
71516@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
71517 *
71518 * Thus the __user pointer cast is valid here.
71519 */
71520- sys_wait4(pid, (int __user *)&ret, 0, NULL);
71521+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
71522
71523 /*
71524 * If ret is 0, either ____call_usermodehelper failed and the
71525diff --git a/kernel/kprobes.c b/kernel/kprobes.c
71526index 5240d75..5a6fb33 100644
71527--- a/kernel/kprobes.c
71528+++ b/kernel/kprobes.c
71529@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
71530 * kernel image and loaded module images reside. This is required
71531 * so x86_64 can correctly handle the %rip-relative fixups.
71532 */
71533- kip->insns = module_alloc(PAGE_SIZE);
71534+ kip->insns = module_alloc_exec(PAGE_SIZE);
71535 if (!kip->insns) {
71536 kfree(kip);
71537 return NULL;
71538@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
71539 */
71540 if (!list_is_singular(&kprobe_insn_pages)) {
71541 list_del(&kip->list);
71542- module_free(NULL, kip->insns);
71543+ module_free_exec(NULL, kip->insns);
71544 kfree(kip);
71545 }
71546 return 1;
71547@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
71548 {
71549 int i, err = 0;
71550 unsigned long offset = 0, size = 0;
71551- char *modname, namebuf[128];
71552+ char *modname, namebuf[KSYM_NAME_LEN];
71553 const char *symbol_name;
71554 void *addr;
71555 struct kprobe_blackpoint *kb;
71556@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
71557 const char *sym = NULL;
71558 unsigned int i = *(loff_t *) v;
71559 unsigned long offset = 0;
71560- char *modname, namebuf[128];
71561+ char *modname, namebuf[KSYM_NAME_LEN];
71562
71563 head = &kprobe_table[i];
71564 preempt_disable();
71565diff --git a/kernel/lockdep.c b/kernel/lockdep.c
71566index d86fe89..d12fc66 100644
71567--- a/kernel/lockdep.c
71568+++ b/kernel/lockdep.c
71569@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
71570 /*
71571 * Various lockdep statistics:
71572 */
71573-atomic_t chain_lookup_hits;
71574-atomic_t chain_lookup_misses;
71575-atomic_t hardirqs_on_events;
71576-atomic_t hardirqs_off_events;
71577-atomic_t redundant_hardirqs_on;
71578-atomic_t redundant_hardirqs_off;
71579-atomic_t softirqs_on_events;
71580-atomic_t softirqs_off_events;
71581-atomic_t redundant_softirqs_on;
71582-atomic_t redundant_softirqs_off;
71583-atomic_t nr_unused_locks;
71584-atomic_t nr_cyclic_checks;
71585-atomic_t nr_find_usage_forwards_checks;
71586-atomic_t nr_find_usage_backwards_checks;
71587+atomic_unchecked_t chain_lookup_hits;
71588+atomic_unchecked_t chain_lookup_misses;
71589+atomic_unchecked_t hardirqs_on_events;
71590+atomic_unchecked_t hardirqs_off_events;
71591+atomic_unchecked_t redundant_hardirqs_on;
71592+atomic_unchecked_t redundant_hardirqs_off;
71593+atomic_unchecked_t softirqs_on_events;
71594+atomic_unchecked_t softirqs_off_events;
71595+atomic_unchecked_t redundant_softirqs_on;
71596+atomic_unchecked_t redundant_softirqs_off;
71597+atomic_unchecked_t nr_unused_locks;
71598+atomic_unchecked_t nr_cyclic_checks;
71599+atomic_unchecked_t nr_find_usage_forwards_checks;
71600+atomic_unchecked_t nr_find_usage_backwards_checks;
71601 #endif
71602
71603 /*
71604@@ -577,6 +577,10 @@ static int static_obj(void *obj)
71605 int i;
71606 #endif
71607
71608+#ifdef CONFIG_PAX_KERNEXEC
71609+ start = ktla_ktva(start);
71610+#endif
71611+
71612 /*
71613 * static variable?
71614 */
71615@@ -592,8 +596,7 @@ static int static_obj(void *obj)
71616 */
71617 for_each_possible_cpu(i) {
71618 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
71619- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
71620- + per_cpu_offset(i);
71621+ end = start + PERCPU_ENOUGH_ROOM;
71622
71623 if ((addr >= start) && (addr < end))
71624 return 1;
71625@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
71626 if (!static_obj(lock->key)) {
71627 debug_locks_off();
71628 printk("INFO: trying to register non-static key.\n");
71629+ printk("lock:%pS key:%pS.\n", lock, lock->key);
71630 printk("the code is fine but needs lockdep annotation.\n");
71631 printk("turning off the locking correctness validator.\n");
71632 dump_stack();
71633@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
71634 if (!class)
71635 return 0;
71636 }
71637- debug_atomic_inc((atomic_t *)&class->ops);
71638+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
71639 if (very_verbose(class)) {
71640 printk("\nacquire class [%p] %s", class->key, class->name);
71641 if (class->name_version > 1)
71642diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
71643index a2ee95a..092f0f2 100644
71644--- a/kernel/lockdep_internals.h
71645+++ b/kernel/lockdep_internals.h
71646@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
71647 /*
71648 * Various lockdep statistics:
71649 */
71650-extern atomic_t chain_lookup_hits;
71651-extern atomic_t chain_lookup_misses;
71652-extern atomic_t hardirqs_on_events;
71653-extern atomic_t hardirqs_off_events;
71654-extern atomic_t redundant_hardirqs_on;
71655-extern atomic_t redundant_hardirqs_off;
71656-extern atomic_t softirqs_on_events;
71657-extern atomic_t softirqs_off_events;
71658-extern atomic_t redundant_softirqs_on;
71659-extern atomic_t redundant_softirqs_off;
71660-extern atomic_t nr_unused_locks;
71661-extern atomic_t nr_cyclic_checks;
71662-extern atomic_t nr_cyclic_check_recursions;
71663-extern atomic_t nr_find_usage_forwards_checks;
71664-extern atomic_t nr_find_usage_forwards_recursions;
71665-extern atomic_t nr_find_usage_backwards_checks;
71666-extern atomic_t nr_find_usage_backwards_recursions;
71667-# define debug_atomic_inc(ptr) atomic_inc(ptr)
71668-# define debug_atomic_dec(ptr) atomic_dec(ptr)
71669-# define debug_atomic_read(ptr) atomic_read(ptr)
71670+extern atomic_unchecked_t chain_lookup_hits;
71671+extern atomic_unchecked_t chain_lookup_misses;
71672+extern atomic_unchecked_t hardirqs_on_events;
71673+extern atomic_unchecked_t hardirqs_off_events;
71674+extern atomic_unchecked_t redundant_hardirqs_on;
71675+extern atomic_unchecked_t redundant_hardirqs_off;
71676+extern atomic_unchecked_t softirqs_on_events;
71677+extern atomic_unchecked_t softirqs_off_events;
71678+extern atomic_unchecked_t redundant_softirqs_on;
71679+extern atomic_unchecked_t redundant_softirqs_off;
71680+extern atomic_unchecked_t nr_unused_locks;
71681+extern atomic_unchecked_t nr_cyclic_checks;
71682+extern atomic_unchecked_t nr_cyclic_check_recursions;
71683+extern atomic_unchecked_t nr_find_usage_forwards_checks;
71684+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
71685+extern atomic_unchecked_t nr_find_usage_backwards_checks;
71686+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
71687+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
71688+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
71689+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
71690 #else
71691 # define debug_atomic_inc(ptr) do { } while (0)
71692 # define debug_atomic_dec(ptr) do { } while (0)
71693diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
71694index d4aba4f..02a353f 100644
71695--- a/kernel/lockdep_proc.c
71696+++ b/kernel/lockdep_proc.c
71697@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
71698
71699 static void print_name(struct seq_file *m, struct lock_class *class)
71700 {
71701- char str[128];
71702+ char str[KSYM_NAME_LEN];
71703 const char *name = class->name;
71704
71705 if (!name) {
71706diff --git a/kernel/module.c b/kernel/module.c
71707index 4b270e6..2226274 100644
71708--- a/kernel/module.c
71709+++ b/kernel/module.c
71710@@ -55,6 +55,7 @@
71711 #include <linux/async.h>
71712 #include <linux/percpu.h>
71713 #include <linux/kmemleak.h>
71714+#include <linux/grsecurity.h>
71715
71716 #define CREATE_TRACE_POINTS
71717 #include <trace/events/module.h>
71718@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
71719 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
71720
71721 /* Bounds of module allocation, for speeding __module_address */
71722-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
71723+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
71724+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
71725
71726 int register_module_notifier(struct notifier_block * nb)
71727 {
71728@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71729 return true;
71730
71731 list_for_each_entry_rcu(mod, &modules, list) {
71732- struct symsearch arr[] = {
71733+ struct symsearch modarr[] = {
71734 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
71735 NOT_GPL_ONLY, false },
71736 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
71737@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71738 #endif
71739 };
71740
71741- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
71742+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
71743 return true;
71744 }
71745 return false;
71746@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
71747 void *ptr;
71748 int cpu;
71749
71750- if (align > PAGE_SIZE) {
71751+ if (align-1 >= PAGE_SIZE) {
71752 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
71753 name, align, PAGE_SIZE);
71754 align = PAGE_SIZE;
71755@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
71756 * /sys/module/foo/sections stuff
71757 * J. Corbet <corbet@lwn.net>
71758 */
71759-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
71760+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71761
71762 static inline bool sect_empty(const Elf_Shdr *sect)
71763 {
71764@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
71765 destroy_params(mod->kp, mod->num_kp);
71766
71767 /* This may be NULL, but that's OK */
71768- module_free(mod, mod->module_init);
71769+ module_free(mod, mod->module_init_rw);
71770+ module_free_exec(mod, mod->module_init_rx);
71771 kfree(mod->args);
71772 if (mod->percpu)
71773 percpu_modfree(mod->percpu);
71774@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
71775 percpu_modfree(mod->refptr);
71776 #endif
71777 /* Free lock-classes: */
71778- lockdep_free_key_range(mod->module_core, mod->core_size);
71779+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
71780+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
71781
71782 /* Finally, free the core (containing the module structure) */
71783- module_free(mod, mod->module_core);
71784+ module_free_exec(mod, mod->module_core_rx);
71785+ module_free(mod, mod->module_core_rw);
71786
71787 #ifdef CONFIG_MPU
71788 update_protections(current->mm);
71789@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71790 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71791 int ret = 0;
71792 const struct kernel_symbol *ksym;
71793+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71794+ int is_fs_load = 0;
71795+ int register_filesystem_found = 0;
71796+ char *p;
71797+
71798+ p = strstr(mod->args, "grsec_modharden_fs");
71799+
71800+ if (p) {
71801+ char *endptr = p + strlen("grsec_modharden_fs");
71802+ /* copy \0 as well */
71803+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71804+ is_fs_load = 1;
71805+ }
71806+#endif
71807+
71808
71809 for (i = 1; i < n; i++) {
71810+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71811+ const char *name = strtab + sym[i].st_name;
71812+
71813+ /* it's a real shame this will never get ripped and copied
71814+ upstream! ;(
71815+ */
71816+ if (is_fs_load && !strcmp(name, "register_filesystem"))
71817+ register_filesystem_found = 1;
71818+#endif
71819 switch (sym[i].st_shndx) {
71820 case SHN_COMMON:
71821 /* We compiled with -fno-common. These are not
71822@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71823 strtab + sym[i].st_name, mod);
71824 /* Ok if resolved. */
71825 if (ksym) {
71826+ pax_open_kernel();
71827 sym[i].st_value = ksym->value;
71828+ pax_close_kernel();
71829 break;
71830 }
71831
71832@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71833 secbase = (unsigned long)mod->percpu;
71834 else
71835 secbase = sechdrs[sym[i].st_shndx].sh_addr;
71836+ pax_open_kernel();
71837 sym[i].st_value += secbase;
71838+ pax_close_kernel();
71839 break;
71840 }
71841 }
71842
71843+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71844+ if (is_fs_load && !register_filesystem_found) {
71845+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
71846+ ret = -EPERM;
71847+ }
71848+#endif
71849+
71850 return ret;
71851 }
71852
71853@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
71854 || s->sh_entsize != ~0UL
71855 || strstarts(secstrings + s->sh_name, ".init"))
71856 continue;
71857- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
71858+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71859+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
71860+ else
71861+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
71862 DEBUGP("\t%s\n", secstrings + s->sh_name);
71863 }
71864- if (m == 0)
71865- mod->core_text_size = mod->core_size;
71866 }
71867
71868 DEBUGP("Init section allocation order:\n");
71869@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
71870 || s->sh_entsize != ~0UL
71871 || !strstarts(secstrings + s->sh_name, ".init"))
71872 continue;
71873- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
71874- | INIT_OFFSET_MASK);
71875+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71876+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
71877+ else
71878+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
71879+ s->sh_entsize |= INIT_OFFSET_MASK;
71880 DEBUGP("\t%s\n", secstrings + s->sh_name);
71881 }
71882- if (m == 0)
71883- mod->init_text_size = mod->init_size;
71884 }
71885 }
71886
71887@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
71888
71889 /* As per nm */
71890 static char elf_type(const Elf_Sym *sym,
71891- Elf_Shdr *sechdrs,
71892- const char *secstrings,
71893- struct module *mod)
71894+ const Elf_Shdr *sechdrs,
71895+ const char *secstrings)
71896 {
71897 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
71898 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
71899@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
71900
71901 /* Put symbol section at end of init part of module. */
71902 symsect->sh_flags |= SHF_ALLOC;
71903- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
71904+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
71905 symindex) | INIT_OFFSET_MASK;
71906 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
71907
71908@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
71909 }
71910
71911 /* Append room for core symbols at end of core part. */
71912- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
71913- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
71914+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
71915+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
71916
71917 /* Put string table section at end of init part of module. */
71918 strsect->sh_flags |= SHF_ALLOC;
71919- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
71920+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
71921 strindex) | INIT_OFFSET_MASK;
71922 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
71923
71924 /* Append room for core symbols' strings at end of core part. */
71925- *pstroffs = mod->core_size;
71926+ *pstroffs = mod->core_size_rx;
71927 __set_bit(0, strmap);
71928- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
71929+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
71930
71931 return symoffs;
71932 }
71933@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
71934 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71935 mod->strtab = (void *)sechdrs[strindex].sh_addr;
71936
71937+ pax_open_kernel();
71938+
71939 /* Set types up while we still have access to sections. */
71940 for (i = 0; i < mod->num_symtab; i++)
71941 mod->symtab[i].st_info
71942- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
71943+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
71944
71945- mod->core_symtab = dst = mod->module_core + symoffs;
71946+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
71947 src = mod->symtab;
71948 *dst = *src;
71949 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
71950@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
71951 }
71952 mod->core_num_syms = ndst;
71953
71954- mod->core_strtab = s = mod->module_core + stroffs;
71955+ mod->core_strtab = s = mod->module_core_rx + stroffs;
71956 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
71957 if (test_bit(i, strmap))
71958 *++s = mod->strtab[i];
71959+
71960+ pax_close_kernel();
71961 }
71962 #else
71963 static inline unsigned long layout_symtab(struct module *mod,
71964@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
71965 #endif
71966 }
71967
71968-static void *module_alloc_update_bounds(unsigned long size)
71969+static void *module_alloc_update_bounds_rw(unsigned long size)
71970 {
71971 void *ret = module_alloc(size);
71972
71973 if (ret) {
71974 /* Update module bounds. */
71975- if ((unsigned long)ret < module_addr_min)
71976- module_addr_min = (unsigned long)ret;
71977- if ((unsigned long)ret + size > module_addr_max)
71978- module_addr_max = (unsigned long)ret + size;
71979+ if ((unsigned long)ret < module_addr_min_rw)
71980+ module_addr_min_rw = (unsigned long)ret;
71981+ if ((unsigned long)ret + size > module_addr_max_rw)
71982+ module_addr_max_rw = (unsigned long)ret + size;
71983+ }
71984+ return ret;
71985+}
71986+
71987+static void *module_alloc_update_bounds_rx(unsigned long size)
71988+{
71989+ void *ret = module_alloc_exec(size);
71990+
71991+ if (ret) {
71992+ /* Update module bounds. */
71993+ if ((unsigned long)ret < module_addr_min_rx)
71994+ module_addr_min_rx = (unsigned long)ret;
71995+ if ((unsigned long)ret + size > module_addr_max_rx)
71996+ module_addr_max_rx = (unsigned long)ret + size;
71997 }
71998 return ret;
71999 }
72000@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72001 unsigned int i;
72002
72003 /* only scan the sections containing data */
72004- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
72005- (unsigned long)mod->module_core,
72006+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
72007+ (unsigned long)mod->module_core_rw,
72008 sizeof(struct module), GFP_KERNEL);
72009
72010 for (i = 1; i < hdr->e_shnum; i++) {
72011@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72012 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
72013 continue;
72014
72015- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
72016- (unsigned long)mod->module_core,
72017+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
72018+ (unsigned long)mod->module_core_rw,
72019 sechdrs[i].sh_size, GFP_KERNEL);
72020 }
72021 }
72022@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
72023 Elf_Ehdr *hdr;
72024 Elf_Shdr *sechdrs;
72025 char *secstrings, *args, *modmagic, *strtab = NULL;
72026- char *staging;
72027+ char *staging, *license;
72028 unsigned int i;
72029 unsigned int symindex = 0;
72030 unsigned int strindex = 0;
72031@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
72032 goto free_hdr;
72033 }
72034
72035+ license = get_modinfo(sechdrs, infoindex, "license");
72036+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
72037+ if (!license || !license_is_gpl_compatible(license)) {
72038+ err = -ENOEXEC;
72039+ goto free_hdr;
72040+ }
72041+#endif
72042+
72043 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
72044 /* This is allowed: modprobe --force will invalidate it. */
72045 if (!modmagic) {
72046@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
72047 secstrings, &stroffs, strmap);
72048
72049 /* Do the allocs. */
72050- ptr = module_alloc_update_bounds(mod->core_size);
72051+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
72052 /*
72053 * The pointer to this block is stored in the module structure
72054 * which is inside the block. Just mark it as not being a
72055@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
72056 err = -ENOMEM;
72057 goto free_percpu;
72058 }
72059- memset(ptr, 0, mod->core_size);
72060- mod->module_core = ptr;
72061+ memset(ptr, 0, mod->core_size_rw);
72062+ mod->module_core_rw = ptr;
72063
72064- ptr = module_alloc_update_bounds(mod->init_size);
72065+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
72066 /*
72067 * The pointer to this block is stored in the module structure
72068 * which is inside the block. This block doesn't need to be
72069 * scanned as it contains data and code that will be freed
72070 * after the module is initialized.
72071 */
72072- kmemleak_ignore(ptr);
72073- if (!ptr && mod->init_size) {
72074+ kmemleak_not_leak(ptr);
72075+ if (!ptr && mod->init_size_rw) {
72076 err = -ENOMEM;
72077- goto free_core;
72078+ goto free_core_rw;
72079 }
72080- memset(ptr, 0, mod->init_size);
72081- mod->module_init = ptr;
72082+ memset(ptr, 0, mod->init_size_rw);
72083+ mod->module_init_rw = ptr;
72084+
72085+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
72086+ kmemleak_not_leak(ptr);
72087+ if (!ptr) {
72088+ err = -ENOMEM;
72089+ goto free_init_rw;
72090+ }
72091+
72092+ pax_open_kernel();
72093+ memset(ptr, 0, mod->core_size_rx);
72094+ pax_close_kernel();
72095+ mod->module_core_rx = ptr;
72096+
72097+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
72098+ kmemleak_not_leak(ptr);
72099+ if (!ptr && mod->init_size_rx) {
72100+ err = -ENOMEM;
72101+ goto free_core_rx;
72102+ }
72103+
72104+ pax_open_kernel();
72105+ memset(ptr, 0, mod->init_size_rx);
72106+ pax_close_kernel();
72107+ mod->module_init_rx = ptr;
72108
72109 /* Transfer each section which specifies SHF_ALLOC */
72110 DEBUGP("final section addresses:\n");
72111@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
72112 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
72113 continue;
72114
72115- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
72116- dest = mod->module_init
72117- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72118- else
72119- dest = mod->module_core + sechdrs[i].sh_entsize;
72120+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
72121+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72122+ dest = mod->module_init_rw
72123+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72124+ else
72125+ dest = mod->module_init_rx
72126+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72127+ } else {
72128+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72129+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
72130+ else
72131+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
72132+ }
72133
72134- if (sechdrs[i].sh_type != SHT_NOBITS)
72135- memcpy(dest, (void *)sechdrs[i].sh_addr,
72136- sechdrs[i].sh_size);
72137+ if (sechdrs[i].sh_type != SHT_NOBITS) {
72138+
72139+#ifdef CONFIG_PAX_KERNEXEC
72140+#ifdef CONFIG_X86_64
72141+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
72142+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
72143+#endif
72144+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
72145+ pax_open_kernel();
72146+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72147+ pax_close_kernel();
72148+ } else
72149+#endif
72150+
72151+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72152+ }
72153 /* Update sh_addr to point to copy in image. */
72154- sechdrs[i].sh_addr = (unsigned long)dest;
72155+
72156+#ifdef CONFIG_PAX_KERNEXEC
72157+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
72158+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
72159+ else
72160+#endif
72161+
72162+ sechdrs[i].sh_addr = (unsigned long)dest;
72163 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
72164 }
72165 /* Module has been moved. */
72166@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
72167 mod->name);
72168 if (!mod->refptr) {
72169 err = -ENOMEM;
72170- goto free_init;
72171+ goto free_init_rx;
72172 }
72173 #endif
72174 /* Now we've moved module, initialize linked lists, etc. */
72175@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
72176 goto free_unload;
72177
72178 /* Set up license info based on the info section */
72179- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
72180+ set_license(mod, license);
72181
72182 /*
72183 * ndiswrapper is under GPL by itself, but loads proprietary modules.
72184@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
72185 /* Set up MODINFO_ATTR fields */
72186 setup_modinfo(mod, sechdrs, infoindex);
72187
72188+ mod->args = args;
72189+
72190+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72191+ {
72192+ char *p, *p2;
72193+
72194+ if (strstr(mod->args, "grsec_modharden_netdev")) {
72195+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
72196+ err = -EPERM;
72197+ goto cleanup;
72198+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
72199+ p += strlen("grsec_modharden_normal");
72200+ p2 = strstr(p, "_");
72201+ if (p2) {
72202+ *p2 = '\0';
72203+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
72204+ *p2 = '_';
72205+ }
72206+ err = -EPERM;
72207+ goto cleanup;
72208+ }
72209+ }
72210+#endif
72211+
72212+
72213 /* Fix up syms, so that st_value is a pointer to location. */
72214 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
72215 mod);
72216@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
72217
72218 /* Now do relocations. */
72219 for (i = 1; i < hdr->e_shnum; i++) {
72220- const char *strtab = (char *)sechdrs[strindex].sh_addr;
72221 unsigned int info = sechdrs[i].sh_info;
72222+ strtab = (char *)sechdrs[strindex].sh_addr;
72223
72224 /* Not a valid relocation section? */
72225 if (info >= hdr->e_shnum)
72226@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
72227 * Do it before processing of module parameters, so the module
72228 * can provide parameter accessor functions of its own.
72229 */
72230- if (mod->module_init)
72231- flush_icache_range((unsigned long)mod->module_init,
72232- (unsigned long)mod->module_init
72233- + mod->init_size);
72234- flush_icache_range((unsigned long)mod->module_core,
72235- (unsigned long)mod->module_core + mod->core_size);
72236+ if (mod->module_init_rx)
72237+ flush_icache_range((unsigned long)mod->module_init_rx,
72238+ (unsigned long)mod->module_init_rx
72239+ + mod->init_size_rx);
72240+ flush_icache_range((unsigned long)mod->module_core_rx,
72241+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
72242
72243 set_fs(old_fs);
72244
72245- mod->args = args;
72246 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
72247 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
72248 mod->name);
72249@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
72250 free_unload:
72251 module_unload_free(mod);
72252 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
72253+ free_init_rx:
72254 percpu_modfree(mod->refptr);
72255- free_init:
72256 #endif
72257- module_free(mod, mod->module_init);
72258- free_core:
72259- module_free(mod, mod->module_core);
72260+ module_free_exec(mod, mod->module_init_rx);
72261+ free_core_rx:
72262+ module_free_exec(mod, mod->module_core_rx);
72263+ free_init_rw:
72264+ module_free(mod, mod->module_init_rw);
72265+ free_core_rw:
72266+ module_free(mod, mod->module_core_rw);
72267 /* mod will be freed with core. Don't access it beyond this line! */
72268 free_percpu:
72269 if (percpu)
72270@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
72271 mod->symtab = mod->core_symtab;
72272 mod->strtab = mod->core_strtab;
72273 #endif
72274- module_free(mod, mod->module_init);
72275- mod->module_init = NULL;
72276- mod->init_size = 0;
72277- mod->init_text_size = 0;
72278+ module_free(mod, mod->module_init_rw);
72279+ module_free_exec(mod, mod->module_init_rx);
72280+ mod->module_init_rw = NULL;
72281+ mod->module_init_rx = NULL;
72282+ mod->init_size_rw = 0;
72283+ mod->init_size_rx = 0;
72284 mutex_unlock(&module_mutex);
72285
72286 return 0;
72287@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
72288 unsigned long nextval;
72289
72290 /* At worse, next value is at end of module */
72291- if (within_module_init(addr, mod))
72292- nextval = (unsigned long)mod->module_init+mod->init_text_size;
72293+ if (within_module_init_rx(addr, mod))
72294+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
72295+ else if (within_module_init_rw(addr, mod))
72296+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
72297+ else if (within_module_core_rx(addr, mod))
72298+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
72299+ else if (within_module_core_rw(addr, mod))
72300+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
72301 else
72302- nextval = (unsigned long)mod->module_core+mod->core_text_size;
72303+ return NULL;
72304
72305 /* Scan for closest preceeding symbol, and next symbol. (ELF
72306 starts real symbols at 1). */
72307@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
72308 char buf[8];
72309
72310 seq_printf(m, "%s %u",
72311- mod->name, mod->init_size + mod->core_size);
72312+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
72313 print_unload_info(m, mod);
72314
72315 /* Informative for users. */
72316@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
72317 mod->state == MODULE_STATE_COMING ? "Loading":
72318 "Live");
72319 /* Used by oprofile and other similar tools. */
72320- seq_printf(m, " 0x%p", mod->module_core);
72321+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
72322
72323 /* Taints info */
72324 if (mod->taints)
72325@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
72326
72327 static int __init proc_modules_init(void)
72328 {
72329+#ifndef CONFIG_GRKERNSEC_HIDESYM
72330+#ifdef CONFIG_GRKERNSEC_PROC_USER
72331+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72332+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72333+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
72334+#else
72335 proc_create("modules", 0, NULL, &proc_modules_operations);
72336+#endif
72337+#else
72338+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72339+#endif
72340 return 0;
72341 }
72342 module_init(proc_modules_init);
72343@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
72344 {
72345 struct module *mod;
72346
72347- if (addr < module_addr_min || addr > module_addr_max)
72348+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
72349+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
72350 return NULL;
72351
72352 list_for_each_entry_rcu(mod, &modules, list)
72353- if (within_module_core(addr, mod)
72354- || within_module_init(addr, mod))
72355+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
72356 return mod;
72357 return NULL;
72358 }
72359@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
72360 */
72361 struct module *__module_text_address(unsigned long addr)
72362 {
72363- struct module *mod = __module_address(addr);
72364+ struct module *mod;
72365+
72366+#ifdef CONFIG_X86_32
72367+ addr = ktla_ktva(addr);
72368+#endif
72369+
72370+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
72371+ return NULL;
72372+
72373+ mod = __module_address(addr);
72374+
72375 if (mod) {
72376 /* Make sure it's within the text section. */
72377- if (!within(addr, mod->module_init, mod->init_text_size)
72378- && !within(addr, mod->module_core, mod->core_text_size))
72379+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
72380 mod = NULL;
72381 }
72382 return mod;
72383diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
72384index ec815a9..fe46e99 100644
72385--- a/kernel/mutex-debug.c
72386+++ b/kernel/mutex-debug.c
72387@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
72388 }
72389
72390 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72391- struct thread_info *ti)
72392+ struct task_struct *task)
72393 {
72394 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
72395
72396 /* Mark the current thread as blocked on the lock: */
72397- ti->task->blocked_on = waiter;
72398+ task->blocked_on = waiter;
72399 }
72400
72401 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72402- struct thread_info *ti)
72403+ struct task_struct *task)
72404 {
72405 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
72406- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
72407- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
72408- ti->task->blocked_on = NULL;
72409+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
72410+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
72411+ task->blocked_on = NULL;
72412
72413 list_del_init(&waiter->list);
72414 waiter->task = NULL;
72415@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
72416 return;
72417
72418 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
72419- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
72420+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
72421 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
72422 mutex_clear_owner(lock);
72423 }
72424diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
72425index 6b2d735..372d3c4 100644
72426--- a/kernel/mutex-debug.h
72427+++ b/kernel/mutex-debug.h
72428@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
72429 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
72430 extern void debug_mutex_add_waiter(struct mutex *lock,
72431 struct mutex_waiter *waiter,
72432- struct thread_info *ti);
72433+ struct task_struct *task);
72434 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72435- struct thread_info *ti);
72436+ struct task_struct *task);
72437 extern void debug_mutex_unlock(struct mutex *lock);
72438 extern void debug_mutex_init(struct mutex *lock, const char *name,
72439 struct lock_class_key *key);
72440
72441 static inline void mutex_set_owner(struct mutex *lock)
72442 {
72443- lock->owner = current_thread_info();
72444+ lock->owner = current;
72445 }
72446
72447 static inline void mutex_clear_owner(struct mutex *lock)
72448diff --git a/kernel/mutex.c b/kernel/mutex.c
72449index f85644c..5ee9f77 100644
72450--- a/kernel/mutex.c
72451+++ b/kernel/mutex.c
72452@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72453 */
72454
72455 for (;;) {
72456- struct thread_info *owner;
72457+ struct task_struct *owner;
72458
72459 /*
72460 * If we own the BKL, then don't spin. The owner of
72461@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72462 spin_lock_mutex(&lock->wait_lock, flags);
72463
72464 debug_mutex_lock_common(lock, &waiter);
72465- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
72466+ debug_mutex_add_waiter(lock, &waiter, task);
72467
72468 /* add waiting tasks to the end of the waitqueue (FIFO): */
72469 list_add_tail(&waiter.list, &lock->wait_list);
72470@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72471 * TASK_UNINTERRUPTIBLE case.)
72472 */
72473 if (unlikely(signal_pending_state(state, task))) {
72474- mutex_remove_waiter(lock, &waiter,
72475- task_thread_info(task));
72476+ mutex_remove_waiter(lock, &waiter, task);
72477 mutex_release(&lock->dep_map, 1, ip);
72478 spin_unlock_mutex(&lock->wait_lock, flags);
72479
72480@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72481 done:
72482 lock_acquired(&lock->dep_map, ip);
72483 /* got the lock - rejoice! */
72484- mutex_remove_waiter(lock, &waiter, current_thread_info());
72485+ mutex_remove_waiter(lock, &waiter, task);
72486 mutex_set_owner(lock);
72487
72488 /* set it to 0 if there are no waiters left: */
72489diff --git a/kernel/mutex.h b/kernel/mutex.h
72490index 67578ca..4115fbf 100644
72491--- a/kernel/mutex.h
72492+++ b/kernel/mutex.h
72493@@ -19,7 +19,7 @@
72494 #ifdef CONFIG_SMP
72495 static inline void mutex_set_owner(struct mutex *lock)
72496 {
72497- lock->owner = current_thread_info();
72498+ lock->owner = current;
72499 }
72500
72501 static inline void mutex_clear_owner(struct mutex *lock)
72502diff --git a/kernel/panic.c b/kernel/panic.c
72503index 96b45d0..45c447a 100644
72504--- a/kernel/panic.c
72505+++ b/kernel/panic.c
72506@@ -352,7 +352,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
72507 const char *board;
72508
72509 printk(KERN_WARNING "------------[ cut here ]------------\n");
72510- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
72511+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
72512 board = dmi_get_system_info(DMI_PRODUCT_NAME);
72513 if (board)
72514 printk(KERN_WARNING "Hardware name: %s\n", board);
72515@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
72516 */
72517 void __stack_chk_fail(void)
72518 {
72519- panic("stack-protector: Kernel stack is corrupted in: %p\n",
72520+ dump_stack();
72521+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
72522 __builtin_return_address(0));
72523 }
72524 EXPORT_SYMBOL(__stack_chk_fail);
72525diff --git a/kernel/params.c b/kernel/params.c
72526index d656c27..21e452c 100644
72527--- a/kernel/params.c
72528+++ b/kernel/params.c
72529@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
72530 return ret;
72531 }
72532
72533-static struct sysfs_ops module_sysfs_ops = {
72534+static const struct sysfs_ops module_sysfs_ops = {
72535 .show = module_attr_show,
72536 .store = module_attr_store,
72537 };
72538@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
72539 return 0;
72540 }
72541
72542-static struct kset_uevent_ops module_uevent_ops = {
72543+static const struct kset_uevent_ops module_uevent_ops = {
72544 .filter = uevent_filter,
72545 };
72546
72547diff --git a/kernel/perf_event.c b/kernel/perf_event.c
72548index 37ebc14..9c121d9 100644
72549--- a/kernel/perf_event.c
72550+++ b/kernel/perf_event.c
72551@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
72552 */
72553 int sysctl_perf_event_sample_rate __read_mostly = 100000;
72554
72555-static atomic64_t perf_event_id;
72556+static atomic64_unchecked_t perf_event_id;
72557
72558 /*
72559 * Lock for (sysadmin-configurable) event reservations:
72560@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
72561 * In order to keep per-task stats reliable we need to flip the event
72562 * values when we flip the contexts.
72563 */
72564- value = atomic64_read(&next_event->count);
72565- value = atomic64_xchg(&event->count, value);
72566- atomic64_set(&next_event->count, value);
72567+ value = atomic64_read_unchecked(&next_event->count);
72568+ value = atomic64_xchg_unchecked(&event->count, value);
72569+ atomic64_set_unchecked(&next_event->count, value);
72570
72571 swap(event->total_time_enabled, next_event->total_time_enabled);
72572 swap(event->total_time_running, next_event->total_time_running);
72573@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
72574 update_event_times(event);
72575 }
72576
72577- return atomic64_read(&event->count);
72578+ return atomic64_read_unchecked(&event->count);
72579 }
72580
72581 /*
72582@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
72583 values[n++] = 1 + leader->nr_siblings;
72584 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72585 values[n++] = leader->total_time_enabled +
72586- atomic64_read(&leader->child_total_time_enabled);
72587+ atomic64_read_unchecked(&leader->child_total_time_enabled);
72588 }
72589 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72590 values[n++] = leader->total_time_running +
72591- atomic64_read(&leader->child_total_time_running);
72592+ atomic64_read_unchecked(&leader->child_total_time_running);
72593 }
72594
72595 size = n * sizeof(u64);
72596@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
72597 values[n++] = perf_event_read_value(event);
72598 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72599 values[n++] = event->total_time_enabled +
72600- atomic64_read(&event->child_total_time_enabled);
72601+ atomic64_read_unchecked(&event->child_total_time_enabled);
72602 }
72603 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72604 values[n++] = event->total_time_running +
72605- atomic64_read(&event->child_total_time_running);
72606+ atomic64_read_unchecked(&event->child_total_time_running);
72607 }
72608 if (read_format & PERF_FORMAT_ID)
72609 values[n++] = primary_event_id(event);
72610@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
72611 static void perf_event_reset(struct perf_event *event)
72612 {
72613 (void)perf_event_read(event);
72614- atomic64_set(&event->count, 0);
72615+ atomic64_set_unchecked(&event->count, 0);
72616 perf_event_update_userpage(event);
72617 }
72618
72619@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
72620 ++userpg->lock;
72621 barrier();
72622 userpg->index = perf_event_index(event);
72623- userpg->offset = atomic64_read(&event->count);
72624+ userpg->offset = atomic64_read_unchecked(&event->count);
72625 if (event->state == PERF_EVENT_STATE_ACTIVE)
72626- userpg->offset -= atomic64_read(&event->hw.prev_count);
72627+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
72628
72629 userpg->time_enabled = event->total_time_enabled +
72630- atomic64_read(&event->child_total_time_enabled);
72631+ atomic64_read_unchecked(&event->child_total_time_enabled);
72632
72633 userpg->time_running = event->total_time_running +
72634- atomic64_read(&event->child_total_time_running);
72635+ atomic64_read_unchecked(&event->child_total_time_running);
72636
72637 barrier();
72638 ++userpg->lock;
72639@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
72640 u64 values[4];
72641 int n = 0;
72642
72643- values[n++] = atomic64_read(&event->count);
72644+ values[n++] = atomic64_read_unchecked(&event->count);
72645 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72646 values[n++] = event->total_time_enabled +
72647- atomic64_read(&event->child_total_time_enabled);
72648+ atomic64_read_unchecked(&event->child_total_time_enabled);
72649 }
72650 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72651 values[n++] = event->total_time_running +
72652- atomic64_read(&event->child_total_time_running);
72653+ atomic64_read_unchecked(&event->child_total_time_running);
72654 }
72655 if (read_format & PERF_FORMAT_ID)
72656 values[n++] = primary_event_id(event);
72657@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72658 if (leader != event)
72659 leader->pmu->read(leader);
72660
72661- values[n++] = atomic64_read(&leader->count);
72662+ values[n++] = atomic64_read_unchecked(&leader->count);
72663 if (read_format & PERF_FORMAT_ID)
72664 values[n++] = primary_event_id(leader);
72665
72666@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72667 if (sub != event)
72668 sub->pmu->read(sub);
72669
72670- values[n++] = atomic64_read(&sub->count);
72671+ values[n++] = atomic64_read_unchecked(&sub->count);
72672 if (read_format & PERF_FORMAT_ID)
72673 values[n++] = primary_event_id(sub);
72674
72675@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
72676 * need to add enough zero bytes after the string to handle
72677 * the 64bit alignment we do later.
72678 */
72679- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
72680+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
72681 if (!buf) {
72682 name = strncpy(tmp, "//enomem", sizeof(tmp));
72683 goto got_name;
72684 }
72685- name = d_path(&file->f_path, buf, PATH_MAX);
72686+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
72687 if (IS_ERR(name)) {
72688 name = strncpy(tmp, "//toolong", sizeof(tmp));
72689 goto got_name;
72690@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
72691 {
72692 struct hw_perf_event *hwc = &event->hw;
72693
72694- atomic64_add(nr, &event->count);
72695+ atomic64_add_unchecked(nr, &event->count);
72696
72697 if (!hwc->sample_period)
72698 return;
72699@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
72700 u64 now;
72701
72702 now = cpu_clock(cpu);
72703- prev = atomic64_read(&event->hw.prev_count);
72704- atomic64_set(&event->hw.prev_count, now);
72705- atomic64_add(now - prev, &event->count);
72706+ prev = atomic64_read_unchecked(&event->hw.prev_count);
72707+ atomic64_set_unchecked(&event->hw.prev_count, now);
72708+ atomic64_add_unchecked(now - prev, &event->count);
72709 }
72710
72711 static int cpu_clock_perf_event_enable(struct perf_event *event)
72712@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
72713 struct hw_perf_event *hwc = &event->hw;
72714 int cpu = raw_smp_processor_id();
72715
72716- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
72717+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
72718 perf_swevent_start_hrtimer(event);
72719
72720 return 0;
72721@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
72722 u64 prev;
72723 s64 delta;
72724
72725- prev = atomic64_xchg(&event->hw.prev_count, now);
72726+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
72727 delta = now - prev;
72728- atomic64_add(delta, &event->count);
72729+ atomic64_add_unchecked(delta, &event->count);
72730 }
72731
72732 static int task_clock_perf_event_enable(struct perf_event *event)
72733@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
72734
72735 now = event->ctx->time;
72736
72737- atomic64_set(&hwc->prev_count, now);
72738+ atomic64_set_unchecked(&hwc->prev_count, now);
72739
72740 perf_swevent_start_hrtimer(event);
72741
72742@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
72743 event->parent = parent_event;
72744
72745 event->ns = get_pid_ns(current->nsproxy->pid_ns);
72746- event->id = atomic64_inc_return(&perf_event_id);
72747+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
72748
72749 event->state = PERF_EVENT_STATE_INACTIVE;
72750
72751@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
72752 if (child_event->attr.inherit_stat)
72753 perf_event_read_event(child_event, child);
72754
72755- child_val = atomic64_read(&child_event->count);
72756+ child_val = atomic64_read_unchecked(&child_event->count);
72757
72758 /*
72759 * Add back the child's count to the parent's count:
72760 */
72761- atomic64_add(child_val, &parent_event->count);
72762- atomic64_add(child_event->total_time_enabled,
72763+ atomic64_add_unchecked(child_val, &parent_event->count);
72764+ atomic64_add_unchecked(child_event->total_time_enabled,
72765 &parent_event->child_total_time_enabled);
72766- atomic64_add(child_event->total_time_running,
72767+ atomic64_add_unchecked(child_event->total_time_running,
72768 &parent_event->child_total_time_running);
72769
72770 /*
72771diff --git a/kernel/pid.c b/kernel/pid.c
72772index fce7198..4f23a7e 100644
72773--- a/kernel/pid.c
72774+++ b/kernel/pid.c
72775@@ -33,6 +33,7 @@
72776 #include <linux/rculist.h>
72777 #include <linux/bootmem.h>
72778 #include <linux/hash.h>
72779+#include <linux/security.h>
72780 #include <linux/pid_namespace.h>
72781 #include <linux/init_task.h>
72782 #include <linux/syscalls.h>
72783@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
72784
72785 int pid_max = PID_MAX_DEFAULT;
72786
72787-#define RESERVED_PIDS 300
72788+#define RESERVED_PIDS 500
72789
72790 int pid_max_min = RESERVED_PIDS + 1;
72791 int pid_max_max = PID_MAX_LIMIT;
72792@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
72793 */
72794 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
72795 {
72796- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72797+ struct task_struct *task;
72798+
72799+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72800+
72801+ if (gr_pid_is_chrooted(task))
72802+ return NULL;
72803+
72804+ return task;
72805 }
72806
72807 struct task_struct *find_task_by_vpid(pid_t vnr)
72808@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
72809 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
72810 }
72811
72812+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
72813+{
72814+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
72815+}
72816+
72817 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
72818 {
72819 struct pid *pid;
72820diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
72821index 5c9dc22..d271117 100644
72822--- a/kernel/posix-cpu-timers.c
72823+++ b/kernel/posix-cpu-timers.c
72824@@ -6,6 +6,7 @@
72825 #include <linux/posix-timers.h>
72826 #include <linux/errno.h>
72827 #include <linux/math64.h>
72828+#include <linux/security.h>
72829 #include <asm/uaccess.h>
72830 #include <linux/kernel_stat.h>
72831 #include <trace/events/timer.h>
72832@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
72833
72834 static __init int init_posix_cpu_timers(void)
72835 {
72836- struct k_clock process = {
72837+ static struct k_clock process = {
72838 .clock_getres = process_cpu_clock_getres,
72839 .clock_get = process_cpu_clock_get,
72840 .clock_set = do_posix_clock_nosettime,
72841@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
72842 .nsleep = process_cpu_nsleep,
72843 .nsleep_restart = process_cpu_nsleep_restart,
72844 };
72845- struct k_clock thread = {
72846+ static struct k_clock thread = {
72847 .clock_getres = thread_cpu_clock_getres,
72848 .clock_get = thread_cpu_clock_get,
72849 .clock_set = do_posix_clock_nosettime,
72850diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
72851index 5e76d22..cf1baeb 100644
72852--- a/kernel/posix-timers.c
72853+++ b/kernel/posix-timers.c
72854@@ -42,6 +42,7 @@
72855 #include <linux/compiler.h>
72856 #include <linux/idr.h>
72857 #include <linux/posix-timers.h>
72858+#include <linux/grsecurity.h>
72859 #include <linux/syscalls.h>
72860 #include <linux/wait.h>
72861 #include <linux/workqueue.h>
72862@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
72863 * which we beg off on and pass to do_sys_settimeofday().
72864 */
72865
72866-static struct k_clock posix_clocks[MAX_CLOCKS];
72867+static struct k_clock *posix_clocks[MAX_CLOCKS];
72868
72869 /*
72870 * These ones are defined below.
72871@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
72872 */
72873 #define CLOCK_DISPATCH(clock, call, arglist) \
72874 ((clock) < 0 ? posix_cpu_##call arglist : \
72875- (posix_clocks[clock].call != NULL \
72876- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
72877+ (posix_clocks[clock]->call != NULL \
72878+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
72879
72880 /*
72881 * Default clock hook functions when the struct k_clock passed
72882@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
72883 struct timespec *tp)
72884 {
72885 tp->tv_sec = 0;
72886- tp->tv_nsec = posix_clocks[which_clock].res;
72887+ tp->tv_nsec = posix_clocks[which_clock]->res;
72888 return 0;
72889 }
72890
72891@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
72892 return 0;
72893 if ((unsigned) which_clock >= MAX_CLOCKS)
72894 return 1;
72895- if (posix_clocks[which_clock].clock_getres != NULL)
72896+ if (posix_clocks[which_clock] == NULL)
72897 return 0;
72898- if (posix_clocks[which_clock].res != 0)
72899+ if (posix_clocks[which_clock]->clock_getres != NULL)
72900+ return 0;
72901+ if (posix_clocks[which_clock]->res != 0)
72902 return 0;
72903 return 1;
72904 }
72905@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
72906 */
72907 static __init int init_posix_timers(void)
72908 {
72909- struct k_clock clock_realtime = {
72910+ static struct k_clock clock_realtime = {
72911 .clock_getres = hrtimer_get_res,
72912 };
72913- struct k_clock clock_monotonic = {
72914+ static struct k_clock clock_monotonic = {
72915 .clock_getres = hrtimer_get_res,
72916 .clock_get = posix_ktime_get_ts,
72917 .clock_set = do_posix_clock_nosettime,
72918 };
72919- struct k_clock clock_monotonic_raw = {
72920+ static struct k_clock clock_monotonic_raw = {
72921 .clock_getres = hrtimer_get_res,
72922 .clock_get = posix_get_monotonic_raw,
72923 .clock_set = do_posix_clock_nosettime,
72924 .timer_create = no_timer_create,
72925 .nsleep = no_nsleep,
72926 };
72927- struct k_clock clock_realtime_coarse = {
72928+ static struct k_clock clock_realtime_coarse = {
72929 .clock_getres = posix_get_coarse_res,
72930 .clock_get = posix_get_realtime_coarse,
72931 .clock_set = do_posix_clock_nosettime,
72932 .timer_create = no_timer_create,
72933 .nsleep = no_nsleep,
72934 };
72935- struct k_clock clock_monotonic_coarse = {
72936+ static struct k_clock clock_monotonic_coarse = {
72937 .clock_getres = posix_get_coarse_res,
72938 .clock_get = posix_get_monotonic_coarse,
72939 .clock_set = do_posix_clock_nosettime,
72940@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
72941 .nsleep = no_nsleep,
72942 };
72943
72944+ pax_track_stack();
72945+
72946 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
72947 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
72948 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
72949@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
72950 return;
72951 }
72952
72953- posix_clocks[clock_id] = *new_clock;
72954+ posix_clocks[clock_id] = new_clock;
72955 }
72956 EXPORT_SYMBOL_GPL(register_posix_clock);
72957
72958@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
72959 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
72960 return -EFAULT;
72961
72962+ /* only the CLOCK_REALTIME clock can be set, all other clocks
72963+ have their clock_set fptr set to a nosettime dummy function
72964+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
72965+ call common_clock_set, which calls do_sys_settimeofday, which
72966+ we hook
72967+ */
72968+
72969 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
72970 }
72971
72972diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
72973index 04a9e90..bc355aa 100644
72974--- a/kernel/power/hibernate.c
72975+++ b/kernel/power/hibernate.c
72976@@ -48,14 +48,14 @@ enum {
72977
72978 static int hibernation_mode = HIBERNATION_SHUTDOWN;
72979
72980-static struct platform_hibernation_ops *hibernation_ops;
72981+static const struct platform_hibernation_ops *hibernation_ops;
72982
72983 /**
72984 * hibernation_set_ops - set the global hibernate operations
72985 * @ops: the hibernation operations to use in subsequent hibernation transitions
72986 */
72987
72988-void hibernation_set_ops(struct platform_hibernation_ops *ops)
72989+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
72990 {
72991 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
72992 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
72993diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
72994index e8b3370..484c2e4 100644
72995--- a/kernel/power/poweroff.c
72996+++ b/kernel/power/poweroff.c
72997@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
72998 .enable_mask = SYSRQ_ENABLE_BOOT,
72999 };
73000
73001-static int pm_sysrq_init(void)
73002+static int __init pm_sysrq_init(void)
73003 {
73004 register_sysrq_key('o', &sysrq_poweroff_op);
73005 return 0;
73006diff --git a/kernel/power/process.c b/kernel/power/process.c
73007index e7cd671..56d5f459 100644
73008--- a/kernel/power/process.c
73009+++ b/kernel/power/process.c
73010@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
73011 struct timeval start, end;
73012 u64 elapsed_csecs64;
73013 unsigned int elapsed_csecs;
73014+ bool timedout = false;
73015
73016 do_gettimeofday(&start);
73017
73018 end_time = jiffies + TIMEOUT;
73019 do {
73020 todo = 0;
73021+ if (time_after(jiffies, end_time))
73022+ timedout = true;
73023 read_lock(&tasklist_lock);
73024 do_each_thread(g, p) {
73025 if (frozen(p) || !freezeable(p))
73026@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
73027 * It is "frozen enough". If the task does wake
73028 * up, it will immediately call try_to_freeze.
73029 */
73030- if (!task_is_stopped_or_traced(p) &&
73031- !freezer_should_skip(p))
73032+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
73033 todo++;
73034+ if (timedout) {
73035+ printk(KERN_ERR "Task refusing to freeze:\n");
73036+ sched_show_task(p);
73037+ }
73038+ }
73039 } while_each_thread(g, p);
73040 read_unlock(&tasklist_lock);
73041 yield(); /* Yield is okay here */
73042- if (time_after(jiffies, end_time))
73043- break;
73044- } while (todo);
73045+ } while (todo && !timedout);
73046
73047 do_gettimeofday(&end);
73048 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
73049diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
73050index 40dd021..fb30ceb 100644
73051--- a/kernel/power/suspend.c
73052+++ b/kernel/power/suspend.c
73053@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
73054 [PM_SUSPEND_MEM] = "mem",
73055 };
73056
73057-static struct platform_suspend_ops *suspend_ops;
73058+static const struct platform_suspend_ops *suspend_ops;
73059
73060 /**
73061 * suspend_set_ops - Set the global suspend method table.
73062 * @ops: Pointer to ops structure.
73063 */
73064-void suspend_set_ops(struct platform_suspend_ops *ops)
73065+void suspend_set_ops(const struct platform_suspend_ops *ops)
73066 {
73067 mutex_lock(&pm_mutex);
73068 suspend_ops = ops;
73069diff --git a/kernel/printk.c b/kernel/printk.c
73070index 4cade47..637e78a 100644
73071--- a/kernel/printk.c
73072+++ b/kernel/printk.c
73073@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf, int len)
73074 char c;
73075 int error = 0;
73076
73077+#ifdef CONFIG_GRKERNSEC_DMESG
73078+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
73079+ return -EPERM;
73080+#endif
73081+
73082 error = security_syslog(type);
73083 if (error)
73084 return error;
73085diff --git a/kernel/profile.c b/kernel/profile.c
73086index dfadc5b..7f59404 100644
73087--- a/kernel/profile.c
73088+++ b/kernel/profile.c
73089@@ -39,7 +39,7 @@ struct profile_hit {
73090 /* Oprofile timer tick hook */
73091 static int (*timer_hook)(struct pt_regs *) __read_mostly;
73092
73093-static atomic_t *prof_buffer;
73094+static atomic_unchecked_t *prof_buffer;
73095 static unsigned long prof_len, prof_shift;
73096
73097 int prof_on __read_mostly;
73098@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
73099 hits[i].pc = 0;
73100 continue;
73101 }
73102- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73103+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73104 hits[i].hits = hits[i].pc = 0;
73105 }
73106 }
73107@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73108 * Add the current hit(s) and flush the write-queue out
73109 * to the global buffer:
73110 */
73111- atomic_add(nr_hits, &prof_buffer[pc]);
73112+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
73113 for (i = 0; i < NR_PROFILE_HIT; ++i) {
73114- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73115+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73116 hits[i].pc = hits[i].hits = 0;
73117 }
73118 out:
73119@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73120 if (prof_on != type || !prof_buffer)
73121 return;
73122 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
73123- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73124+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73125 }
73126 #endif /* !CONFIG_SMP */
73127 EXPORT_SYMBOL_GPL(profile_hits);
73128@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
73129 return -EFAULT;
73130 buf++; p++; count--; read++;
73131 }
73132- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
73133+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
73134 if (copy_to_user(buf, (void *)pnt, count))
73135 return -EFAULT;
73136 read += count;
73137@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
73138 }
73139 #endif
73140 profile_discard_flip_buffers();
73141- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
73142+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
73143 return count;
73144 }
73145
73146diff --git a/kernel/ptrace.c b/kernel/ptrace.c
73147index 05625f6..733bf70 100644
73148--- a/kernel/ptrace.c
73149+++ b/kernel/ptrace.c
73150@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
73151 return ret;
73152 }
73153
73154-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73155+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
73156+ unsigned int log)
73157 {
73158 const struct cred *cred = current_cred(), *tcred;
73159
73160@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73161 cred->gid != tcred->egid ||
73162 cred->gid != tcred->sgid ||
73163 cred->gid != tcred->gid) &&
73164- !capable(CAP_SYS_PTRACE)) {
73165+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73166+ (log && !capable(CAP_SYS_PTRACE)))
73167+ ) {
73168 rcu_read_unlock();
73169 return -EPERM;
73170 }
73171@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73172 smp_rmb();
73173 if (task->mm)
73174 dumpable = get_dumpable(task->mm);
73175- if (!dumpable && !capable(CAP_SYS_PTRACE))
73176+ if (!dumpable &&
73177+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73178+ (log && !capable(CAP_SYS_PTRACE))))
73179 return -EPERM;
73180
73181 return security_ptrace_access_check(task, mode);
73182@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
73183 {
73184 int err;
73185 task_lock(task);
73186- err = __ptrace_may_access(task, mode);
73187+ err = __ptrace_may_access(task, mode, 0);
73188+ task_unlock(task);
73189+ return !err;
73190+}
73191+
73192+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
73193+{
73194+ int err;
73195+ task_lock(task);
73196+ err = __ptrace_may_access(task, mode, 1);
73197 task_unlock(task);
73198 return !err;
73199 }
73200@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
73201 goto out;
73202
73203 task_lock(task);
73204- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
73205+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
73206 task_unlock(task);
73207 if (retval)
73208 goto unlock_creds;
73209@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
73210 goto unlock_tasklist;
73211
73212 task->ptrace = PT_PTRACED;
73213- if (capable(CAP_SYS_PTRACE))
73214+ if (capable_nolog(CAP_SYS_PTRACE))
73215 task->ptrace |= PT_PTRACE_CAP;
73216
73217 __ptrace_link(task, current);
73218@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
73219 {
73220 int copied = 0;
73221
73222+ pax_track_stack();
73223+
73224 while (len > 0) {
73225 char buf[128];
73226 int this_len, retval;
73227@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
73228 {
73229 int copied = 0;
73230
73231+ pax_track_stack();
73232+
73233 while (len > 0) {
73234 char buf[128];
73235 int this_len, retval;
73236@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
73237 int ret = -EIO;
73238 siginfo_t siginfo;
73239
73240+ pax_track_stack();
73241+
73242 switch (request) {
73243 case PTRACE_PEEKTEXT:
73244 case PTRACE_PEEKDATA:
73245@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
73246 ret = ptrace_setoptions(child, data);
73247 break;
73248 case PTRACE_GETEVENTMSG:
73249- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
73250+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
73251 break;
73252
73253 case PTRACE_GETSIGINFO:
73254 ret = ptrace_getsiginfo(child, &siginfo);
73255 if (!ret)
73256- ret = copy_siginfo_to_user((siginfo_t __user *) data,
73257+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
73258 &siginfo);
73259 break;
73260
73261 case PTRACE_SETSIGINFO:
73262- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
73263+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
73264 sizeof siginfo))
73265 ret = -EFAULT;
73266 else
73267@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
73268 goto out;
73269 }
73270
73271+ if (gr_handle_ptrace(child, request)) {
73272+ ret = -EPERM;
73273+ goto out_put_task_struct;
73274+ }
73275+
73276 if (request == PTRACE_ATTACH) {
73277 ret = ptrace_attach(child);
73278 /*
73279 * Some architectures need to do book-keeping after
73280 * a ptrace attach.
73281 */
73282- if (!ret)
73283+ if (!ret) {
73284 arch_ptrace_attach(child);
73285+ gr_audit_ptrace(child);
73286+ }
73287 goto out_put_task_struct;
73288 }
73289
73290@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
73291 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
73292 if (copied != sizeof(tmp))
73293 return -EIO;
73294- return put_user(tmp, (unsigned long __user *)data);
73295+ return put_user(tmp, (__force unsigned long __user *)data);
73296 }
73297
73298 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
73299@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
73300 siginfo_t siginfo;
73301 int ret;
73302
73303+ pax_track_stack();
73304+
73305 switch (request) {
73306 case PTRACE_PEEKTEXT:
73307 case PTRACE_PEEKDATA:
73308@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
73309 goto out;
73310 }
73311
73312+ if (gr_handle_ptrace(child, request)) {
73313+ ret = -EPERM;
73314+ goto out_put_task_struct;
73315+ }
73316+
73317 if (request == PTRACE_ATTACH) {
73318 ret = ptrace_attach(child);
73319 /*
73320 * Some architectures need to do book-keeping after
73321 * a ptrace attach.
73322 */
73323- if (!ret)
73324+ if (!ret) {
73325 arch_ptrace_attach(child);
73326+ gr_audit_ptrace(child);
73327+ }
73328 goto out_put_task_struct;
73329 }
73330
73331diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
73332index 697c0a0..2402696 100644
73333--- a/kernel/rcutorture.c
73334+++ b/kernel/rcutorture.c
73335@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
73336 { 0 };
73337 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
73338 { 0 };
73339-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73340-static atomic_t n_rcu_torture_alloc;
73341-static atomic_t n_rcu_torture_alloc_fail;
73342-static atomic_t n_rcu_torture_free;
73343-static atomic_t n_rcu_torture_mberror;
73344-static atomic_t n_rcu_torture_error;
73345+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73346+static atomic_unchecked_t n_rcu_torture_alloc;
73347+static atomic_unchecked_t n_rcu_torture_alloc_fail;
73348+static atomic_unchecked_t n_rcu_torture_free;
73349+static atomic_unchecked_t n_rcu_torture_mberror;
73350+static atomic_unchecked_t n_rcu_torture_error;
73351 static long n_rcu_torture_timers;
73352 static struct list_head rcu_torture_removed;
73353 static cpumask_var_t shuffle_tmp_mask;
73354@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
73355
73356 spin_lock_bh(&rcu_torture_lock);
73357 if (list_empty(&rcu_torture_freelist)) {
73358- atomic_inc(&n_rcu_torture_alloc_fail);
73359+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
73360 spin_unlock_bh(&rcu_torture_lock);
73361 return NULL;
73362 }
73363- atomic_inc(&n_rcu_torture_alloc);
73364+ atomic_inc_unchecked(&n_rcu_torture_alloc);
73365 p = rcu_torture_freelist.next;
73366 list_del_init(p);
73367 spin_unlock_bh(&rcu_torture_lock);
73368@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
73369 static void
73370 rcu_torture_free(struct rcu_torture *p)
73371 {
73372- atomic_inc(&n_rcu_torture_free);
73373+ atomic_inc_unchecked(&n_rcu_torture_free);
73374 spin_lock_bh(&rcu_torture_lock);
73375 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
73376 spin_unlock_bh(&rcu_torture_lock);
73377@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
73378 i = rp->rtort_pipe_count;
73379 if (i > RCU_TORTURE_PIPE_LEN)
73380 i = RCU_TORTURE_PIPE_LEN;
73381- atomic_inc(&rcu_torture_wcount[i]);
73382+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73383 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73384 rp->rtort_mbtest = 0;
73385 rcu_torture_free(rp);
73386@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
73387 i = rp->rtort_pipe_count;
73388 if (i > RCU_TORTURE_PIPE_LEN)
73389 i = RCU_TORTURE_PIPE_LEN;
73390- atomic_inc(&rcu_torture_wcount[i]);
73391+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73392 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73393 rp->rtort_mbtest = 0;
73394 list_del(&rp->rtort_free);
73395@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
73396 i = old_rp->rtort_pipe_count;
73397 if (i > RCU_TORTURE_PIPE_LEN)
73398 i = RCU_TORTURE_PIPE_LEN;
73399- atomic_inc(&rcu_torture_wcount[i]);
73400+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73401 old_rp->rtort_pipe_count++;
73402 cur_ops->deferred_free(old_rp);
73403 }
73404@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
73405 return;
73406 }
73407 if (p->rtort_mbtest == 0)
73408- atomic_inc(&n_rcu_torture_mberror);
73409+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73410 spin_lock(&rand_lock);
73411 cur_ops->read_delay(&rand);
73412 n_rcu_torture_timers++;
73413@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
73414 continue;
73415 }
73416 if (p->rtort_mbtest == 0)
73417- atomic_inc(&n_rcu_torture_mberror);
73418+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73419 cur_ops->read_delay(&rand);
73420 preempt_disable();
73421 pipe_count = p->rtort_pipe_count;
73422@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
73423 rcu_torture_current,
73424 rcu_torture_current_version,
73425 list_empty(&rcu_torture_freelist),
73426- atomic_read(&n_rcu_torture_alloc),
73427- atomic_read(&n_rcu_torture_alloc_fail),
73428- atomic_read(&n_rcu_torture_free),
73429- atomic_read(&n_rcu_torture_mberror),
73430+ atomic_read_unchecked(&n_rcu_torture_alloc),
73431+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
73432+ atomic_read_unchecked(&n_rcu_torture_free),
73433+ atomic_read_unchecked(&n_rcu_torture_mberror),
73434 n_rcu_torture_timers);
73435- if (atomic_read(&n_rcu_torture_mberror) != 0)
73436+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
73437 cnt += sprintf(&page[cnt], " !!!");
73438 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
73439 if (i > 1) {
73440 cnt += sprintf(&page[cnt], "!!! ");
73441- atomic_inc(&n_rcu_torture_error);
73442+ atomic_inc_unchecked(&n_rcu_torture_error);
73443 WARN_ON_ONCE(1);
73444 }
73445 cnt += sprintf(&page[cnt], "Reader Pipe: ");
73446@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
73447 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
73448 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73449 cnt += sprintf(&page[cnt], " %d",
73450- atomic_read(&rcu_torture_wcount[i]));
73451+ atomic_read_unchecked(&rcu_torture_wcount[i]));
73452 }
73453 cnt += sprintf(&page[cnt], "\n");
73454 if (cur_ops->stats)
73455@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
73456
73457 if (cur_ops->cleanup)
73458 cur_ops->cleanup();
73459- if (atomic_read(&n_rcu_torture_error))
73460+ if (atomic_read_unchecked(&n_rcu_torture_error))
73461 rcu_torture_print_module_parms("End of test: FAILURE");
73462 else
73463 rcu_torture_print_module_parms("End of test: SUCCESS");
73464@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
73465
73466 rcu_torture_current = NULL;
73467 rcu_torture_current_version = 0;
73468- atomic_set(&n_rcu_torture_alloc, 0);
73469- atomic_set(&n_rcu_torture_alloc_fail, 0);
73470- atomic_set(&n_rcu_torture_free, 0);
73471- atomic_set(&n_rcu_torture_mberror, 0);
73472- atomic_set(&n_rcu_torture_error, 0);
73473+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
73474+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
73475+ atomic_set_unchecked(&n_rcu_torture_free, 0);
73476+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
73477+ atomic_set_unchecked(&n_rcu_torture_error, 0);
73478 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
73479- atomic_set(&rcu_torture_wcount[i], 0);
73480+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
73481 for_each_possible_cpu(cpu) {
73482 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73483 per_cpu(rcu_torture_count, cpu)[i] = 0;
73484diff --git a/kernel/rcutree.c b/kernel/rcutree.c
73485index 683c4f3..97f54c6 100644
73486--- a/kernel/rcutree.c
73487+++ b/kernel/rcutree.c
73488@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
73489 /*
73490 * Do softirq processing for the current CPU.
73491 */
73492-static void rcu_process_callbacks(struct softirq_action *unused)
73493+static void rcu_process_callbacks(void)
73494 {
73495 /*
73496 * Memory references from any prior RCU read-side critical sections
73497diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
73498index c03edf7..ac1b341 100644
73499--- a/kernel/rcutree_plugin.h
73500+++ b/kernel/rcutree_plugin.h
73501@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
73502 */
73503 void __rcu_read_lock(void)
73504 {
73505- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
73506+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
73507 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
73508 }
73509 EXPORT_SYMBOL_GPL(__rcu_read_lock);
73510@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
73511 struct task_struct *t = current;
73512
73513 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
73514- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
73515+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
73516 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
73517 rcu_read_unlock_special(t);
73518 }
73519diff --git a/kernel/relay.c b/kernel/relay.c
73520index 760c262..a9fd241 100644
73521--- a/kernel/relay.c
73522+++ b/kernel/relay.c
73523@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
73524 unsigned int flags,
73525 int *nonpad_ret)
73526 {
73527- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
73528+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
73529 struct rchan_buf *rbuf = in->private_data;
73530 unsigned int subbuf_size = rbuf->chan->subbuf_size;
73531 uint64_t pos = (uint64_t) *ppos;
73532@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
73533 .ops = &relay_pipe_buf_ops,
73534 .spd_release = relay_page_release,
73535 };
73536+ ssize_t ret;
73537+
73538+ pax_track_stack();
73539
73540 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
73541 return 0;
73542diff --git a/kernel/resource.c b/kernel/resource.c
73543index fb11a58..4e61ae1 100644
73544--- a/kernel/resource.c
73545+++ b/kernel/resource.c
73546@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
73547
73548 static int __init ioresources_init(void)
73549 {
73550+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73551+#ifdef CONFIG_GRKERNSEC_PROC_USER
73552+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
73553+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
73554+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73555+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
73556+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
73557+#endif
73558+#else
73559 proc_create("ioports", 0, NULL, &proc_ioports_operations);
73560 proc_create("iomem", 0, NULL, &proc_iomem_operations);
73561+#endif
73562 return 0;
73563 }
73564 __initcall(ioresources_init);
73565diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
73566index a56f629..1fc4989 100644
73567--- a/kernel/rtmutex-tester.c
73568+++ b/kernel/rtmutex-tester.c
73569@@ -21,7 +21,7 @@
73570 #define MAX_RT_TEST_MUTEXES 8
73571
73572 static spinlock_t rttest_lock;
73573-static atomic_t rttest_event;
73574+static atomic_unchecked_t rttest_event;
73575
73576 struct test_thread_data {
73577 int opcode;
73578@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73579
73580 case RTTEST_LOCKCONT:
73581 td->mutexes[td->opdata] = 1;
73582- td->event = atomic_add_return(1, &rttest_event);
73583+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73584 return 0;
73585
73586 case RTTEST_RESET:
73587@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73588 return 0;
73589
73590 case RTTEST_RESETEVENT:
73591- atomic_set(&rttest_event, 0);
73592+ atomic_set_unchecked(&rttest_event, 0);
73593 return 0;
73594
73595 default:
73596@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73597 return ret;
73598
73599 td->mutexes[id] = 1;
73600- td->event = atomic_add_return(1, &rttest_event);
73601+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73602 rt_mutex_lock(&mutexes[id]);
73603- td->event = atomic_add_return(1, &rttest_event);
73604+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73605 td->mutexes[id] = 4;
73606 return 0;
73607
73608@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73609 return ret;
73610
73611 td->mutexes[id] = 1;
73612- td->event = atomic_add_return(1, &rttest_event);
73613+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73614 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
73615- td->event = atomic_add_return(1, &rttest_event);
73616+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73617 td->mutexes[id] = ret ? 0 : 4;
73618 return ret ? -EINTR : 0;
73619
73620@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73621 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
73622 return ret;
73623
73624- td->event = atomic_add_return(1, &rttest_event);
73625+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73626 rt_mutex_unlock(&mutexes[id]);
73627- td->event = atomic_add_return(1, &rttest_event);
73628+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73629 td->mutexes[id] = 0;
73630 return 0;
73631
73632@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73633 break;
73634
73635 td->mutexes[dat] = 2;
73636- td->event = atomic_add_return(1, &rttest_event);
73637+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73638 break;
73639
73640 case RTTEST_LOCKBKL:
73641@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73642 return;
73643
73644 td->mutexes[dat] = 3;
73645- td->event = atomic_add_return(1, &rttest_event);
73646+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73647 break;
73648
73649 case RTTEST_LOCKNOWAIT:
73650@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73651 return;
73652
73653 td->mutexes[dat] = 1;
73654- td->event = atomic_add_return(1, &rttest_event);
73655+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73656 return;
73657
73658 case RTTEST_LOCKBKL:
73659diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
73660index 29bd4ba..8c5de90 100644
73661--- a/kernel/rtmutex.c
73662+++ b/kernel/rtmutex.c
73663@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
73664 */
73665 spin_lock_irqsave(&pendowner->pi_lock, flags);
73666
73667- WARN_ON(!pendowner->pi_blocked_on);
73668+ BUG_ON(!pendowner->pi_blocked_on);
73669 WARN_ON(pendowner->pi_blocked_on != waiter);
73670 WARN_ON(pendowner->pi_blocked_on->lock != lock);
73671
73672diff --git a/kernel/sched.c b/kernel/sched.c
73673index 0591df8..6e343c3 100644
73674--- a/kernel/sched.c
73675+++ b/kernel/sched.c
73676@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
73677 {
73678 unsigned long flags;
73679 struct rq *rq;
73680- int cpu = get_cpu();
73681
73682 #ifdef CONFIG_SMP
73683+ int cpu = get_cpu();
73684+
73685 rq = task_rq_lock(p, &flags);
73686 p->state = TASK_WAKING;
73687
73688@@ -5043,7 +5044,7 @@ out:
73689 * In CONFIG_NO_HZ case, the idle load balance owner will do the
73690 * rebalancing for all the cpus for whom scheduler ticks are stopped.
73691 */
73692-static void run_rebalance_domains(struct softirq_action *h)
73693+static void run_rebalance_domains(void)
73694 {
73695 int this_cpu = smp_processor_id();
73696 struct rq *this_rq = cpu_rq(this_cpu);
73697@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
73698 struct rq *rq;
73699 int cpu;
73700
73701+ pax_track_stack();
73702+
73703 need_resched:
73704 preempt_disable();
73705 cpu = smp_processor_id();
73706@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
73707 * Look out! "owner" is an entirely speculative pointer
73708 * access and not reliable.
73709 */
73710-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73711+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
73712 {
73713 unsigned int cpu;
73714 struct rq *rq;
73715@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73716 * DEBUG_PAGEALLOC could have unmapped it if
73717 * the mutex owner just released it and exited.
73718 */
73719- if (probe_kernel_address(&owner->cpu, cpu))
73720+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
73721 return 0;
73722 #else
73723- cpu = owner->cpu;
73724+ cpu = task_thread_info(owner)->cpu;
73725 #endif
73726
73727 /*
73728@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73729 /*
73730 * Is that owner really running on that cpu?
73731 */
73732- if (task_thread_info(rq->curr) != owner || need_resched())
73733+ if (rq->curr != owner || need_resched())
73734 return 0;
73735
73736 cpu_relax();
73737@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p, const int nice)
73738 /* convert nice value [19,-20] to rlimit style value [1,40] */
73739 int nice_rlim = 20 - nice;
73740
73741+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
73742+
73743 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
73744 capable(CAP_SYS_NICE));
73745 }
73746@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
73747 if (nice > 19)
73748 nice = 19;
73749
73750- if (increment < 0 && !can_nice(current, nice))
73751+ if (increment < 0 && (!can_nice(current, nice) ||
73752+ gr_handle_chroot_nice()))
73753 return -EPERM;
73754
73755 retval = security_task_setnice(current, nice);
73756@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
73757 long power;
73758 int weight;
73759
73760- WARN_ON(!sd || !sd->groups);
73761+ BUG_ON(!sd || !sd->groups);
73762
73763 if (cpu != group_first_cpu(sd->groups))
73764 return;
73765diff --git a/kernel/signal.c b/kernel/signal.c
73766index 2494827..cda80a0 100644
73767--- a/kernel/signal.c
73768+++ b/kernel/signal.c
73769@@ -41,12 +41,12 @@
73770
73771 static struct kmem_cache *sigqueue_cachep;
73772
73773-static void __user *sig_handler(struct task_struct *t, int sig)
73774+static __sighandler_t sig_handler(struct task_struct *t, int sig)
73775 {
73776 return t->sighand->action[sig - 1].sa.sa_handler;
73777 }
73778
73779-static int sig_handler_ignored(void __user *handler, int sig)
73780+static int sig_handler_ignored(__sighandler_t handler, int sig)
73781 {
73782 /* Is it explicitly or implicitly ignored? */
73783 return handler == SIG_IGN ||
73784@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
73785 static int sig_task_ignored(struct task_struct *t, int sig,
73786 int from_ancestor_ns)
73787 {
73788- void __user *handler;
73789+ __sighandler_t handler;
73790
73791 handler = sig_handler(t, sig);
73792
73793@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
73794 */
73795 user = get_uid(__task_cred(t)->user);
73796 atomic_inc(&user->sigpending);
73797+
73798+ if (!override_rlimit)
73799+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
73800 if (override_rlimit ||
73801 atomic_read(&user->sigpending) <=
73802 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
73803@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
73804
73805 int unhandled_signal(struct task_struct *tsk, int sig)
73806 {
73807- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
73808+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
73809 if (is_global_init(tsk))
73810 return 1;
73811 if (handler != SIG_IGN && handler != SIG_DFL)
73812@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
73813 }
73814 }
73815
73816+ /* allow glibc communication via tgkill to other threads in our
73817+ thread group */
73818+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
73819+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
73820+ && gr_handle_signal(t, sig))
73821+ return -EPERM;
73822+
73823 return security_task_kill(t, info, sig, 0);
73824 }
73825
73826@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73827 return send_signal(sig, info, p, 1);
73828 }
73829
73830-static int
73831+int
73832 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73833 {
73834 return send_signal(sig, info, t, 0);
73835@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73836 unsigned long int flags;
73837 int ret, blocked, ignored;
73838 struct k_sigaction *action;
73839+ int is_unhandled = 0;
73840
73841 spin_lock_irqsave(&t->sighand->siglock, flags);
73842 action = &t->sighand->action[sig-1];
73843@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73844 }
73845 if (action->sa.sa_handler == SIG_DFL)
73846 t->signal->flags &= ~SIGNAL_UNKILLABLE;
73847+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
73848+ is_unhandled = 1;
73849 ret = specific_send_sig_info(sig, info, t);
73850 spin_unlock_irqrestore(&t->sighand->siglock, flags);
73851
73852+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
73853+ normal operation */
73854+ if (is_unhandled) {
73855+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
73856+ gr_handle_crash(t, sig);
73857+ }
73858+
73859 return ret;
73860 }
73861
73862@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73863 {
73864 int ret = check_kill_permission(sig, info, p);
73865
73866- if (!ret && sig)
73867+ if (!ret && sig) {
73868 ret = do_send_sig_info(sig, info, p, true);
73869+ if (!ret)
73870+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
73871+ }
73872
73873 return ret;
73874 }
73875@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
73876 {
73877 siginfo_t info;
73878
73879+ pax_track_stack();
73880+
73881 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
73882
73883 memset(&info, 0, sizeof info);
73884@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
73885 int error = -ESRCH;
73886
73887 rcu_read_lock();
73888- p = find_task_by_vpid(pid);
73889+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73890+ /* allow glibc communication via tgkill to other threads in our
73891+ thread group */
73892+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
73893+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
73894+ p = find_task_by_vpid_unrestricted(pid);
73895+ else
73896+#endif
73897+ p = find_task_by_vpid(pid);
73898 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
73899 error = check_kill_permission(sig, info, p);
73900 /*
73901diff --git a/kernel/smp.c b/kernel/smp.c
73902index aa9cff3..631a0de 100644
73903--- a/kernel/smp.c
73904+++ b/kernel/smp.c
73905@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
73906 }
73907 EXPORT_SYMBOL(smp_call_function);
73908
73909-void ipi_call_lock(void)
73910+void ipi_call_lock(void) __acquires(call_function.lock)
73911 {
73912 spin_lock(&call_function.lock);
73913 }
73914
73915-void ipi_call_unlock(void)
73916+void ipi_call_unlock(void) __releases(call_function.lock)
73917 {
73918 spin_unlock(&call_function.lock);
73919 }
73920
73921-void ipi_call_lock_irq(void)
73922+void ipi_call_lock_irq(void) __acquires(call_function.lock)
73923 {
73924 spin_lock_irq(&call_function.lock);
73925 }
73926
73927-void ipi_call_unlock_irq(void)
73928+void ipi_call_unlock_irq(void) __releases(call_function.lock)
73929 {
73930 spin_unlock_irq(&call_function.lock);
73931 }
73932diff --git a/kernel/softirq.c b/kernel/softirq.c
73933index 04a0252..580c512 100644
73934--- a/kernel/softirq.c
73935+++ b/kernel/softirq.c
73936@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
73937
73938 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
73939
73940-char *softirq_to_name[NR_SOFTIRQS] = {
73941+const char * const softirq_to_name[NR_SOFTIRQS] = {
73942 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
73943 "TASKLET", "SCHED", "HRTIMER", "RCU"
73944 };
73945@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
73946
73947 asmlinkage void __do_softirq(void)
73948 {
73949- struct softirq_action *h;
73950+ const struct softirq_action *h;
73951 __u32 pending;
73952 int max_restart = MAX_SOFTIRQ_RESTART;
73953 int cpu;
73954@@ -233,7 +233,7 @@ restart:
73955 kstat_incr_softirqs_this_cpu(h - softirq_vec);
73956
73957 trace_softirq_entry(h, softirq_vec);
73958- h->action(h);
73959+ h->action();
73960 trace_softirq_exit(h, softirq_vec);
73961 if (unlikely(prev_count != preempt_count())) {
73962 printk(KERN_ERR "huh, entered softirq %td %s %p"
73963@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
73964 local_irq_restore(flags);
73965 }
73966
73967-void open_softirq(int nr, void (*action)(struct softirq_action *))
73968+void open_softirq(int nr, void (*action)(void))
73969 {
73970- softirq_vec[nr].action = action;
73971+ pax_open_kernel();
73972+ *(void **)&softirq_vec[nr].action = action;
73973+ pax_close_kernel();
73974 }
73975
73976 /*
73977@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
73978
73979 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
73980
73981-static void tasklet_action(struct softirq_action *a)
73982+static void tasklet_action(void)
73983 {
73984 struct tasklet_struct *list;
73985
73986@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
73987 }
73988 }
73989
73990-static void tasklet_hi_action(struct softirq_action *a)
73991+static void tasklet_hi_action(void)
73992 {
73993 struct tasklet_struct *list;
73994
73995diff --git a/kernel/sys.c b/kernel/sys.c
73996index e9512b1..3c265de 100644
73997--- a/kernel/sys.c
73998+++ b/kernel/sys.c
73999@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
74000 error = -EACCES;
74001 goto out;
74002 }
74003+
74004+ if (gr_handle_chroot_setpriority(p, niceval)) {
74005+ error = -EACCES;
74006+ goto out;
74007+ }
74008+
74009 no_nice = security_task_setnice(p, niceval);
74010 if (no_nice) {
74011 error = no_nice;
74012@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
74013 !(user = find_user(who)))
74014 goto out_unlock; /* No processes for this user */
74015
74016- do_each_thread(g, p)
74017+ do_each_thread(g, p) {
74018 if (__task_cred(p)->uid == who)
74019 error = set_one_prio(p, niceval, error);
74020- while_each_thread(g, p);
74021+ } while_each_thread(g, p);
74022 if (who != cred->uid)
74023 free_uid(user); /* For find_user() */
74024 break;
74025@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
74026 !(user = find_user(who)))
74027 goto out_unlock; /* No processes for this user */
74028
74029- do_each_thread(g, p)
74030+ do_each_thread(g, p) {
74031 if (__task_cred(p)->uid == who) {
74032 niceval = 20 - task_nice(p);
74033 if (niceval > retval)
74034 retval = niceval;
74035 }
74036- while_each_thread(g, p);
74037+ } while_each_thread(g, p);
74038 if (who != cred->uid)
74039 free_uid(user); /* for find_user() */
74040 break;
74041@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
74042 goto error;
74043 }
74044
74045+ if (gr_check_group_change(new->gid, new->egid, -1))
74046+ goto error;
74047+
74048 if (rgid != (gid_t) -1 ||
74049 (egid != (gid_t) -1 && egid != old->gid))
74050 new->sgid = new->egid;
74051@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
74052 goto error;
74053
74054 retval = -EPERM;
74055+
74056+ if (gr_check_group_change(gid, gid, gid))
74057+ goto error;
74058+
74059 if (capable(CAP_SETGID))
74060 new->gid = new->egid = new->sgid = new->fsgid = gid;
74061 else if (gid == old->gid || gid == old->sgid)
74062@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
74063 if (!new_user)
74064 return -EAGAIN;
74065
74066+ /*
74067+ * We don't fail in case of NPROC limit excess here because too many
74068+ * poorly written programs don't check set*uid() return code, assuming
74069+ * it never fails if called by root. We may still enforce NPROC limit
74070+ * for programs doing set*uid()+execve() by harmlessly deferring the
74071+ * failure to the execve() stage.
74072+ */
74073 if (atomic_read(&new_user->processes) >=
74074 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
74075- new_user != INIT_USER) {
74076- free_uid(new_user);
74077- return -EAGAIN;
74078- }
74079+ new_user != INIT_USER)
74080+ current->flags |= PF_NPROC_EXCEEDED;
74081+ else
74082+ current->flags &= ~PF_NPROC_EXCEEDED;
74083
74084 free_uid(new->user);
74085 new->user = new_user;
74086@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
74087 goto error;
74088 }
74089
74090+ if (gr_check_user_change(new->uid, new->euid, -1))
74091+ goto error;
74092+
74093 if (new->uid != old->uid) {
74094 retval = set_user(new);
74095 if (retval < 0)
74096@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
74097 goto error;
74098
74099 retval = -EPERM;
74100+
74101+ if (gr_check_crash_uid(uid))
74102+ goto error;
74103+ if (gr_check_user_change(uid, uid, uid))
74104+ goto error;
74105+
74106 if (capable(CAP_SETUID)) {
74107 new->suid = new->uid = uid;
74108 if (uid != old->uid) {
74109@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
74110 goto error;
74111 }
74112
74113+ if (gr_check_user_change(ruid, euid, -1))
74114+ goto error;
74115+
74116 if (ruid != (uid_t) -1) {
74117 new->uid = ruid;
74118 if (ruid != old->uid) {
74119@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
74120 goto error;
74121 }
74122
74123+ if (gr_check_group_change(rgid, egid, -1))
74124+ goto error;
74125+
74126 if (rgid != (gid_t) -1)
74127 new->gid = rgid;
74128 if (egid != (gid_t) -1)
74129@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
74130 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
74131 goto error;
74132
74133+ if (gr_check_user_change(-1, -1, uid))
74134+ goto error;
74135+
74136 if (uid == old->uid || uid == old->euid ||
74137 uid == old->suid || uid == old->fsuid ||
74138 capable(CAP_SETUID)) {
74139@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
74140 if (gid == old->gid || gid == old->egid ||
74141 gid == old->sgid || gid == old->fsgid ||
74142 capable(CAP_SETGID)) {
74143+ if (gr_check_group_change(-1, -1, gid))
74144+ goto error;
74145+
74146 if (gid != old_fsgid) {
74147 new->fsgid = gid;
74148 goto change_okay;
74149@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
74150 error = get_dumpable(me->mm);
74151 break;
74152 case PR_SET_DUMPABLE:
74153- if (arg2 < 0 || arg2 > 1) {
74154+ if (arg2 > 1) {
74155 error = -EINVAL;
74156 break;
74157 }
74158diff --git a/kernel/sysctl.c b/kernel/sysctl.c
74159index b8bd058..ab6a76be 100644
74160--- a/kernel/sysctl.c
74161+++ b/kernel/sysctl.c
74162@@ -63,6 +63,13 @@
74163 static int deprecated_sysctl_warning(struct __sysctl_args *args);
74164
74165 #if defined(CONFIG_SYSCTL)
74166+#include <linux/grsecurity.h>
74167+#include <linux/grinternal.h>
74168+
74169+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
74170+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74171+ const int op);
74172+extern int gr_handle_chroot_sysctl(const int op);
74173
74174 /* External variables not in a header file. */
74175 extern int C_A_D;
74176@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
74177 static int proc_taint(struct ctl_table *table, int write,
74178 void __user *buffer, size_t *lenp, loff_t *ppos);
74179 #endif
74180+extern ctl_table grsecurity_table[];
74181
74182 static struct ctl_table root_table[];
74183 static struct ctl_table_root sysctl_table_root;
74184@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
74185 int sysctl_legacy_va_layout;
74186 #endif
74187
74188+#ifdef CONFIG_PAX_SOFTMODE
74189+static ctl_table pax_table[] = {
74190+ {
74191+ .ctl_name = CTL_UNNUMBERED,
74192+ .procname = "softmode",
74193+ .data = &pax_softmode,
74194+ .maxlen = sizeof(unsigned int),
74195+ .mode = 0600,
74196+ .proc_handler = &proc_dointvec,
74197+ },
74198+
74199+ { .ctl_name = 0 }
74200+};
74201+#endif
74202+
74203 extern int prove_locking;
74204 extern int lock_stat;
74205
74206@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
74207 #endif
74208
74209 static struct ctl_table kern_table[] = {
74210+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
74211+ {
74212+ .ctl_name = CTL_UNNUMBERED,
74213+ .procname = "grsecurity",
74214+ .mode = 0500,
74215+ .child = grsecurity_table,
74216+ },
74217+#endif
74218+
74219+#ifdef CONFIG_PAX_SOFTMODE
74220+ {
74221+ .ctl_name = CTL_UNNUMBERED,
74222+ .procname = "pax",
74223+ .mode = 0500,
74224+ .child = pax_table,
74225+ },
74226+#endif
74227+
74228 {
74229 .ctl_name = CTL_UNNUMBERED,
74230 .procname = "sched_child_runs_first",
74231@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
74232 .data = &modprobe_path,
74233 .maxlen = KMOD_PATH_LEN,
74234 .mode = 0644,
74235- .proc_handler = &proc_dostring,
74236- .strategy = &sysctl_string,
74237+ .proc_handler = &proc_dostring_modpriv,
74238+ .strategy = &sysctl_string_modpriv,
74239 },
74240 {
74241 .ctl_name = CTL_UNNUMBERED,
74242@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
74243 .mode = 0644,
74244 .proc_handler = &proc_dointvec
74245 },
74246+ {
74247+ .procname = "heap_stack_gap",
74248+ .data = &sysctl_heap_stack_gap,
74249+ .maxlen = sizeof(sysctl_heap_stack_gap),
74250+ .mode = 0644,
74251+ .proc_handler = proc_doulongvec_minmax,
74252+ },
74253 #else
74254 {
74255 .ctl_name = CTL_UNNUMBERED,
74256@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
74257 return 0;
74258 }
74259
74260+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
74261+
74262 static int parse_table(int __user *name, int nlen,
74263 void __user *oldval, size_t __user *oldlenp,
74264 void __user *newval, size_t newlen,
74265@@ -1821,7 +1871,7 @@ repeat:
74266 if (n == table->ctl_name) {
74267 int error;
74268 if (table->child) {
74269- if (sysctl_perm(root, table, MAY_EXEC))
74270+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
74271 return -EPERM;
74272 name++;
74273 nlen--;
74274@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
74275 int error;
74276 int mode;
74277
74278+ if (table->parent != NULL && table->parent->procname != NULL &&
74279+ table->procname != NULL &&
74280+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
74281+ return -EACCES;
74282+ if (gr_handle_chroot_sysctl(op))
74283+ return -EACCES;
74284+ error = gr_handle_sysctl(table, op);
74285+ if (error)
74286+ return error;
74287+
74288+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74289+ if (error)
74290+ return error;
74291+
74292+ if (root->permissions)
74293+ mode = root->permissions(root, current->nsproxy, table);
74294+ else
74295+ mode = table->mode;
74296+
74297+ return test_perm(mode, op);
74298+}
74299+
74300+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
74301+{
74302+ int error;
74303+ int mode;
74304+
74305 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74306 if (error)
74307 return error;
74308@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
74309 buffer, lenp, ppos);
74310 }
74311
74312+int proc_dostring_modpriv(struct ctl_table *table, int write,
74313+ void __user *buffer, size_t *lenp, loff_t *ppos)
74314+{
74315+ if (write && !capable(CAP_SYS_MODULE))
74316+ return -EPERM;
74317+
74318+ return _proc_do_string(table->data, table->maxlen, write,
74319+ buffer, lenp, ppos);
74320+}
74321+
74322
74323 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
74324 int *valp,
74325@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
74326 vleft = table->maxlen / sizeof(unsigned long);
74327 left = *lenp;
74328
74329- for (; left && vleft--; i++, min++, max++, first=0) {
74330+ for (; left && vleft--; i++, first=0) {
74331 if (write) {
74332 while (left) {
74333 char c;
74334@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
74335 return -ENOSYS;
74336 }
74337
74338+int proc_dostring_modpriv(struct ctl_table *table, int write,
74339+ void __user *buffer, size_t *lenp, loff_t *ppos)
74340+{
74341+ return -ENOSYS;
74342+}
74343+
74344 int proc_dointvec(struct ctl_table *table, int write,
74345 void __user *buffer, size_t *lenp, loff_t *ppos)
74346 {
74347@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
74348 return 1;
74349 }
74350
74351+int sysctl_string_modpriv(struct ctl_table *table,
74352+ void __user *oldval, size_t __user *oldlenp,
74353+ void __user *newval, size_t newlen)
74354+{
74355+ if (newval && newlen && !capable(CAP_SYS_MODULE))
74356+ return -EPERM;
74357+
74358+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
74359+}
74360+
74361 /*
74362 * This function makes sure that all of the integers in the vector
74363 * are between the minimum and maximum values given in the arrays
74364@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
74365 return -ENOSYS;
74366 }
74367
74368+int sysctl_string_modpriv(struct ctl_table *table,
74369+ void __user *oldval, size_t __user *oldlenp,
74370+ void __user *newval, size_t newlen)
74371+{
74372+ return -ENOSYS;
74373+}
74374+
74375 int sysctl_intvec(struct ctl_table *table,
74376 void __user *oldval, size_t __user *oldlenp,
74377 void __user *newval, size_t newlen)
74378@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
74379 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
74380 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
74381 EXPORT_SYMBOL(proc_dostring);
74382+EXPORT_SYMBOL(proc_dostring_modpriv);
74383 EXPORT_SYMBOL(proc_doulongvec_minmax);
74384 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
74385 EXPORT_SYMBOL(register_sysctl_table);
74386@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
74387 EXPORT_SYMBOL(sysctl_jiffies);
74388 EXPORT_SYMBOL(sysctl_ms_jiffies);
74389 EXPORT_SYMBOL(sysctl_string);
74390+EXPORT_SYMBOL(sysctl_string_modpriv);
74391 EXPORT_SYMBOL(sysctl_data);
74392 EXPORT_SYMBOL(unregister_sysctl_table);
74393diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
74394index 469193c..ea3ecb2 100644
74395--- a/kernel/sysctl_check.c
74396+++ b/kernel/sysctl_check.c
74397@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
74398 } else {
74399 if ((table->strategy == sysctl_data) ||
74400 (table->strategy == sysctl_string) ||
74401+ (table->strategy == sysctl_string_modpriv) ||
74402 (table->strategy == sysctl_intvec) ||
74403 (table->strategy == sysctl_jiffies) ||
74404 (table->strategy == sysctl_ms_jiffies) ||
74405 (table->proc_handler == proc_dostring) ||
74406+ (table->proc_handler == proc_dostring_modpriv) ||
74407 (table->proc_handler == proc_dointvec) ||
74408 (table->proc_handler == proc_dointvec_minmax) ||
74409 (table->proc_handler == proc_dointvec_jiffies) ||
74410diff --git a/kernel/taskstats.c b/kernel/taskstats.c
74411index b080920..d344f89 100644
74412--- a/kernel/taskstats.c
74413+++ b/kernel/taskstats.c
74414@@ -26,9 +26,12 @@
74415 #include <linux/cgroup.h>
74416 #include <linux/fs.h>
74417 #include <linux/file.h>
74418+#include <linux/grsecurity.h>
74419 #include <net/genetlink.h>
74420 #include <asm/atomic.h>
74421
74422+extern int gr_is_taskstats_denied(int pid);
74423+
74424 /*
74425 * Maximum length of a cpumask that can be specified in
74426 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
74427@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
74428 size_t size;
74429 cpumask_var_t mask;
74430
74431+ if (gr_is_taskstats_denied(current->pid))
74432+ return -EACCES;
74433+
74434 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
74435 return -ENOMEM;
74436
74437diff --git a/kernel/time.c b/kernel/time.c
74438index 33df60e..ca768bd 100644
74439--- a/kernel/time.c
74440+++ b/kernel/time.c
74441@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
74442 return error;
74443
74444 if (tz) {
74445+ /* we log in do_settimeofday called below, so don't log twice
74446+ */
74447+ if (!tv)
74448+ gr_log_timechange();
74449+
74450 /* SMP safe, global irq locking makes it work. */
74451 sys_tz = *tz;
74452 update_vsyscall_tz();
74453@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
74454 * Avoid unnecessary multiplications/divisions in the
74455 * two most common HZ cases:
74456 */
74457-unsigned int inline jiffies_to_msecs(const unsigned long j)
74458+inline unsigned int jiffies_to_msecs(const unsigned long j)
74459 {
74460 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
74461 return (MSEC_PER_SEC / HZ) * j;
74462@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
74463 }
74464 EXPORT_SYMBOL(jiffies_to_msecs);
74465
74466-unsigned int inline jiffies_to_usecs(const unsigned long j)
74467+inline unsigned int jiffies_to_usecs(const unsigned long j)
74468 {
74469 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
74470 return (USEC_PER_SEC / HZ) * j;
74471diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
74472index 57b953f..06f149f 100644
74473--- a/kernel/time/tick-broadcast.c
74474+++ b/kernel/time/tick-broadcast.c
74475@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
74476 * then clear the broadcast bit.
74477 */
74478 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
74479- int cpu = smp_processor_id();
74480+ cpu = smp_processor_id();
74481
74482 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
74483 tick_broadcast_clear_oneshot(cpu);
74484diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
74485index 4a71cff..ffb5548 100644
74486--- a/kernel/time/timekeeping.c
74487+++ b/kernel/time/timekeeping.c
74488@@ -14,6 +14,7 @@
74489 #include <linux/init.h>
74490 #include <linux/mm.h>
74491 #include <linux/sched.h>
74492+#include <linux/grsecurity.h>
74493 #include <linux/sysdev.h>
74494 #include <linux/clocksource.h>
74495 #include <linux/jiffies.h>
74496@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
74497 */
74498 struct timespec ts = xtime;
74499 timespec_add_ns(&ts, nsec);
74500- ACCESS_ONCE(xtime_cache) = ts;
74501+ ACCESS_ONCE_RW(xtime_cache) = ts;
74502 }
74503
74504 /* must hold xtime_lock */
74505@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
74506 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
74507 return -EINVAL;
74508
74509+ gr_log_timechange();
74510+
74511 write_seqlock_irqsave(&xtime_lock, flags);
74512
74513 timekeeping_forward_now();
74514diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
74515index 54c0dda..e9095d9 100644
74516--- a/kernel/time/timer_list.c
74517+++ b/kernel/time/timer_list.c
74518@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
74519
74520 static void print_name_offset(struct seq_file *m, void *sym)
74521 {
74522+#ifdef CONFIG_GRKERNSEC_HIDESYM
74523+ SEQ_printf(m, "<%p>", NULL);
74524+#else
74525 char symname[KSYM_NAME_LEN];
74526
74527 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
74528 SEQ_printf(m, "<%p>", sym);
74529 else
74530 SEQ_printf(m, "%s", symname);
74531+#endif
74532 }
74533
74534 static void
74535@@ -112,7 +116,11 @@ next_one:
74536 static void
74537 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
74538 {
74539+#ifdef CONFIG_GRKERNSEC_HIDESYM
74540+ SEQ_printf(m, " .base: %p\n", NULL);
74541+#else
74542 SEQ_printf(m, " .base: %p\n", base);
74543+#endif
74544 SEQ_printf(m, " .index: %d\n",
74545 base->index);
74546 SEQ_printf(m, " .resolution: %Lu nsecs\n",
74547@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
74548 {
74549 struct proc_dir_entry *pe;
74550
74551+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74552+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
74553+#else
74554 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
74555+#endif
74556 if (!pe)
74557 return -ENOMEM;
74558 return 0;
74559diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
74560index ee5681f..634089b 100644
74561--- a/kernel/time/timer_stats.c
74562+++ b/kernel/time/timer_stats.c
74563@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
74564 static unsigned long nr_entries;
74565 static struct entry entries[MAX_ENTRIES];
74566
74567-static atomic_t overflow_count;
74568+static atomic_unchecked_t overflow_count;
74569
74570 /*
74571 * The entries are in a hash-table, for fast lookup:
74572@@ -140,7 +140,7 @@ static void reset_entries(void)
74573 nr_entries = 0;
74574 memset(entries, 0, sizeof(entries));
74575 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
74576- atomic_set(&overflow_count, 0);
74577+ atomic_set_unchecked(&overflow_count, 0);
74578 }
74579
74580 static struct entry *alloc_entry(void)
74581@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74582 if (likely(entry))
74583 entry->count++;
74584 else
74585- atomic_inc(&overflow_count);
74586+ atomic_inc_unchecked(&overflow_count);
74587
74588 out_unlock:
74589 spin_unlock_irqrestore(lock, flags);
74590@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74591
74592 static void print_name_offset(struct seq_file *m, unsigned long addr)
74593 {
74594+#ifdef CONFIG_GRKERNSEC_HIDESYM
74595+ seq_printf(m, "<%p>", NULL);
74596+#else
74597 char symname[KSYM_NAME_LEN];
74598
74599 if (lookup_symbol_name(addr, symname) < 0)
74600 seq_printf(m, "<%p>", (void *)addr);
74601 else
74602 seq_printf(m, "%s", symname);
74603+#endif
74604 }
74605
74606 static int tstats_show(struct seq_file *m, void *v)
74607@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
74608
74609 seq_puts(m, "Timer Stats Version: v0.2\n");
74610 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
74611- if (atomic_read(&overflow_count))
74612+ if (atomic_read_unchecked(&overflow_count))
74613 seq_printf(m, "Overflow: %d entries\n",
74614- atomic_read(&overflow_count));
74615+ atomic_read_unchecked(&overflow_count));
74616
74617 for (i = 0; i < nr_entries; i++) {
74618 entry = entries + i;
74619@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
74620 {
74621 struct proc_dir_entry *pe;
74622
74623+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74624+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
74625+#else
74626 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
74627+#endif
74628 if (!pe)
74629 return -ENOMEM;
74630 return 0;
74631diff --git a/kernel/timer.c b/kernel/timer.c
74632index cb3c1f1..8bf5526 100644
74633--- a/kernel/timer.c
74634+++ b/kernel/timer.c
74635@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
74636 /*
74637 * This function runs timers and the timer-tq in bottom half context.
74638 */
74639-static void run_timer_softirq(struct softirq_action *h)
74640+static void run_timer_softirq(void)
74641 {
74642 struct tvec_base *base = __get_cpu_var(tvec_bases);
74643
74644diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
74645index d9d6206..f19467e 100644
74646--- a/kernel/trace/blktrace.c
74647+++ b/kernel/trace/blktrace.c
74648@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
74649 struct blk_trace *bt = filp->private_data;
74650 char buf[16];
74651
74652- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
74653+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
74654
74655 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
74656 }
74657@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
74658 return 1;
74659
74660 bt = buf->chan->private_data;
74661- atomic_inc(&bt->dropped);
74662+ atomic_inc_unchecked(&bt->dropped);
74663 return 0;
74664 }
74665
74666@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
74667
74668 bt->dir = dir;
74669 bt->dev = dev;
74670- atomic_set(&bt->dropped, 0);
74671+ atomic_set_unchecked(&bt->dropped, 0);
74672
74673 ret = -EIO;
74674 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
74675diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
74676index 4872937..c794d40 100644
74677--- a/kernel/trace/ftrace.c
74678+++ b/kernel/trace/ftrace.c
74679@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
74680
74681 ip = rec->ip;
74682
74683+ ret = ftrace_arch_code_modify_prepare();
74684+ FTRACE_WARN_ON(ret);
74685+ if (ret)
74686+ return 0;
74687+
74688 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
74689+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
74690 if (ret) {
74691 ftrace_bug(ret, ip);
74692 rec->flags |= FTRACE_FL_FAILED;
74693- return 0;
74694 }
74695- return 1;
74696+ return ret ? 0 : 1;
74697 }
74698
74699 /*
74700diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
74701index e749a05..19c6e94 100644
74702--- a/kernel/trace/ring_buffer.c
74703+++ b/kernel/trace/ring_buffer.c
74704@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
74705 * the reader page). But if the next page is a header page,
74706 * its flags will be non zero.
74707 */
74708-static int inline
74709+static inline int
74710 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
74711 struct buffer_page *page, struct list_head *list)
74712 {
74713diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
74714index a2a2d1f..7f32b09 100644
74715--- a/kernel/trace/trace.c
74716+++ b/kernel/trace/trace.c
74717@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
74718 size_t rem;
74719 unsigned int i;
74720
74721+ pax_track_stack();
74722+
74723 /* copy the tracer to avoid using a global lock all around */
74724 mutex_lock(&trace_types_lock);
74725 if (unlikely(old_tracer != current_trace && current_trace)) {
74726@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
74727 int entries, size, i;
74728 size_t ret;
74729
74730+ pax_track_stack();
74731+
74732 if (*ppos & (PAGE_SIZE - 1)) {
74733 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
74734 return -EINVAL;
74735@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
74736 };
74737 #endif
74738
74739-static struct dentry *d_tracer;
74740-
74741 struct dentry *tracing_init_dentry(void)
74742 {
74743+ static struct dentry *d_tracer;
74744 static int once;
74745
74746 if (d_tracer)
74747@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
74748 return d_tracer;
74749 }
74750
74751-static struct dentry *d_percpu;
74752-
74753 struct dentry *tracing_dentry_percpu(void)
74754 {
74755+ static struct dentry *d_percpu;
74756 static int once;
74757 struct dentry *d_tracer;
74758
74759diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
74760index d128f65..f37b4af 100644
74761--- a/kernel/trace/trace_events.c
74762+++ b/kernel/trace/trace_events.c
74763@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
74764 * Modules must own their file_operations to keep up with
74765 * reference counting.
74766 */
74767+
74768 struct ftrace_module_file_ops {
74769 struct list_head list;
74770 struct module *mod;
74771- struct file_operations id;
74772- struct file_operations enable;
74773- struct file_operations format;
74774- struct file_operations filter;
74775 };
74776
74777 static void remove_subsystem_dir(const char *name)
74778@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
74779
74780 file_ops->mod = mod;
74781
74782- file_ops->id = ftrace_event_id_fops;
74783- file_ops->id.owner = mod;
74784-
74785- file_ops->enable = ftrace_enable_fops;
74786- file_ops->enable.owner = mod;
74787-
74788- file_ops->filter = ftrace_event_filter_fops;
74789- file_ops->filter.owner = mod;
74790-
74791- file_ops->format = ftrace_event_format_fops;
74792- file_ops->format.owner = mod;
74793+ pax_open_kernel();
74794+ *(void **)&mod->trace_id.owner = mod;
74795+ *(void **)&mod->trace_enable.owner = mod;
74796+ *(void **)&mod->trace_filter.owner = mod;
74797+ *(void **)&mod->trace_format.owner = mod;
74798+ pax_close_kernel();
74799
74800 list_add(&file_ops->list, &ftrace_module_file_list);
74801
74802@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
74803 call->mod = mod;
74804 list_add(&call->list, &ftrace_events);
74805 event_create_dir(call, d_events,
74806- &file_ops->id, &file_ops->enable,
74807- &file_ops->filter, &file_ops->format);
74808+ &mod->trace_id, &mod->trace_enable,
74809+ &mod->trace_filter, &mod->trace_format);
74810 }
74811 }
74812
74813diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
74814index 0acd834..b800b56 100644
74815--- a/kernel/trace/trace_mmiotrace.c
74816+++ b/kernel/trace/trace_mmiotrace.c
74817@@ -23,7 +23,7 @@ struct header_iter {
74818 static struct trace_array *mmio_trace_array;
74819 static bool overrun_detected;
74820 static unsigned long prev_overruns;
74821-static atomic_t dropped_count;
74822+static atomic_unchecked_t dropped_count;
74823
74824 static void mmio_reset_data(struct trace_array *tr)
74825 {
74826@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
74827
74828 static unsigned long count_overruns(struct trace_iterator *iter)
74829 {
74830- unsigned long cnt = atomic_xchg(&dropped_count, 0);
74831+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
74832 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
74833
74834 if (over > prev_overruns)
74835@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
74836 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
74837 sizeof(*entry), 0, pc);
74838 if (!event) {
74839- atomic_inc(&dropped_count);
74840+ atomic_inc_unchecked(&dropped_count);
74841 return;
74842 }
74843 entry = ring_buffer_event_data(event);
74844@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
74845 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
74846 sizeof(*entry), 0, pc);
74847 if (!event) {
74848- atomic_inc(&dropped_count);
74849+ atomic_inc_unchecked(&dropped_count);
74850 return;
74851 }
74852 entry = ring_buffer_event_data(event);
74853diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
74854index b6c12c6..41fdc53 100644
74855--- a/kernel/trace/trace_output.c
74856+++ b/kernel/trace/trace_output.c
74857@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
74858 return 0;
74859 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
74860 if (!IS_ERR(p)) {
74861- p = mangle_path(s->buffer + s->len, p, "\n");
74862+ p = mangle_path(s->buffer + s->len, p, "\n\\");
74863 if (p) {
74864 s->len = p - s->buffer;
74865 return 1;
74866diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
74867index 8504ac7..ecf0adb 100644
74868--- a/kernel/trace/trace_stack.c
74869+++ b/kernel/trace/trace_stack.c
74870@@ -50,7 +50,7 @@ static inline void check_stack(void)
74871 return;
74872
74873 /* we do not handle interrupt stacks yet */
74874- if (!object_is_on_stack(&this_size))
74875+ if (!object_starts_on_stack(&this_size))
74876 return;
74877
74878 local_irq_save(flags);
74879diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
74880index 40cafb0..d5ead43 100644
74881--- a/kernel/trace/trace_workqueue.c
74882+++ b/kernel/trace/trace_workqueue.c
74883@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
74884 int cpu;
74885 pid_t pid;
74886 /* Can be inserted from interrupt or user context, need to be atomic */
74887- atomic_t inserted;
74888+ atomic_unchecked_t inserted;
74889 /*
74890 * Don't need to be atomic, works are serialized in a single workqueue thread
74891 * on a single CPU.
74892@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
74893 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
74894 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
74895 if (node->pid == wq_thread->pid) {
74896- atomic_inc(&node->inserted);
74897+ atomic_inc_unchecked(&node->inserted);
74898 goto found;
74899 }
74900 }
74901@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
74902 tsk = get_pid_task(pid, PIDTYPE_PID);
74903 if (tsk) {
74904 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
74905- atomic_read(&cws->inserted), cws->executed,
74906+ atomic_read_unchecked(&cws->inserted), cws->executed,
74907 tsk->comm);
74908 put_task_struct(tsk);
74909 }
74910diff --git a/kernel/user.c b/kernel/user.c
74911index 1b91701..8795237 100644
74912--- a/kernel/user.c
74913+++ b/kernel/user.c
74914@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
74915 spin_lock_irq(&uidhash_lock);
74916 up = uid_hash_find(uid, hashent);
74917 if (up) {
74918+ put_user_ns(ns);
74919 key_put(new->uid_keyring);
74920 key_put(new->session_keyring);
74921 kmem_cache_free(uid_cachep, new);
74922diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
74923index 234ceb1..ad74049 100644
74924--- a/lib/Kconfig.debug
74925+++ b/lib/Kconfig.debug
74926@@ -905,7 +905,7 @@ config LATENCYTOP
74927 select STACKTRACE
74928 select SCHEDSTATS
74929 select SCHED_DEBUG
74930- depends on HAVE_LATENCYTOP_SUPPORT
74931+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
74932 help
74933 Enable this option if you want to use the LatencyTOP tool
74934 to find out which userspace is blocking on what kernel operations.
74935diff --git a/lib/bitmap.c b/lib/bitmap.c
74936index 7025658..8d14cab 100644
74937--- a/lib/bitmap.c
74938+++ b/lib/bitmap.c
74939@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
74940 {
74941 int c, old_c, totaldigits, ndigits, nchunks, nbits;
74942 u32 chunk;
74943- const char __user *ubuf = buf;
74944+ const char __user *ubuf = (const char __force_user *)buf;
74945
74946 bitmap_zero(maskp, nmaskbits);
74947
74948@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
74949 {
74950 if (!access_ok(VERIFY_READ, ubuf, ulen))
74951 return -EFAULT;
74952- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
74953+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
74954 }
74955 EXPORT_SYMBOL(bitmap_parse_user);
74956
74957diff --git a/lib/bug.c b/lib/bug.c
74958index 300e41a..2779eb0 100644
74959--- a/lib/bug.c
74960+++ b/lib/bug.c
74961@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
74962 return BUG_TRAP_TYPE_NONE;
74963
74964 bug = find_bug(bugaddr);
74965+ if (!bug)
74966+ return BUG_TRAP_TYPE_NONE;
74967
74968 printk(KERN_EMERG "------------[ cut here ]------------\n");
74969
74970diff --git a/lib/debugobjects.c b/lib/debugobjects.c
74971index 2b413db..e21d207 100644
74972--- a/lib/debugobjects.c
74973+++ b/lib/debugobjects.c
74974@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
74975 if (limit > 4)
74976 return;
74977
74978- is_on_stack = object_is_on_stack(addr);
74979+ is_on_stack = object_starts_on_stack(addr);
74980 if (is_on_stack == onstack)
74981 return;
74982
74983diff --git a/lib/devres.c b/lib/devres.c
74984index 72c8909..7543868 100644
74985--- a/lib/devres.c
74986+++ b/lib/devres.c
74987@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
74988 {
74989 iounmap(addr);
74990 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
74991- (void *)addr));
74992+ (void __force *)addr));
74993 }
74994 EXPORT_SYMBOL(devm_iounmap);
74995
74996@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
74997 {
74998 ioport_unmap(addr);
74999 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
75000- devm_ioport_map_match, (void *)addr));
75001+ devm_ioport_map_match, (void __force *)addr));
75002 }
75003 EXPORT_SYMBOL(devm_ioport_unmap);
75004
75005diff --git a/lib/dma-debug.c b/lib/dma-debug.c
75006index 084e879..0674448 100644
75007--- a/lib/dma-debug.c
75008+++ b/lib/dma-debug.c
75009@@ -861,7 +861,7 @@ out:
75010
75011 static void check_for_stack(struct device *dev, void *addr)
75012 {
75013- if (object_is_on_stack(addr))
75014+ if (object_starts_on_stack(addr))
75015 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
75016 "stack [addr=%p]\n", addr);
75017 }
75018diff --git a/lib/idr.c b/lib/idr.c
75019index eda7ba3..915dfae 100644
75020--- a/lib/idr.c
75021+++ b/lib/idr.c
75022@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
75023 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
75024
75025 /* if already at the top layer, we need to grow */
75026- if (id >= 1 << (idp->layers * IDR_BITS)) {
75027+ if (id >= (1 << (idp->layers * IDR_BITS))) {
75028 *starting_id = id;
75029 return IDR_NEED_TO_GROW;
75030 }
75031diff --git a/lib/inflate.c b/lib/inflate.c
75032index d102559..4215f31 100644
75033--- a/lib/inflate.c
75034+++ b/lib/inflate.c
75035@@ -266,7 +266,7 @@ static void free(void *where)
75036 malloc_ptr = free_mem_ptr;
75037 }
75038 #else
75039-#define malloc(a) kmalloc(a, GFP_KERNEL)
75040+#define malloc(a) kmalloc((a), GFP_KERNEL)
75041 #define free(a) kfree(a)
75042 #endif
75043
75044diff --git a/lib/kobject.c b/lib/kobject.c
75045index b512b74..8115eb1 100644
75046--- a/lib/kobject.c
75047+++ b/lib/kobject.c
75048@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
75049 return ret;
75050 }
75051
75052-struct sysfs_ops kobj_sysfs_ops = {
75053+const struct sysfs_ops kobj_sysfs_ops = {
75054 .show = kobj_attr_show,
75055 .store = kobj_attr_store,
75056 };
75057@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
75058 * If the kset was not able to be created, NULL will be returned.
75059 */
75060 static struct kset *kset_create(const char *name,
75061- struct kset_uevent_ops *uevent_ops,
75062+ const struct kset_uevent_ops *uevent_ops,
75063 struct kobject *parent_kobj)
75064 {
75065 struct kset *kset;
75066@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
75067 * If the kset was not able to be created, NULL will be returned.
75068 */
75069 struct kset *kset_create_and_add(const char *name,
75070- struct kset_uevent_ops *uevent_ops,
75071+ const struct kset_uevent_ops *uevent_ops,
75072 struct kobject *parent_kobj)
75073 {
75074 struct kset *kset;
75075diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
75076index 507b821..0bf8ed0 100644
75077--- a/lib/kobject_uevent.c
75078+++ b/lib/kobject_uevent.c
75079@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
75080 const char *subsystem;
75081 struct kobject *top_kobj;
75082 struct kset *kset;
75083- struct kset_uevent_ops *uevent_ops;
75084+ const struct kset_uevent_ops *uevent_ops;
75085 u64 seq;
75086 int i = 0;
75087 int retval = 0;
75088diff --git a/lib/kref.c b/lib/kref.c
75089index 9ecd6e8..12c94c1 100644
75090--- a/lib/kref.c
75091+++ b/lib/kref.c
75092@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
75093 */
75094 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
75095 {
75096- WARN_ON(release == NULL);
75097+ BUG_ON(release == NULL);
75098 WARN_ON(release == (void (*)(struct kref *))kfree);
75099
75100 if (atomic_dec_and_test(&kref->refcount)) {
75101diff --git a/lib/parser.c b/lib/parser.c
75102index b00d020..1b34325 100644
75103--- a/lib/parser.c
75104+++ b/lib/parser.c
75105@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
75106 char *buf;
75107 int ret;
75108
75109- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
75110+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
75111 if (!buf)
75112 return -ENOMEM;
75113 memcpy(buf, s->from, s->to - s->from);
75114diff --git a/lib/radix-tree.c b/lib/radix-tree.c
75115index 92cdd99..a8149d7 100644
75116--- a/lib/radix-tree.c
75117+++ b/lib/radix-tree.c
75118@@ -81,7 +81,7 @@ struct radix_tree_preload {
75119 int nr;
75120 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
75121 };
75122-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
75123+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
75124
75125 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
75126 {
75127diff --git a/lib/random32.c b/lib/random32.c
75128index 217d5c4..45aba8a 100644
75129--- a/lib/random32.c
75130+++ b/lib/random32.c
75131@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
75132 */
75133 static inline u32 __seed(u32 x, u32 m)
75134 {
75135- return (x < m) ? x + m : x;
75136+ return (x <= m) ? x + m + 1 : x;
75137 }
75138
75139 /**
75140diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75141index 33bed5e..1477e46 100644
75142--- a/lib/vsprintf.c
75143+++ b/lib/vsprintf.c
75144@@ -16,6 +16,9 @@
75145 * - scnprintf and vscnprintf
75146 */
75147
75148+#ifdef CONFIG_GRKERNSEC_HIDESYM
75149+#define __INCLUDED_BY_HIDESYM 1
75150+#endif
75151 #include <stdarg.h>
75152 #include <linux/module.h>
75153 #include <linux/types.h>
75154@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
75155 return buf;
75156 }
75157
75158-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
75159+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
75160 {
75161 int len, i;
75162
75163 if ((unsigned long)s < PAGE_SIZE)
75164- s = "<NULL>";
75165+ s = "(null)";
75166
75167 len = strnlen(s, spec.precision);
75168
75169@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
75170 unsigned long value = (unsigned long) ptr;
75171 #ifdef CONFIG_KALLSYMS
75172 char sym[KSYM_SYMBOL_LEN];
75173- if (ext != 'f' && ext != 's')
75174+ if (ext != 'f' && ext != 's' && ext != 'a')
75175 sprint_symbol(sym, value);
75176 else
75177 kallsyms_lookup(value, NULL, NULL, NULL, sym);
75178@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
75179 * - 'f' For simple symbolic function names without offset
75180 * - 'S' For symbolic direct pointers with offset
75181 * - 's' For symbolic direct pointers without offset
75182+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75183+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75184 * - 'R' For a struct resource pointer, it prints the range of
75185 * addresses (not the name nor the flags)
75186 * - 'M' For a 6-byte MAC address, it prints the address in the
75187@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75188 struct printf_spec spec)
75189 {
75190 if (!ptr)
75191- return string(buf, end, "(null)", spec);
75192+ return string(buf, end, "(nil)", spec);
75193
75194 switch (*fmt) {
75195 case 'F':
75196@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75197 case 's':
75198 /* Fallthrough */
75199 case 'S':
75200+#ifdef CONFIG_GRKERNSEC_HIDESYM
75201+ break;
75202+#else
75203+ return symbol_string(buf, end, ptr, spec, *fmt);
75204+#endif
75205+ case 'a':
75206+ /* Fallthrough */
75207+ case 'A':
75208 return symbol_string(buf, end, ptr, spec, *fmt);
75209 case 'R':
75210 return resource_string(buf, end, ptr, spec);
75211@@ -1445,7 +1458,7 @@ do { \
75212 size_t len;
75213 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
75214 || (unsigned long)save_str < PAGE_SIZE)
75215- save_str = "<NULL>";
75216+ save_str = "(null)";
75217 len = strlen(save_str);
75218 if (str + len + 1 < end)
75219 memcpy(str, save_str, len + 1);
75220@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75221 typeof(type) value; \
75222 if (sizeof(type) == 8) { \
75223 args = PTR_ALIGN(args, sizeof(u32)); \
75224- *(u32 *)&value = *(u32 *)args; \
75225- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75226+ *(u32 *)&value = *(const u32 *)args; \
75227+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75228 } else { \
75229 args = PTR_ALIGN(args, sizeof(type)); \
75230- value = *(typeof(type) *)args; \
75231+ value = *(const typeof(type) *)args; \
75232 } \
75233 args += sizeof(type); \
75234 value; \
75235@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75236 const char *str_arg = args;
75237 size_t len = strlen(str_arg);
75238 args += len + 1;
75239- str = string(str, end, (char *)str_arg, spec);
75240+ str = string(str, end, str_arg, spec);
75241 break;
75242 }
75243
75244diff --git a/localversion-grsec b/localversion-grsec
75245new file mode 100644
75246index 0000000..7cd6065
75247--- /dev/null
75248+++ b/localversion-grsec
75249@@ -0,0 +1 @@
75250+-grsec
75251diff --git a/mm/Kconfig b/mm/Kconfig
75252index 2c19c0b..f3c3f83 100644
75253--- a/mm/Kconfig
75254+++ b/mm/Kconfig
75255@@ -228,7 +228,7 @@ config KSM
75256 config DEFAULT_MMAP_MIN_ADDR
75257 int "Low address space to protect from user allocation"
75258 depends on MMU
75259- default 4096
75260+ default 65536
75261 help
75262 This is the portion of low virtual memory which should be protected
75263 from userspace allocation. Keeping a user from writing to low pages
75264diff --git a/mm/backing-dev.c b/mm/backing-dev.c
75265index 67a33a5..094dcf1 100644
75266--- a/mm/backing-dev.c
75267+++ b/mm/backing-dev.c
75268@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
75269 list_add_tail_rcu(&wb->list, &bdi->wb_list);
75270 spin_unlock(&bdi->wb_lock);
75271
75272- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
75273+ tsk->flags |= PF_SWAPWRITE;
75274 set_freezable();
75275
75276 /*
75277@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
75278 * Add the default flusher task that gets created for any bdi
75279 * that has dirty data pending writeout
75280 */
75281-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75282+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75283 {
75284 if (!bdi_cap_writeback_dirty(bdi))
75285 return;
75286diff --git a/mm/filemap.c b/mm/filemap.c
75287index 9e0826e..4ee8f13 100644
75288--- a/mm/filemap.c
75289+++ b/mm/filemap.c
75290@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
75291 struct address_space *mapping = file->f_mapping;
75292
75293 if (!mapping->a_ops->readpage)
75294- return -ENOEXEC;
75295+ return -ENODEV;
75296 file_accessed(file);
75297 vma->vm_ops = &generic_file_vm_ops;
75298 vma->vm_flags |= VM_CAN_NONLINEAR;
75299@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
75300 *pos = i_size_read(inode);
75301
75302 if (limit != RLIM_INFINITY) {
75303+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
75304 if (*pos >= limit) {
75305 send_sig(SIGXFSZ, current, 0);
75306 return -EFBIG;
75307diff --git a/mm/fremap.c b/mm/fremap.c
75308index b6ec85a..a24ac22 100644
75309--- a/mm/fremap.c
75310+++ b/mm/fremap.c
75311@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75312 retry:
75313 vma = find_vma(mm, start);
75314
75315+#ifdef CONFIG_PAX_SEGMEXEC
75316+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
75317+ goto out;
75318+#endif
75319+
75320 /*
75321 * Make sure the vma is shared, that it supports prefaulting,
75322 * and that the remapped range is valid and fully within
75323@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75324 /*
75325 * drop PG_Mlocked flag for over-mapped range
75326 */
75327- unsigned int saved_flags = vma->vm_flags;
75328+ unsigned long saved_flags = vma->vm_flags;
75329 munlock_vma_pages_range(vma, start, start + size);
75330 vma->vm_flags = saved_flags;
75331 }
75332diff --git a/mm/highmem.c b/mm/highmem.c
75333index 9c1e627..5ca9447 100644
75334--- a/mm/highmem.c
75335+++ b/mm/highmem.c
75336@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
75337 * So no dangers, even with speculative execution.
75338 */
75339 page = pte_page(pkmap_page_table[i]);
75340+ pax_open_kernel();
75341 pte_clear(&init_mm, (unsigned long)page_address(page),
75342 &pkmap_page_table[i]);
75343-
75344+ pax_close_kernel();
75345 set_page_address(page, NULL);
75346 need_flush = 1;
75347 }
75348@@ -177,9 +178,11 @@ start:
75349 }
75350 }
75351 vaddr = PKMAP_ADDR(last_pkmap_nr);
75352+
75353+ pax_open_kernel();
75354 set_pte_at(&init_mm, vaddr,
75355 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
75356-
75357+ pax_close_kernel();
75358 pkmap_count[last_pkmap_nr] = 1;
75359 set_page_address(page, (void *)vaddr);
75360
75361diff --git a/mm/hugetlb.c b/mm/hugetlb.c
75362index 5e1e508..9f0ebad 100644
75363--- a/mm/hugetlb.c
75364+++ b/mm/hugetlb.c
75365@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
75366 return 1;
75367 }
75368
75369+#ifdef CONFIG_PAX_SEGMEXEC
75370+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
75371+{
75372+ struct mm_struct *mm = vma->vm_mm;
75373+ struct vm_area_struct *vma_m;
75374+ unsigned long address_m;
75375+ pte_t *ptep_m;
75376+
75377+ vma_m = pax_find_mirror_vma(vma);
75378+ if (!vma_m)
75379+ return;
75380+
75381+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75382+ address_m = address + SEGMEXEC_TASK_SIZE;
75383+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
75384+ get_page(page_m);
75385+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
75386+}
75387+#endif
75388+
75389 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
75390 unsigned long address, pte_t *ptep, pte_t pte,
75391 struct page *pagecache_page)
75392@@ -2004,6 +2024,11 @@ retry_avoidcopy:
75393 huge_ptep_clear_flush(vma, address, ptep);
75394 set_huge_pte_at(mm, address, ptep,
75395 make_huge_pte(vma, new_page, 1));
75396+
75397+#ifdef CONFIG_PAX_SEGMEXEC
75398+ pax_mirror_huge_pte(vma, address, new_page);
75399+#endif
75400+
75401 /* Make the old page be freed below */
75402 new_page = old_page;
75403 }
75404@@ -2135,6 +2160,10 @@ retry:
75405 && (vma->vm_flags & VM_SHARED)));
75406 set_huge_pte_at(mm, address, ptep, new_pte);
75407
75408+#ifdef CONFIG_PAX_SEGMEXEC
75409+ pax_mirror_huge_pte(vma, address, page);
75410+#endif
75411+
75412 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
75413 /* Optimization, do the COW without a second fault */
75414 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
75415@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75416 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
75417 struct hstate *h = hstate_vma(vma);
75418
75419+#ifdef CONFIG_PAX_SEGMEXEC
75420+ struct vm_area_struct *vma_m;
75421+
75422+ vma_m = pax_find_mirror_vma(vma);
75423+ if (vma_m) {
75424+ unsigned long address_m;
75425+
75426+ if (vma->vm_start > vma_m->vm_start) {
75427+ address_m = address;
75428+ address -= SEGMEXEC_TASK_SIZE;
75429+ vma = vma_m;
75430+ h = hstate_vma(vma);
75431+ } else
75432+ address_m = address + SEGMEXEC_TASK_SIZE;
75433+
75434+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
75435+ return VM_FAULT_OOM;
75436+ address_m &= HPAGE_MASK;
75437+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
75438+ }
75439+#endif
75440+
75441 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
75442 if (!ptep)
75443 return VM_FAULT_OOM;
75444diff --git a/mm/internal.h b/mm/internal.h
75445index f03e8e2..7354343 100644
75446--- a/mm/internal.h
75447+++ b/mm/internal.h
75448@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
75449 * in mm/page_alloc.c
75450 */
75451 extern void __free_pages_bootmem(struct page *page, unsigned int order);
75452+extern void free_compound_page(struct page *page);
75453 extern void prep_compound_page(struct page *page, unsigned long order);
75454
75455
75456diff --git a/mm/kmemleak.c b/mm/kmemleak.c
75457index c346660..b47382f 100644
75458--- a/mm/kmemleak.c
75459+++ b/mm/kmemleak.c
75460@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
75461
75462 for (i = 0; i < object->trace_len; i++) {
75463 void *ptr = (void *)object->trace[i];
75464- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
75465+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
75466 }
75467 }
75468
75469diff --git a/mm/maccess.c b/mm/maccess.c
75470index 9073695..1127f348 100644
75471--- a/mm/maccess.c
75472+++ b/mm/maccess.c
75473@@ -14,7 +14,7 @@
75474 * Safely read from address @src to the buffer at @dst. If a kernel fault
75475 * happens, handle that and return -EFAULT.
75476 */
75477-long probe_kernel_read(void *dst, void *src, size_t size)
75478+long probe_kernel_read(void *dst, const void *src, size_t size)
75479 {
75480 long ret;
75481 mm_segment_t old_fs = get_fs();
75482@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
75483 set_fs(KERNEL_DS);
75484 pagefault_disable();
75485 ret = __copy_from_user_inatomic(dst,
75486- (__force const void __user *)src, size);
75487+ (const void __force_user *)src, size);
75488 pagefault_enable();
75489 set_fs(old_fs);
75490
75491@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
75492 * Safely write to address @dst from the buffer at @src. If a kernel fault
75493 * happens, handle that and return -EFAULT.
75494 */
75495-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
75496+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
75497 {
75498 long ret;
75499 mm_segment_t old_fs = get_fs();
75500
75501 set_fs(KERNEL_DS);
75502 pagefault_disable();
75503- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
75504+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
75505 pagefault_enable();
75506 set_fs(old_fs);
75507
75508diff --git a/mm/madvise.c b/mm/madvise.c
75509index 35b1479..499f7d4 100644
75510--- a/mm/madvise.c
75511+++ b/mm/madvise.c
75512@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
75513 pgoff_t pgoff;
75514 unsigned long new_flags = vma->vm_flags;
75515
75516+#ifdef CONFIG_PAX_SEGMEXEC
75517+ struct vm_area_struct *vma_m;
75518+#endif
75519+
75520 switch (behavior) {
75521 case MADV_NORMAL:
75522 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
75523@@ -103,6 +107,13 @@ success:
75524 /*
75525 * vm_flags is protected by the mmap_sem held in write mode.
75526 */
75527+
75528+#ifdef CONFIG_PAX_SEGMEXEC
75529+ vma_m = pax_find_mirror_vma(vma);
75530+ if (vma_m)
75531+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
75532+#endif
75533+
75534 vma->vm_flags = new_flags;
75535
75536 out:
75537@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75538 struct vm_area_struct ** prev,
75539 unsigned long start, unsigned long end)
75540 {
75541+
75542+#ifdef CONFIG_PAX_SEGMEXEC
75543+ struct vm_area_struct *vma_m;
75544+#endif
75545+
75546 *prev = vma;
75547 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
75548 return -EINVAL;
75549@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75550 zap_page_range(vma, start, end - start, &details);
75551 } else
75552 zap_page_range(vma, start, end - start, NULL);
75553+
75554+#ifdef CONFIG_PAX_SEGMEXEC
75555+ vma_m = pax_find_mirror_vma(vma);
75556+ if (vma_m) {
75557+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
75558+ struct zap_details details = {
75559+ .nonlinear_vma = vma_m,
75560+ .last_index = ULONG_MAX,
75561+ };
75562+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
75563+ } else
75564+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
75565+ }
75566+#endif
75567+
75568 return 0;
75569 }
75570
75571@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
75572 if (end < start)
75573 goto out;
75574
75575+#ifdef CONFIG_PAX_SEGMEXEC
75576+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
75577+ if (end > SEGMEXEC_TASK_SIZE)
75578+ goto out;
75579+ } else
75580+#endif
75581+
75582+ if (end > TASK_SIZE)
75583+ goto out;
75584+
75585 error = 0;
75586 if (end == start)
75587 goto out;
75588diff --git a/mm/memory-failure.c b/mm/memory-failure.c
75589index 8aeba53..b4a4198 100644
75590--- a/mm/memory-failure.c
75591+++ b/mm/memory-failure.c
75592@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
75593
75594 int sysctl_memory_failure_recovery __read_mostly = 1;
75595
75596-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75597+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75598
75599 /*
75600 * Send all the processes who have the page mapped an ``action optional''
75601@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
75602 si.si_signo = SIGBUS;
75603 si.si_errno = 0;
75604 si.si_code = BUS_MCEERR_AO;
75605- si.si_addr = (void *)addr;
75606+ si.si_addr = (void __user *)addr;
75607 #ifdef __ARCH_SI_TRAPNO
75608 si.si_trapno = trapno;
75609 #endif
75610@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
75611 return 0;
75612 }
75613
75614- atomic_long_add(1, &mce_bad_pages);
75615+ atomic_long_add_unchecked(1, &mce_bad_pages);
75616
75617 /*
75618 * We need/can do nothing about count=0 pages.
75619diff --git a/mm/memory.c b/mm/memory.c
75620index 6c836d3..48f3264 100644
75621--- a/mm/memory.c
75622+++ b/mm/memory.c
75623@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
75624 return;
75625
75626 pmd = pmd_offset(pud, start);
75627+
75628+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
75629 pud_clear(pud);
75630 pmd_free_tlb(tlb, pmd, start);
75631+#endif
75632+
75633 }
75634
75635 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75636@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75637 if (end - 1 > ceiling - 1)
75638 return;
75639
75640+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
75641 pud = pud_offset(pgd, start);
75642 pgd_clear(pgd);
75643 pud_free_tlb(tlb, pud, start);
75644+#endif
75645+
75646 }
75647
75648 /*
75649@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75650 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
75651 i = 0;
75652
75653- do {
75654+ while (nr_pages) {
75655 struct vm_area_struct *vma;
75656
75657- vma = find_extend_vma(mm, start);
75658+ vma = find_vma(mm, start);
75659 if (!vma && in_gate_area(tsk, start)) {
75660 unsigned long pg = start & PAGE_MASK;
75661 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
75662@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75663 continue;
75664 }
75665
75666- if (!vma ||
75667+ if (!vma || start < vma->vm_start ||
75668 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
75669 !(vm_flags & vma->vm_flags))
75670 return i ? : -EFAULT;
75671@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75672 start += PAGE_SIZE;
75673 nr_pages--;
75674 } while (nr_pages && start < vma->vm_end);
75675- } while (nr_pages);
75676+ }
75677 return i;
75678 }
75679
75680@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
75681 page_add_file_rmap(page);
75682 set_pte_at(mm, addr, pte, mk_pte(page, prot));
75683
75684+#ifdef CONFIG_PAX_SEGMEXEC
75685+ pax_mirror_file_pte(vma, addr, page, ptl);
75686+#endif
75687+
75688 retval = 0;
75689 pte_unmap_unlock(pte, ptl);
75690 return retval;
75691@@ -1560,10 +1571,22 @@ out:
75692 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
75693 struct page *page)
75694 {
75695+
75696+#ifdef CONFIG_PAX_SEGMEXEC
75697+ struct vm_area_struct *vma_m;
75698+#endif
75699+
75700 if (addr < vma->vm_start || addr >= vma->vm_end)
75701 return -EFAULT;
75702 if (!page_count(page))
75703 return -EINVAL;
75704+
75705+#ifdef CONFIG_PAX_SEGMEXEC
75706+ vma_m = pax_find_mirror_vma(vma);
75707+ if (vma_m)
75708+ vma_m->vm_flags |= VM_INSERTPAGE;
75709+#endif
75710+
75711 vma->vm_flags |= VM_INSERTPAGE;
75712 return insert_page(vma, addr, page, vma->vm_page_prot);
75713 }
75714@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
75715 unsigned long pfn)
75716 {
75717 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
75718+ BUG_ON(vma->vm_mirror);
75719
75720 if (addr < vma->vm_start || addr >= vma->vm_end)
75721 return -EFAULT;
75722@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
75723 copy_user_highpage(dst, src, va, vma);
75724 }
75725
75726+#ifdef CONFIG_PAX_SEGMEXEC
75727+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
75728+{
75729+ struct mm_struct *mm = vma->vm_mm;
75730+ spinlock_t *ptl;
75731+ pte_t *pte, entry;
75732+
75733+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
75734+ entry = *pte;
75735+ if (!pte_present(entry)) {
75736+ if (!pte_none(entry)) {
75737+ BUG_ON(pte_file(entry));
75738+ free_swap_and_cache(pte_to_swp_entry(entry));
75739+ pte_clear_not_present_full(mm, address, pte, 0);
75740+ }
75741+ } else {
75742+ struct page *page;
75743+
75744+ flush_cache_page(vma, address, pte_pfn(entry));
75745+ entry = ptep_clear_flush(vma, address, pte);
75746+ BUG_ON(pte_dirty(entry));
75747+ page = vm_normal_page(vma, address, entry);
75748+ if (page) {
75749+ update_hiwater_rss(mm);
75750+ if (PageAnon(page))
75751+ dec_mm_counter(mm, anon_rss);
75752+ else
75753+ dec_mm_counter(mm, file_rss);
75754+ page_remove_rmap(page);
75755+ page_cache_release(page);
75756+ }
75757+ }
75758+ pte_unmap_unlock(pte, ptl);
75759+}
75760+
75761+/* PaX: if vma is mirrored, synchronize the mirror's PTE
75762+ *
75763+ * the ptl of the lower mapped page is held on entry and is not released on exit
75764+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
75765+ */
75766+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75767+{
75768+ struct mm_struct *mm = vma->vm_mm;
75769+ unsigned long address_m;
75770+ spinlock_t *ptl_m;
75771+ struct vm_area_struct *vma_m;
75772+ pmd_t *pmd_m;
75773+ pte_t *pte_m, entry_m;
75774+
75775+ BUG_ON(!page_m || !PageAnon(page_m));
75776+
75777+ vma_m = pax_find_mirror_vma(vma);
75778+ if (!vma_m)
75779+ return;
75780+
75781+ BUG_ON(!PageLocked(page_m));
75782+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75783+ address_m = address + SEGMEXEC_TASK_SIZE;
75784+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75785+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75786+ ptl_m = pte_lockptr(mm, pmd_m);
75787+ if (ptl != ptl_m) {
75788+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75789+ if (!pte_none(*pte_m))
75790+ goto out;
75791+ }
75792+
75793+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75794+ page_cache_get(page_m);
75795+ page_add_anon_rmap(page_m, vma_m, address_m);
75796+ inc_mm_counter(mm, anon_rss);
75797+ set_pte_at(mm, address_m, pte_m, entry_m);
75798+ update_mmu_cache(vma_m, address_m, entry_m);
75799+out:
75800+ if (ptl != ptl_m)
75801+ spin_unlock(ptl_m);
75802+ pte_unmap_nested(pte_m);
75803+ unlock_page(page_m);
75804+}
75805+
75806+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75807+{
75808+ struct mm_struct *mm = vma->vm_mm;
75809+ unsigned long address_m;
75810+ spinlock_t *ptl_m;
75811+ struct vm_area_struct *vma_m;
75812+ pmd_t *pmd_m;
75813+ pte_t *pte_m, entry_m;
75814+
75815+ BUG_ON(!page_m || PageAnon(page_m));
75816+
75817+ vma_m = pax_find_mirror_vma(vma);
75818+ if (!vma_m)
75819+ return;
75820+
75821+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75822+ address_m = address + SEGMEXEC_TASK_SIZE;
75823+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75824+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75825+ ptl_m = pte_lockptr(mm, pmd_m);
75826+ if (ptl != ptl_m) {
75827+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75828+ if (!pte_none(*pte_m))
75829+ goto out;
75830+ }
75831+
75832+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75833+ page_cache_get(page_m);
75834+ page_add_file_rmap(page_m);
75835+ inc_mm_counter(mm, file_rss);
75836+ set_pte_at(mm, address_m, pte_m, entry_m);
75837+ update_mmu_cache(vma_m, address_m, entry_m);
75838+out:
75839+ if (ptl != ptl_m)
75840+ spin_unlock(ptl_m);
75841+ pte_unmap_nested(pte_m);
75842+}
75843+
75844+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
75845+{
75846+ struct mm_struct *mm = vma->vm_mm;
75847+ unsigned long address_m;
75848+ spinlock_t *ptl_m;
75849+ struct vm_area_struct *vma_m;
75850+ pmd_t *pmd_m;
75851+ pte_t *pte_m, entry_m;
75852+
75853+ vma_m = pax_find_mirror_vma(vma);
75854+ if (!vma_m)
75855+ return;
75856+
75857+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75858+ address_m = address + SEGMEXEC_TASK_SIZE;
75859+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75860+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75861+ ptl_m = pte_lockptr(mm, pmd_m);
75862+ if (ptl != ptl_m) {
75863+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75864+ if (!pte_none(*pte_m))
75865+ goto out;
75866+ }
75867+
75868+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
75869+ set_pte_at(mm, address_m, pte_m, entry_m);
75870+out:
75871+ if (ptl != ptl_m)
75872+ spin_unlock(ptl_m);
75873+ pte_unmap_nested(pte_m);
75874+}
75875+
75876+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
75877+{
75878+ struct page *page_m;
75879+ pte_t entry;
75880+
75881+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
75882+ goto out;
75883+
75884+ entry = *pte;
75885+ page_m = vm_normal_page(vma, address, entry);
75886+ if (!page_m)
75887+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
75888+ else if (PageAnon(page_m)) {
75889+ if (pax_find_mirror_vma(vma)) {
75890+ pte_unmap_unlock(pte, ptl);
75891+ lock_page(page_m);
75892+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
75893+ if (pte_same(entry, *pte))
75894+ pax_mirror_anon_pte(vma, address, page_m, ptl);
75895+ else
75896+ unlock_page(page_m);
75897+ }
75898+ } else
75899+ pax_mirror_file_pte(vma, address, page_m, ptl);
75900+
75901+out:
75902+ pte_unmap_unlock(pte, ptl);
75903+}
75904+#endif
75905+
75906 /*
75907 * This routine handles present pages, when users try to write
75908 * to a shared page. It is done by copying the page to a new address
75909@@ -2156,6 +2360,12 @@ gotten:
75910 */
75911 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
75912 if (likely(pte_same(*page_table, orig_pte))) {
75913+
75914+#ifdef CONFIG_PAX_SEGMEXEC
75915+ if (pax_find_mirror_vma(vma))
75916+ BUG_ON(!trylock_page(new_page));
75917+#endif
75918+
75919 if (old_page) {
75920 if (!PageAnon(old_page)) {
75921 dec_mm_counter(mm, file_rss);
75922@@ -2207,6 +2417,10 @@ gotten:
75923 page_remove_rmap(old_page);
75924 }
75925
75926+#ifdef CONFIG_PAX_SEGMEXEC
75927+ pax_mirror_anon_pte(vma, address, new_page, ptl);
75928+#endif
75929+
75930 /* Free the old page.. */
75931 new_page = old_page;
75932 ret |= VM_FAULT_WRITE;
75933@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75934 swap_free(entry);
75935 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
75936 try_to_free_swap(page);
75937+
75938+#ifdef CONFIG_PAX_SEGMEXEC
75939+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
75940+#endif
75941+
75942 unlock_page(page);
75943
75944 if (flags & FAULT_FLAG_WRITE) {
75945@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75946
75947 /* No need to invalidate - it was non-present before */
75948 update_mmu_cache(vma, address, pte);
75949+
75950+#ifdef CONFIG_PAX_SEGMEXEC
75951+ pax_mirror_anon_pte(vma, address, page, ptl);
75952+#endif
75953+
75954 unlock:
75955 pte_unmap_unlock(page_table, ptl);
75956 out:
75957@@ -2632,40 +2856,6 @@ out_release:
75958 }
75959
75960 /*
75961- * This is like a special single-page "expand_{down|up}wards()",
75962- * except we must first make sure that 'address{-|+}PAGE_SIZE'
75963- * doesn't hit another vma.
75964- */
75965-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
75966-{
75967- address &= PAGE_MASK;
75968- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
75969- struct vm_area_struct *prev = vma->vm_prev;
75970-
75971- /*
75972- * Is there a mapping abutting this one below?
75973- *
75974- * That's only ok if it's the same stack mapping
75975- * that has gotten split..
75976- */
75977- if (prev && prev->vm_end == address)
75978- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
75979-
75980- expand_stack(vma, address - PAGE_SIZE);
75981- }
75982- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
75983- struct vm_area_struct *next = vma->vm_next;
75984-
75985- /* As VM_GROWSDOWN but s/below/above/ */
75986- if (next && next->vm_start == address + PAGE_SIZE)
75987- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
75988-
75989- expand_upwards(vma, address + PAGE_SIZE);
75990- }
75991- return 0;
75992-}
75993-
75994-/*
75995 * We enter with non-exclusive mmap_sem (to exclude vma changes,
75996 * but allow concurrent faults), and pte mapped but not yet locked.
75997 * We return with mmap_sem still held, but pte unmapped and unlocked.
75998@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
75999 unsigned long address, pte_t *page_table, pmd_t *pmd,
76000 unsigned int flags)
76001 {
76002- struct page *page;
76003+ struct page *page = NULL;
76004 spinlock_t *ptl;
76005 pte_t entry;
76006
76007- pte_unmap(page_table);
76008-
76009- /* Check if we need to add a guard page to the stack */
76010- if (check_stack_guard_page(vma, address) < 0)
76011- return VM_FAULT_SIGBUS;
76012-
76013- /* Use the zero-page for reads */
76014 if (!(flags & FAULT_FLAG_WRITE)) {
76015 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
76016 vma->vm_page_prot));
76017- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76018+ ptl = pte_lockptr(mm, pmd);
76019+ spin_lock(ptl);
76020 if (!pte_none(*page_table))
76021 goto unlock;
76022 goto setpte;
76023 }
76024
76025 /* Allocate our own private page. */
76026+ pte_unmap(page_table);
76027+
76028 if (unlikely(anon_vma_prepare(vma)))
76029 goto oom;
76030 page = alloc_zeroed_user_highpage_movable(vma, address);
76031@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76032 if (!pte_none(*page_table))
76033 goto release;
76034
76035+#ifdef CONFIG_PAX_SEGMEXEC
76036+ if (pax_find_mirror_vma(vma))
76037+ BUG_ON(!trylock_page(page));
76038+#endif
76039+
76040 inc_mm_counter(mm, anon_rss);
76041 page_add_new_anon_rmap(page, vma, address);
76042 setpte:
76043@@ -2720,6 +2911,12 @@ setpte:
76044
76045 /* No need to invalidate - it was non-present before */
76046 update_mmu_cache(vma, address, entry);
76047+
76048+#ifdef CONFIG_PAX_SEGMEXEC
76049+ if (page)
76050+ pax_mirror_anon_pte(vma, address, page, ptl);
76051+#endif
76052+
76053 unlock:
76054 pte_unmap_unlock(page_table, ptl);
76055 return 0;
76056@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76057 */
76058 /* Only go through if we didn't race with anybody else... */
76059 if (likely(pte_same(*page_table, orig_pte))) {
76060+
76061+#ifdef CONFIG_PAX_SEGMEXEC
76062+ if (anon && pax_find_mirror_vma(vma))
76063+ BUG_ON(!trylock_page(page));
76064+#endif
76065+
76066 flush_icache_page(vma, page);
76067 entry = mk_pte(page, vma->vm_page_prot);
76068 if (flags & FAULT_FLAG_WRITE)
76069@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76070
76071 /* no need to invalidate: a not-present page won't be cached */
76072 update_mmu_cache(vma, address, entry);
76073+
76074+#ifdef CONFIG_PAX_SEGMEXEC
76075+ if (anon)
76076+ pax_mirror_anon_pte(vma, address, page, ptl);
76077+ else
76078+ pax_mirror_file_pte(vma, address, page, ptl);
76079+#endif
76080+
76081 } else {
76082 if (charged)
76083 mem_cgroup_uncharge_page(page);
76084@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
76085 if (flags & FAULT_FLAG_WRITE)
76086 flush_tlb_page(vma, address);
76087 }
76088+
76089+#ifdef CONFIG_PAX_SEGMEXEC
76090+ pax_mirror_pte(vma, address, pte, pmd, ptl);
76091+ return 0;
76092+#endif
76093+
76094 unlock:
76095 pte_unmap_unlock(pte, ptl);
76096 return 0;
76097@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76098 pmd_t *pmd;
76099 pte_t *pte;
76100
76101+#ifdef CONFIG_PAX_SEGMEXEC
76102+ struct vm_area_struct *vma_m;
76103+#endif
76104+
76105 __set_current_state(TASK_RUNNING);
76106
76107 count_vm_event(PGFAULT);
76108@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76109 if (unlikely(is_vm_hugetlb_page(vma)))
76110 return hugetlb_fault(mm, vma, address, flags);
76111
76112+#ifdef CONFIG_PAX_SEGMEXEC
76113+ vma_m = pax_find_mirror_vma(vma);
76114+ if (vma_m) {
76115+ unsigned long address_m;
76116+ pgd_t *pgd_m;
76117+ pud_t *pud_m;
76118+ pmd_t *pmd_m;
76119+
76120+ if (vma->vm_start > vma_m->vm_start) {
76121+ address_m = address;
76122+ address -= SEGMEXEC_TASK_SIZE;
76123+ vma = vma_m;
76124+ } else
76125+ address_m = address + SEGMEXEC_TASK_SIZE;
76126+
76127+ pgd_m = pgd_offset(mm, address_m);
76128+ pud_m = pud_alloc(mm, pgd_m, address_m);
76129+ if (!pud_m)
76130+ return VM_FAULT_OOM;
76131+ pmd_m = pmd_alloc(mm, pud_m, address_m);
76132+ if (!pmd_m)
76133+ return VM_FAULT_OOM;
76134+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
76135+ return VM_FAULT_OOM;
76136+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76137+ }
76138+#endif
76139+
76140 pgd = pgd_offset(mm, address);
76141 pud = pud_alloc(mm, pgd, address);
76142 if (!pud)
76143@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
76144 gate_vma.vm_start = FIXADDR_USER_START;
76145 gate_vma.vm_end = FIXADDR_USER_END;
76146 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76147- gate_vma.vm_page_prot = __P101;
76148+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76149 /*
76150 * Make sure the vDSO gets into every core dump.
76151 * Dumping its contents makes post-mortem fully interpretable later
76152diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76153index 3c6e3e2..ad9871c 100644
76154--- a/mm/mempolicy.c
76155+++ b/mm/mempolicy.c
76156@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76157 struct vm_area_struct *next;
76158 int err;
76159
76160+#ifdef CONFIG_PAX_SEGMEXEC
76161+ struct vm_area_struct *vma_m;
76162+#endif
76163+
76164 err = 0;
76165 for (; vma && vma->vm_start < end; vma = next) {
76166 next = vma->vm_next;
76167@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76168 err = policy_vma(vma, new);
76169 if (err)
76170 break;
76171+
76172+#ifdef CONFIG_PAX_SEGMEXEC
76173+ vma_m = pax_find_mirror_vma(vma);
76174+ if (vma_m) {
76175+ err = policy_vma(vma_m, new);
76176+ if (err)
76177+ break;
76178+ }
76179+#endif
76180+
76181 }
76182 return err;
76183 }
76184@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76185
76186 if (end < start)
76187 return -EINVAL;
76188+
76189+#ifdef CONFIG_PAX_SEGMEXEC
76190+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76191+ if (end > SEGMEXEC_TASK_SIZE)
76192+ return -EINVAL;
76193+ } else
76194+#endif
76195+
76196+ if (end > TASK_SIZE)
76197+ return -EINVAL;
76198+
76199 if (end == start)
76200 return 0;
76201
76202@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76203 if (!mm)
76204 return -EINVAL;
76205
76206+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76207+ if (mm != current->mm &&
76208+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76209+ err = -EPERM;
76210+ goto out;
76211+ }
76212+#endif
76213+
76214 /*
76215 * Check if this process has the right to modify the specified
76216 * process. The right exists if the process has administrative
76217@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76218 rcu_read_lock();
76219 tcred = __task_cred(task);
76220 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76221- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76222- !capable(CAP_SYS_NICE)) {
76223+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76224 rcu_read_unlock();
76225 err = -EPERM;
76226 goto out;
76227@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
76228
76229 if (file) {
76230 seq_printf(m, " file=");
76231- seq_path(m, &file->f_path, "\n\t= ");
76232+ seq_path(m, &file->f_path, "\n\t\\= ");
76233 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
76234 seq_printf(m, " heap");
76235 } else if (vma->vm_start <= mm->start_stack &&
76236diff --git a/mm/migrate.c b/mm/migrate.c
76237index aaca868..2ebecdc 100644
76238--- a/mm/migrate.c
76239+++ b/mm/migrate.c
76240@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
76241 unsigned long chunk_start;
76242 int err;
76243
76244+ pax_track_stack();
76245+
76246 task_nodes = cpuset_mems_allowed(task);
76247
76248 err = -ENOMEM;
76249@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76250 if (!mm)
76251 return -EINVAL;
76252
76253+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76254+ if (mm != current->mm &&
76255+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76256+ err = -EPERM;
76257+ goto out;
76258+ }
76259+#endif
76260+
76261 /*
76262 * Check if this process has the right to modify the specified
76263 * process. The right exists if the process has administrative
76264@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76265 rcu_read_lock();
76266 tcred = __task_cred(task);
76267 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76268- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76269- !capable(CAP_SYS_NICE)) {
76270+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76271 rcu_read_unlock();
76272 err = -EPERM;
76273 goto out;
76274diff --git a/mm/mlock.c b/mm/mlock.c
76275index 2d846cf..98134d2 100644
76276--- a/mm/mlock.c
76277+++ b/mm/mlock.c
76278@@ -13,6 +13,7 @@
76279 #include <linux/pagemap.h>
76280 #include <linux/mempolicy.h>
76281 #include <linux/syscalls.h>
76282+#include <linux/security.h>
76283 #include <linux/sched.h>
76284 #include <linux/module.h>
76285 #include <linux/rmap.h>
76286@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
76287 }
76288 }
76289
76290-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
76291-{
76292- return (vma->vm_flags & VM_GROWSDOWN) &&
76293- (vma->vm_start == addr) &&
76294- !vma_stack_continue(vma->vm_prev, addr);
76295-}
76296-
76297 /**
76298 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
76299 * @vma: target vma
76300@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
76301 if (vma->vm_flags & VM_WRITE)
76302 gup_flags |= FOLL_WRITE;
76303
76304- /* We don't try to access the guard page of a stack vma */
76305- if (stack_guard_page(vma, start)) {
76306- addr += PAGE_SIZE;
76307- nr_pages--;
76308- }
76309-
76310 while (nr_pages > 0) {
76311 int i;
76312
76313@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
76314 {
76315 unsigned long nstart, end, tmp;
76316 struct vm_area_struct * vma, * prev;
76317- int error;
76318+ int error = -EINVAL;
76319
76320 len = PAGE_ALIGN(len);
76321 end = start + len;
76322@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
76323 return -EINVAL;
76324 if (end == start)
76325 return 0;
76326+ if (end > TASK_SIZE)
76327+ return -EINVAL;
76328+
76329 vma = find_vma_prev(current->mm, start, &prev);
76330 if (!vma || vma->vm_start > start)
76331 return -ENOMEM;
76332@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
76333 for (nstart = start ; ; ) {
76334 unsigned int newflags;
76335
76336+#ifdef CONFIG_PAX_SEGMEXEC
76337+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76338+ break;
76339+#endif
76340+
76341 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
76342
76343 newflags = vma->vm_flags | VM_LOCKED;
76344@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
76345 lock_limit >>= PAGE_SHIFT;
76346
76347 /* check against resource limits */
76348+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
76349 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
76350 error = do_mlock(start, len, 1);
76351 up_write(&current->mm->mmap_sem);
76352@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
76353 static int do_mlockall(int flags)
76354 {
76355 struct vm_area_struct * vma, * prev = NULL;
76356- unsigned int def_flags = 0;
76357
76358 if (flags & MCL_FUTURE)
76359- def_flags = VM_LOCKED;
76360- current->mm->def_flags = def_flags;
76361+ current->mm->def_flags |= VM_LOCKED;
76362+ else
76363+ current->mm->def_flags &= ~VM_LOCKED;
76364 if (flags == MCL_FUTURE)
76365 goto out;
76366
76367 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
76368- unsigned int newflags;
76369+ unsigned long newflags;
76370
76371+#ifdef CONFIG_PAX_SEGMEXEC
76372+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76373+ break;
76374+#endif
76375+
76376+ BUG_ON(vma->vm_end > TASK_SIZE);
76377 newflags = vma->vm_flags | VM_LOCKED;
76378 if (!(flags & MCL_CURRENT))
76379 newflags &= ~VM_LOCKED;
76380@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
76381 lock_limit >>= PAGE_SHIFT;
76382
76383 ret = -ENOMEM;
76384+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
76385 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
76386 capable(CAP_IPC_LOCK))
76387 ret = do_mlockall(flags);
76388diff --git a/mm/mmap.c b/mm/mmap.c
76389index 4b80cbf..c5ce1df 100644
76390--- a/mm/mmap.c
76391+++ b/mm/mmap.c
76392@@ -45,6 +45,16 @@
76393 #define arch_rebalance_pgtables(addr, len) (addr)
76394 #endif
76395
76396+static inline void verify_mm_writelocked(struct mm_struct *mm)
76397+{
76398+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
76399+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76400+ up_read(&mm->mmap_sem);
76401+ BUG();
76402+ }
76403+#endif
76404+}
76405+
76406 static void unmap_region(struct mm_struct *mm,
76407 struct vm_area_struct *vma, struct vm_area_struct *prev,
76408 unsigned long start, unsigned long end);
76409@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
76410 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
76411 *
76412 */
76413-pgprot_t protection_map[16] = {
76414+pgprot_t protection_map[16] __read_only = {
76415 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76416 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76417 };
76418
76419 pgprot_t vm_get_page_prot(unsigned long vm_flags)
76420 {
76421- return __pgprot(pgprot_val(protection_map[vm_flags &
76422+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
76423 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
76424 pgprot_val(arch_vm_get_page_prot(vm_flags)));
76425+
76426+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76427+ if (!nx_enabled &&
76428+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
76429+ (vm_flags & (VM_READ | VM_WRITE)))
76430+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
76431+#endif
76432+
76433+ return prot;
76434 }
76435 EXPORT_SYMBOL(vm_get_page_prot);
76436
76437 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76438 int sysctl_overcommit_ratio = 50; /* default is 50% */
76439 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
76440+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
76441 struct percpu_counter vm_committed_as;
76442
76443 /*
76444@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
76445 struct vm_area_struct *next = vma->vm_next;
76446
76447 might_sleep();
76448+ BUG_ON(vma->vm_mirror);
76449 if (vma->vm_ops && vma->vm_ops->close)
76450 vma->vm_ops->close(vma);
76451 if (vma->vm_file) {
76452@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
76453 * not page aligned -Ram Gupta
76454 */
76455 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
76456+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
76457 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
76458 (mm->end_data - mm->start_data) > rlim)
76459 goto out;
76460@@ -704,6 +726,12 @@ static int
76461 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
76462 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76463 {
76464+
76465+#ifdef CONFIG_PAX_SEGMEXEC
76466+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
76467+ return 0;
76468+#endif
76469+
76470 if (is_mergeable_vma(vma, file, vm_flags) &&
76471 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76472 if (vma->vm_pgoff == vm_pgoff)
76473@@ -723,6 +751,12 @@ static int
76474 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76475 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76476 {
76477+
76478+#ifdef CONFIG_PAX_SEGMEXEC
76479+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
76480+ return 0;
76481+#endif
76482+
76483 if (is_mergeable_vma(vma, file, vm_flags) &&
76484 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76485 pgoff_t vm_pglen;
76486@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76487 struct vm_area_struct *vma_merge(struct mm_struct *mm,
76488 struct vm_area_struct *prev, unsigned long addr,
76489 unsigned long end, unsigned long vm_flags,
76490- struct anon_vma *anon_vma, struct file *file,
76491+ struct anon_vma *anon_vma, struct file *file,
76492 pgoff_t pgoff, struct mempolicy *policy)
76493 {
76494 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
76495 struct vm_area_struct *area, *next;
76496
76497+#ifdef CONFIG_PAX_SEGMEXEC
76498+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
76499+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
76500+
76501+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
76502+#endif
76503+
76504 /*
76505 * We later require that vma->vm_flags == vm_flags,
76506 * so this tests vma->vm_flags & VM_SPECIAL, too.
76507@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76508 if (next && next->vm_end == end) /* cases 6, 7, 8 */
76509 next = next->vm_next;
76510
76511+#ifdef CONFIG_PAX_SEGMEXEC
76512+ if (prev)
76513+ prev_m = pax_find_mirror_vma(prev);
76514+ if (area)
76515+ area_m = pax_find_mirror_vma(area);
76516+ if (next)
76517+ next_m = pax_find_mirror_vma(next);
76518+#endif
76519+
76520 /*
76521 * Can it merge with the predecessor?
76522 */
76523@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76524 /* cases 1, 6 */
76525 vma_adjust(prev, prev->vm_start,
76526 next->vm_end, prev->vm_pgoff, NULL);
76527- } else /* cases 2, 5, 7 */
76528+
76529+#ifdef CONFIG_PAX_SEGMEXEC
76530+ if (prev_m)
76531+ vma_adjust(prev_m, prev_m->vm_start,
76532+ next_m->vm_end, prev_m->vm_pgoff, NULL);
76533+#endif
76534+
76535+ } else { /* cases 2, 5, 7 */
76536 vma_adjust(prev, prev->vm_start,
76537 end, prev->vm_pgoff, NULL);
76538+
76539+#ifdef CONFIG_PAX_SEGMEXEC
76540+ if (prev_m)
76541+ vma_adjust(prev_m, prev_m->vm_start,
76542+ end_m, prev_m->vm_pgoff, NULL);
76543+#endif
76544+
76545+ }
76546 return prev;
76547 }
76548
76549@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76550 mpol_equal(policy, vma_policy(next)) &&
76551 can_vma_merge_before(next, vm_flags,
76552 anon_vma, file, pgoff+pglen)) {
76553- if (prev && addr < prev->vm_end) /* case 4 */
76554+ if (prev && addr < prev->vm_end) { /* case 4 */
76555 vma_adjust(prev, prev->vm_start,
76556 addr, prev->vm_pgoff, NULL);
76557- else /* cases 3, 8 */
76558+
76559+#ifdef CONFIG_PAX_SEGMEXEC
76560+ if (prev_m)
76561+ vma_adjust(prev_m, prev_m->vm_start,
76562+ addr_m, prev_m->vm_pgoff, NULL);
76563+#endif
76564+
76565+ } else { /* cases 3, 8 */
76566 vma_adjust(area, addr, next->vm_end,
76567 next->vm_pgoff - pglen, NULL);
76568+
76569+#ifdef CONFIG_PAX_SEGMEXEC
76570+ if (area_m)
76571+ vma_adjust(area_m, addr_m, next_m->vm_end,
76572+ next_m->vm_pgoff - pglen, NULL);
76573+#endif
76574+
76575+ }
76576 return area;
76577 }
76578
76579@@ -898,14 +978,11 @@ none:
76580 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
76581 struct file *file, long pages)
76582 {
76583- const unsigned long stack_flags
76584- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
76585-
76586 if (file) {
76587 mm->shared_vm += pages;
76588 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
76589 mm->exec_vm += pages;
76590- } else if (flags & stack_flags)
76591+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
76592 mm->stack_vm += pages;
76593 if (flags & (VM_RESERVED|VM_IO))
76594 mm->reserved_vm += pages;
76595@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76596 * (the exception is when the underlying filesystem is noexec
76597 * mounted, in which case we dont add PROT_EXEC.)
76598 */
76599- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76600+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76601 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
76602 prot |= PROT_EXEC;
76603
76604@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76605 /* Obtain the address to map to. we verify (or select) it and ensure
76606 * that it represents a valid section of the address space.
76607 */
76608- addr = get_unmapped_area(file, addr, len, pgoff, flags);
76609+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
76610 if (addr & ~PAGE_MASK)
76611 return addr;
76612
76613@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76614 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
76615 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
76616
76617+#ifdef CONFIG_PAX_MPROTECT
76618+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76619+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76620+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
76621+ gr_log_rwxmmap(file);
76622+
76623+#ifdef CONFIG_PAX_EMUPLT
76624+ vm_flags &= ~VM_EXEC;
76625+#else
76626+ return -EPERM;
76627+#endif
76628+
76629+ }
76630+
76631+ if (!(vm_flags & VM_EXEC))
76632+ vm_flags &= ~VM_MAYEXEC;
76633+#else
76634+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76635+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76636+#endif
76637+ else
76638+ vm_flags &= ~VM_MAYWRITE;
76639+ }
76640+#endif
76641+
76642+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76643+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
76644+ vm_flags &= ~VM_PAGEEXEC;
76645+#endif
76646+
76647 if (flags & MAP_LOCKED)
76648 if (!can_do_mlock())
76649 return -EPERM;
76650@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76651 locked += mm->locked_vm;
76652 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
76653 lock_limit >>= PAGE_SHIFT;
76654+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76655 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
76656 return -EAGAIN;
76657 }
76658@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76659 if (error)
76660 return error;
76661
76662+ if (!gr_acl_handle_mmap(file, prot))
76663+ return -EACCES;
76664+
76665 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
76666 }
76667 EXPORT_SYMBOL(do_mmap_pgoff);
76668@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
76669 */
76670 int vma_wants_writenotify(struct vm_area_struct *vma)
76671 {
76672- unsigned int vm_flags = vma->vm_flags;
76673+ unsigned long vm_flags = vma->vm_flags;
76674
76675 /* If it was private or non-writable, the write bit is already clear */
76676- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
76677+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
76678 return 0;
76679
76680 /* The backer wishes to know when pages are first written to? */
76681@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
76682 unsigned long charged = 0;
76683 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
76684
76685+#ifdef CONFIG_PAX_SEGMEXEC
76686+ struct vm_area_struct *vma_m = NULL;
76687+#endif
76688+
76689+ /*
76690+ * mm->mmap_sem is required to protect against another thread
76691+ * changing the mappings in case we sleep.
76692+ */
76693+ verify_mm_writelocked(mm);
76694+
76695 /* Clear old maps */
76696 error = -ENOMEM;
76697-munmap_back:
76698 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76699 if (vma && vma->vm_start < addr + len) {
76700 if (do_munmap(mm, addr, len))
76701 return -ENOMEM;
76702- goto munmap_back;
76703+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76704+ BUG_ON(vma && vma->vm_start < addr + len);
76705 }
76706
76707 /* Check against address space limit. */
76708@@ -1173,6 +1294,16 @@ munmap_back:
76709 goto unacct_error;
76710 }
76711
76712+#ifdef CONFIG_PAX_SEGMEXEC
76713+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
76714+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76715+ if (!vma_m) {
76716+ error = -ENOMEM;
76717+ goto free_vma;
76718+ }
76719+ }
76720+#endif
76721+
76722 vma->vm_mm = mm;
76723 vma->vm_start = addr;
76724 vma->vm_end = addr + len;
76725@@ -1195,6 +1326,19 @@ munmap_back:
76726 error = file->f_op->mmap(file, vma);
76727 if (error)
76728 goto unmap_and_free_vma;
76729+
76730+#ifdef CONFIG_PAX_SEGMEXEC
76731+ if (vma_m && (vm_flags & VM_EXECUTABLE))
76732+ added_exe_file_vma(mm);
76733+#endif
76734+
76735+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76736+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
76737+ vma->vm_flags |= VM_PAGEEXEC;
76738+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76739+ }
76740+#endif
76741+
76742 if (vm_flags & VM_EXECUTABLE)
76743 added_exe_file_vma(mm);
76744
76745@@ -1218,6 +1362,11 @@ munmap_back:
76746 vma_link(mm, vma, prev, rb_link, rb_parent);
76747 file = vma->vm_file;
76748
76749+#ifdef CONFIG_PAX_SEGMEXEC
76750+ if (vma_m)
76751+ pax_mirror_vma(vma_m, vma);
76752+#endif
76753+
76754 /* Once vma denies write, undo our temporary denial count */
76755 if (correct_wcount)
76756 atomic_inc(&inode->i_writecount);
76757@@ -1226,6 +1375,7 @@ out:
76758
76759 mm->total_vm += len >> PAGE_SHIFT;
76760 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
76761+ track_exec_limit(mm, addr, addr + len, vm_flags);
76762 if (vm_flags & VM_LOCKED) {
76763 /*
76764 * makes pages present; downgrades, drops, reacquires mmap_sem
76765@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
76766 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
76767 charged = 0;
76768 free_vma:
76769+
76770+#ifdef CONFIG_PAX_SEGMEXEC
76771+ if (vma_m)
76772+ kmem_cache_free(vm_area_cachep, vma_m);
76773+#endif
76774+
76775 kmem_cache_free(vm_area_cachep, vma);
76776 unacct_error:
76777 if (charged)
76778@@ -1255,6 +1411,44 @@ unacct_error:
76779 return error;
76780 }
76781
76782+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
76783+{
76784+ if (!vma) {
76785+#ifdef CONFIG_STACK_GROWSUP
76786+ if (addr > sysctl_heap_stack_gap)
76787+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
76788+ else
76789+ vma = find_vma(current->mm, 0);
76790+ if (vma && (vma->vm_flags & VM_GROWSUP))
76791+ return false;
76792+#endif
76793+ return true;
76794+ }
76795+
76796+ if (addr + len > vma->vm_start)
76797+ return false;
76798+
76799+ if (vma->vm_flags & VM_GROWSDOWN)
76800+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
76801+#ifdef CONFIG_STACK_GROWSUP
76802+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
76803+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
76804+#endif
76805+
76806+ return true;
76807+}
76808+
76809+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
76810+{
76811+ if (vma->vm_start < len)
76812+ return -ENOMEM;
76813+ if (!(vma->vm_flags & VM_GROWSDOWN))
76814+ return vma->vm_start - len;
76815+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
76816+ return vma->vm_start - len - sysctl_heap_stack_gap;
76817+ return -ENOMEM;
76818+}
76819+
76820 /* Get an address range which is currently unmapped.
76821 * For shmat() with addr=0.
76822 *
76823@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
76824 if (flags & MAP_FIXED)
76825 return addr;
76826
76827+#ifdef CONFIG_PAX_RANDMMAP
76828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76829+#endif
76830+
76831 if (addr) {
76832 addr = PAGE_ALIGN(addr);
76833- vma = find_vma(mm, addr);
76834- if (TASK_SIZE - len >= addr &&
76835- (!vma || addr + len <= vma->vm_start))
76836- return addr;
76837+ if (TASK_SIZE - len >= addr) {
76838+ vma = find_vma(mm, addr);
76839+ if (check_heap_stack_gap(vma, addr, len))
76840+ return addr;
76841+ }
76842 }
76843 if (len > mm->cached_hole_size) {
76844- start_addr = addr = mm->free_area_cache;
76845+ start_addr = addr = mm->free_area_cache;
76846 } else {
76847- start_addr = addr = TASK_UNMAPPED_BASE;
76848- mm->cached_hole_size = 0;
76849+ start_addr = addr = mm->mmap_base;
76850+ mm->cached_hole_size = 0;
76851 }
76852
76853 full_search:
76854@@ -1303,34 +1502,40 @@ full_search:
76855 * Start a new search - just in case we missed
76856 * some holes.
76857 */
76858- if (start_addr != TASK_UNMAPPED_BASE) {
76859- addr = TASK_UNMAPPED_BASE;
76860- start_addr = addr;
76861+ if (start_addr != mm->mmap_base) {
76862+ start_addr = addr = mm->mmap_base;
76863 mm->cached_hole_size = 0;
76864 goto full_search;
76865 }
76866 return -ENOMEM;
76867 }
76868- if (!vma || addr + len <= vma->vm_start) {
76869- /*
76870- * Remember the place where we stopped the search:
76871- */
76872- mm->free_area_cache = addr + len;
76873- return addr;
76874- }
76875+ if (check_heap_stack_gap(vma, addr, len))
76876+ break;
76877 if (addr + mm->cached_hole_size < vma->vm_start)
76878 mm->cached_hole_size = vma->vm_start - addr;
76879 addr = vma->vm_end;
76880 }
76881+
76882+ /*
76883+ * Remember the place where we stopped the search:
76884+ */
76885+ mm->free_area_cache = addr + len;
76886+ return addr;
76887 }
76888 #endif
76889
76890 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
76891 {
76892+
76893+#ifdef CONFIG_PAX_SEGMEXEC
76894+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76895+ return;
76896+#endif
76897+
76898 /*
76899 * Is this a new hole at the lowest possible address?
76900 */
76901- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
76902+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
76903 mm->free_area_cache = addr;
76904 mm->cached_hole_size = ~0UL;
76905 }
76906@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76907 {
76908 struct vm_area_struct *vma;
76909 struct mm_struct *mm = current->mm;
76910- unsigned long addr = addr0;
76911+ unsigned long base = mm->mmap_base, addr = addr0;
76912
76913 /* requested length too big for entire address space */
76914 if (len > TASK_SIZE)
76915@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76916 if (flags & MAP_FIXED)
76917 return addr;
76918
76919+#ifdef CONFIG_PAX_RANDMMAP
76920+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76921+#endif
76922+
76923 /* requesting a specific address */
76924 if (addr) {
76925 addr = PAGE_ALIGN(addr);
76926- vma = find_vma(mm, addr);
76927- if (TASK_SIZE - len >= addr &&
76928- (!vma || addr + len <= vma->vm_start))
76929- return addr;
76930+ if (TASK_SIZE - len >= addr) {
76931+ vma = find_vma(mm, addr);
76932+ if (check_heap_stack_gap(vma, addr, len))
76933+ return addr;
76934+ }
76935 }
76936
76937 /* check if free_area_cache is useful for us */
76938@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76939 /* make sure it can fit in the remaining address space */
76940 if (addr > len) {
76941 vma = find_vma(mm, addr-len);
76942- if (!vma || addr <= vma->vm_start)
76943+ if (check_heap_stack_gap(vma, addr - len, len))
76944 /* remember the address as a hint for next time */
76945 return (mm->free_area_cache = addr-len);
76946 }
76947@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76948 * return with success:
76949 */
76950 vma = find_vma(mm, addr);
76951- if (!vma || addr+len <= vma->vm_start)
76952+ if (check_heap_stack_gap(vma, addr, len))
76953 /* remember the address as a hint for next time */
76954 return (mm->free_area_cache = addr);
76955
76956@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76957 mm->cached_hole_size = vma->vm_start - addr;
76958
76959 /* try just below the current vma->vm_start */
76960- addr = vma->vm_start-len;
76961- } while (len < vma->vm_start);
76962+ addr = skip_heap_stack_gap(vma, len);
76963+ } while (!IS_ERR_VALUE(addr));
76964
76965 bottomup:
76966 /*
76967@@ -1414,13 +1624,21 @@ bottomup:
76968 * can happen with large stack limits and large mmap()
76969 * allocations.
76970 */
76971+ mm->mmap_base = TASK_UNMAPPED_BASE;
76972+
76973+#ifdef CONFIG_PAX_RANDMMAP
76974+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76975+ mm->mmap_base += mm->delta_mmap;
76976+#endif
76977+
76978+ mm->free_area_cache = mm->mmap_base;
76979 mm->cached_hole_size = ~0UL;
76980- mm->free_area_cache = TASK_UNMAPPED_BASE;
76981 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
76982 /*
76983 * Restore the topdown base:
76984 */
76985- mm->free_area_cache = mm->mmap_base;
76986+ mm->mmap_base = base;
76987+ mm->free_area_cache = base;
76988 mm->cached_hole_size = ~0UL;
76989
76990 return addr;
76991@@ -1429,6 +1647,12 @@ bottomup:
76992
76993 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
76994 {
76995+
76996+#ifdef CONFIG_PAX_SEGMEXEC
76997+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76998+ return;
76999+#endif
77000+
77001 /*
77002 * Is this a new hole at the highest possible address?
77003 */
77004@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77005 mm->free_area_cache = addr;
77006
77007 /* dont allow allocations above current base */
77008- if (mm->free_area_cache > mm->mmap_base)
77009+ if (mm->free_area_cache > mm->mmap_base) {
77010 mm->free_area_cache = mm->mmap_base;
77011+ mm->cached_hole_size = ~0UL;
77012+ }
77013 }
77014
77015 unsigned long
77016@@ -1545,6 +1771,27 @@ out:
77017 return prev ? prev->vm_next : vma;
77018 }
77019
77020+#ifdef CONFIG_PAX_SEGMEXEC
77021+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
77022+{
77023+ struct vm_area_struct *vma_m;
77024+
77025+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
77026+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
77027+ BUG_ON(vma->vm_mirror);
77028+ return NULL;
77029+ }
77030+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
77031+ vma_m = vma->vm_mirror;
77032+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
77033+ BUG_ON(vma->vm_file != vma_m->vm_file);
77034+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
77035+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
77036+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
77037+ return vma_m;
77038+}
77039+#endif
77040+
77041 /*
77042 * Verify that the stack growth is acceptable and
77043 * update accounting. This is shared with both the
77044@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77045 return -ENOMEM;
77046
77047 /* Stack limit test */
77048+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
77049 if (size > rlim[RLIMIT_STACK].rlim_cur)
77050 return -ENOMEM;
77051
77052@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77053 unsigned long limit;
77054 locked = mm->locked_vm + grow;
77055 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
77056+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77057 if (locked > limit && !capable(CAP_IPC_LOCK))
77058 return -ENOMEM;
77059 }
77060@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77061 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
77062 * vma is the last one with address > vma->vm_end. Have to extend vma.
77063 */
77064+#ifndef CONFIG_IA64
77065+static
77066+#endif
77067 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77068 {
77069 int error;
77070+ bool locknext;
77071
77072 if (!(vma->vm_flags & VM_GROWSUP))
77073 return -EFAULT;
77074
77075+ /* Also guard against wrapping around to address 0. */
77076+ if (address < PAGE_ALIGN(address+1))
77077+ address = PAGE_ALIGN(address+1);
77078+ else
77079+ return -ENOMEM;
77080+
77081 /*
77082 * We must make sure the anon_vma is allocated
77083 * so that the anon_vma locking is not a noop.
77084 */
77085 if (unlikely(anon_vma_prepare(vma)))
77086 return -ENOMEM;
77087+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
77088+ if (locknext && anon_vma_prepare(vma->vm_next))
77089+ return -ENOMEM;
77090 anon_vma_lock(vma);
77091+ if (locknext)
77092+ anon_vma_lock(vma->vm_next);
77093
77094 /*
77095 * vma->vm_start/vm_end cannot change under us because the caller
77096 * is required to hold the mmap_sem in read mode. We need the
77097- * anon_vma lock to serialize against concurrent expand_stacks.
77098- * Also guard against wrapping around to address 0.
77099+ * anon_vma locks to serialize against concurrent expand_stacks
77100+ * and expand_upwards.
77101 */
77102- if (address < PAGE_ALIGN(address+4))
77103- address = PAGE_ALIGN(address+4);
77104- else {
77105- anon_vma_unlock(vma);
77106- return -ENOMEM;
77107- }
77108 error = 0;
77109
77110 /* Somebody else might have raced and expanded it already */
77111- if (address > vma->vm_end) {
77112+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
77113+ error = -ENOMEM;
77114+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
77115 unsigned long size, grow;
77116
77117 size = address - vma->vm_start;
77118@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77119 vma->vm_end = address;
77120 }
77121 }
77122+ if (locknext)
77123+ anon_vma_unlock(vma->vm_next);
77124 anon_vma_unlock(vma);
77125 return error;
77126 }
77127@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
77128 unsigned long address)
77129 {
77130 int error;
77131+ bool lockprev = false;
77132+ struct vm_area_struct *prev;
77133
77134 /*
77135 * We must make sure the anon_vma is allocated
77136@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
77137 if (error)
77138 return error;
77139
77140+ prev = vma->vm_prev;
77141+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77142+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77143+#endif
77144+ if (lockprev && anon_vma_prepare(prev))
77145+ return -ENOMEM;
77146+ if (lockprev)
77147+ anon_vma_lock(prev);
77148+
77149 anon_vma_lock(vma);
77150
77151 /*
77152@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
77153 */
77154
77155 /* Somebody else might have raced and expanded it already */
77156- if (address < vma->vm_start) {
77157+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77158+ error = -ENOMEM;
77159+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77160 unsigned long size, grow;
77161
77162+#ifdef CONFIG_PAX_SEGMEXEC
77163+ struct vm_area_struct *vma_m;
77164+
77165+ vma_m = pax_find_mirror_vma(vma);
77166+#endif
77167+
77168 size = vma->vm_end - address;
77169 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77170
77171@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
77172 if (!error) {
77173 vma->vm_start = address;
77174 vma->vm_pgoff -= grow;
77175+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77176+
77177+#ifdef CONFIG_PAX_SEGMEXEC
77178+ if (vma_m) {
77179+ vma_m->vm_start -= grow << PAGE_SHIFT;
77180+ vma_m->vm_pgoff -= grow;
77181+ }
77182+#endif
77183+
77184+
77185 }
77186 }
77187 }
77188 anon_vma_unlock(vma);
77189+ if (lockprev)
77190+ anon_vma_unlock(prev);
77191 return error;
77192 }
77193
77194@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77195 do {
77196 long nrpages = vma_pages(vma);
77197
77198+#ifdef CONFIG_PAX_SEGMEXEC
77199+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77200+ vma = remove_vma(vma);
77201+ continue;
77202+ }
77203+#endif
77204+
77205 mm->total_vm -= nrpages;
77206 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77207 vma = remove_vma(vma);
77208@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77209 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77210 vma->vm_prev = NULL;
77211 do {
77212+
77213+#ifdef CONFIG_PAX_SEGMEXEC
77214+ if (vma->vm_mirror) {
77215+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77216+ vma->vm_mirror->vm_mirror = NULL;
77217+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
77218+ vma->vm_mirror = NULL;
77219+ }
77220+#endif
77221+
77222 rb_erase(&vma->vm_rb, &mm->mm_rb);
77223 mm->map_count--;
77224 tail_vma = vma;
77225@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77226 struct mempolicy *pol;
77227 struct vm_area_struct *new;
77228
77229+#ifdef CONFIG_PAX_SEGMEXEC
77230+ struct vm_area_struct *vma_m, *new_m = NULL;
77231+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77232+#endif
77233+
77234 if (is_vm_hugetlb_page(vma) && (addr &
77235 ~(huge_page_mask(hstate_vma(vma)))))
77236 return -EINVAL;
77237
77238+#ifdef CONFIG_PAX_SEGMEXEC
77239+ vma_m = pax_find_mirror_vma(vma);
77240+
77241+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77242+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
77243+ if (mm->map_count >= sysctl_max_map_count-1)
77244+ return -ENOMEM;
77245+ } else
77246+#endif
77247+
77248 if (mm->map_count >= sysctl_max_map_count)
77249 return -ENOMEM;
77250
77251@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77252 if (!new)
77253 return -ENOMEM;
77254
77255+#ifdef CONFIG_PAX_SEGMEXEC
77256+ if (vma_m) {
77257+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77258+ if (!new_m) {
77259+ kmem_cache_free(vm_area_cachep, new);
77260+ return -ENOMEM;
77261+ }
77262+ }
77263+#endif
77264+
77265 /* most fields are the same, copy all, and then fixup */
77266 *new = *vma;
77267
77268@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77269 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
77270 }
77271
77272+#ifdef CONFIG_PAX_SEGMEXEC
77273+ if (vma_m) {
77274+ *new_m = *vma_m;
77275+ new_m->vm_mirror = new;
77276+ new->vm_mirror = new_m;
77277+
77278+ if (new_below)
77279+ new_m->vm_end = addr_m;
77280+ else {
77281+ new_m->vm_start = addr_m;
77282+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
77283+ }
77284+ }
77285+#endif
77286+
77287 pol = mpol_dup(vma_policy(vma));
77288 if (IS_ERR(pol)) {
77289+
77290+#ifdef CONFIG_PAX_SEGMEXEC
77291+ if (new_m)
77292+ kmem_cache_free(vm_area_cachep, new_m);
77293+#endif
77294+
77295 kmem_cache_free(vm_area_cachep, new);
77296 return PTR_ERR(pol);
77297 }
77298@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77299 else
77300 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
77301
77302+#ifdef CONFIG_PAX_SEGMEXEC
77303+ if (vma_m) {
77304+ mpol_get(pol);
77305+ vma_set_policy(new_m, pol);
77306+
77307+ if (new_m->vm_file) {
77308+ get_file(new_m->vm_file);
77309+ if (vma_m->vm_flags & VM_EXECUTABLE)
77310+ added_exe_file_vma(mm);
77311+ }
77312+
77313+ if (new_m->vm_ops && new_m->vm_ops->open)
77314+ new_m->vm_ops->open(new_m);
77315+
77316+ if (new_below)
77317+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
77318+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
77319+ else
77320+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
77321+ }
77322+#endif
77323+
77324 return 0;
77325 }
77326
77327@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77328 * work. This now handles partial unmappings.
77329 * Jeremy Fitzhardinge <jeremy@goop.org>
77330 */
77331+#ifdef CONFIG_PAX_SEGMEXEC
77332 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77333 {
77334+ int ret = __do_munmap(mm, start, len);
77335+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
77336+ return ret;
77337+
77338+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
77339+}
77340+
77341+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77342+#else
77343+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77344+#endif
77345+{
77346 unsigned long end;
77347 struct vm_area_struct *vma, *prev, *last;
77348
77349+ /*
77350+ * mm->mmap_sem is required to protect against another thread
77351+ * changing the mappings in case we sleep.
77352+ */
77353+ verify_mm_writelocked(mm);
77354+
77355 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
77356 return -EINVAL;
77357
77358@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77359 /* Fix up all other VM information */
77360 remove_vma_list(mm, vma);
77361
77362+ track_exec_limit(mm, start, end, 0UL);
77363+
77364 return 0;
77365 }
77366
77367@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
77368
77369 profile_munmap(addr);
77370
77371+#ifdef CONFIG_PAX_SEGMEXEC
77372+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
77373+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
77374+ return -EINVAL;
77375+#endif
77376+
77377 down_write(&mm->mmap_sem);
77378 ret = do_munmap(mm, addr, len);
77379 up_write(&mm->mmap_sem);
77380 return ret;
77381 }
77382
77383-static inline void verify_mm_writelocked(struct mm_struct *mm)
77384-{
77385-#ifdef CONFIG_DEBUG_VM
77386- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77387- WARN_ON(1);
77388- up_read(&mm->mmap_sem);
77389- }
77390-#endif
77391-}
77392-
77393 /*
77394 * this is really a simplified "do_mmap". it only handles
77395 * anonymous maps. eventually we may be able to do some
77396@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77397 struct rb_node ** rb_link, * rb_parent;
77398 pgoff_t pgoff = addr >> PAGE_SHIFT;
77399 int error;
77400+ unsigned long charged;
77401
77402 len = PAGE_ALIGN(len);
77403 if (!len)
77404@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77405
77406 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
77407
77408+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
77409+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
77410+ flags &= ~VM_EXEC;
77411+
77412+#ifdef CONFIG_PAX_MPROTECT
77413+ if (mm->pax_flags & MF_PAX_MPROTECT)
77414+ flags &= ~VM_MAYEXEC;
77415+#endif
77416+
77417+ }
77418+#endif
77419+
77420 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
77421 if (error & ~PAGE_MASK)
77422 return error;
77423
77424+ charged = len >> PAGE_SHIFT;
77425+
77426 /*
77427 * mlock MCL_FUTURE?
77428 */
77429 if (mm->def_flags & VM_LOCKED) {
77430 unsigned long locked, lock_limit;
77431- locked = len >> PAGE_SHIFT;
77432+ locked = charged;
77433 locked += mm->locked_vm;
77434 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77435 lock_limit >>= PAGE_SHIFT;
77436@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77437 /*
77438 * Clear old maps. this also does some error checking for us
77439 */
77440- munmap_back:
77441 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77442 if (vma && vma->vm_start < addr + len) {
77443 if (do_munmap(mm, addr, len))
77444 return -ENOMEM;
77445- goto munmap_back;
77446+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77447+ BUG_ON(vma && vma->vm_start < addr + len);
77448 }
77449
77450 /* Check against address space limits *after* clearing old maps... */
77451- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
77452+ if (!may_expand_vm(mm, charged))
77453 return -ENOMEM;
77454
77455 if (mm->map_count > sysctl_max_map_count)
77456 return -ENOMEM;
77457
77458- if (security_vm_enough_memory(len >> PAGE_SHIFT))
77459+ if (security_vm_enough_memory(charged))
77460 return -ENOMEM;
77461
77462 /* Can we just expand an old private anonymous mapping? */
77463@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77464 */
77465 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77466 if (!vma) {
77467- vm_unacct_memory(len >> PAGE_SHIFT);
77468+ vm_unacct_memory(charged);
77469 return -ENOMEM;
77470 }
77471
77472@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77473 vma->vm_page_prot = vm_get_page_prot(flags);
77474 vma_link(mm, vma, prev, rb_link, rb_parent);
77475 out:
77476- mm->total_vm += len >> PAGE_SHIFT;
77477+ mm->total_vm += charged;
77478 if (flags & VM_LOCKED) {
77479 if (!mlock_vma_pages_range(vma, addr, addr + len))
77480- mm->locked_vm += (len >> PAGE_SHIFT);
77481+ mm->locked_vm += charged;
77482 }
77483+ track_exec_limit(mm, addr, addr + len, flags);
77484 return addr;
77485 }
77486
77487@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
77488 * Walk the list again, actually closing and freeing it,
77489 * with preemption enabled, without holding any MM locks.
77490 */
77491- while (vma)
77492+ while (vma) {
77493+ vma->vm_mirror = NULL;
77494 vma = remove_vma(vma);
77495+ }
77496
77497 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
77498 }
77499@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77500 struct vm_area_struct * __vma, * prev;
77501 struct rb_node ** rb_link, * rb_parent;
77502
77503+#ifdef CONFIG_PAX_SEGMEXEC
77504+ struct vm_area_struct *vma_m = NULL;
77505+#endif
77506+
77507 /*
77508 * The vm_pgoff of a purely anonymous vma should be irrelevant
77509 * until its first write fault, when page's anon_vma and index
77510@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77511 if ((vma->vm_flags & VM_ACCOUNT) &&
77512 security_vm_enough_memory_mm(mm, vma_pages(vma)))
77513 return -ENOMEM;
77514+
77515+#ifdef CONFIG_PAX_SEGMEXEC
77516+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
77517+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77518+ if (!vma_m)
77519+ return -ENOMEM;
77520+ }
77521+#endif
77522+
77523 vma_link(mm, vma, prev, rb_link, rb_parent);
77524+
77525+#ifdef CONFIG_PAX_SEGMEXEC
77526+ if (vma_m)
77527+ pax_mirror_vma(vma_m, vma);
77528+#endif
77529+
77530 return 0;
77531 }
77532
77533@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77534 struct rb_node **rb_link, *rb_parent;
77535 struct mempolicy *pol;
77536
77537+ BUG_ON(vma->vm_mirror);
77538+
77539 /*
77540 * If anonymous vma has not yet been faulted, update new pgoff
77541 * to match new location, to increase its chance of merging.
77542@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77543 return new_vma;
77544 }
77545
77546+#ifdef CONFIG_PAX_SEGMEXEC
77547+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
77548+{
77549+ struct vm_area_struct *prev_m;
77550+ struct rb_node **rb_link_m, *rb_parent_m;
77551+ struct mempolicy *pol_m;
77552+
77553+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
77554+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
77555+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
77556+ *vma_m = *vma;
77557+ pol_m = vma_policy(vma_m);
77558+ mpol_get(pol_m);
77559+ vma_set_policy(vma_m, pol_m);
77560+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
77561+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
77562+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
77563+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
77564+ if (vma_m->vm_file)
77565+ get_file(vma_m->vm_file);
77566+ if (vma_m->vm_ops && vma_m->vm_ops->open)
77567+ vma_m->vm_ops->open(vma_m);
77568+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
77569+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
77570+ vma_m->vm_mirror = vma;
77571+ vma->vm_mirror = vma_m;
77572+}
77573+#endif
77574+
77575 /*
77576 * Return true if the calling process may expand its vm space by the passed
77577 * number of pages
77578@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
77579 unsigned long lim;
77580
77581 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
77582-
77583+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
77584 if (cur + npages > lim)
77585 return 0;
77586 return 1;
77587@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
77588 vma->vm_start = addr;
77589 vma->vm_end = addr + len;
77590
77591+#ifdef CONFIG_PAX_MPROTECT
77592+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77593+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77594+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
77595+ return -EPERM;
77596+ if (!(vm_flags & VM_EXEC))
77597+ vm_flags &= ~VM_MAYEXEC;
77598+#else
77599+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77600+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77601+#endif
77602+ else
77603+ vm_flags &= ~VM_MAYWRITE;
77604+ }
77605+#endif
77606+
77607 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
77608 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77609
77610diff --git a/mm/mprotect.c b/mm/mprotect.c
77611index 1737c7e..c7faeb4 100644
77612--- a/mm/mprotect.c
77613+++ b/mm/mprotect.c
77614@@ -24,10 +24,16 @@
77615 #include <linux/mmu_notifier.h>
77616 #include <linux/migrate.h>
77617 #include <linux/perf_event.h>
77618+
77619+#ifdef CONFIG_PAX_MPROTECT
77620+#include <linux/elf.h>
77621+#endif
77622+
77623 #include <asm/uaccess.h>
77624 #include <asm/pgtable.h>
77625 #include <asm/cacheflush.h>
77626 #include <asm/tlbflush.h>
77627+#include <asm/mmu_context.h>
77628
77629 #ifndef pgprot_modify
77630 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
77631@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
77632 flush_tlb_range(vma, start, end);
77633 }
77634
77635+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77636+/* called while holding the mmap semaphor for writing except stack expansion */
77637+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
77638+{
77639+ unsigned long oldlimit, newlimit = 0UL;
77640+
77641+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
77642+ return;
77643+
77644+ spin_lock(&mm->page_table_lock);
77645+ oldlimit = mm->context.user_cs_limit;
77646+ if ((prot & VM_EXEC) && oldlimit < end)
77647+ /* USER_CS limit moved up */
77648+ newlimit = end;
77649+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
77650+ /* USER_CS limit moved down */
77651+ newlimit = start;
77652+
77653+ if (newlimit) {
77654+ mm->context.user_cs_limit = newlimit;
77655+
77656+#ifdef CONFIG_SMP
77657+ wmb();
77658+ cpus_clear(mm->context.cpu_user_cs_mask);
77659+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
77660+#endif
77661+
77662+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
77663+ }
77664+ spin_unlock(&mm->page_table_lock);
77665+ if (newlimit == end) {
77666+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
77667+
77668+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
77669+ if (is_vm_hugetlb_page(vma))
77670+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
77671+ else
77672+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
77673+ }
77674+}
77675+#endif
77676+
77677 int
77678 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77679 unsigned long start, unsigned long end, unsigned long newflags)
77680@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77681 int error;
77682 int dirty_accountable = 0;
77683
77684+#ifdef CONFIG_PAX_SEGMEXEC
77685+ struct vm_area_struct *vma_m = NULL;
77686+ unsigned long start_m, end_m;
77687+
77688+ start_m = start + SEGMEXEC_TASK_SIZE;
77689+ end_m = end + SEGMEXEC_TASK_SIZE;
77690+#endif
77691+
77692 if (newflags == oldflags) {
77693 *pprev = vma;
77694 return 0;
77695 }
77696
77697+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
77698+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
77699+
77700+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
77701+ return -ENOMEM;
77702+
77703+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
77704+ return -ENOMEM;
77705+ }
77706+
77707 /*
77708 * If we make a private mapping writable we increase our commit;
77709 * but (without finer accounting) cannot reduce our commit if we
77710@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77711 }
77712 }
77713
77714+#ifdef CONFIG_PAX_SEGMEXEC
77715+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
77716+ if (start != vma->vm_start) {
77717+ error = split_vma(mm, vma, start, 1);
77718+ if (error)
77719+ goto fail;
77720+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
77721+ *pprev = (*pprev)->vm_next;
77722+ }
77723+
77724+ if (end != vma->vm_end) {
77725+ error = split_vma(mm, vma, end, 0);
77726+ if (error)
77727+ goto fail;
77728+ }
77729+
77730+ if (pax_find_mirror_vma(vma)) {
77731+ error = __do_munmap(mm, start_m, end_m - start_m);
77732+ if (error)
77733+ goto fail;
77734+ } else {
77735+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77736+ if (!vma_m) {
77737+ error = -ENOMEM;
77738+ goto fail;
77739+ }
77740+ vma->vm_flags = newflags;
77741+ pax_mirror_vma(vma_m, vma);
77742+ }
77743+ }
77744+#endif
77745+
77746 /*
77747 * First try to merge with previous and/or next vma.
77748 */
77749@@ -195,9 +293,21 @@ success:
77750 * vm_flags and vm_page_prot are protected by the mmap_sem
77751 * held in write mode.
77752 */
77753+
77754+#ifdef CONFIG_PAX_SEGMEXEC
77755+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
77756+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
77757+#endif
77758+
77759 vma->vm_flags = newflags;
77760+
77761+#ifdef CONFIG_PAX_MPROTECT
77762+ if (mm->binfmt && mm->binfmt->handle_mprotect)
77763+ mm->binfmt->handle_mprotect(vma, newflags);
77764+#endif
77765+
77766 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
77767- vm_get_page_prot(newflags));
77768+ vm_get_page_prot(vma->vm_flags));
77769
77770 if (vma_wants_writenotify(vma)) {
77771 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
77772@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77773 end = start + len;
77774 if (end <= start)
77775 return -ENOMEM;
77776+
77777+#ifdef CONFIG_PAX_SEGMEXEC
77778+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77779+ if (end > SEGMEXEC_TASK_SIZE)
77780+ return -EINVAL;
77781+ } else
77782+#endif
77783+
77784+ if (end > TASK_SIZE)
77785+ return -EINVAL;
77786+
77787 if (!arch_validate_prot(prot))
77788 return -EINVAL;
77789
77790@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77791 /*
77792 * Does the application expect PROT_READ to imply PROT_EXEC:
77793 */
77794- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77795+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77796 prot |= PROT_EXEC;
77797
77798 vm_flags = calc_vm_prot_bits(prot);
77799@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77800 if (start > vma->vm_start)
77801 prev = vma;
77802
77803+#ifdef CONFIG_PAX_MPROTECT
77804+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
77805+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
77806+#endif
77807+
77808 for (nstart = start ; ; ) {
77809 unsigned long newflags;
77810
77811@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77812
77813 /* newflags >> 4 shift VM_MAY% in place of VM_% */
77814 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
77815+ if (prot & (PROT_WRITE | PROT_EXEC))
77816+ gr_log_rwxmprotect(vma->vm_file);
77817+
77818+ error = -EACCES;
77819+ goto out;
77820+ }
77821+
77822+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
77823 error = -EACCES;
77824 goto out;
77825 }
77826@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77827 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
77828 if (error)
77829 goto out;
77830+
77831+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
77832+
77833 nstart = tmp;
77834
77835 if (nstart < prev->vm_end)
77836diff --git a/mm/mremap.c b/mm/mremap.c
77837index 3e98d79..1706cec 100644
77838--- a/mm/mremap.c
77839+++ b/mm/mremap.c
77840@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
77841 continue;
77842 pte = ptep_clear_flush(vma, old_addr, old_pte);
77843 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
77844+
77845+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77846+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
77847+ pte = pte_exprotect(pte);
77848+#endif
77849+
77850 set_pte_at(mm, new_addr, new_pte, pte);
77851 }
77852
77853@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
77854 if (is_vm_hugetlb_page(vma))
77855 goto Einval;
77856
77857+#ifdef CONFIG_PAX_SEGMEXEC
77858+ if (pax_find_mirror_vma(vma))
77859+ goto Einval;
77860+#endif
77861+
77862 /* We can't remap across vm area boundaries */
77863 if (old_len > vma->vm_end - addr)
77864 goto Efault;
77865@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
77866 unsigned long ret = -EINVAL;
77867 unsigned long charged = 0;
77868 unsigned long map_flags;
77869+ unsigned long pax_task_size = TASK_SIZE;
77870
77871 if (new_addr & ~PAGE_MASK)
77872 goto out;
77873
77874- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
77875+#ifdef CONFIG_PAX_SEGMEXEC
77876+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77877+ pax_task_size = SEGMEXEC_TASK_SIZE;
77878+#endif
77879+
77880+ pax_task_size -= PAGE_SIZE;
77881+
77882+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
77883 goto out;
77884
77885 /* Check if the location we're moving into overlaps the
77886 * old location at all, and fail if it does.
77887 */
77888- if ((new_addr <= addr) && (new_addr+new_len) > addr)
77889- goto out;
77890-
77891- if ((addr <= new_addr) && (addr+old_len) > new_addr)
77892+ if (addr + old_len > new_addr && new_addr + new_len > addr)
77893 goto out;
77894
77895 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77896@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
77897 struct vm_area_struct *vma;
77898 unsigned long ret = -EINVAL;
77899 unsigned long charged = 0;
77900+ unsigned long pax_task_size = TASK_SIZE;
77901
77902 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
77903 goto out;
77904@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
77905 if (!new_len)
77906 goto out;
77907
77908+#ifdef CONFIG_PAX_SEGMEXEC
77909+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77910+ pax_task_size = SEGMEXEC_TASK_SIZE;
77911+#endif
77912+
77913+ pax_task_size -= PAGE_SIZE;
77914+
77915+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
77916+ old_len > pax_task_size || addr > pax_task_size-old_len)
77917+ goto out;
77918+
77919 if (flags & MREMAP_FIXED) {
77920 if (flags & MREMAP_MAYMOVE)
77921 ret = mremap_to(addr, old_len, new_addr, new_len);
77922@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
77923 addr + new_len);
77924 }
77925 ret = addr;
77926+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
77927 goto out;
77928 }
77929 }
77930@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
77931 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77932 if (ret)
77933 goto out;
77934+
77935+ map_flags = vma->vm_flags;
77936 ret = move_vma(vma, addr, old_len, new_len, new_addr);
77937+ if (!(ret & ~PAGE_MASK)) {
77938+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
77939+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
77940+ }
77941 }
77942 out:
77943 if (ret & ~PAGE_MASK)
77944diff --git a/mm/nommu.c b/mm/nommu.c
77945index 406e8d4..53970d3 100644
77946--- a/mm/nommu.c
77947+++ b/mm/nommu.c
77948@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77949 int sysctl_overcommit_ratio = 50; /* default is 50% */
77950 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
77951 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
77952-int heap_stack_gap = 0;
77953
77954 atomic_long_t mmap_pages_allocated;
77955
77956@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
77957 EXPORT_SYMBOL(find_vma);
77958
77959 /*
77960- * find a VMA
77961- * - we don't extend stack VMAs under NOMMU conditions
77962- */
77963-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
77964-{
77965- return find_vma(mm, addr);
77966-}
77967-
77968-/*
77969 * expand a stack to a given address
77970 * - not supported under NOMMU conditions
77971 */
77972diff --git a/mm/page_alloc.c b/mm/page_alloc.c
77973index 3ecab7e..594a471 100644
77974--- a/mm/page_alloc.c
77975+++ b/mm/page_alloc.c
77976@@ -289,7 +289,7 @@ out:
77977 * This usage means that zero-order pages may not be compound.
77978 */
77979
77980-static void free_compound_page(struct page *page)
77981+void free_compound_page(struct page *page)
77982 {
77983 __free_pages_ok(page, compound_order(page));
77984 }
77985@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77986 int bad = 0;
77987 int wasMlocked = __TestClearPageMlocked(page);
77988
77989+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77990+ unsigned long index = 1UL << order;
77991+#endif
77992+
77993 kmemcheck_free_shadow(page, order);
77994
77995 for (i = 0 ; i < (1 << order) ; ++i)
77996@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77997 debug_check_no_obj_freed(page_address(page),
77998 PAGE_SIZE << order);
77999 }
78000+
78001+#ifdef CONFIG_PAX_MEMORY_SANITIZE
78002+ for (; index; --index)
78003+ sanitize_highpage(page + index - 1);
78004+#endif
78005+
78006 arch_free_page(page, order);
78007 kernel_map_pages(page, 1 << order, 0);
78008
78009@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
78010 arch_alloc_page(page, order);
78011 kernel_map_pages(page, 1 << order, 1);
78012
78013+#ifndef CONFIG_PAX_MEMORY_SANITIZE
78014 if (gfp_flags & __GFP_ZERO)
78015 prep_zero_page(page, order, gfp_flags);
78016+#endif
78017
78018 if (order && (gfp_flags & __GFP_COMP))
78019 prep_compound_page(page, order);
78020@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
78021 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
78022 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
78023 }
78024+
78025+#ifdef CONFIG_PAX_MEMORY_SANITIZE
78026+ sanitize_highpage(page);
78027+#endif
78028+
78029 arch_free_page(page, 0);
78030 kernel_map_pages(page, 1, 0);
78031
78032@@ -2179,6 +2196,8 @@ void show_free_areas(void)
78033 int cpu;
78034 struct zone *zone;
78035
78036+ pax_track_stack();
78037+
78038 for_each_populated_zone(zone) {
78039 show_node(zone);
78040 printk("%s per-cpu:\n", zone->name);
78041@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
78042 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
78043 }
78044 #else
78045-static void inline setup_usemap(struct pglist_data *pgdat,
78046+static inline void setup_usemap(struct pglist_data *pgdat,
78047 struct zone *zone, unsigned long zonesize) {}
78048 #endif /* CONFIG_SPARSEMEM */
78049
78050diff --git a/mm/percpu.c b/mm/percpu.c
78051index 3bfd6e2..60404b9 100644
78052--- a/mm/percpu.c
78053+++ b/mm/percpu.c
78054@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
78055 static unsigned int pcpu_last_unit_cpu __read_mostly;
78056
78057 /* the address of the first chunk which starts with the kernel static area */
78058-void *pcpu_base_addr __read_mostly;
78059+void *pcpu_base_addr __read_only;
78060 EXPORT_SYMBOL_GPL(pcpu_base_addr);
78061
78062 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
78063diff --git a/mm/rmap.c b/mm/rmap.c
78064index dd43373..d848cd7 100644
78065--- a/mm/rmap.c
78066+++ b/mm/rmap.c
78067@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78068 /* page_table_lock to protect against threads */
78069 spin_lock(&mm->page_table_lock);
78070 if (likely(!vma->anon_vma)) {
78071+
78072+#ifdef CONFIG_PAX_SEGMEXEC
78073+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
78074+
78075+ if (vma_m) {
78076+ BUG_ON(vma_m->anon_vma);
78077+ vma_m->anon_vma = anon_vma;
78078+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
78079+ }
78080+#endif
78081+
78082 vma->anon_vma = anon_vma;
78083 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
78084 allocated = NULL;
78085diff --git a/mm/shmem.c b/mm/shmem.c
78086index 3e0005b..1d659a8 100644
78087--- a/mm/shmem.c
78088+++ b/mm/shmem.c
78089@@ -31,7 +31,7 @@
78090 #include <linux/swap.h>
78091 #include <linux/ima.h>
78092
78093-static struct vfsmount *shm_mnt;
78094+struct vfsmount *shm_mnt;
78095
78096 #ifdef CONFIG_SHMEM
78097 /*
78098@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
78099 goto unlock;
78100 }
78101 entry = shmem_swp_entry(info, index, NULL);
78102+ if (!entry)
78103+ goto unlock;
78104 if (entry->val) {
78105 /*
78106 * The more uptodate page coming down from a stacked
78107@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
78108 struct vm_area_struct pvma;
78109 struct page *page;
78110
78111+ pax_track_stack();
78112+
78113 spol = mpol_cond_copy(&mpol,
78114 mpol_shared_policy_lookup(&info->policy, idx));
78115
78116@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
78117
78118 info = SHMEM_I(inode);
78119 inode->i_size = len-1;
78120- if (len <= (char *)inode - (char *)info) {
78121+ if (len <= (char *)inode - (char *)info && len <= 64) {
78122 /* do it inline */
78123 memcpy(info, symname, len);
78124 inode->i_op = &shmem_symlink_inline_operations;
78125@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
78126 int err = -ENOMEM;
78127
78128 /* Round up to L1_CACHE_BYTES to resist false sharing */
78129- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
78130- L1_CACHE_BYTES), GFP_KERNEL);
78131+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
78132 if (!sbinfo)
78133 return -ENOMEM;
78134
78135diff --git a/mm/slab.c b/mm/slab.c
78136index c8d466a..909e01e 100644
78137--- a/mm/slab.c
78138+++ b/mm/slab.c
78139@@ -174,7 +174,7 @@
78140
78141 /* Legal flag mask for kmem_cache_create(). */
78142 #if DEBUG
78143-# define CREATE_MASK (SLAB_RED_ZONE | \
78144+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
78145 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
78146 SLAB_CACHE_DMA | \
78147 SLAB_STORE_USER | \
78148@@ -182,7 +182,7 @@
78149 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78150 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
78151 #else
78152-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
78153+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
78154 SLAB_CACHE_DMA | \
78155 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
78156 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78157@@ -308,7 +308,7 @@ struct kmem_list3 {
78158 * Need this for bootstrapping a per node allocator.
78159 */
78160 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78161-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78162+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78163 #define CACHE_CACHE 0
78164 #define SIZE_AC MAX_NUMNODES
78165 #define SIZE_L3 (2 * MAX_NUMNODES)
78166@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78167 if ((x)->max_freeable < i) \
78168 (x)->max_freeable = i; \
78169 } while (0)
78170-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78171-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78172-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78173-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78174+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78175+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78176+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78177+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78178 #else
78179 #define STATS_INC_ACTIVE(x) do { } while (0)
78180 #define STATS_DEC_ACTIVE(x) do { } while (0)
78181@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78182 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78183 */
78184 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78185- const struct slab *slab, void *obj)
78186+ const struct slab *slab, const void *obj)
78187 {
78188 u32 offset = (obj - slab->s_mem);
78189 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78190@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
78191 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
78192 sizes[INDEX_AC].cs_size,
78193 ARCH_KMALLOC_MINALIGN,
78194- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78195+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78196 NULL);
78197
78198 if (INDEX_AC != INDEX_L3) {
78199@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
78200 kmem_cache_create(names[INDEX_L3].name,
78201 sizes[INDEX_L3].cs_size,
78202 ARCH_KMALLOC_MINALIGN,
78203- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78204+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78205 NULL);
78206 }
78207
78208@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
78209 sizes->cs_cachep = kmem_cache_create(names->name,
78210 sizes->cs_size,
78211 ARCH_KMALLOC_MINALIGN,
78212- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78213+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78214 NULL);
78215 }
78216 #ifdef CONFIG_ZONE_DMA
78217@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
78218 }
78219 /* cpu stats */
78220 {
78221- unsigned long allochit = atomic_read(&cachep->allochit);
78222- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78223- unsigned long freehit = atomic_read(&cachep->freehit);
78224- unsigned long freemiss = atomic_read(&cachep->freemiss);
78225+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78226+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78227+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78228+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78229
78230 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78231 allochit, allocmiss, freehit, freemiss);
78232@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
78233
78234 static int __init slab_proc_init(void)
78235 {
78236- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
78237+ mode_t gr_mode = S_IRUGO;
78238+
78239+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78240+ gr_mode = S_IRUSR;
78241+#endif
78242+
78243+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
78244 #ifdef CONFIG_DEBUG_SLAB_LEAK
78245- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
78246+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
78247 #endif
78248 return 0;
78249 }
78250 module_init(slab_proc_init);
78251 #endif
78252
78253+void check_object_size(const void *ptr, unsigned long n, bool to)
78254+{
78255+
78256+#ifdef CONFIG_PAX_USERCOPY
78257+ struct page *page;
78258+ struct kmem_cache *cachep = NULL;
78259+ struct slab *slabp;
78260+ unsigned int objnr;
78261+ unsigned long offset;
78262+ const char *type;
78263+
78264+ if (!n)
78265+ return;
78266+
78267+ type = "<null>";
78268+ if (ZERO_OR_NULL_PTR(ptr))
78269+ goto report;
78270+
78271+ if (!virt_addr_valid(ptr))
78272+ return;
78273+
78274+ page = virt_to_head_page(ptr);
78275+
78276+ type = "<process stack>";
78277+ if (!PageSlab(page)) {
78278+ if (object_is_on_stack(ptr, n) == -1)
78279+ goto report;
78280+ return;
78281+ }
78282+
78283+ cachep = page_get_cache(page);
78284+ type = cachep->name;
78285+ if (!(cachep->flags & SLAB_USERCOPY))
78286+ goto report;
78287+
78288+ slabp = page_get_slab(page);
78289+ objnr = obj_to_index(cachep, slabp, ptr);
78290+ BUG_ON(objnr >= cachep->num);
78291+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
78292+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
78293+ return;
78294+
78295+report:
78296+ pax_report_usercopy(ptr, n, to, type);
78297+#endif
78298+
78299+}
78300+EXPORT_SYMBOL(check_object_size);
78301+
78302 /**
78303 * ksize - get the actual amount of memory allocated for a given object
78304 * @objp: Pointer to the object
78305diff --git a/mm/slob.c b/mm/slob.c
78306index 837ebd6..4712174 100644
78307--- a/mm/slob.c
78308+++ b/mm/slob.c
78309@@ -29,7 +29,7 @@
78310 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
78311 * alloc_pages() directly, allocating compound pages so the page order
78312 * does not have to be separately tracked, and also stores the exact
78313- * allocation size in page->private so that it can be used to accurately
78314+ * allocation size in slob_page->size so that it can be used to accurately
78315 * provide ksize(). These objects are detected in kfree() because slob_page()
78316 * is false for them.
78317 *
78318@@ -58,6 +58,7 @@
78319 */
78320
78321 #include <linux/kernel.h>
78322+#include <linux/sched.h>
78323 #include <linux/slab.h>
78324 #include <linux/mm.h>
78325 #include <linux/swap.h> /* struct reclaim_state */
78326@@ -100,7 +101,8 @@ struct slob_page {
78327 unsigned long flags; /* mandatory */
78328 atomic_t _count; /* mandatory */
78329 slobidx_t units; /* free units left in page */
78330- unsigned long pad[2];
78331+ unsigned long pad[1];
78332+ unsigned long size; /* size when >=PAGE_SIZE */
78333 slob_t *free; /* first free slob_t in page */
78334 struct list_head list; /* linked list of free pages */
78335 };
78336@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
78337 */
78338 static inline int is_slob_page(struct slob_page *sp)
78339 {
78340- return PageSlab((struct page *)sp);
78341+ return PageSlab((struct page *)sp) && !sp->size;
78342 }
78343
78344 static inline void set_slob_page(struct slob_page *sp)
78345@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
78346
78347 static inline struct slob_page *slob_page(const void *addr)
78348 {
78349- return (struct slob_page *)virt_to_page(addr);
78350+ return (struct slob_page *)virt_to_head_page(addr);
78351 }
78352
78353 /*
78354@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
78355 /*
78356 * Return the size of a slob block.
78357 */
78358-static slobidx_t slob_units(slob_t *s)
78359+static slobidx_t slob_units(const slob_t *s)
78360 {
78361 if (s->units > 0)
78362 return s->units;
78363@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
78364 /*
78365 * Return the next free slob block pointer after this one.
78366 */
78367-static slob_t *slob_next(slob_t *s)
78368+static slob_t *slob_next(const slob_t *s)
78369 {
78370 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
78371 slobidx_t next;
78372@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
78373 /*
78374 * Returns true if s is the last free block in its page.
78375 */
78376-static int slob_last(slob_t *s)
78377+static int slob_last(const slob_t *s)
78378 {
78379 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
78380 }
78381@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
78382 if (!page)
78383 return NULL;
78384
78385+ set_slob_page(page);
78386 return page_address(page);
78387 }
78388
78389@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
78390 if (!b)
78391 return NULL;
78392 sp = slob_page(b);
78393- set_slob_page(sp);
78394
78395 spin_lock_irqsave(&slob_lock, flags);
78396 sp->units = SLOB_UNITS(PAGE_SIZE);
78397 sp->free = b;
78398+ sp->size = 0;
78399 INIT_LIST_HEAD(&sp->list);
78400 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
78401 set_slob_page_free(sp, slob_list);
78402@@ -475,10 +478,9 @@ out:
78403 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
78404 #endif
78405
78406-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78407+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
78408 {
78409- unsigned int *m;
78410- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78411+ slob_t *m;
78412 void *ret;
78413
78414 lockdep_trace_alloc(gfp);
78415@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78416
78417 if (!m)
78418 return NULL;
78419- *m = size;
78420+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
78421+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
78422+ m[0].units = size;
78423+ m[1].units = align;
78424 ret = (void *)m + align;
78425
78426 trace_kmalloc_node(_RET_IP_, ret,
78427@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78428
78429 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
78430 if (ret) {
78431- struct page *page;
78432- page = virt_to_page(ret);
78433- page->private = size;
78434+ struct slob_page *sp;
78435+ sp = slob_page(ret);
78436+ sp->size = size;
78437 }
78438
78439 trace_kmalloc_node(_RET_IP_, ret,
78440 size, PAGE_SIZE << order, gfp, node);
78441 }
78442
78443- kmemleak_alloc(ret, size, 1, gfp);
78444+ return ret;
78445+}
78446+
78447+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78448+{
78449+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78450+ void *ret = __kmalloc_node_align(size, gfp, node, align);
78451+
78452+ if (!ZERO_OR_NULL_PTR(ret))
78453+ kmemleak_alloc(ret, size, 1, gfp);
78454 return ret;
78455 }
78456 EXPORT_SYMBOL(__kmalloc_node);
78457@@ -528,13 +542,92 @@ void kfree(const void *block)
78458 sp = slob_page(block);
78459 if (is_slob_page(sp)) {
78460 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78461- unsigned int *m = (unsigned int *)(block - align);
78462- slob_free(m, *m + align);
78463- } else
78464+ slob_t *m = (slob_t *)(block - align);
78465+ slob_free(m, m[0].units + align);
78466+ } else {
78467+ clear_slob_page(sp);
78468+ free_slob_page(sp);
78469+ sp->size = 0;
78470 put_page(&sp->page);
78471+ }
78472 }
78473 EXPORT_SYMBOL(kfree);
78474
78475+void check_object_size(const void *ptr, unsigned long n, bool to)
78476+{
78477+
78478+#ifdef CONFIG_PAX_USERCOPY
78479+ struct slob_page *sp;
78480+ const slob_t *free;
78481+ const void *base;
78482+ unsigned long flags;
78483+ const char *type;
78484+
78485+ if (!n)
78486+ return;
78487+
78488+ type = "<null>";
78489+ if (ZERO_OR_NULL_PTR(ptr))
78490+ goto report;
78491+
78492+ if (!virt_addr_valid(ptr))
78493+ return;
78494+
78495+ type = "<process stack>";
78496+ sp = slob_page(ptr);
78497+ if (!PageSlab((struct page*)sp)) {
78498+ if (object_is_on_stack(ptr, n) == -1)
78499+ goto report;
78500+ return;
78501+ }
78502+
78503+ type = "<slob>";
78504+ if (sp->size) {
78505+ base = page_address(&sp->page);
78506+ if (base <= ptr && n <= sp->size - (ptr - base))
78507+ return;
78508+ goto report;
78509+ }
78510+
78511+ /* some tricky double walking to find the chunk */
78512+ spin_lock_irqsave(&slob_lock, flags);
78513+ base = (void *)((unsigned long)ptr & PAGE_MASK);
78514+ free = sp->free;
78515+
78516+ while (!slob_last(free) && (void *)free <= ptr) {
78517+ base = free + slob_units(free);
78518+ free = slob_next(free);
78519+ }
78520+
78521+ while (base < (void *)free) {
78522+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
78523+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
78524+ int offset;
78525+
78526+ if (ptr < base + align)
78527+ break;
78528+
78529+ offset = ptr - base - align;
78530+ if (offset >= m) {
78531+ base += size;
78532+ continue;
78533+ }
78534+
78535+ if (n > m - offset)
78536+ break;
78537+
78538+ spin_unlock_irqrestore(&slob_lock, flags);
78539+ return;
78540+ }
78541+
78542+ spin_unlock_irqrestore(&slob_lock, flags);
78543+report:
78544+ pax_report_usercopy(ptr, n, to, type);
78545+#endif
78546+
78547+}
78548+EXPORT_SYMBOL(check_object_size);
78549+
78550 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
78551 size_t ksize(const void *block)
78552 {
78553@@ -547,10 +640,10 @@ size_t ksize(const void *block)
78554 sp = slob_page(block);
78555 if (is_slob_page(sp)) {
78556 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78557- unsigned int *m = (unsigned int *)(block - align);
78558- return SLOB_UNITS(*m) * SLOB_UNIT;
78559+ slob_t *m = (slob_t *)(block - align);
78560+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
78561 } else
78562- return sp->page.private;
78563+ return sp->size;
78564 }
78565 EXPORT_SYMBOL(ksize);
78566
78567@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78568 {
78569 struct kmem_cache *c;
78570
78571+#ifdef CONFIG_PAX_USERCOPY
78572+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
78573+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
78574+#else
78575 c = slob_alloc(sizeof(struct kmem_cache),
78576 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
78577+#endif
78578
78579 if (c) {
78580 c->name = name;
78581@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
78582 {
78583 void *b;
78584
78585+#ifdef CONFIG_PAX_USERCOPY
78586+ b = __kmalloc_node_align(c->size, flags, node, c->align);
78587+#else
78588 if (c->size < PAGE_SIZE) {
78589 b = slob_alloc(c->size, flags, c->align, node);
78590 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78591 SLOB_UNITS(c->size) * SLOB_UNIT,
78592 flags, node);
78593 } else {
78594+ struct slob_page *sp;
78595+
78596 b = slob_new_pages(flags, get_order(c->size), node);
78597+ sp = slob_page(b);
78598+ sp->size = c->size;
78599 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78600 PAGE_SIZE << get_order(c->size),
78601 flags, node);
78602 }
78603+#endif
78604
78605 if (c->ctor)
78606 c->ctor(b);
78607@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
78608
78609 static void __kmem_cache_free(void *b, int size)
78610 {
78611- if (size < PAGE_SIZE)
78612+ struct slob_page *sp = slob_page(b);
78613+
78614+ if (is_slob_page(sp))
78615 slob_free(b, size);
78616- else
78617+ else {
78618+ clear_slob_page(sp);
78619+ free_slob_page(sp);
78620+ sp->size = 0;
78621 slob_free_pages(b, get_order(size));
78622+ }
78623 }
78624
78625 static void kmem_rcu_free(struct rcu_head *head)
78626@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
78627
78628 void kmem_cache_free(struct kmem_cache *c, void *b)
78629 {
78630+ int size = c->size;
78631+
78632+#ifdef CONFIG_PAX_USERCOPY
78633+ if (size + c->align < PAGE_SIZE) {
78634+ size += c->align;
78635+ b -= c->align;
78636+ }
78637+#endif
78638+
78639 kmemleak_free_recursive(b, c->flags);
78640 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
78641 struct slob_rcu *slob_rcu;
78642- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
78643+ slob_rcu = b + (size - sizeof(struct slob_rcu));
78644 INIT_RCU_HEAD(&slob_rcu->head);
78645- slob_rcu->size = c->size;
78646+ slob_rcu->size = size;
78647 call_rcu(&slob_rcu->head, kmem_rcu_free);
78648 } else {
78649- __kmem_cache_free(b, c->size);
78650+ __kmem_cache_free(b, size);
78651 }
78652
78653+#ifdef CONFIG_PAX_USERCOPY
78654+ trace_kfree(_RET_IP_, b);
78655+#else
78656 trace_kmem_cache_free(_RET_IP_, b);
78657+#endif
78658+
78659 }
78660 EXPORT_SYMBOL(kmem_cache_free);
78661
78662diff --git a/mm/slub.c b/mm/slub.c
78663index 4996fc7..87e01d0 100644
78664--- a/mm/slub.c
78665+++ b/mm/slub.c
78666@@ -201,7 +201,7 @@ struct track {
78667
78668 enum track_item { TRACK_ALLOC, TRACK_FREE };
78669
78670-#ifdef CONFIG_SLUB_DEBUG
78671+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78672 static int sysfs_slab_add(struct kmem_cache *);
78673 static int sysfs_slab_alias(struct kmem_cache *, const char *);
78674 static void sysfs_slab_remove(struct kmem_cache *);
78675@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
78676 if (!t->addr)
78677 return;
78678
78679- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
78680+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
78681 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
78682 }
78683
78684@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
78685
78686 page = virt_to_head_page(x);
78687
78688+ BUG_ON(!PageSlab(page));
78689+
78690 slab_free(s, page, x, _RET_IP_);
78691
78692 trace_kmem_cache_free(_RET_IP_, x);
78693@@ -1937,7 +1939,7 @@ static int slub_min_objects;
78694 * Merge control. If this is set then no merging of slab caches will occur.
78695 * (Could be removed. This was introduced to pacify the merge skeptics.)
78696 */
78697-static int slub_nomerge;
78698+static int slub_nomerge = 1;
78699
78700 /*
78701 * Calculate the order of allocation given an slab object size.
78702@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
78703 * list to avoid pounding the page allocator excessively.
78704 */
78705 set_min_partial(s, ilog2(s->size));
78706- s->refcount = 1;
78707+ atomic_set(&s->refcount, 1);
78708 #ifdef CONFIG_NUMA
78709 s->remote_node_defrag_ratio = 1000;
78710 #endif
78711@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
78712 void kmem_cache_destroy(struct kmem_cache *s)
78713 {
78714 down_write(&slub_lock);
78715- s->refcount--;
78716- if (!s->refcount) {
78717+ if (atomic_dec_and_test(&s->refcount)) {
78718 list_del(&s->list);
78719 up_write(&slub_lock);
78720 if (kmem_cache_close(s)) {
78721@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
78722 __setup("slub_nomerge", setup_slub_nomerge);
78723
78724 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
78725- const char *name, int size, gfp_t gfp_flags)
78726+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
78727 {
78728- unsigned int flags = 0;
78729-
78730 if (gfp_flags & SLUB_DMA)
78731- flags = SLAB_CACHE_DMA;
78732+ flags |= SLAB_CACHE_DMA;
78733
78734 /*
78735 * This function is called with IRQs disabled during early-boot on
78736@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
78737 EXPORT_SYMBOL(__kmalloc_node);
78738 #endif
78739
78740+void check_object_size(const void *ptr, unsigned long n, bool to)
78741+{
78742+
78743+#ifdef CONFIG_PAX_USERCOPY
78744+ struct page *page;
78745+ struct kmem_cache *s = NULL;
78746+ unsigned long offset;
78747+ const char *type;
78748+
78749+ if (!n)
78750+ return;
78751+
78752+ type = "<null>";
78753+ if (ZERO_OR_NULL_PTR(ptr))
78754+ goto report;
78755+
78756+ if (!virt_addr_valid(ptr))
78757+ return;
78758+
78759+ page = get_object_page(ptr);
78760+
78761+ type = "<process stack>";
78762+ if (!page) {
78763+ if (object_is_on_stack(ptr, n) == -1)
78764+ goto report;
78765+ return;
78766+ }
78767+
78768+ s = page->slab;
78769+ type = s->name;
78770+ if (!(s->flags & SLAB_USERCOPY))
78771+ goto report;
78772+
78773+ offset = (ptr - page_address(page)) % s->size;
78774+ if (offset <= s->objsize && n <= s->objsize - offset)
78775+ return;
78776+
78777+report:
78778+ pax_report_usercopy(ptr, n, to, type);
78779+#endif
78780+
78781+}
78782+EXPORT_SYMBOL(check_object_size);
78783+
78784 size_t ksize(const void *object)
78785 {
78786 struct page *page;
78787@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
78788 * kmem_cache_open for slab_state == DOWN.
78789 */
78790 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
78791- sizeof(struct kmem_cache_node), GFP_NOWAIT);
78792- kmalloc_caches[0].refcount = -1;
78793+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
78794+ atomic_set(&kmalloc_caches[0].refcount, -1);
78795 caches++;
78796
78797 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
78798@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
78799 /* Caches that are not of the two-to-the-power-of size */
78800 if (KMALLOC_MIN_SIZE <= 32) {
78801 create_kmalloc_cache(&kmalloc_caches[1],
78802- "kmalloc-96", 96, GFP_NOWAIT);
78803+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
78804 caches++;
78805 }
78806 if (KMALLOC_MIN_SIZE <= 64) {
78807 create_kmalloc_cache(&kmalloc_caches[2],
78808- "kmalloc-192", 192, GFP_NOWAIT);
78809+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
78810 caches++;
78811 }
78812
78813 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
78814 create_kmalloc_cache(&kmalloc_caches[i],
78815- "kmalloc", 1 << i, GFP_NOWAIT);
78816+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
78817 caches++;
78818 }
78819
78820@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
78821 /*
78822 * We may have set a slab to be unmergeable during bootstrap.
78823 */
78824- if (s->refcount < 0)
78825+ if (atomic_read(&s->refcount) < 0)
78826 return 1;
78827
78828 return 0;
78829@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78830 if (s) {
78831 int cpu;
78832
78833- s->refcount++;
78834+ atomic_inc(&s->refcount);
78835 /*
78836 * Adjust the object sizes so that we clear
78837 * the complete object on kzalloc.
78838@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78839
78840 if (sysfs_slab_alias(s, name)) {
78841 down_write(&slub_lock);
78842- s->refcount--;
78843+ atomic_dec(&s->refcount);
78844 up_write(&slub_lock);
78845 goto err;
78846 }
78847@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
78848
78849 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
78850 {
78851- return sprintf(buf, "%d\n", s->refcount - 1);
78852+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
78853 }
78854 SLAB_ATTR_RO(aliases);
78855
78856@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
78857 kfree(s);
78858 }
78859
78860-static struct sysfs_ops slab_sysfs_ops = {
78861+static const struct sysfs_ops slab_sysfs_ops = {
78862 .show = slab_attr_show,
78863 .store = slab_attr_store,
78864 };
78865@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
78866 return 0;
78867 }
78868
78869-static struct kset_uevent_ops slab_uevent_ops = {
78870+static const struct kset_uevent_ops slab_uevent_ops = {
78871 .filter = uevent_filter,
78872 };
78873
78874@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
78875 return name;
78876 }
78877
78878+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78879 static int sysfs_slab_add(struct kmem_cache *s)
78880 {
78881 int err;
78882@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
78883 kobject_del(&s->kobj);
78884 kobject_put(&s->kobj);
78885 }
78886+#endif
78887
78888 /*
78889 * Need to buffer aliases during bootup until sysfs becomes
78890@@ -4632,6 +4677,7 @@ struct saved_alias {
78891
78892 static struct saved_alias *alias_list;
78893
78894+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78895 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78896 {
78897 struct saved_alias *al;
78898@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78899 alias_list = al;
78900 return 0;
78901 }
78902+#endif
78903
78904 static int __init slab_sysfs_init(void)
78905 {
78906@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
78907
78908 static int __init slab_proc_init(void)
78909 {
78910- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
78911+ mode_t gr_mode = S_IRUGO;
78912+
78913+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78914+ gr_mode = S_IRUSR;
78915+#endif
78916+
78917+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
78918 return 0;
78919 }
78920 module_init(slab_proc_init);
78921diff --git a/mm/swap.c b/mm/swap.c
78922index 308e57d..5de19c0 100644
78923--- a/mm/swap.c
78924+++ b/mm/swap.c
78925@@ -30,6 +30,7 @@
78926 #include <linux/notifier.h>
78927 #include <linux/backing-dev.h>
78928 #include <linux/memcontrol.h>
78929+#include <linux/hugetlb.h>
78930
78931 #include "internal.h"
78932
78933@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
78934 compound_page_dtor *dtor;
78935
78936 dtor = get_compound_page_dtor(page);
78937+ if (!PageHuge(page))
78938+ BUG_ON(dtor != free_compound_page);
78939 (*dtor)(page);
78940 }
78941 }
78942diff --git a/mm/util.c b/mm/util.c
78943index b377ce4..3a891af 100644
78944--- a/mm/util.c
78945+++ b/mm/util.c
78946@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
78947 void arch_pick_mmap_layout(struct mm_struct *mm)
78948 {
78949 mm->mmap_base = TASK_UNMAPPED_BASE;
78950+
78951+#ifdef CONFIG_PAX_RANDMMAP
78952+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78953+ mm->mmap_base += mm->delta_mmap;
78954+#endif
78955+
78956 mm->get_unmapped_area = arch_get_unmapped_area;
78957 mm->unmap_area = arch_unmap_area;
78958 }
78959diff --git a/mm/vmalloc.c b/mm/vmalloc.c
78960index f34ffd0..28e94b7 100644
78961--- a/mm/vmalloc.c
78962+++ b/mm/vmalloc.c
78963@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
78964
78965 pte = pte_offset_kernel(pmd, addr);
78966 do {
78967- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78968- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78969+
78970+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78971+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
78972+ BUG_ON(!pte_exec(*pte));
78973+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
78974+ continue;
78975+ }
78976+#endif
78977+
78978+ {
78979+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78980+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78981+ }
78982 } while (pte++, addr += PAGE_SIZE, addr != end);
78983 }
78984
78985@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78986 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
78987 {
78988 pte_t *pte;
78989+ int ret = -ENOMEM;
78990
78991 /*
78992 * nr is a running index into the array which helps higher level
78993@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78994 pte = pte_alloc_kernel(pmd, addr);
78995 if (!pte)
78996 return -ENOMEM;
78997+
78998+ pax_open_kernel();
78999 do {
79000 struct page *page = pages[*nr];
79001
79002- if (WARN_ON(!pte_none(*pte)))
79003- return -EBUSY;
79004- if (WARN_ON(!page))
79005- return -ENOMEM;
79006+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79007+ if (!(pgprot_val(prot) & _PAGE_NX))
79008+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
79009+ else
79010+#endif
79011+
79012+ if (WARN_ON(!pte_none(*pte))) {
79013+ ret = -EBUSY;
79014+ goto out;
79015+ }
79016+ if (WARN_ON(!page)) {
79017+ ret = -ENOMEM;
79018+ goto out;
79019+ }
79020 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
79021 (*nr)++;
79022 } while (pte++, addr += PAGE_SIZE, addr != end);
79023- return 0;
79024+ ret = 0;
79025+out:
79026+ pax_close_kernel();
79027+ return ret;
79028 }
79029
79030 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
79031@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
79032 * and fall back on vmalloc() if that fails. Others
79033 * just put it in the vmalloc space.
79034 */
79035-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
79036+#ifdef CONFIG_MODULES
79037+#ifdef MODULES_VADDR
79038 unsigned long addr = (unsigned long)x;
79039 if (addr >= MODULES_VADDR && addr < MODULES_END)
79040 return 1;
79041 #endif
79042+
79043+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79044+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
79045+ return 1;
79046+#endif
79047+
79048+#endif
79049+
79050 return is_vmalloc_addr(x);
79051 }
79052
79053@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
79054
79055 if (!pgd_none(*pgd)) {
79056 pud_t *pud = pud_offset(pgd, addr);
79057+#ifdef CONFIG_X86
79058+ if (!pud_large(*pud))
79059+#endif
79060 if (!pud_none(*pud)) {
79061 pmd_t *pmd = pmd_offset(pud, addr);
79062+#ifdef CONFIG_X86
79063+ if (!pmd_large(*pmd))
79064+#endif
79065 if (!pmd_none(*pmd)) {
79066 pte_t *ptep, pte;
79067
79068@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
79069 struct rb_node *tmp;
79070
79071 while (*p) {
79072- struct vmap_area *tmp;
79073+ struct vmap_area *varea;
79074
79075 parent = *p;
79076- tmp = rb_entry(parent, struct vmap_area, rb_node);
79077- if (va->va_start < tmp->va_end)
79078+ varea = rb_entry(parent, struct vmap_area, rb_node);
79079+ if (va->va_start < varea->va_end)
79080 p = &(*p)->rb_left;
79081- else if (va->va_end > tmp->va_start)
79082+ else if (va->va_end > varea->va_start)
79083 p = &(*p)->rb_right;
79084 else
79085 BUG();
79086@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
79087 struct vm_struct *area;
79088
79089 BUG_ON(in_interrupt());
79090+
79091+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79092+ if (flags & VM_KERNEXEC) {
79093+ if (start != VMALLOC_START || end != VMALLOC_END)
79094+ return NULL;
79095+ start = (unsigned long)MODULES_EXEC_VADDR;
79096+ end = (unsigned long)MODULES_EXEC_END;
79097+ }
79098+#endif
79099+
79100 if (flags & VM_IOREMAP) {
79101 int bit = fls(size);
79102
79103@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
79104 if (count > totalram_pages)
79105 return NULL;
79106
79107+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79108+ if (!(pgprot_val(prot) & _PAGE_NX))
79109+ flags |= VM_KERNEXEC;
79110+#endif
79111+
79112 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
79113 __builtin_return_address(0));
79114 if (!area)
79115@@ -1594,6 +1651,13 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79116 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
79117 return NULL;
79118
79119+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79120+ if (!(pgprot_val(prot) & _PAGE_NX))
79121+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
79122+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
79123+ else
79124+#endif
79125+
79126 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
79127 VMALLOC_START, VMALLOC_END, node,
79128 gfp_mask, caller);
79129@@ -1619,6 +1683,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79130 return addr;
79131 }
79132
79133+#undef __vmalloc
79134 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
79135 {
79136 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
79137@@ -1635,6 +1700,7 @@ EXPORT_SYMBOL(__vmalloc);
79138 * For tight control over page level allocator and protection flags
79139 * use __vmalloc() instead.
79140 */
79141+#undef vmalloc
79142 void *vmalloc(unsigned long size)
79143 {
79144 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79145@@ -1649,6 +1715,7 @@ EXPORT_SYMBOL(vmalloc);
79146 * The resulting memory area is zeroed so it can be mapped to userspace
79147 * without leaking data.
79148 */
79149+#undef vmalloc_user
79150 void *vmalloc_user(unsigned long size)
79151 {
79152 struct vm_struct *area;
79153@@ -1676,6 +1743,7 @@ EXPORT_SYMBOL(vmalloc_user);
79154 * For tight control over page level allocator and protection flags
79155 * use __vmalloc() instead.
79156 */
79157+#undef vmalloc_node
79158 void *vmalloc_node(unsigned long size, int node)
79159 {
79160 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79161@@ -1698,10 +1766,10 @@ EXPORT_SYMBOL(vmalloc_node);
79162 * For tight control over page level allocator and protection flags
79163 * use __vmalloc() instead.
79164 */
79165-
79166+#undef vmalloc_exec
79167 void *vmalloc_exec(unsigned long size)
79168 {
79169- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79170+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79171 -1, __builtin_return_address(0));
79172 }
79173
79174@@ -1720,6 +1788,7 @@ void *vmalloc_exec(unsigned long size)
79175 * Allocate enough 32bit PA addressable pages to cover @size from the
79176 * page level allocator and map them into contiguous kernel virtual space.
79177 */
79178+#undef vmalloc_32
79179 void *vmalloc_32(unsigned long size)
79180 {
79181 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
79182@@ -1734,6 +1803,7 @@ EXPORT_SYMBOL(vmalloc_32);
79183 * The resulting memory area is 32bit addressable and zeroed so it can be
79184 * mapped to userspace without leaking data.
79185 */
79186+#undef vmalloc_32_user
79187 void *vmalloc_32_user(unsigned long size)
79188 {
79189 struct vm_struct *area;
79190@@ -1998,6 +2068,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79191 unsigned long uaddr = vma->vm_start;
79192 unsigned long usize = vma->vm_end - vma->vm_start;
79193
79194+ BUG_ON(vma->vm_mirror);
79195+
79196 if ((PAGE_SIZE-1) & (unsigned long)addr)
79197 return -EINVAL;
79198
79199diff --git a/mm/vmstat.c b/mm/vmstat.c
79200index 42d76c6..5643dc4 100644
79201--- a/mm/vmstat.c
79202+++ b/mm/vmstat.c
79203@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
79204 *
79205 * vm_stat contains the global counters
79206 */
79207-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79208+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79209 EXPORT_SYMBOL(vm_stat);
79210
79211 #ifdef CONFIG_SMP
79212@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
79213 v = p->vm_stat_diff[i];
79214 p->vm_stat_diff[i] = 0;
79215 local_irq_restore(flags);
79216- atomic_long_add(v, &zone->vm_stat[i]);
79217+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79218 global_diff[i] += v;
79219 #ifdef CONFIG_NUMA
79220 /* 3 seconds idle till flush */
79221@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
79222
79223 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79224 if (global_diff[i])
79225- atomic_long_add(global_diff[i], &vm_stat[i]);
79226+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79227 }
79228
79229 #endif
79230@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
79231 start_cpu_timer(cpu);
79232 #endif
79233 #ifdef CONFIG_PROC_FS
79234- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79235- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79236- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79237- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
79238+ {
79239+ mode_t gr_mode = S_IRUGO;
79240+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79241+ gr_mode = S_IRUSR;
79242+#endif
79243+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
79244+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
79245+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79246+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
79247+#else
79248+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
79249+#endif
79250+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
79251+ }
79252 #endif
79253 return 0;
79254 }
79255diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
79256index a29c5ab..6143f20 100644
79257--- a/net/8021q/vlan.c
79258+++ b/net/8021q/vlan.c
79259@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
79260 err = -EPERM;
79261 if (!capable(CAP_NET_ADMIN))
79262 break;
79263- if ((args.u.name_type >= 0) &&
79264- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
79265+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
79266 struct vlan_net *vn;
79267
79268 vn = net_generic(net, vlan_net_id);
79269diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
79270index a2d2984..f9eb711 100644
79271--- a/net/9p/trans_fd.c
79272+++ b/net/9p/trans_fd.c
79273@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
79274 oldfs = get_fs();
79275 set_fs(get_ds());
79276 /* The cast to a user pointer is valid due to the set_fs() */
79277- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
79278+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
79279 set_fs(oldfs);
79280
79281 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
79282diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
79283index 02cc7e7..4514f1b 100644
79284--- a/net/atm/atm_misc.c
79285+++ b/net/atm/atm_misc.c
79286@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
79287 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
79288 return 1;
79289 atm_return(vcc,truesize);
79290- atomic_inc(&vcc->stats->rx_drop);
79291+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79292 return 0;
79293 }
79294
79295@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
79296 }
79297 }
79298 atm_return(vcc,guess);
79299- atomic_inc(&vcc->stats->rx_drop);
79300+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79301 return NULL;
79302 }
79303
79304@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
79305
79306 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79307 {
79308-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79309+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79310 __SONET_ITEMS
79311 #undef __HANDLE_ITEM
79312 }
79313@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79314
79315 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79316 {
79317-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
79318+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
79319 __SONET_ITEMS
79320 #undef __HANDLE_ITEM
79321 }
79322diff --git a/net/atm/lec.h b/net/atm/lec.h
79323index 9d14d19..5c145f3 100644
79324--- a/net/atm/lec.h
79325+++ b/net/atm/lec.h
79326@@ -48,7 +48,7 @@ struct lane2_ops {
79327 const u8 *tlvs, u32 sizeoftlvs);
79328 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
79329 const u8 *tlvs, u32 sizeoftlvs);
79330-};
79331+} __no_const;
79332
79333 /*
79334 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
79335diff --git a/net/atm/mpc.h b/net/atm/mpc.h
79336index 0919a88..a23d54e 100644
79337--- a/net/atm/mpc.h
79338+++ b/net/atm/mpc.h
79339@@ -33,7 +33,7 @@ struct mpoa_client {
79340 struct mpc_parameters parameters; /* parameters for this client */
79341
79342 const struct net_device_ops *old_ops;
79343- struct net_device_ops new_ops;
79344+ net_device_ops_no_const new_ops;
79345 };
79346
79347
79348diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
79349index 4504a4b..1733f1e 100644
79350--- a/net/atm/mpoa_caches.c
79351+++ b/net/atm/mpoa_caches.c
79352@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
79353 struct timeval now;
79354 struct k_message msg;
79355
79356+ pax_track_stack();
79357+
79358 do_gettimeofday(&now);
79359
79360 write_lock_irq(&client->egress_lock);
79361diff --git a/net/atm/proc.c b/net/atm/proc.c
79362index ab8419a..aa91497 100644
79363--- a/net/atm/proc.c
79364+++ b/net/atm/proc.c
79365@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
79366 const struct k_atm_aal_stats *stats)
79367 {
79368 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
79369- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
79370- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
79371- atomic_read(&stats->rx_drop));
79372+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
79373+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
79374+ atomic_read_unchecked(&stats->rx_drop));
79375 }
79376
79377 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
79378@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
79379 {
79380 struct sock *sk = sk_atm(vcc);
79381
79382+#ifdef CONFIG_GRKERNSEC_HIDESYM
79383+ seq_printf(seq, "%p ", NULL);
79384+#else
79385 seq_printf(seq, "%p ", vcc);
79386+#endif
79387+
79388 if (!vcc->dev)
79389 seq_printf(seq, "Unassigned ");
79390 else
79391@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
79392 {
79393 if (!vcc->dev)
79394 seq_printf(seq, sizeof(void *) == 4 ?
79395+#ifdef CONFIG_GRKERNSEC_HIDESYM
79396+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
79397+#else
79398 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
79399+#endif
79400 else
79401 seq_printf(seq, "%3d %3d %5d ",
79402 vcc->dev->number, vcc->vpi, vcc->vci);
79403diff --git a/net/atm/resources.c b/net/atm/resources.c
79404index 56b7322..c48b84e 100644
79405--- a/net/atm/resources.c
79406+++ b/net/atm/resources.c
79407@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
79408 static void copy_aal_stats(struct k_atm_aal_stats *from,
79409 struct atm_aal_stats *to)
79410 {
79411-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79412+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79413 __AAL_STAT_ITEMS
79414 #undef __HANDLE_ITEM
79415 }
79416@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
79417 static void subtract_aal_stats(struct k_atm_aal_stats *from,
79418 struct atm_aal_stats *to)
79419 {
79420-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79421+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
79422 __AAL_STAT_ITEMS
79423 #undef __HANDLE_ITEM
79424 }
79425diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
79426index 8567d47..bba2292 100644
79427--- a/net/bridge/br_private.h
79428+++ b/net/bridge/br_private.h
79429@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
79430
79431 #ifdef CONFIG_SYSFS
79432 /* br_sysfs_if.c */
79433-extern struct sysfs_ops brport_sysfs_ops;
79434+extern const struct sysfs_ops brport_sysfs_ops;
79435 extern int br_sysfs_addif(struct net_bridge_port *p);
79436
79437 /* br_sysfs_br.c */
79438diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
79439index 9a52ac5..c97538e 100644
79440--- a/net/bridge/br_stp_if.c
79441+++ b/net/bridge/br_stp_if.c
79442@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
79443 char *envp[] = { NULL };
79444
79445 if (br->stp_enabled == BR_USER_STP) {
79446- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
79447+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
79448 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
79449 br->dev->name, r);
79450
79451diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
79452index 820643a..ce77fb3 100644
79453--- a/net/bridge/br_sysfs_if.c
79454+++ b/net/bridge/br_sysfs_if.c
79455@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
79456 return ret;
79457 }
79458
79459-struct sysfs_ops brport_sysfs_ops = {
79460+const struct sysfs_ops brport_sysfs_ops = {
79461 .show = brport_show,
79462 .store = brport_store,
79463 };
79464diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
79465index d73d47f..72df42a 100644
79466--- a/net/bridge/netfilter/ebtables.c
79467+++ b/net/bridge/netfilter/ebtables.c
79468@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
79469 unsigned int entries_size, nentries;
79470 char *entries;
79471
79472+ pax_track_stack();
79473+
79474 if (cmd == EBT_SO_GET_ENTRIES) {
79475 entries_size = t->private->entries_size;
79476 nentries = t->private->nentries;
79477diff --git a/net/can/bcm.c b/net/can/bcm.c
79478index 2ffd2e0..72a7486 100644
79479--- a/net/can/bcm.c
79480+++ b/net/can/bcm.c
79481@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
79482 struct bcm_sock *bo = bcm_sk(sk);
79483 struct bcm_op *op;
79484
79485+#ifdef CONFIG_GRKERNSEC_HIDESYM
79486+ seq_printf(m, ">>> socket %p", NULL);
79487+ seq_printf(m, " / sk %p", NULL);
79488+ seq_printf(m, " / bo %p", NULL);
79489+#else
79490 seq_printf(m, ">>> socket %p", sk->sk_socket);
79491 seq_printf(m, " / sk %p", sk);
79492 seq_printf(m, " / bo %p", bo);
79493+#endif
79494 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
79495 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
79496 seq_printf(m, " <<<\n");
79497diff --git a/net/compat.c b/net/compat.c
79498index 9559afc..ccd74e1 100644
79499--- a/net/compat.c
79500+++ b/net/compat.c
79501@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79502 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
79503 __get_user(kmsg->msg_flags, &umsg->msg_flags))
79504 return -EFAULT;
79505- kmsg->msg_name = compat_ptr(tmp1);
79506- kmsg->msg_iov = compat_ptr(tmp2);
79507- kmsg->msg_control = compat_ptr(tmp3);
79508+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
79509+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
79510+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
79511 return 0;
79512 }
79513
79514@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79515 kern_msg->msg_name = NULL;
79516
79517 tot_len = iov_from_user_compat_to_kern(kern_iov,
79518- (struct compat_iovec __user *)kern_msg->msg_iov,
79519+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
79520 kern_msg->msg_iovlen);
79521 if (tot_len >= 0)
79522 kern_msg->msg_iov = kern_iov;
79523@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79524
79525 #define CMSG_COMPAT_FIRSTHDR(msg) \
79526 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
79527- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
79528+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
79529 (struct compat_cmsghdr __user *)NULL)
79530
79531 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
79532 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
79533 (ucmlen) <= (unsigned long) \
79534 ((mhdr)->msg_controllen - \
79535- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
79536+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
79537
79538 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
79539 struct compat_cmsghdr __user *cmsg, int cmsg_len)
79540 {
79541 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
79542- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
79543+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
79544 msg->msg_controllen)
79545 return NULL;
79546 return (struct compat_cmsghdr __user *)ptr;
79547@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79548 {
79549 struct compat_timeval ctv;
79550 struct compat_timespec cts[3];
79551- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79552+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79553 struct compat_cmsghdr cmhdr;
79554 int cmlen;
79555
79556@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79557
79558 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
79559 {
79560- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79561+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79562 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
79563 int fdnum = scm->fp->count;
79564 struct file **fp = scm->fp->fp;
79565@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
79566 len = sizeof(ktime);
79567 old_fs = get_fs();
79568 set_fs(KERNEL_DS);
79569- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
79570+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
79571 set_fs(old_fs);
79572
79573 if (!err) {
79574@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79575 case MCAST_JOIN_GROUP:
79576 case MCAST_LEAVE_GROUP:
79577 {
79578- struct compat_group_req __user *gr32 = (void *)optval;
79579+ struct compat_group_req __user *gr32 = (void __user *)optval;
79580 struct group_req __user *kgr =
79581 compat_alloc_user_space(sizeof(struct group_req));
79582 u32 interface;
79583@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79584 case MCAST_BLOCK_SOURCE:
79585 case MCAST_UNBLOCK_SOURCE:
79586 {
79587- struct compat_group_source_req __user *gsr32 = (void *)optval;
79588+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
79589 struct group_source_req __user *kgsr = compat_alloc_user_space(
79590 sizeof(struct group_source_req));
79591 u32 interface;
79592@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79593 }
79594 case MCAST_MSFILTER:
79595 {
79596- struct compat_group_filter __user *gf32 = (void *)optval;
79597+ struct compat_group_filter __user *gf32 = (void __user *)optval;
79598 struct group_filter __user *kgf;
79599 u32 interface, fmode, numsrc;
79600
79601diff --git a/net/core/dev.c b/net/core/dev.c
79602index 84a0705..575db4c 100644
79603--- a/net/core/dev.c
79604+++ b/net/core/dev.c
79605@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
79606 if (no_module && capable(CAP_NET_ADMIN))
79607 no_module = request_module("netdev-%s", name);
79608 if (no_module && capable(CAP_SYS_MODULE)) {
79609+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79610+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
79611+#else
79612 if (!request_module("%s", name))
79613 pr_err("Loading kernel module for a network device "
79614 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
79615 "instead\n", name);
79616+#endif
79617 }
79618 }
79619 EXPORT_SYMBOL(dev_load);
79620@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
79621
79622 struct dev_gso_cb {
79623 void (*destructor)(struct sk_buff *skb);
79624-};
79625+} __no_const;
79626
79627 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
79628
79629@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
79630 }
79631 EXPORT_SYMBOL(netif_rx_ni);
79632
79633-static void net_tx_action(struct softirq_action *h)
79634+static void net_tx_action(void)
79635 {
79636 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79637
79638@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
79639 EXPORT_SYMBOL(netif_napi_del);
79640
79641
79642-static void net_rx_action(struct softirq_action *h)
79643+static void net_rx_action(void)
79644 {
79645 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
79646 unsigned long time_limit = jiffies + 2;
79647diff --git a/net/core/flow.c b/net/core/flow.c
79648index 9601587..8c4824e 100644
79649--- a/net/core/flow.c
79650+++ b/net/core/flow.c
79651@@ -35,11 +35,11 @@ struct flow_cache_entry {
79652 atomic_t *object_ref;
79653 };
79654
79655-atomic_t flow_cache_genid = ATOMIC_INIT(0);
79656+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
79657
79658 static u32 flow_hash_shift;
79659 #define flow_hash_size (1 << flow_hash_shift)
79660-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
79661+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
79662
79663 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
79664
79665@@ -52,7 +52,7 @@ struct flow_percpu_info {
79666 u32 hash_rnd;
79667 int count;
79668 };
79669-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
79670+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
79671
79672 #define flow_hash_rnd_recalc(cpu) \
79673 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
79674@@ -69,7 +69,7 @@ struct flow_flush_info {
79675 atomic_t cpuleft;
79676 struct completion completion;
79677 };
79678-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
79679+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
79680
79681 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
79682
79683@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
79684 if (fle->family == family &&
79685 fle->dir == dir &&
79686 flow_key_compare(key, &fle->key) == 0) {
79687- if (fle->genid == atomic_read(&flow_cache_genid)) {
79688+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
79689 void *ret = fle->object;
79690
79691 if (ret)
79692@@ -228,7 +228,7 @@ nocache:
79693 err = resolver(net, key, family, dir, &obj, &obj_ref);
79694
79695 if (fle && !err) {
79696- fle->genid = atomic_read(&flow_cache_genid);
79697+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
79698
79699 if (fle->object)
79700 atomic_dec(fle->object_ref);
79701@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
79702
79703 fle = flow_table(cpu)[i];
79704 for (; fle; fle = fle->next) {
79705- unsigned genid = atomic_read(&flow_cache_genid);
79706+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
79707
79708 if (!fle->object || fle->genid == genid)
79709 continue;
79710diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
79711index d4fd895..ac9b1e6 100644
79712--- a/net/core/rtnetlink.c
79713+++ b/net/core/rtnetlink.c
79714@@ -57,7 +57,7 @@ struct rtnl_link
79715 {
79716 rtnl_doit_func doit;
79717 rtnl_dumpit_func dumpit;
79718-};
79719+} __no_const;
79720
79721 static DEFINE_MUTEX(rtnl_mutex);
79722
79723diff --git a/net/core/scm.c b/net/core/scm.c
79724index d98eafc..1a190a9 100644
79725--- a/net/core/scm.c
79726+++ b/net/core/scm.c
79727@@ -191,7 +191,7 @@ error:
79728 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79729 {
79730 struct cmsghdr __user *cm
79731- = (__force struct cmsghdr __user *)msg->msg_control;
79732+ = (struct cmsghdr __force_user *)msg->msg_control;
79733 struct cmsghdr cmhdr;
79734 int cmlen = CMSG_LEN(len);
79735 int err;
79736@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79737 err = -EFAULT;
79738 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
79739 goto out;
79740- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
79741+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
79742 goto out;
79743 cmlen = CMSG_SPACE(len);
79744 if (msg->msg_controllen < cmlen)
79745@@ -229,7 +229,7 @@ out:
79746 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79747 {
79748 struct cmsghdr __user *cm
79749- = (__force struct cmsghdr __user*)msg->msg_control;
79750+ = (struct cmsghdr __force_user *)msg->msg_control;
79751
79752 int fdmax = 0;
79753 int fdnum = scm->fp->count;
79754@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79755 if (fdnum < fdmax)
79756 fdmax = fdnum;
79757
79758- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
79759+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
79760 i++, cmfptr++)
79761 {
79762 int new_fd;
79763diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
79764index 45329d7..626aaa6 100644
79765--- a/net/core/secure_seq.c
79766+++ b/net/core/secure_seq.c
79767@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
79768 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
79769
79770 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79771- __be16 dport)
79772+ __be16 dport)
79773 {
79774 u32 secret[MD5_MESSAGE_BYTES / 4];
79775 u32 hash[MD5_DIGEST_WORDS];
79776@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79777 secret[i] = net_secret[i];
79778
79779 md5_transform(hash, secret);
79780-
79781 return hash[0];
79782 }
79783 #endif
79784diff --git a/net/core/skbuff.c b/net/core/skbuff.c
79785index a807f8c..65f906f 100644
79786--- a/net/core/skbuff.c
79787+++ b/net/core/skbuff.c
79788@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
79789 struct sk_buff *frag_iter;
79790 struct sock *sk = skb->sk;
79791
79792+ pax_track_stack();
79793+
79794 /*
79795 * __skb_splice_bits() only fails if the output has no room left,
79796 * so no point in going over the frag_list for the error case.
79797diff --git a/net/core/sock.c b/net/core/sock.c
79798index 6605e75..3acebda 100644
79799--- a/net/core/sock.c
79800+++ b/net/core/sock.c
79801@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79802 break;
79803
79804 case SO_PEERCRED:
79805+ {
79806+ struct ucred peercred;
79807 if (len > sizeof(sk->sk_peercred))
79808 len = sizeof(sk->sk_peercred);
79809- if (copy_to_user(optval, &sk->sk_peercred, len))
79810+ peercred = sk->sk_peercred;
79811+ if (copy_to_user(optval, &peercred, len))
79812 return -EFAULT;
79813 goto lenout;
79814+ }
79815
79816 case SO_PEERNAME:
79817 {
79818@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79819 */
79820 smp_wmb();
79821 atomic_set(&sk->sk_refcnt, 1);
79822- atomic_set(&sk->sk_drops, 0);
79823+ atomic_set_unchecked(&sk->sk_drops, 0);
79824 }
79825 EXPORT_SYMBOL(sock_init_data);
79826
79827diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79828index 2036568..c55883d 100644
79829--- a/net/decnet/sysctl_net_decnet.c
79830+++ b/net/decnet/sysctl_net_decnet.c
79831@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79832
79833 if (len > *lenp) len = *lenp;
79834
79835- if (copy_to_user(buffer, addr, len))
79836+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79837 return -EFAULT;
79838
79839 *lenp = len;
79840@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79841
79842 if (len > *lenp) len = *lenp;
79843
79844- if (copy_to_user(buffer, devname, len))
79845+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79846 return -EFAULT;
79847
79848 *lenp = len;
79849diff --git a/net/econet/Kconfig b/net/econet/Kconfig
79850index 39a2d29..f39c0fe 100644
79851--- a/net/econet/Kconfig
79852+++ b/net/econet/Kconfig
79853@@ -4,7 +4,7 @@
79854
79855 config ECONET
79856 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
79857- depends on EXPERIMENTAL && INET
79858+ depends on EXPERIMENTAL && INET && BROKEN
79859 ---help---
79860 Econet is a fairly old and slow networking protocol mainly used by
79861 Acorn computers to access file and print servers. It uses native
79862diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
79863index a413b1b..380849c 100644
79864--- a/net/ieee802154/dgram.c
79865+++ b/net/ieee802154/dgram.c
79866@@ -318,7 +318,7 @@ out:
79867 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
79868 {
79869 if (sock_queue_rcv_skb(sk, skb) < 0) {
79870- atomic_inc(&sk->sk_drops);
79871+ atomic_inc_unchecked(&sk->sk_drops);
79872 kfree_skb(skb);
79873 return NET_RX_DROP;
79874 }
79875diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
79876index 30e74ee..bfc6ee0 100644
79877--- a/net/ieee802154/raw.c
79878+++ b/net/ieee802154/raw.c
79879@@ -206,7 +206,7 @@ out:
79880 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79881 {
79882 if (sock_queue_rcv_skb(sk, skb) < 0) {
79883- atomic_inc(&sk->sk_drops);
79884+ atomic_inc_unchecked(&sk->sk_drops);
79885 kfree_skb(skb);
79886 return NET_RX_DROP;
79887 }
79888diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
79889index dba56d2..acee5d6 100644
79890--- a/net/ipv4/inet_diag.c
79891+++ b/net/ipv4/inet_diag.c
79892@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
79893 r->idiag_retrans = 0;
79894
79895 r->id.idiag_if = sk->sk_bound_dev_if;
79896+#ifdef CONFIG_GRKERNSEC_HIDESYM
79897+ r->id.idiag_cookie[0] = 0;
79898+ r->id.idiag_cookie[1] = 0;
79899+#else
79900 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
79901 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79902+#endif
79903
79904 r->id.idiag_sport = inet->sport;
79905 r->id.idiag_dport = inet->dport;
79906@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
79907 r->idiag_family = tw->tw_family;
79908 r->idiag_retrans = 0;
79909 r->id.idiag_if = tw->tw_bound_dev_if;
79910+
79911+#ifdef CONFIG_GRKERNSEC_HIDESYM
79912+ r->id.idiag_cookie[0] = 0;
79913+ r->id.idiag_cookie[1] = 0;
79914+#else
79915 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
79916 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
79917+#endif
79918+
79919 r->id.idiag_sport = tw->tw_sport;
79920 r->id.idiag_dport = tw->tw_dport;
79921 r->id.idiag_src[0] = tw->tw_rcv_saddr;
79922@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
79923 if (sk == NULL)
79924 goto unlock;
79925
79926+#ifndef CONFIG_GRKERNSEC_HIDESYM
79927 err = -ESTALE;
79928 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
79929 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
79930 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
79931 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
79932 goto out;
79933+#endif
79934
79935 err = -ENOMEM;
79936 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
79937@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
79938 r->idiag_retrans = req->retrans;
79939
79940 r->id.idiag_if = sk->sk_bound_dev_if;
79941+
79942+#ifdef CONFIG_GRKERNSEC_HIDESYM
79943+ r->id.idiag_cookie[0] = 0;
79944+ r->id.idiag_cookie[1] = 0;
79945+#else
79946 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
79947 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
79948+#endif
79949
79950 tmo = req->expires - jiffies;
79951 if (tmo < 0)
79952diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79953index d717267..56de7e7 100644
79954--- a/net/ipv4/inet_hashtables.c
79955+++ b/net/ipv4/inet_hashtables.c
79956@@ -18,12 +18,15 @@
79957 #include <linux/sched.h>
79958 #include <linux/slab.h>
79959 #include <linux/wait.h>
79960+#include <linux/security.h>
79961
79962 #include <net/inet_connection_sock.h>
79963 #include <net/inet_hashtables.h>
79964 #include <net/secure_seq.h>
79965 #include <net/ip.h>
79966
79967+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79968+
79969 /*
79970 * Allocate and initialize a new local port bind bucket.
79971 * The bindhash mutex for snum's hash chain must be held here.
79972@@ -491,6 +494,8 @@ ok:
79973 }
79974 spin_unlock(&head->lock);
79975
79976+ gr_update_task_in_ip_table(current, inet_sk(sk));
79977+
79978 if (tw) {
79979 inet_twsk_deschedule(tw, death_row);
79980 inet_twsk_put(tw);
79981diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79982index 13b229f..6956484 100644
79983--- a/net/ipv4/inetpeer.c
79984+++ b/net/ipv4/inetpeer.c
79985@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79986 struct inet_peer *p, *n;
79987 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
79988
79989+ pax_track_stack();
79990+
79991 /* Look up for the address quickly. */
79992 read_lock_bh(&peer_pool_lock);
79993 p = lookup(daddr, NULL);
79994@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79995 return NULL;
79996 n->v4daddr = daddr;
79997 atomic_set(&n->refcnt, 1);
79998- atomic_set(&n->rid, 0);
79999+ atomic_set_unchecked(&n->rid, 0);
80000 n->ip_id_count = secure_ip_id(daddr);
80001 n->tcp_ts_stamp = 0;
80002
80003diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
80004index d3fe10b..feeafc9 100644
80005--- a/net/ipv4/ip_fragment.c
80006+++ b/net/ipv4/ip_fragment.c
80007@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
80008 return 0;
80009
80010 start = qp->rid;
80011- end = atomic_inc_return(&peer->rid);
80012+ end = atomic_inc_return_unchecked(&peer->rid);
80013 qp->rid = end;
80014
80015 rc = qp->q.fragments && (end - start) > max;
80016diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
80017index e982b5c..f079d75 100644
80018--- a/net/ipv4/ip_sockglue.c
80019+++ b/net/ipv4/ip_sockglue.c
80020@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80021 int val;
80022 int len;
80023
80024+ pax_track_stack();
80025+
80026 if (level != SOL_IP)
80027 return -EOPNOTSUPP;
80028
80029@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80030 if (sk->sk_type != SOCK_STREAM)
80031 return -ENOPROTOOPT;
80032
80033- msg.msg_control = optval;
80034+ msg.msg_control = (void __force_kernel *)optval;
80035 msg.msg_controllen = len;
80036 msg.msg_flags = 0;
80037
80038diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
80039index f8d04c2..c1188f2 100644
80040--- a/net/ipv4/ipconfig.c
80041+++ b/net/ipv4/ipconfig.c
80042@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
80043
80044 mm_segment_t oldfs = get_fs();
80045 set_fs(get_ds());
80046- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80047+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80048 set_fs(oldfs);
80049 return res;
80050 }
80051@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
80052
80053 mm_segment_t oldfs = get_fs();
80054 set_fs(get_ds());
80055- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80056+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80057 set_fs(oldfs);
80058 return res;
80059 }
80060@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
80061
80062 mm_segment_t oldfs = get_fs();
80063 set_fs(get_ds());
80064- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
80065+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
80066 set_fs(oldfs);
80067 return res;
80068 }
80069diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
80070index c8b0cc3..4da5ae2 100644
80071--- a/net/ipv4/netfilter/arp_tables.c
80072+++ b/net/ipv4/netfilter/arp_tables.c
80073@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80074 private = &tmp;
80075 }
80076 #endif
80077+ memset(&info, 0, sizeof(info));
80078 info.valid_hooks = t->valid_hooks;
80079 memcpy(info.hook_entry, private->hook_entry,
80080 sizeof(info.hook_entry));
80081diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
80082index c156db2..e772975 100644
80083--- a/net/ipv4/netfilter/ip_queue.c
80084+++ b/net/ipv4/netfilter/ip_queue.c
80085@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80086
80087 if (v->data_len < sizeof(*user_iph))
80088 return 0;
80089+ if (v->data_len > 65535)
80090+ return -EMSGSIZE;
80091+
80092 diff = v->data_len - e->skb->len;
80093 if (diff < 0) {
80094 if (pskb_trim(e->skb, v->data_len))
80095@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
80096 static inline void
80097 __ipq_rcv_skb(struct sk_buff *skb)
80098 {
80099- int status, type, pid, flags, nlmsglen, skblen;
80100+ int status, type, pid, flags;
80101+ unsigned int nlmsglen, skblen;
80102 struct nlmsghdr *nlh;
80103
80104 skblen = skb->len;
80105diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
80106index 0606db1..02e7e4c 100644
80107--- a/net/ipv4/netfilter/ip_tables.c
80108+++ b/net/ipv4/netfilter/ip_tables.c
80109@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80110 private = &tmp;
80111 }
80112 #endif
80113+ memset(&info, 0, sizeof(info));
80114 info.valid_hooks = t->valid_hooks;
80115 memcpy(info.hook_entry, private->hook_entry,
80116 sizeof(info.hook_entry));
80117diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80118index d9521f6..3c3eb25 100644
80119--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
80120+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80121@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
80122
80123 *len = 0;
80124
80125- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
80126+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
80127 if (*octets == NULL) {
80128 if (net_ratelimit())
80129 printk("OOM in bsalg (%d)\n", __LINE__);
80130diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
80131index ab996f9..3da5f96 100644
80132--- a/net/ipv4/raw.c
80133+++ b/net/ipv4/raw.c
80134@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80135 /* Charge it to the socket. */
80136
80137 if (sock_queue_rcv_skb(sk, skb) < 0) {
80138- atomic_inc(&sk->sk_drops);
80139+ atomic_inc_unchecked(&sk->sk_drops);
80140 kfree_skb(skb);
80141 return NET_RX_DROP;
80142 }
80143@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80144 int raw_rcv(struct sock *sk, struct sk_buff *skb)
80145 {
80146 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
80147- atomic_inc(&sk->sk_drops);
80148+ atomic_inc_unchecked(&sk->sk_drops);
80149 kfree_skb(skb);
80150 return NET_RX_DROP;
80151 }
80152@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
80153
80154 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
80155 {
80156+ struct icmp_filter filter;
80157+
80158+ if (optlen < 0)
80159+ return -EINVAL;
80160 if (optlen > sizeof(struct icmp_filter))
80161 optlen = sizeof(struct icmp_filter);
80162- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
80163+ if (copy_from_user(&filter, optval, optlen))
80164 return -EFAULT;
80165+ raw_sk(sk)->filter = filter;
80166+
80167 return 0;
80168 }
80169
80170 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
80171 {
80172 int len, ret = -EFAULT;
80173+ struct icmp_filter filter;
80174
80175 if (get_user(len, optlen))
80176 goto out;
80177@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
80178 if (len > sizeof(struct icmp_filter))
80179 len = sizeof(struct icmp_filter);
80180 ret = -EFAULT;
80181- if (put_user(len, optlen) ||
80182- copy_to_user(optval, &raw_sk(sk)->filter, len))
80183+ filter = raw_sk(sk)->filter;
80184+ if (put_user(len, optlen) || len > sizeof filter ||
80185+ copy_to_user(optval, &filter, len))
80186 goto out;
80187 ret = 0;
80188 out: return ret;
80189@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80190 sk_wmem_alloc_get(sp),
80191 sk_rmem_alloc_get(sp),
80192 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80193- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80194+ atomic_read(&sp->sk_refcnt),
80195+#ifdef CONFIG_GRKERNSEC_HIDESYM
80196+ NULL,
80197+#else
80198+ sp,
80199+#endif
80200+ atomic_read_unchecked(&sp->sk_drops));
80201 }
80202
80203 static int raw_seq_show(struct seq_file *seq, void *v)
80204diff --git a/net/ipv4/route.c b/net/ipv4/route.c
80205index 58f141b..b759702 100644
80206--- a/net/ipv4/route.c
80207+++ b/net/ipv4/route.c
80208@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
80209
80210 static inline int rt_genid(struct net *net)
80211 {
80212- return atomic_read(&net->ipv4.rt_genid);
80213+ return atomic_read_unchecked(&net->ipv4.rt_genid);
80214 }
80215
80216 #ifdef CONFIG_PROC_FS
80217@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
80218 unsigned char shuffle;
80219
80220 get_random_bytes(&shuffle, sizeof(shuffle));
80221- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
80222+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
80223 }
80224
80225 /*
80226@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
80227
80228 static __net_init int rt_secret_timer_init(struct net *net)
80229 {
80230- atomic_set(&net->ipv4.rt_genid,
80231+ atomic_set_unchecked(&net->ipv4.rt_genid,
80232 (int) ((num_physpages ^ (num_physpages>>8)) ^
80233 (jiffies ^ (jiffies >> 7))));
80234
80235diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
80236index f095659..adc892a 100644
80237--- a/net/ipv4/tcp.c
80238+++ b/net/ipv4/tcp.c
80239@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
80240 int val;
80241 int err = 0;
80242
80243+ pax_track_stack();
80244+
80245 /* This is a string value all the others are int's */
80246 if (optname == TCP_CONGESTION) {
80247 char name[TCP_CA_NAME_MAX];
80248@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
80249 struct tcp_sock *tp = tcp_sk(sk);
80250 int val, len;
80251
80252+ pax_track_stack();
80253+
80254 if (get_user(len, optlen))
80255 return -EFAULT;
80256
80257diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
80258index 6fc7961..33bad4a 100644
80259--- a/net/ipv4/tcp_ipv4.c
80260+++ b/net/ipv4/tcp_ipv4.c
80261@@ -85,6 +85,9 @@
80262 int sysctl_tcp_tw_reuse __read_mostly;
80263 int sysctl_tcp_low_latency __read_mostly;
80264
80265+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80266+extern int grsec_enable_blackhole;
80267+#endif
80268
80269 #ifdef CONFIG_TCP_MD5SIG
80270 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
80271@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
80272 return 0;
80273
80274 reset:
80275+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80276+ if (!grsec_enable_blackhole)
80277+#endif
80278 tcp_v4_send_reset(rsk, skb);
80279 discard:
80280 kfree_skb(skb);
80281@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
80282 TCP_SKB_CB(skb)->sacked = 0;
80283
80284 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80285- if (!sk)
80286+ if (!sk) {
80287+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80288+ ret = 1;
80289+#endif
80290 goto no_tcp_socket;
80291+ }
80292
80293 process:
80294- if (sk->sk_state == TCP_TIME_WAIT)
80295+ if (sk->sk_state == TCP_TIME_WAIT) {
80296+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80297+ ret = 2;
80298+#endif
80299 goto do_time_wait;
80300+ }
80301
80302 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
80303 goto discard_and_relse;
80304@@ -1651,6 +1665,10 @@ no_tcp_socket:
80305 bad_packet:
80306 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80307 } else {
80308+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80309+ if (!grsec_enable_blackhole || (ret == 1 &&
80310+ (skb->dev->flags & IFF_LOOPBACK)))
80311+#endif
80312 tcp_v4_send_reset(NULL, skb);
80313 }
80314
80315@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
80316 0, /* non standard timer */
80317 0, /* open_requests have no inode */
80318 atomic_read(&sk->sk_refcnt),
80319+#ifdef CONFIG_GRKERNSEC_HIDESYM
80320+ NULL,
80321+#else
80322 req,
80323+#endif
80324 len);
80325 }
80326
80327@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
80328 sock_i_uid(sk),
80329 icsk->icsk_probes_out,
80330 sock_i_ino(sk),
80331- atomic_read(&sk->sk_refcnt), sk,
80332+ atomic_read(&sk->sk_refcnt),
80333+#ifdef CONFIG_GRKERNSEC_HIDESYM
80334+ NULL,
80335+#else
80336+ sk,
80337+#endif
80338 jiffies_to_clock_t(icsk->icsk_rto),
80339 jiffies_to_clock_t(icsk->icsk_ack.ato),
80340 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
80341@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
80342 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
80343 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
80344 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80345- atomic_read(&tw->tw_refcnt), tw, len);
80346+ atomic_read(&tw->tw_refcnt),
80347+#ifdef CONFIG_GRKERNSEC_HIDESYM
80348+ NULL,
80349+#else
80350+ tw,
80351+#endif
80352+ len);
80353 }
80354
80355 #define TMPSZ 150
80356diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
80357index 4c03598..e09a8e8 100644
80358--- a/net/ipv4/tcp_minisocks.c
80359+++ b/net/ipv4/tcp_minisocks.c
80360@@ -26,6 +26,10 @@
80361 #include <net/inet_common.h>
80362 #include <net/xfrm.h>
80363
80364+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80365+extern int grsec_enable_blackhole;
80366+#endif
80367+
80368 #ifdef CONFIG_SYSCTL
80369 #define SYNC_INIT 0 /* let the user enable it */
80370 #else
80371@@ -672,6 +676,10 @@ listen_overflow:
80372
80373 embryonic_reset:
80374 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
80375+
80376+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80377+ if (!grsec_enable_blackhole)
80378+#endif
80379 if (!(flg & TCP_FLAG_RST))
80380 req->rsk_ops->send_reset(sk, skb);
80381
80382diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
80383index af83bdf..ec91cb2 100644
80384--- a/net/ipv4/tcp_output.c
80385+++ b/net/ipv4/tcp_output.c
80386@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
80387 __u8 *md5_hash_location;
80388 int mss;
80389
80390+ pax_track_stack();
80391+
80392 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
80393 if (skb == NULL)
80394 return NULL;
80395diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
80396index 59f5b5e..193860f 100644
80397--- a/net/ipv4/tcp_probe.c
80398+++ b/net/ipv4/tcp_probe.c
80399@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
80400 if (cnt + width >= len)
80401 break;
80402
80403- if (copy_to_user(buf + cnt, tbuf, width))
80404+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
80405 return -EFAULT;
80406 cnt += width;
80407 }
80408diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
80409index 57d5501..a9ed13a 100644
80410--- a/net/ipv4/tcp_timer.c
80411+++ b/net/ipv4/tcp_timer.c
80412@@ -21,6 +21,10 @@
80413 #include <linux/module.h>
80414 #include <net/tcp.h>
80415
80416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80417+extern int grsec_lastack_retries;
80418+#endif
80419+
80420 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
80421 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
80422 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
80423@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
80424 }
80425 }
80426
80427+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80428+ if ((sk->sk_state == TCP_LAST_ACK) &&
80429+ (grsec_lastack_retries > 0) &&
80430+ (grsec_lastack_retries < retry_until))
80431+ retry_until = grsec_lastack_retries;
80432+#endif
80433+
80434 if (retransmits_timed_out(sk, retry_until)) {
80435 /* Has it gone just too far? */
80436 tcp_write_err(sk);
80437diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
80438index 0ac8833..58d8c43 100644
80439--- a/net/ipv4/udp.c
80440+++ b/net/ipv4/udp.c
80441@@ -86,6 +86,7 @@
80442 #include <linux/types.h>
80443 #include <linux/fcntl.h>
80444 #include <linux/module.h>
80445+#include <linux/security.h>
80446 #include <linux/socket.h>
80447 #include <linux/sockios.h>
80448 #include <linux/igmp.h>
80449@@ -106,6 +107,10 @@
80450 #include <net/xfrm.h>
80451 #include "udp_impl.h"
80452
80453+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80454+extern int grsec_enable_blackhole;
80455+#endif
80456+
80457 struct udp_table udp_table;
80458 EXPORT_SYMBOL(udp_table);
80459
80460@@ -371,6 +376,9 @@ found:
80461 return s;
80462 }
80463
80464+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
80465+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
80466+
80467 /*
80468 * This routine is called by the ICMP module when it gets some
80469 * sort of error condition. If err < 0 then the socket should
80470@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
80471 dport = usin->sin_port;
80472 if (dport == 0)
80473 return -EINVAL;
80474+
80475+ err = gr_search_udp_sendmsg(sk, usin);
80476+ if (err)
80477+ return err;
80478 } else {
80479 if (sk->sk_state != TCP_ESTABLISHED)
80480 return -EDESTADDRREQ;
80481+
80482+ err = gr_search_udp_sendmsg(sk, NULL);
80483+ if (err)
80484+ return err;
80485+
80486 daddr = inet->daddr;
80487 dport = inet->dport;
80488 /* Open fast path for connected socket.
80489@@ -945,6 +962,10 @@ try_again:
80490 if (!skb)
80491 goto out;
80492
80493+ err = gr_search_udp_recvmsg(sk, skb);
80494+ if (err)
80495+ goto out_free;
80496+
80497 ulen = skb->len - sizeof(struct udphdr);
80498 copied = len;
80499 if (copied > ulen)
80500@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80501 if (rc == -ENOMEM) {
80502 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80503 is_udplite);
80504- atomic_inc(&sk->sk_drops);
80505+ atomic_inc_unchecked(&sk->sk_drops);
80506 }
80507 goto drop;
80508 }
80509@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80510 goto csum_error;
80511
80512 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80513+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80514+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80515+#endif
80516 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80517
80518 /*
80519@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80520 sk_wmem_alloc_get(sp),
80521 sk_rmem_alloc_get(sp),
80522 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80523- atomic_read(&sp->sk_refcnt), sp,
80524- atomic_read(&sp->sk_drops), len);
80525+ atomic_read(&sp->sk_refcnt),
80526+#ifdef CONFIG_GRKERNSEC_HIDESYM
80527+ NULL,
80528+#else
80529+ sp,
80530+#endif
80531+ atomic_read_unchecked(&sp->sk_drops), len);
80532 }
80533
80534 int udp4_seq_show(struct seq_file *seq, void *v)
80535diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80536index 8ac3d09..fc58c5f 100644
80537--- a/net/ipv6/addrconf.c
80538+++ b/net/ipv6/addrconf.c
80539@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80540 p.iph.ihl = 5;
80541 p.iph.protocol = IPPROTO_IPV6;
80542 p.iph.ttl = 64;
80543- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80544+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80545
80546 if (ops->ndo_do_ioctl) {
80547 mm_segment_t oldfs = get_fs();
80548diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
80549index cc4797d..7cfdfcc 100644
80550--- a/net/ipv6/inet6_connection_sock.c
80551+++ b/net/ipv6/inet6_connection_sock.c
80552@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
80553 #ifdef CONFIG_XFRM
80554 {
80555 struct rt6_info *rt = (struct rt6_info *)dst;
80556- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
80557+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
80558 }
80559 #endif
80560 }
80561@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
80562 #ifdef CONFIG_XFRM
80563 if (dst) {
80564 struct rt6_info *rt = (struct rt6_info *)dst;
80565- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
80566+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
80567 sk->sk_dst_cache = NULL;
80568 dst_release(dst);
80569 dst = NULL;
80570diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
80571index 093e9b2..f72cddb 100644
80572--- a/net/ipv6/inet6_hashtables.c
80573+++ b/net/ipv6/inet6_hashtables.c
80574@@ -119,7 +119,7 @@ out:
80575 }
80576 EXPORT_SYMBOL(__inet6_lookup_established);
80577
80578-static int inline compute_score(struct sock *sk, struct net *net,
80579+static inline int compute_score(struct sock *sk, struct net *net,
80580 const unsigned short hnum,
80581 const struct in6_addr *daddr,
80582 const int dif)
80583diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
80584index 4f7aaf6..f7acf45 100644
80585--- a/net/ipv6/ipv6_sockglue.c
80586+++ b/net/ipv6/ipv6_sockglue.c
80587@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
80588 int val, valbool;
80589 int retv = -ENOPROTOOPT;
80590
80591+ pax_track_stack();
80592+
80593 if (optval == NULL)
80594 val=0;
80595 else {
80596@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80597 int len;
80598 int val;
80599
80600+ pax_track_stack();
80601+
80602 if (ip6_mroute_opt(optname))
80603 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
80604
80605@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80606 if (sk->sk_type != SOCK_STREAM)
80607 return -ENOPROTOOPT;
80608
80609- msg.msg_control = optval;
80610+ msg.msg_control = (void __force_kernel *)optval;
80611 msg.msg_controllen = len;
80612 msg.msg_flags = 0;
80613
80614diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
80615index 1cf3f0c..1d4376f 100644
80616--- a/net/ipv6/netfilter/ip6_queue.c
80617+++ b/net/ipv6/netfilter/ip6_queue.c
80618@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80619
80620 if (v->data_len < sizeof(*user_iph))
80621 return 0;
80622+ if (v->data_len > 65535)
80623+ return -EMSGSIZE;
80624+
80625 diff = v->data_len - e->skb->len;
80626 if (diff < 0) {
80627 if (pskb_trim(e->skb, v->data_len))
80628@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
80629 static inline void
80630 __ipq_rcv_skb(struct sk_buff *skb)
80631 {
80632- int status, type, pid, flags, nlmsglen, skblen;
80633+ int status, type, pid, flags;
80634+ unsigned int nlmsglen, skblen;
80635 struct nlmsghdr *nlh;
80636
80637 skblen = skb->len;
80638diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
80639index 78b5a36..7f37433 100644
80640--- a/net/ipv6/netfilter/ip6_tables.c
80641+++ b/net/ipv6/netfilter/ip6_tables.c
80642@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80643 private = &tmp;
80644 }
80645 #endif
80646+ memset(&info, 0, sizeof(info));
80647 info.valid_hooks = t->valid_hooks;
80648 memcpy(info.hook_entry, private->hook_entry,
80649 sizeof(info.hook_entry));
80650diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
80651index 4f24570..b813b34 100644
80652--- a/net/ipv6/raw.c
80653+++ b/net/ipv6/raw.c
80654@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
80655 {
80656 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
80657 skb_checksum_complete(skb)) {
80658- atomic_inc(&sk->sk_drops);
80659+ atomic_inc_unchecked(&sk->sk_drops);
80660 kfree_skb(skb);
80661 return NET_RX_DROP;
80662 }
80663
80664 /* Charge it to the socket. */
80665 if (sock_queue_rcv_skb(sk,skb)<0) {
80666- atomic_inc(&sk->sk_drops);
80667+ atomic_inc_unchecked(&sk->sk_drops);
80668 kfree_skb(skb);
80669 return NET_RX_DROP;
80670 }
80671@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80672 struct raw6_sock *rp = raw6_sk(sk);
80673
80674 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
80675- atomic_inc(&sk->sk_drops);
80676+ atomic_inc_unchecked(&sk->sk_drops);
80677 kfree_skb(skb);
80678 return NET_RX_DROP;
80679 }
80680@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80681
80682 if (inet->hdrincl) {
80683 if (skb_checksum_complete(skb)) {
80684- atomic_inc(&sk->sk_drops);
80685+ atomic_inc_unchecked(&sk->sk_drops);
80686 kfree_skb(skb);
80687 return NET_RX_DROP;
80688 }
80689@@ -518,7 +518,7 @@ csum_copy_err:
80690 as some normal condition.
80691 */
80692 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
80693- atomic_inc(&sk->sk_drops);
80694+ atomic_inc_unchecked(&sk->sk_drops);
80695 goto out;
80696 }
80697
80698@@ -600,7 +600,7 @@ out:
80699 return err;
80700 }
80701
80702-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
80703+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
80704 struct flowi *fl, struct rt6_info *rt,
80705 unsigned int flags)
80706 {
80707@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
80708 u16 proto;
80709 int err;
80710
80711+ pax_track_stack();
80712+
80713 /* Rough check on arithmetic overflow,
80714 better check is made in ip6_append_data().
80715 */
80716@@ -916,12 +918,17 @@ do_confirm:
80717 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
80718 char __user *optval, int optlen)
80719 {
80720+ struct icmp6_filter filter;
80721+
80722 switch (optname) {
80723 case ICMPV6_FILTER:
80724+ if (optlen < 0)
80725+ return -EINVAL;
80726 if (optlen > sizeof(struct icmp6_filter))
80727 optlen = sizeof(struct icmp6_filter);
80728- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
80729+ if (copy_from_user(&filter, optval, optlen))
80730 return -EFAULT;
80731+ raw6_sk(sk)->filter = filter;
80732 return 0;
80733 default:
80734 return -ENOPROTOOPT;
80735@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80736 char __user *optval, int __user *optlen)
80737 {
80738 int len;
80739+ struct icmp6_filter filter;
80740
80741 switch (optname) {
80742 case ICMPV6_FILTER:
80743@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80744 len = sizeof(struct icmp6_filter);
80745 if (put_user(len, optlen))
80746 return -EFAULT;
80747- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
80748+ filter = raw6_sk(sk)->filter;
80749+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
80750 return -EFAULT;
80751 return 0;
80752 default:
80753@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80754 0, 0L, 0,
80755 sock_i_uid(sp), 0,
80756 sock_i_ino(sp),
80757- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80758+ atomic_read(&sp->sk_refcnt),
80759+#ifdef CONFIG_GRKERNSEC_HIDESYM
80760+ NULL,
80761+#else
80762+ sp,
80763+#endif
80764+ atomic_read_unchecked(&sp->sk_drops));
80765 }
80766
80767 static int raw6_seq_show(struct seq_file *seq, void *v)
80768diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80769index faae6df..d4430c1 100644
80770--- a/net/ipv6/tcp_ipv6.c
80771+++ b/net/ipv6/tcp_ipv6.c
80772@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
80773 }
80774 #endif
80775
80776+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80777+extern int grsec_enable_blackhole;
80778+#endif
80779+
80780 static void tcp_v6_hash(struct sock *sk)
80781 {
80782 if (sk->sk_state != TCP_CLOSE) {
80783@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80784 return 0;
80785
80786 reset:
80787+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80788+ if (!grsec_enable_blackhole)
80789+#endif
80790 tcp_v6_send_reset(sk, skb);
80791 discard:
80792 if (opt_skb)
80793@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80794 TCP_SKB_CB(skb)->sacked = 0;
80795
80796 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80797- if (!sk)
80798+ if (!sk) {
80799+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80800+ ret = 1;
80801+#endif
80802 goto no_tcp_socket;
80803+ }
80804
80805 process:
80806- if (sk->sk_state == TCP_TIME_WAIT)
80807+ if (sk->sk_state == TCP_TIME_WAIT) {
80808+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80809+ ret = 2;
80810+#endif
80811 goto do_time_wait;
80812+ }
80813
80814 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
80815 goto discard_and_relse;
80816@@ -1701,6 +1716,10 @@ no_tcp_socket:
80817 bad_packet:
80818 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80819 } else {
80820+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80821+ if (!grsec_enable_blackhole || (ret == 1 &&
80822+ (skb->dev->flags & IFF_LOOPBACK)))
80823+#endif
80824 tcp_v6_send_reset(NULL, skb);
80825 }
80826
80827@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
80828 uid,
80829 0, /* non standard timer */
80830 0, /* open_requests have no inode */
80831- 0, req);
80832+ 0,
80833+#ifdef CONFIG_GRKERNSEC_HIDESYM
80834+ NULL
80835+#else
80836+ req
80837+#endif
80838+ );
80839 }
80840
80841 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80842@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80843 sock_i_uid(sp),
80844 icsk->icsk_probes_out,
80845 sock_i_ino(sp),
80846- atomic_read(&sp->sk_refcnt), sp,
80847+ atomic_read(&sp->sk_refcnt),
80848+#ifdef CONFIG_GRKERNSEC_HIDESYM
80849+ NULL,
80850+#else
80851+ sp,
80852+#endif
80853 jiffies_to_clock_t(icsk->icsk_rto),
80854 jiffies_to_clock_t(icsk->icsk_ack.ato),
80855 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
80856@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
80857 dest->s6_addr32[2], dest->s6_addr32[3], destp,
80858 tw->tw_substate, 0, 0,
80859 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80860- atomic_read(&tw->tw_refcnt), tw);
80861+ atomic_read(&tw->tw_refcnt),
80862+#ifdef CONFIG_GRKERNSEC_HIDESYM
80863+ NULL
80864+#else
80865+ tw
80866+#endif
80867+ );
80868 }
80869
80870 static int tcp6_seq_show(struct seq_file *seq, void *v)
80871diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80872index 9cc6289..052c521 100644
80873--- a/net/ipv6/udp.c
80874+++ b/net/ipv6/udp.c
80875@@ -49,6 +49,10 @@
80876 #include <linux/seq_file.h>
80877 #include "udp_impl.h"
80878
80879+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80880+extern int grsec_enable_blackhole;
80881+#endif
80882+
80883 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80884 {
80885 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80886@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
80887 if (rc == -ENOMEM) {
80888 UDP6_INC_STATS_BH(sock_net(sk),
80889 UDP_MIB_RCVBUFERRORS, is_udplite);
80890- atomic_inc(&sk->sk_drops);
80891+ atomic_inc_unchecked(&sk->sk_drops);
80892 }
80893 goto drop;
80894 }
80895@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80896 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
80897 proto == IPPROTO_UDPLITE);
80898
80899+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80900+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80901+#endif
80902 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
80903
80904 kfree_skb(skb);
80905@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80906 0, 0L, 0,
80907 sock_i_uid(sp), 0,
80908 sock_i_ino(sp),
80909- atomic_read(&sp->sk_refcnt), sp,
80910- atomic_read(&sp->sk_drops));
80911+ atomic_read(&sp->sk_refcnt),
80912+#ifdef CONFIG_GRKERNSEC_HIDESYM
80913+ NULL,
80914+#else
80915+ sp,
80916+#endif
80917+ atomic_read_unchecked(&sp->sk_drops));
80918 }
80919
80920 int udp6_seq_show(struct seq_file *seq, void *v)
80921diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80922index 811984d..11f59b7 100644
80923--- a/net/irda/ircomm/ircomm_tty.c
80924+++ b/net/irda/ircomm/ircomm_tty.c
80925@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80926 add_wait_queue(&self->open_wait, &wait);
80927
80928 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80929- __FILE__,__LINE__, tty->driver->name, self->open_count );
80930+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80931
80932 /* As far as I can see, we protect open_count - Jean II */
80933 spin_lock_irqsave(&self->spinlock, flags);
80934 if (!tty_hung_up_p(filp)) {
80935 extra_count = 1;
80936- self->open_count--;
80937+ local_dec(&self->open_count);
80938 }
80939 spin_unlock_irqrestore(&self->spinlock, flags);
80940- self->blocked_open++;
80941+ local_inc(&self->blocked_open);
80942
80943 while (1) {
80944 if (tty->termios->c_cflag & CBAUD) {
80945@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80946 }
80947
80948 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80949- __FILE__,__LINE__, tty->driver->name, self->open_count );
80950+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80951
80952 schedule();
80953 }
80954@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80955 if (extra_count) {
80956 /* ++ is not atomic, so this should be protected - Jean II */
80957 spin_lock_irqsave(&self->spinlock, flags);
80958- self->open_count++;
80959+ local_inc(&self->open_count);
80960 spin_unlock_irqrestore(&self->spinlock, flags);
80961 }
80962- self->blocked_open--;
80963+ local_dec(&self->blocked_open);
80964
80965 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80966- __FILE__,__LINE__, tty->driver->name, self->open_count);
80967+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
80968
80969 if (!retval)
80970 self->flags |= ASYNC_NORMAL_ACTIVE;
80971@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80972 }
80973 /* ++ is not atomic, so this should be protected - Jean II */
80974 spin_lock_irqsave(&self->spinlock, flags);
80975- self->open_count++;
80976+ local_inc(&self->open_count);
80977
80978 tty->driver_data = self;
80979 self->tty = tty;
80980 spin_unlock_irqrestore(&self->spinlock, flags);
80981
80982 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80983- self->line, self->open_count);
80984+ self->line, local_read(&self->open_count));
80985
80986 /* Not really used by us, but lets do it anyway */
80987 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80988@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80989 return;
80990 }
80991
80992- if ((tty->count == 1) && (self->open_count != 1)) {
80993+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
80994 /*
80995 * Uh, oh. tty->count is 1, which means that the tty
80996 * structure will be freed. state->count should always
80997@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80998 */
80999 IRDA_DEBUG(0, "%s(), bad serial port count; "
81000 "tty->count is 1, state->count is %d\n", __func__ ,
81001- self->open_count);
81002- self->open_count = 1;
81003+ local_read(&self->open_count));
81004+ local_set(&self->open_count, 1);
81005 }
81006
81007- if (--self->open_count < 0) {
81008+ if (local_dec_return(&self->open_count) < 0) {
81009 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
81010- __func__, self->line, self->open_count);
81011- self->open_count = 0;
81012+ __func__, self->line, local_read(&self->open_count));
81013+ local_set(&self->open_count, 0);
81014 }
81015- if (self->open_count) {
81016+ if (local_read(&self->open_count)) {
81017 spin_unlock_irqrestore(&self->spinlock, flags);
81018
81019 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
81020@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81021 tty->closing = 0;
81022 self->tty = NULL;
81023
81024- if (self->blocked_open) {
81025+ if (local_read(&self->blocked_open)) {
81026 if (self->close_delay)
81027 schedule_timeout_interruptible(self->close_delay);
81028 wake_up_interruptible(&self->open_wait);
81029@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
81030 spin_lock_irqsave(&self->spinlock, flags);
81031 self->flags &= ~ASYNC_NORMAL_ACTIVE;
81032 self->tty = NULL;
81033- self->open_count = 0;
81034+ local_set(&self->open_count, 0);
81035 spin_unlock_irqrestore(&self->spinlock, flags);
81036
81037 wake_up_interruptible(&self->open_wait);
81038@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
81039 seq_putc(m, '\n');
81040
81041 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
81042- seq_printf(m, "Open count: %d\n", self->open_count);
81043+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
81044 seq_printf(m, "Max data size: %d\n", self->max_data_size);
81045 seq_printf(m, "Max header size: %d\n", self->max_header_size);
81046
81047diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
81048index bada1b9..f325943 100644
81049--- a/net/iucv/af_iucv.c
81050+++ b/net/iucv/af_iucv.c
81051@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
81052
81053 write_lock_bh(&iucv_sk_list.lock);
81054
81055- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
81056+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81057 while (__iucv_get_sock_by_name(name)) {
81058 sprintf(name, "%08x",
81059- atomic_inc_return(&iucv_sk_list.autobind_name));
81060+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81061 }
81062
81063 write_unlock_bh(&iucv_sk_list.lock);
81064diff --git a/net/key/af_key.c b/net/key/af_key.c
81065index 4e98193..439b449 100644
81066--- a/net/key/af_key.c
81067+++ b/net/key/af_key.c
81068@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
81069 struct xfrm_migrate m[XFRM_MAX_DEPTH];
81070 struct xfrm_kmaddress k;
81071
81072+ pax_track_stack();
81073+
81074 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
81075 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
81076 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
81077@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
81078 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
81079 else
81080 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
81081+#ifdef CONFIG_GRKERNSEC_HIDESYM
81082+ NULL,
81083+#else
81084 s,
81085+#endif
81086 atomic_read(&s->sk_refcnt),
81087 sk_rmem_alloc_get(s),
81088 sk_wmem_alloc_get(s),
81089diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
81090index bda96d1..c038b72 100644
81091--- a/net/lapb/lapb_iface.c
81092+++ b/net/lapb/lapb_iface.c
81093@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
81094 goto out;
81095
81096 lapb->dev = dev;
81097- lapb->callbacks = *callbacks;
81098+ lapb->callbacks = callbacks;
81099
81100 __lapb_insert_cb(lapb);
81101
81102@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
81103
81104 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
81105 {
81106- if (lapb->callbacks.connect_confirmation)
81107- lapb->callbacks.connect_confirmation(lapb->dev, reason);
81108+ if (lapb->callbacks->connect_confirmation)
81109+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
81110 }
81111
81112 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
81113 {
81114- if (lapb->callbacks.connect_indication)
81115- lapb->callbacks.connect_indication(lapb->dev, reason);
81116+ if (lapb->callbacks->connect_indication)
81117+ lapb->callbacks->connect_indication(lapb->dev, reason);
81118 }
81119
81120 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
81121 {
81122- if (lapb->callbacks.disconnect_confirmation)
81123- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
81124+ if (lapb->callbacks->disconnect_confirmation)
81125+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
81126 }
81127
81128 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
81129 {
81130- if (lapb->callbacks.disconnect_indication)
81131- lapb->callbacks.disconnect_indication(lapb->dev, reason);
81132+ if (lapb->callbacks->disconnect_indication)
81133+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
81134 }
81135
81136 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
81137 {
81138- if (lapb->callbacks.data_indication)
81139- return lapb->callbacks.data_indication(lapb->dev, skb);
81140+ if (lapb->callbacks->data_indication)
81141+ return lapb->callbacks->data_indication(lapb->dev, skb);
81142
81143 kfree_skb(skb);
81144 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
81145@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
81146 {
81147 int used = 0;
81148
81149- if (lapb->callbacks.data_transmit) {
81150- lapb->callbacks.data_transmit(lapb->dev, skb);
81151+ if (lapb->callbacks->data_transmit) {
81152+ lapb->callbacks->data_transmit(lapb->dev, skb);
81153 used = 1;
81154 }
81155
81156diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81157index fe2d3f8..e57f683 100644
81158--- a/net/mac80211/cfg.c
81159+++ b/net/mac80211/cfg.c
81160@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
81161 return err;
81162 }
81163
81164-struct cfg80211_ops mac80211_config_ops = {
81165+const struct cfg80211_ops mac80211_config_ops = {
81166 .add_virtual_intf = ieee80211_add_iface,
81167 .del_virtual_intf = ieee80211_del_iface,
81168 .change_virtual_intf = ieee80211_change_iface,
81169diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
81170index 7d7879f..2d51f62 100644
81171--- a/net/mac80211/cfg.h
81172+++ b/net/mac80211/cfg.h
81173@@ -4,6 +4,6 @@
81174 #ifndef __CFG_H
81175 #define __CFG_H
81176
81177-extern struct cfg80211_ops mac80211_config_ops;
81178+extern const struct cfg80211_ops mac80211_config_ops;
81179
81180 #endif /* __CFG_H */
81181diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
81182index 99c7525..9cb4937 100644
81183--- a/net/mac80211/debugfs_key.c
81184+++ b/net/mac80211/debugfs_key.c
81185@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
81186 size_t count, loff_t *ppos)
81187 {
81188 struct ieee80211_key *key = file->private_data;
81189- int i, res, bufsize = 2 * key->conf.keylen + 2;
81190+ int i, bufsize = 2 * key->conf.keylen + 2;
81191 char *buf = kmalloc(bufsize, GFP_KERNEL);
81192 char *p = buf;
81193+ ssize_t res;
81194+
81195+ if (buf == NULL)
81196+ return -ENOMEM;
81197
81198 for (i = 0; i < key->conf.keylen; i++)
81199 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
81200diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
81201index 33a2e89..08650c8 100644
81202--- a/net/mac80211/debugfs_sta.c
81203+++ b/net/mac80211/debugfs_sta.c
81204@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
81205 int i;
81206 struct sta_info *sta = file->private_data;
81207
81208+ pax_track_stack();
81209+
81210 spin_lock_bh(&sta->lock);
81211 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
81212 sta->ampdu_mlme.dialog_token_allocator + 1);
81213diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81214index ca62bfe..6657a03 100644
81215--- a/net/mac80211/ieee80211_i.h
81216+++ b/net/mac80211/ieee80211_i.h
81217@@ -25,6 +25,7 @@
81218 #include <linux/etherdevice.h>
81219 #include <net/cfg80211.h>
81220 #include <net/mac80211.h>
81221+#include <asm/local.h>
81222 #include "key.h"
81223 #include "sta_info.h"
81224
81225@@ -635,7 +636,7 @@ struct ieee80211_local {
81226 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81227 spinlock_t queue_stop_reason_lock;
81228
81229- int open_count;
81230+ local_t open_count;
81231 int monitors, cooked_mntrs;
81232 /* number of interfaces with corresponding FIF_ flags */
81233 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
81234diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81235index 079c500..eb3c6d4 100644
81236--- a/net/mac80211/iface.c
81237+++ b/net/mac80211/iface.c
81238@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
81239 break;
81240 }
81241
81242- if (local->open_count == 0) {
81243+ if (local_read(&local->open_count) == 0) {
81244 res = drv_start(local);
81245 if (res)
81246 goto err_del_bss;
81247@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
81248 * Validate the MAC address for this device.
81249 */
81250 if (!is_valid_ether_addr(dev->dev_addr)) {
81251- if (!local->open_count)
81252+ if (!local_read(&local->open_count))
81253 drv_stop(local);
81254 return -EADDRNOTAVAIL;
81255 }
81256@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
81257
81258 hw_reconf_flags |= __ieee80211_recalc_idle(local);
81259
81260- local->open_count++;
81261+ local_inc(&local->open_count);
81262 if (hw_reconf_flags) {
81263 ieee80211_hw_config(local, hw_reconf_flags);
81264 /*
81265@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
81266 err_del_interface:
81267 drv_remove_interface(local, &conf);
81268 err_stop:
81269- if (!local->open_count)
81270+ if (!local_read(&local->open_count))
81271 drv_stop(local);
81272 err_del_bss:
81273 sdata->bss = NULL;
81274@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
81275 WARN_ON(!list_empty(&sdata->u.ap.vlans));
81276 }
81277
81278- local->open_count--;
81279+ local_dec(&local->open_count);
81280
81281 switch (sdata->vif.type) {
81282 case NL80211_IFTYPE_AP_VLAN:
81283@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
81284
81285 ieee80211_recalc_ps(local, -1);
81286
81287- if (local->open_count == 0) {
81288+ if (local_read(&local->open_count) == 0) {
81289 ieee80211_clear_tx_pending(local);
81290 ieee80211_stop_device(local);
81291
81292diff --git a/net/mac80211/main.c b/net/mac80211/main.c
81293index 2dfe176..74e4388 100644
81294--- a/net/mac80211/main.c
81295+++ b/net/mac80211/main.c
81296@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
81297 local->hw.conf.power_level = power;
81298 }
81299
81300- if (changed && local->open_count) {
81301+ if (changed && local_read(&local->open_count)) {
81302 ret = drv_config(local, changed);
81303 /*
81304 * Goal:
81305diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
81306index e67eea7..fcc227e 100644
81307--- a/net/mac80211/mlme.c
81308+++ b/net/mac80211/mlme.c
81309@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
81310 bool have_higher_than_11mbit = false, newsta = false;
81311 u16 ap_ht_cap_flags;
81312
81313+ pax_track_stack();
81314+
81315 /*
81316 * AssocResp and ReassocResp have identical structure, so process both
81317 * of them in this function.
81318diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
81319index e535f1c..4d733d1 100644
81320--- a/net/mac80211/pm.c
81321+++ b/net/mac80211/pm.c
81322@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
81323 }
81324
81325 /* stop hardware - this must stop RX */
81326- if (local->open_count)
81327+ if (local_read(&local->open_count))
81328 ieee80211_stop_device(local);
81329
81330 local->suspended = true;
81331diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
81332index b33efc4..0a2efb6 100644
81333--- a/net/mac80211/rate.c
81334+++ b/net/mac80211/rate.c
81335@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
81336 struct rate_control_ref *ref, *old;
81337
81338 ASSERT_RTNL();
81339- if (local->open_count)
81340+ if (local_read(&local->open_count))
81341 return -EBUSY;
81342
81343 ref = rate_control_alloc(name, local);
81344diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
81345index b1d7904..57e4da7 100644
81346--- a/net/mac80211/tx.c
81347+++ b/net/mac80211/tx.c
81348@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
81349 return cpu_to_le16(dur);
81350 }
81351
81352-static int inline is_ieee80211_device(struct ieee80211_local *local,
81353+static inline int is_ieee80211_device(struct ieee80211_local *local,
81354 struct net_device *dev)
81355 {
81356 return local == wdev_priv(dev->ieee80211_ptr);
81357diff --git a/net/mac80211/util.c b/net/mac80211/util.c
81358index 31b1085..48fb26d 100644
81359--- a/net/mac80211/util.c
81360+++ b/net/mac80211/util.c
81361@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
81362 local->resuming = true;
81363
81364 /* restart hardware */
81365- if (local->open_count) {
81366+ if (local_read(&local->open_count)) {
81367 /*
81368 * Upon resume hardware can sometimes be goofy due to
81369 * various platform / driver / bus issues, so restarting
81370diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
81371index 634d14a..b35a608 100644
81372--- a/net/netfilter/Kconfig
81373+++ b/net/netfilter/Kconfig
81374@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
81375
81376 To compile it as a module, choose M here. If unsure, say N.
81377
81378+config NETFILTER_XT_MATCH_GRADM
81379+ tristate '"gradm" match support'
81380+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
81381+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
81382+ ---help---
81383+ The gradm match allows to match on grsecurity RBAC being enabled.
81384+ It is useful when iptables rules are applied early on bootup to
81385+ prevent connections to the machine (except from a trusted host)
81386+ while the RBAC system is disabled.
81387+
81388 config NETFILTER_XT_MATCH_HASHLIMIT
81389 tristate '"hashlimit" match support'
81390 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
81391diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
81392index 49f62ee..a17b2c6 100644
81393--- a/net/netfilter/Makefile
81394+++ b/net/netfilter/Makefile
81395@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
81396 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
81397 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
81398 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
81399+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
81400 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
81401 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
81402 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
81403diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
81404index 3c7e427..724043c 100644
81405--- a/net/netfilter/ipvs/ip_vs_app.c
81406+++ b/net/netfilter/ipvs/ip_vs_app.c
81407@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
81408 .open = ip_vs_app_open,
81409 .read = seq_read,
81410 .llseek = seq_lseek,
81411- .release = seq_release,
81412+ .release = seq_release_net,
81413 };
81414 #endif
81415
81416diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
81417index 95682e5..457dbac 100644
81418--- a/net/netfilter/ipvs/ip_vs_conn.c
81419+++ b/net/netfilter/ipvs/ip_vs_conn.c
81420@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
81421 /* if the connection is not template and is created
81422 * by sync, preserve the activity flag.
81423 */
81424- cp->flags |= atomic_read(&dest->conn_flags) &
81425+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
81426 (~IP_VS_CONN_F_INACTIVE);
81427 else
81428- cp->flags |= atomic_read(&dest->conn_flags);
81429+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
81430 cp->dest = dest;
81431
81432 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
81433@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
81434 atomic_set(&cp->refcnt, 1);
81435
81436 atomic_set(&cp->n_control, 0);
81437- atomic_set(&cp->in_pkts, 0);
81438+ atomic_set_unchecked(&cp->in_pkts, 0);
81439
81440 atomic_inc(&ip_vs_conn_count);
81441 if (flags & IP_VS_CONN_F_NO_CPORT)
81442@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
81443 .open = ip_vs_conn_open,
81444 .read = seq_read,
81445 .llseek = seq_lseek,
81446- .release = seq_release,
81447+ .release = seq_release_net,
81448 };
81449
81450 static const char *ip_vs_origin_name(unsigned flags)
81451@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
81452 .open = ip_vs_conn_sync_open,
81453 .read = seq_read,
81454 .llseek = seq_lseek,
81455- .release = seq_release,
81456+ .release = seq_release_net,
81457 };
81458
81459 #endif
81460@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
81461
81462 /* Don't drop the entry if its number of incoming packets is not
81463 located in [0, 8] */
81464- i = atomic_read(&cp->in_pkts);
81465+ i = atomic_read_unchecked(&cp->in_pkts);
81466 if (i > 8 || i < 0) return 0;
81467
81468 if (!todrop_rate[i]) return 0;
81469diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
81470index b95699f..5fee919 100644
81471--- a/net/netfilter/ipvs/ip_vs_core.c
81472+++ b/net/netfilter/ipvs/ip_vs_core.c
81473@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
81474 ret = cp->packet_xmit(skb, cp, pp);
81475 /* do not touch skb anymore */
81476
81477- atomic_inc(&cp->in_pkts);
81478+ atomic_inc_unchecked(&cp->in_pkts);
81479 ip_vs_conn_put(cp);
81480 return ret;
81481 }
81482@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
81483 * Sync connection if it is about to close to
81484 * encorage the standby servers to update the connections timeout
81485 */
81486- pkts = atomic_add_return(1, &cp->in_pkts);
81487+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
81488 if (af == AF_INET &&
81489 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
81490 (((cp->protocol != IPPROTO_TCP ||
81491diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
81492index 02b2610..2d89424 100644
81493--- a/net/netfilter/ipvs/ip_vs_ctl.c
81494+++ b/net/netfilter/ipvs/ip_vs_ctl.c
81495@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
81496 ip_vs_rs_hash(dest);
81497 write_unlock_bh(&__ip_vs_rs_lock);
81498 }
81499- atomic_set(&dest->conn_flags, conn_flags);
81500+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
81501
81502 /* bind the service */
81503 if (!dest->svc) {
81504@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81505 " %-7s %-6d %-10d %-10d\n",
81506 &dest->addr.in6,
81507 ntohs(dest->port),
81508- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81509+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81510 atomic_read(&dest->weight),
81511 atomic_read(&dest->activeconns),
81512 atomic_read(&dest->inactconns));
81513@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81514 "%-7s %-6d %-10d %-10d\n",
81515 ntohl(dest->addr.ip),
81516 ntohs(dest->port),
81517- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81518+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81519 atomic_read(&dest->weight),
81520 atomic_read(&dest->activeconns),
81521 atomic_read(&dest->inactconns));
81522@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
81523 .open = ip_vs_info_open,
81524 .read = seq_read,
81525 .llseek = seq_lseek,
81526- .release = seq_release_private,
81527+ .release = seq_release_net,
81528 };
81529
81530 #endif
81531@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
81532 .open = ip_vs_stats_seq_open,
81533 .read = seq_read,
81534 .llseek = seq_lseek,
81535- .release = single_release,
81536+ .release = single_release_net,
81537 };
81538
81539 #endif
81540@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
81541
81542 entry.addr = dest->addr.ip;
81543 entry.port = dest->port;
81544- entry.conn_flags = atomic_read(&dest->conn_flags);
81545+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
81546 entry.weight = atomic_read(&dest->weight);
81547 entry.u_threshold = dest->u_threshold;
81548 entry.l_threshold = dest->l_threshold;
81549@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81550 unsigned char arg[128];
81551 int ret = 0;
81552
81553+ pax_track_stack();
81554+
81555 if (!capable(CAP_NET_ADMIN))
81556 return -EPERM;
81557
81558@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
81559 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
81560
81561 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
81562- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81563+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81564 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
81565 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
81566 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
81567diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
81568index e177f0d..55e8581 100644
81569--- a/net/netfilter/ipvs/ip_vs_sync.c
81570+++ b/net/netfilter/ipvs/ip_vs_sync.c
81571@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
81572
81573 if (opt)
81574 memcpy(&cp->in_seq, opt, sizeof(*opt));
81575- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81576+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81577 cp->state = state;
81578 cp->old_state = cp->state;
81579 /*
81580diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
81581index 30b3189..e2e4b55 100644
81582--- a/net/netfilter/ipvs/ip_vs_xmit.c
81583+++ b/net/netfilter/ipvs/ip_vs_xmit.c
81584@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
81585 else
81586 rc = NF_ACCEPT;
81587 /* do not touch skb anymore */
81588- atomic_inc(&cp->in_pkts);
81589+ atomic_inc_unchecked(&cp->in_pkts);
81590 goto out;
81591 }
81592
81593@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
81594 else
81595 rc = NF_ACCEPT;
81596 /* do not touch skb anymore */
81597- atomic_inc(&cp->in_pkts);
81598+ atomic_inc_unchecked(&cp->in_pkts);
81599 goto out;
81600 }
81601
81602diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
81603index d521718..d0fd7a1 100644
81604--- a/net/netfilter/nf_conntrack_netlink.c
81605+++ b/net/netfilter/nf_conntrack_netlink.c
81606@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
81607 static int
81608 ctnetlink_parse_tuple(const struct nlattr * const cda[],
81609 struct nf_conntrack_tuple *tuple,
81610- enum ctattr_tuple type, u_int8_t l3num)
81611+ enum ctattr_type type, u_int8_t l3num)
81612 {
81613 struct nlattr *tb[CTA_TUPLE_MAX+1];
81614 int err;
81615diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
81616index f900dc3..5e45346 100644
81617--- a/net/netfilter/nfnetlink_log.c
81618+++ b/net/netfilter/nfnetlink_log.c
81619@@ -68,7 +68,7 @@ struct nfulnl_instance {
81620 };
81621
81622 static DEFINE_RWLOCK(instances_lock);
81623-static atomic_t global_seq;
81624+static atomic_unchecked_t global_seq;
81625
81626 #define INSTANCE_BUCKETS 16
81627 static struct hlist_head instance_table[INSTANCE_BUCKETS];
81628@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
81629 /* global sequence number */
81630 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
81631 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
81632- htonl(atomic_inc_return(&global_seq)));
81633+ htonl(atomic_inc_return_unchecked(&global_seq)));
81634
81635 if (data_len) {
81636 struct nlattr *nla;
81637diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
81638new file mode 100644
81639index 0000000..b1bac76
81640--- /dev/null
81641+++ b/net/netfilter/xt_gradm.c
81642@@ -0,0 +1,51 @@
81643+/*
81644+ * gradm match for netfilter
81645